code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9
values | license stringclasses 15
values | size int32 3 1.05M |
|---|---|---|---|---|---|
module ExercismWeb
  module Routes
    # Routes for a user's public profile and for jumping from a
    # user/exercise pair to its latest submission.
    class User < Core
      # User's profile page.
      # This is going to get a serious overhaul.
      get '/:username' do |username|
        please_login
        found = ::User.find_by_username(username)
        if found.nil?
          status 404
          erb :"errors/not_found"
        else
          title(found.username)
          erb :"user/show", locals: { profile: Presenters::Profile.new(found, current_user) }
        end
      end

      # linked to from the /looks page
      get '/:username/:key' do |username, key|
        please_login
        owner = ::User.find_by_username(username)
        unless owner
          flash[:notice] = "Couldn't find that user."
          redirect '/'
        end

        exercise = owner.exercises.find_by_key(key)
        unless exercise
          flash[:notice] = "Couldn't find that exercise."
          redirect '/dashboard'
        end

        if exercise.submissions.empty?
          # We have orphan exercises at the moment.
          flash[:notice] = "That submission no longer exists."
          redirect '/dashboard'
        end

        # Sinatra's redirect halts the request, so reaching this line means
        # every guard above passed.
        redirect "/submissions/%s" % exercise.submissions.last.key
      end
    end
  end
end
| nathanbwright/exercism.io | app/routes/user.rb | Ruby | agpl-3.0 | 1,214 |
<?php
// Minimal error endpoint: answer with a bare 404 status and stop at once.
header("HTTP/1.0 404 Not Found");
exit;
| ratliff/server | alpha/web/error.php | PHP | agpl-3.0 | 45 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import glob
import collections
import pandas
import numpy as np
class ConvergencePlot(object):
    """
    A tool for making log-log convergence plots.

    The first column of the DataFrame given to plot() is used as the x data
    (e.g., dofs); every remaining column is plotted as a separate y series
    (e.g., L2_error) and annotated in the legend with its fitted log-log slope.

    Key, value Options:
        xlabel[str]: The label for the x-axis
        ylabel[str]: The label for the y-axis
        fontsize[int]: Font size used for axis labels and tick labels
        fit[bool]: Accepted for API compatibility; not referenced in this class
    """
    # Lightweight record for an (x, y, label) triple; part of the public
    # interface, not used internally in this class.
    Line = collections.namedtuple('Line', 'x y label')

    def __init__(self, xlabel='x', ylabel='y', fontsize=12, fit=True):
        self._figure = plt.figure(figsize=(10,6), facecolor='w')
        self._axes = plt.gca()
        # On log-log axes a power-law error curve is a straight line whose
        # slope is the convergence rate.
        self._axes.set_yscale('log')
        self._axes.set_xscale('log')

        # Add axis labels
        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)

        # Adjust tick mark fonts
        # NOTE(review): tick.label was removed in newer matplotlib releases
        # (use tick.label1 there) — confirm the targeted matplotlib version.
        for tick in self._axes.xaxis.get_major_ticks() + self._axes.yaxis.get_major_ticks():
            tick.label.set_fontsize(fontsize)

        # Apply grid marks
        plt.grid(True, which='both', color=[0.8]*3)

        # label -> fitted slope / intercept (log10 space), populated by plot().
        self.label_to_slope = {}
        self.label_to_intercept = {}

    def plot(self, df, label=None, title=None, num_fitted_points=None, slope_precision=3, **kwargs):
        """
        Plot every y column of df against its first column.

        Args:
            df: data frame; first column is x, remaining columns are y series.
            label[str|list]: legend label(s); with more than one y column this
                must be a list with one entry per y column.
            title[str]: optional axes title.
            num_fitted_points[int]: fit the slope using only the last N points.
                NOTE(review): relies on tail slicing x[-N:] — presumably the
                frame has a default positional index; confirm with callers.
            slope_precision[int]: decimal places of the slope in the legend.
            **kwargs: forwarded to the underlying plot() call.

        Returns:
            list of plotted line objects, one per y column.

        Raises:
            TypeError: multiple y columns but label is not a list.
            IOError: label list length does not match the y column count.
        """
        num_y_columns = len(df.columns) - 1
        if label:
            if num_y_columns > 1:
                if not isinstance(label, list):
                    raise TypeError("For multiple y-data label must be a list")
                if isinstance(label, list) and num_y_columns != len(label):
                    raise IOError("The length of the label and the number of y columns must be the same")
            if not isinstance(label, list):
                label = [label]

        x = df[df.columns[0]]
        lines = []
        for i in range(1,len(df.columns)):
            y = df[df.columns[i]]
            if label is None:
                this_label = 'line-{}'.format(len(lines))
            else:
                this_label = label[i-1]
            if num_fitted_points is not None:
                coeffs = self._fit(x[-num_fitted_points:], y[-num_fitted_points:])
            else:
                coeffs = self._fit(x, y)
            slope = coeffs[0]
            intercept = coeffs[1]
            # Remember the fit so callers can query rates after plotting.
            self.label_to_slope.update({this_label:slope})
            self.label_to_intercept.update({this_label:intercept})
            # The legend shows "<label>: <slope>".
            this_label = '{}: {:.{precision}f}'.format(this_label, slope, precision=slope_precision)
            lines.append(self._axes.plot(x, y, label=this_label, **kwargs)[0])

        if title:
            self._axes.set_title(title)
        self._axes.legend()
        return lines

    def set_title(self, title):
        # Set (or replace) the axes title.
        self._axes.set_title(title)

    def _fit(self, x, y):
        """
        Least-squares fit of a line to (log10(x), log10(y)).

        Returns:
            array of [slope, intercept]; the slope is the convergence rate
            on the log-log plot.
        """
        # Perform fit
        coefficients = np.polyfit(np.log10(x), np.log10(y), 1)
        return coefficients

    def save(self, filename):
        """
        Save figure to a file.

        Args:
            filename[str]: The destination file.
        """
        plt.savefig(filename)

    def show(self):
        """
        Display the plot.
        """
        plt.show()
| harterj/moose | python/mms/ConvergencePlot.py | Python | lgpl-2.1 | 3,697 |
package org.jboss.hal.testsuite.page.config;
import org.jboss.arquillian.graphene.page.Location;
import org.jboss.hal.testsuite.page.BasePage;
/**
 * @author jcechace
 *
 * This class represents a meta page entry point to the Config part of the console in domain
 * mode. As such it is meant for navigation purposes only and is not intended to be
 * instantiated directly. Also note that the actual landing page is determined by the
 * console and may change in the future.
 */
@Location("#profiles")
public class DomainConfigEntryPoint extends BasePage {
}
| mkrajcov/testsuite | src/main/java/org/jboss/hal/testsuite/page/config/DomainConfigEntryPoint.java | Java | lgpl-2.1 | 533 |
<?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : dev@thelia.net */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Core\Event\Address;
use Thelia\Core\Event\ActionEvent;
use Thelia\Model\Address;
use Thelia\Model\Customer;
/**
 * Class AddressCreateOrUpdateEvent
 *
 * Event payload carrying all the fields needed to create or update a customer
 * address. The target Address and its Customer are attached later via
 * setAddress()/setCustomer() by the listener that processes the event.
 *
 * @package Thelia\Core\Event
 * @author Manuel Raynaud <manu@raynaud.io>
 */
class AddressCreateOrUpdateEvent extends ActionEvent
{
    /**
     * @var string address label
     */
    protected $label;

    /**
     * @var int title id
     */
    protected $title;

    /**
     * @var string|null company name
     */
    protected $company;

    /**
     * @var string first name
     */
    protected $firstname;

    /**
     * @var string last name
     */
    protected $lastname;

    /**
     * @var string address
     */
    protected $address1;

    /**
     * @var string address line 2
     */
    protected $address2;

    /**
     * @var string address line 3
     */
    protected $address3;

    /**
     * @var string zipcode
     */
    protected $zipcode;

    /**
     * @var string city
     */
    protected $city;

    /**
     * @var int country id
     */
    protected $country;

    /**
     * @var string cell phone
     */
    protected $cellphone;

    /**
     * @var string phone
     */
    protected $phone;

    /**
     * @var \Thelia\Model\Customer
     */
    protected $customer;

    /**
     * @var \Thelia\Model\Address
     */
    protected $address;

    /**
     * @var int
     */
    protected $isDefault;

    /**
     * All address fields are passed positionally; keep the argument order in
     * sync with every dispatcher of this event.
     *
     * @param string      $label     address label
     * @param int         $title     customer title id
     * @param string      $firstname first name
     * @param string      $lastname  last name
     * @param string      $address1  address line 1
     * @param string      $address2  address line 2
     * @param string      $address3  address line 3
     * @param string      $zipcode   zip / postal code
     * @param string      $city      city name
     * @param int         $country   country id
     * @param string      $cellphone cell phone number
     * @param string      $phone     phone number
     * @param string|null $company   company name
     * @param int         $isDefault 1 to make this the default address, 0 otherwise
     */
    public function __construct($label, $title, $firstname, $lastname, $address1, $address2, $address3, $zipcode, $city, $country, $cellphone, $phone, $company, $isDefault = 0)
    {
        $this->address1 = $address1;
        $this->address2 = $address2;
        $this->address3 = $address3;
        $this->cellphone = $cellphone;
        $this->city = $city;
        $this->company = $company;
        $this->country = $country;
        $this->firstname = $firstname;
        $this->label = $label;
        $this->lastname = $lastname;
        $this->phone = $phone;
        $this->title = $title;
        $this->zipcode = $zipcode;
        $this->isDefault = $isDefault;
    }

    /**
     * @return string
     */
    public function getAddress1()
    {
        return $this->address1;
    }

    /**
     * @return string
     */
    public function getAddress2()
    {
        return $this->address2;
    }

    /**
     * @return string
     */
    public function getAddress3()
    {
        return $this->address3;
    }

    /**
     * @return string
     */
    public function getCellphone()
    {
        return $this->cellphone;
    }

    /**
     * @return string
     */
    public function getCity()
    {
        return $this->city;
    }

    /**
     * @return null|string
     */
    public function getCompany()
    {
        return $this->company;
    }

    /**
     * @return int
     */
    public function getCountry()
    {
        return $this->country;
    }

    /**
     * @return string
     */
    public function getFirstname()
    {
        return $this->firstname;
    }

    /**
     * @return string
     */
    public function getLabel()
    {
        return $this->label;
    }

    /**
     * @return string
     */
    public function getLastname()
    {
        return $this->lastname;
    }

    /**
     * @return string
     */
    public function getPhone()
    {
        return $this->phone;
    }

    /**
     * @return int
     */
    public function getTitle()
    {
        return $this->title;
    }

    /**
     * @return string
     */
    public function getZipcode()
    {
        return $this->zipcode;
    }

    /**
     * @return int
     */
    public function getIsDefault()
    {
        return $this->isDefault;
    }

    /**
     * @param \Thelia\Model\Customer $customer
     */
    public function setCustomer(Customer $customer)
    {
        $this->customer = $customer;
    }

    /**
     * @return \Thelia\Model\Customer
     */
    public function getCustomer()
    {
        return $this->customer;
    }

    /**
     * Attach the persisted Address; also pulls the address's customer so the
     * two stay consistent on the event.
     *
     * @param \Thelia\Model\Address $address
     */
    public function setAddress(Address $address)
    {
        $this->address = $address;
        $this->setCustomer($address->getCustomer());
    }

    /**
     * @return \Thelia\Model\Address
     */
    public function getAddress()
    {
        return $this->address;
    }
}
| adirkuhn/thelia | core/lib/Thelia/Core/Event/Address/AddressCreateOrUpdateEvent.php | PHP | lgpl-3.0 | 5,242 |
/*
* Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.business.rules.core.datasource.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
/**
 * Cleaning connections, statements and result sets.
 *
 * All close failures are logged and swallowed so that cleanup is always safe
 * to call from finally blocks without masking the original exception.
 */
public class BusinessRuleDatasourceUtils {
    private static Logger log = LoggerFactory.getLogger(BusinessRuleDatasourceUtils.class);

    /**
     * Closes the given JDBC resources in the correct order (result set,
     * statement, connection). Each argument may be null and is then skipped.
     *
     * @param rs   result set to close, may be null
     * @param stmt statement to close, may be null
     * @param conn connection to close, may be null
     */
    public static void cleanupConnection(ResultSet rs, Statement stmt, Connection conn) {
        // Previously the result-set failure was only logged when debug logging
        // was enabled, unlike the other two resources; failures are now
        // reported consistently for all three.
        close(rs, "Failed to close the result set. ");
        close(stmt, "Failed to close the prepared statement. ");
        close(conn, "Failed to close the connection. ");
    }

    /**
     * Closes a single resource, logging (but never propagating) any failure.
     *
     * @param resource resource to close, may be null
     * @param message  error message used if closing fails
     */
    private static void close(AutoCloseable resource, String message) {
        if (resource == null) {
            return;
        }
        try {
            resource.close();
        } catch (Exception e) {
            log.error(message, e);
        }
    }
}
| minudika/carbon-analytics | components/org.wso2.carbon.business.rules.core/src/main/java/org/wso2/carbon/business/rules/core/datasource/util/BusinessRuleDatasourceUtils.java | Java | apache-2.0 | 1,886 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.security.basic.authentication.validator;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.google.inject.Provider;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.security.basic.BasicAuthUtils;
import org.apache.druid.security.basic.BasicSecurityAuthenticationException;
import org.apache.druid.security.basic.authentication.db.cache.BasicAuthenticatorCacheManager;
import org.apache.druid.security.basic.authentication.entity.BasicAuthenticatorCredentials;
import org.apache.druid.security.basic.authentication.entity.BasicAuthenticatorUser;
import org.apache.druid.server.security.AuthenticationResult;

import javax.annotation.Nullable;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.Map;
@JsonTypeName("metadata")
public class MetadataStoreCredentialsValidator implements CredentialsValidator
{
  private static final Logger LOG = new Logger(MetadataStoreCredentialsValidator.class);

  private final Provider<BasicAuthenticatorCacheManager> cacheManager;

  @JsonCreator
  public MetadataStoreCredentialsValidator(
      @JacksonInject Provider<BasicAuthenticatorCacheManager> cacheManager
  )
  {
    this.cacheManager = cacheManager;
  }

  /**
   * Validates a username/password pair against the user map cached from the
   * metadata store for the given authenticator.
   *
   * @return an {@link AuthenticationResult} for a valid user, or null when the
   *         user is unknown or has no stored credentials
   * @throws IAE when no user map exists for the authenticator prefix
   * @throws BasicSecurityAuthenticationException when the password is wrong
   */
  @Override
  @Nullable
  public AuthenticationResult validateCredentials(
      String authenticatorName,
      String authorizerName,
      String username,
      char[] password
  )
  {
    Map<String, BasicAuthenticatorUser> userMap = cacheManager.get().getUserMap(authenticatorName);
    if (userMap == null) {
      throw new IAE("No userMap is available for authenticator with prefix: [%s]", authenticatorName);
    }

    BasicAuthenticatorUser user = userMap.get(username);
    if (user == null) {
      return null;
    }
    BasicAuthenticatorCredentials credentials = user.getCredentials();
    if (credentials == null) {
      return null;
    }

    byte[] recalculatedHash = BasicAuthUtils.hashPassword(
        password,
        credentials.getSalt(),
        credentials.getIterations()
    );

    // Compare the hashes in constant time: Arrays.equals() short-circuits on
    // the first differing byte, which leaks timing information about the
    // stored hash to an attacker probing passwords.
    if (MessageDigest.isEqual(recalculatedHash, credentials.getHash())) {
      return new AuthenticationResult(username, authorizerName, authenticatorName, null);
    } else {
      LOG.debug("Password incorrect for metadata store user %s", username);
      throw new BasicSecurityAuthenticationException("User metadata store authentication failed.");
    }
  }
}
| nishantmonu51/druid | extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authentication/validator/MetadataStoreCredentialsValidator.java | Java | apache-2.0 | 3,346 |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.lang.psi.dataFlow.reachingDefs;
import com.intellij.psi.*;
import com.intellij.psi.util.PsiTreeUtil;
import gnu.trove.TIntHashSet;
import gnu.trove.TIntObjectHashMap;
import gnu.trove.TIntObjectProcedure;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.plugins.groovy.lang.psi.GrControlFlowOwner;
import org.jetbrains.plugins.groovy.lang.psi.GroovyPsiElement;
import org.jetbrains.plugins.groovy.lang.psi.GroovyRecursiveElementVisitor;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.GrField;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.GrStatement;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.GrVariable;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.blocks.GrClosableBlock;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrReferenceExpression;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.typedef.members.GrMember;
import org.jetbrains.plugins.groovy.lang.psi.controlFlow.Instruction;
import org.jetbrains.plugins.groovy.lang.psi.controlFlow.ReadWriteVariableInstruction;
import org.jetbrains.plugins.groovy.lang.psi.dataFlow.DFAEngine;
import org.jetbrains.plugins.groovy.lang.psi.impl.statements.expressions.TypesUtil;
import org.jetbrains.plugins.groovy.lang.psi.impl.synthetic.ClosureSyntheticParameter;
import org.jetbrains.plugins.groovy.lang.psi.impl.synthetic.GroovyScriptClass;
import org.jetbrains.plugins.groovy.lang.resolve.ResolveUtil;
import java.util.*;
import static org.jetbrains.plugins.groovy.lang.psi.controlFlow.OrderUtil.reversedPostOrder;
/**
 * Computes variable flow information for a statement fragment [first, last]
 * of a Groovy control flow: which variables are read inside the fragment but
 * (partly) defined outside it (inputs), and which definitions made inside the
 * fragment are still read after it (outputs). Closure bodies are scanned
 * separately because their reads/writes do not appear in the owner's flow.
 *
 * @author ven
 */
public class ReachingDefinitionsCollector {
  // Static utility class; not meant to be instantiated.
  private ReachingDefinitionsCollector() {
  }

  /**
   * Entry point: runs a reaching-definitions DFA over {@code flow} and derives
   * input/output variable infos for the fragment delimited by {@code first}
   * and {@code last} (inclusive sibling statements).
   */
  @NotNull
  public static FragmentVariableInfos obtainVariableFlowInformation(@NotNull final GrStatement first,
                                                                    @NotNull final GrStatement last,
                                                                    @NotNull final GrControlFlowOwner flowOwner,
                                                                    @NotNull final Instruction[] flow) {
    final DefinitionMap dfaResult = inferDfaResult(flow);

    final LinkedHashSet<Integer> fragmentInstructions = getFragmentInstructions(first, last, flow);
    final int[] postorder = reversedPostOrder(flow);
    LinkedHashSet<Integer> reachableFromFragmentReads = getReachable(fragmentInstructions, flow, dfaResult, postorder);
    LinkedHashSet<Integer> fragmentReads = filterReads(fragmentInstructions, flow);

    final Map<String, VariableInfo> imap = new LinkedHashMap<>();
    final Map<String, VariableInfo> omap = new LinkedHashMap<>();
    final PsiManager manager = first.getManager();

    // Reads inside the fragment with at least one reaching definition outside
    // of it become inputs.
    for (final Integer ref : fragmentReads) {
      ReadWriteVariableInstruction rwInstruction = (ReadWriteVariableInstruction)flow[ref];
      String name = rwInstruction.getVariableName();
      final int[] defs = dfaResult.getDefinitions(ref);
      if (!allDefsInFragment(defs, fragmentInstructions)) {
        addVariable(name, imap, manager, getType(rwInstruction.getElement()));
      }
    }

    // Reads after the fragment that are reached by a definition made inside
    // the fragment become outputs (and possibly inputs as well, when some
    // reaching definition precedes the fragment).
    for (final Integer ref : reachableFromFragmentReads) {
      ReadWriteVariableInstruction rwInstruction = (ReadWriteVariableInstruction)flow[ref];
      String name = rwInstruction.getVariableName();
      final int[] defs = dfaResult.getDefinitions(ref);
      if (anyDefInFragment(defs, fragmentInstructions)) {
        for (int def : defs) {
          if (fragmentInstructions.contains(def)) {
            PsiType outputType = getType(flow[def].getElement());
            addVariable(name, omap, manager, outputType);
          }
        }

        if (!allProperDefsInFragment(defs, ref, fragmentInstructions, postorder)) {
          PsiType inputType = getType(rwInstruction.getElement());
          addVariable(name, imap, manager, inputType);
        }
      }
    }

    addClosureUsages(imap, omap, first, last, flowOwner);

    final VariableInfo[] iarr = filterNonlocals(imap, last);
    final VariableInfo[] oarr = filterNonlocals(omap, last);

    return new FragmentVariableInfos() {
      @Override
      public VariableInfo[] getInputVariableNames() {
        return iarr;
      }

      @Override
      public VariableInfo[] getOutputVariableNames() {
        return oarr;
      }
    };
  }

  // Runs the reaching-definitions dataflow engine over the flow and collapses
  // the per-instruction result into a single DefinitionMap for read sites.
  private static DefinitionMap inferDfaResult(Instruction[] flow) {
    final ReachingDefinitionsDfaInstance dfaInstance = new ReachingDefinitionsDfaInstance(flow);
    final ReachingDefinitionsSemilattice lattice = new ReachingDefinitionsSemilattice();
    final DFAEngine<DefinitionMap> engine = new DFAEngine<>(flow, dfaInstance, lattice);
    return postprocess(engine.performForceDFA(), flow, dfaInstance);
  }

  // Variables captured by closures do not show up in the owner's control flow,
  // so every closure in the owner is visited explicitly: a variable used in a
  // closure inside the fragment but declared outside is an input; one used in
  // a closure after the fragment but declared inside is an output.
  private static void addClosureUsages(final Map<String, VariableInfo> imap,
                                       final Map<String, VariableInfo> omap,
                                       final GrStatement first,
                                       final GrStatement last,
                                       GrControlFlowOwner flowOwner) {
    flowOwner.accept(new GroovyRecursiveElementVisitor() {
      @Override
      public void visitClosure(@NotNull GrClosableBlock closure) {
        addUsagesInClosure(imap, omap, closure, first, last);
        super.visitClosure(closure);
      }

      private void addUsagesInClosure(final Map<String, VariableInfo> imap,
                                      final Map<String, VariableInfo> omap,
                                      final GrClosableBlock closure,
                                      final GrStatement first,
                                      final GrStatement last) {
        closure.accept(new GroovyRecursiveElementVisitor() {
          @Override
          public void visitReferenceExpression(@NotNull GrReferenceExpression refExpr) {
            if (refExpr.isQualified()) {
              return;
            }
            PsiElement resolved = refExpr.resolve();
            if (!(resolved instanceof GrVariable)) {
              return;
            }
            GrVariable variable = (GrVariable)resolved;
            // Variables declared inside this closure are local to it.
            if (PsiTreeUtil.isAncestor(closure, variable, true)) {
              return;
            }
            // The implicit 'it' parameter belongs to the closure itself.
            if (variable instanceof ClosureSyntheticParameter &&
                PsiTreeUtil.isAncestor(closure, ((ClosureSyntheticParameter)variable).getClosure(), false)) {
              return;
            }
            String name = variable.getName();
            if (!(variable instanceof GrField)) {
              if (!isInFragment(first, last, resolved)) {
                if (isInFragment(first, last, closure)) {
                  addVariable(name, imap, variable.getManager(), variable.getType());
                }
              }
              else {
                if (!isInFragment(first, last, closure)) {
                  addVariable(name, omap, variable.getManager(), variable.getType());
                }
              }
            }
          }
        });
      }
    });
  }

  // Records a variable occurrence, widening the stored type to cover 'type'.
  private static void addVariable(String name, Map<String, VariableInfo> map, PsiManager manager, PsiType type) {
    VariableInfoImpl info = (VariableInfoImpl)map.get(name);
    if (info == null) {
      info = new VariableInfoImpl(name, manager);
      map.put(name, info);
    }
    info.addSubtype(type);
  }

  // Keeps only the instruction indices that are variable reads.
  private static LinkedHashSet<Integer> filterReads(final LinkedHashSet<Integer> instructions, final Instruction[] flow) {
    final LinkedHashSet<Integer> result = new LinkedHashSet<>();
    for (final Integer i : instructions) {
      final Instruction instruction = flow[i];
      if (isReadInsn(instruction)) {
        result.add(i);
      }
    }
    return result;
  }

  // True when every reaching definition lies inside the fragment.
  private static boolean allDefsInFragment(int[] defs, LinkedHashSet<Integer> fragmentInstructions) {
    for (int def : defs) {
      if (!fragmentInstructions.contains(def)) return false;
    }

    return true;
  }

  // Like allDefsInFragment, but ignores definitions that come after the read
  // in reverse-postorder (i.e. only "proper" preceding definitions count).
  private static boolean allProperDefsInFragment(int[] defs, int ref, LinkedHashSet<Integer> fragmentInstructions, int[] postorder) {
    for (int def : defs) {
      if (!fragmentInstructions.contains(def) && postorder[def] < postorder[ref]) return false;
    }

    return true;
  }

  // True when at least one reaching definition lies inside the fragment.
  private static boolean anyDefInFragment(int[] defs, LinkedHashSet<Integer> fragmentInstructions) {
    for (int def : defs) {
      if (fragmentInstructions.contains(def)) return true;
    }

    return false;
  }

  // Best-effort type of the element behind a read/write instruction.
  @Nullable
  private static PsiType getType(PsiElement element) {
    if (element instanceof GrVariable) {
      return ((GrVariable)element).getTypeGroovy();
    }
    else if (element instanceof GrReferenceExpression) return ((GrReferenceExpression)element).getType();
    return null;
  }

  // Drops names that do not resolve to true locals at 'place' (fields,
  // script binding variables, classes shadow real locals here).
  private static VariableInfo[] filterNonlocals(Map<String, VariableInfo> infos, GrStatement place) {
    List<VariableInfo> result = new ArrayList<>();
    for (Iterator<VariableInfo> iterator = infos.values().iterator(); iterator.hasNext(); ) {
      VariableInfo info = iterator.next();
      String name = info.getName();
      GroovyPsiElement property = ResolveUtil.resolveProperty(place, name);
      if (property instanceof GrVariable) {
        iterator.remove();
      }
      else if (property instanceof GrReferenceExpression) {
        GrMember member = PsiTreeUtil.getParentOfType(property, GrMember.class);
        if (member == null) {
          continue;
        }
        else if (!member.hasModifierProperty(PsiModifier.STATIC)) {
          if (member.getContainingClass() instanceof GroovyScriptClass) {
            //binding variable
            continue;
          }
        }
      }
      if (ResolveUtil.resolveClass(place, name) == null) {
        result.add(info);
      }
    }
    return result.toArray(new VariableInfo[result.size()]);
  }

  // Collects the indices of all instructions whose PSI element lies between
  // 'first' and 'last'.
  private static LinkedHashSet<Integer> getFragmentInstructions(GrStatement first, GrStatement last, Instruction[] flow) {
    LinkedHashSet<Integer> result = new LinkedHashSet<>();
    for (Instruction instruction : flow) {
      if (isInFragment(instruction, first, last)) {
        result.add(instruction.num());
      }
    }
    return result;
  }

  private static boolean isInFragment(Instruction instruction, GrStatement first, GrStatement last) {
    final PsiElement element = instruction.getElement();
    if (element == null) return false;
    return isInFragment(first, last, element);
  }

  // True when 'element' lies under the statement range [first, last], all of
  // which share the same parent.
  private static boolean isInFragment(GrStatement first, GrStatement last, PsiElement element) {
    final PsiElement parent = first.getParent();
    if (!PsiTreeUtil.isAncestor(parent, element, true)) return false;
    PsiElement run = element;
    while (run.getParent() != parent) run = run.getParent();
    return isBetween(first, last, run);
  }

  // Sibling-order check: 'run' must be reachable forward from 'first' and
  // backward from 'last'.
  private static boolean isBetween(PsiElement first, PsiElement last, PsiElement run) {
    while (first != null && first != run) first = first.getNextSibling();
    if (first == null) return false;
    while (last != null && last != run) last = last.getPrevSibling();
    if (last == null) return false;
    return true;
  }

  // Finds reads reached by a definition made inside the fragment, where the
  // read itself is outside the fragment or is reached along a path that
  // leaves the fragment (back edges).
  private static LinkedHashSet<Integer> getReachable(final LinkedHashSet<Integer> fragmentInsns,
                                                     final Instruction[] flow,
                                                     final DefinitionMap dfaResult,
                                                     final int[] postorder) {
    final LinkedHashSet<Integer> result = new LinkedHashSet<>();
    for (final Instruction insn : flow) {
      if (isReadInsn(insn)) {
        final int ref = insn.num();
        int[] definitions = dfaResult.getDefinitions(ref);
        if (definitions != null) {
          for (final int def : definitions) {
            if (fragmentInsns.contains(def) &&
                (!fragmentInsns.contains(ref) || postorder[ref] < postorder[def] && checkPathIsOutsideOfFragment(def, ref, flow, fragmentInsns))) {
              result.add(ref);
              break;
            }
          }
        }
      }
    }

    return result;
  }

  private static boolean checkPathIsOutsideOfFragment(int def, int ref, Instruction[] flow, LinkedHashSet<Integer> fragmentInsns) {
    Boolean path = findPath(flow[def], ref, fragmentInsns, false, new HashMap<>());
    assert path != null : "def=" + def + ", ref=" + ref;
    return path.booleanValue();
  }

  /**
   * Returns true if a path from {@code cur} to {@code destination} leaves the
   * fragment, null if there is no path, and false if every path stays inside
   * the fragment. Memoizes per-instruction results in {@code visited}
   * (a null value also marks "in progress" to cut cycles).
   */
  @Nullable
  private static Boolean findPath(Instruction cur,
                                  int destination,
                                  LinkedHashSet<Integer> fragmentInsns,
                                  boolean wasOutside,
                                  HashMap<Instruction, Boolean> visited) {
    wasOutside = wasOutside || !fragmentInsns.contains(cur.num());
    visited.put(cur, null);
    Iterable<? extends Instruction> instructions = cur.allSuccessors();

    boolean pathExists = false;
    for (Instruction i : instructions) {
      if (i.num() == destination) return wasOutside;

      Boolean result;
      if (visited.containsKey(i)) {
        result = visited.get(i);
      }
      else {
        result = findPath(i, destination, fragmentInsns, wasOutside, visited);
        visited.put(i, result);
      }
      if (result != null) {
        if (result.booleanValue()) {
          visited.put(cur, true);
          return true;
        }
        pathExists = true;
      }
    }
    if (pathExists) {
      visited.put(cur, false);
      return false;
    }
    else {
      visited.put(cur, null);
      return null;
    }
  }

  private static boolean isReadInsn(Instruction insn) {
    return insn instanceof ReadWriteVariableInstruction && !((ReadWriteVariableInstruction)insn).isWrite();
  }

  // Debug helper: renders a DFA result as text. Kept for troubleshooting.
  @SuppressWarnings({"UnusedDeclaration"})
  private static String dumpDfaResult(ArrayList<TIntObjectHashMap<TIntHashSet>> dfaResult, ReachingDefinitionsDfaInstance dfa) {
    final StringBuffer buffer = new StringBuffer();
    for (int i = 0; i < dfaResult.size(); i++) {
      TIntObjectHashMap<TIntHashSet> map = dfaResult.get(i);
      buffer.append("At ").append(i).append(":\n");
      map.forEachEntry(new TIntObjectProcedure<TIntHashSet>() {
        @Override
        public boolean execute(int i, TIntHashSet defs) {
          buffer.append(i).append(" -> ");
          defs.forEach(i1 -> {
            buffer.append(i1).append(" ");
            return true;
          });
          return true;
        }
      });
      buffer.append("\n");
    }

    return buffer.toString();
  }

  // Accumulates the least-upper-bound type over all occurrences of a variable.
  private static class VariableInfoImpl implements VariableInfo {
    @NotNull private final String myName;
    private final PsiManager myManager;

    @Nullable private
    PsiType myType;

    VariableInfoImpl(@NotNull String name, PsiManager manager) {
      myName = name;
      myManager = manager;
    }

    @Override
    @NotNull
    public String getName() {
      return myName;
    }

    @Override
    @Nullable
    public PsiType getType() {
      // Intersection types are narrowed to their first conjunct for display.
      if (myType instanceof PsiIntersectionType) return ((PsiIntersectionType)myType).getConjuncts()[0];
      return myType;
    }

    // Widens the stored type so that it is assignable from 't' as well.
    void addSubtype(PsiType t) {
      if (t != null) {
        if (myType == null) {
          myType = t;
        }
        else {
          if (!myType.isAssignableFrom(t)) {
            if (t.isAssignableFrom(myType)) {
              myType = t;
            }
            else {
              myType = TypesUtil.getLeastUpperBound(myType, t, myManager);
            }
          }
        }
      }
    }
  }

  // For every read instruction, copies the reaching definitions of its
  // variable from the engine's per-instruction maps into one combined map
  // keyed by the read's instruction index.
  @NotNull
  private static DefinitionMap postprocess(@NotNull final List<DefinitionMap> dfaResult,
                                           @NotNull Instruction[] flow,
                                           @NotNull ReachingDefinitionsDfaInstance dfaInstance) {
    DefinitionMap result = new DefinitionMap();
    for (int i = 0; i < flow.length; i++) {
      Instruction insn = flow[i];
      if (insn instanceof ReadWriteVariableInstruction) {
        ReadWriteVariableInstruction rwInsn = (ReadWriteVariableInstruction)insn;
        if (!rwInsn.isWrite()) {
          int idx = dfaInstance.getVarIndex(rwInsn.getVariableName());
          result.copyFrom(dfaResult.get(i), idx, i);
        }
      }
    }
    return result;
  }
}
| asedunov/intellij-community | plugins/groovy/groovy-psi/src/org/jetbrains/plugins/groovy/lang/psi/dataFlow/reachingDefs/ReachingDefinitionsCollector.java | Java | apache-2.0 | 17,084 |
// Checks that we don't fall over on fields that depend on expressions.
//- @T defines/binding TyvarT
template <typename T> struct S {
  // 't' has the dependent type T, so member lookups through expressions
  // involving it stay unresolved ("lookup" nodes) until instantiation.
  T t;
  //- @f ref DepF
  //- DepF.node/kind lookup
  //- DepF.text f
  //- !{DepF param.0 Anything}
  //- @thing ref DepThing
  //- DepThing.node/kind lookup
  //- DepThing param.0 TyvarT
  int i = (t.thing(3) + 4).f;
};
| kythe/kythe | kythe/cxx/indexer/cxx/testdata/tvar_template/template_depexpr_field_ref.cc | C++ | apache-2.0 | 358 |
/*
---------------------------------------------------------------------------
Open Asset Import Library (assimp)
---------------------------------------------------------------------------
Copyright (c) 2006-2016, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of the assimp team.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
*/
/** @file CompareDump.cpp
* @brief Implementation of the 'assimp cmpdmp', which compares
* two model dumps for equality. It plays an important role
* in the regression test suite.
*/
#include "Main.h"
// Usage text printed for the 'assimp cmpdump' command-line verb.
const char* AICMD_MSG_CMPDUMP_HELP =
"assimp cmpdump <actual> <expected>\n"
"\tCompare two short dumps produced with \'assimp dump <..> -s\' for equality.\n"
;
#include "../../code/assbin_chunks.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "generic_inserter.hpp"
#include <map>
#include <deque>
#include <stack>
#include <sstream>
#include <iostream>
#include "../../include/assimp/ai_assert.h"
// get << for aiString
template <typename char_t, typename traits_t>
void mysprint(std::basic_ostream<char_t, traits_t>& os, const aiString& vec) {
os << "[length: \'" << std::dec << vec.length << "\' content: \'" << vec.data << "\']";
}
// Stream-insertion operator for aiString; delegates the actual formatting to
// mysprint() via the generic_inserter helper (see generic_inserter.hpp).
template <typename char_t, typename traits_t>
std::basic_ostream<char_t, traits_t>& operator<< (std::basic_ostream<char_t, traits_t>& os, const aiString& vec) {
    return generic_inserter(mysprint<char_t,traits_t>, os, vec);
}
class sliced_chunk_iterator;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// @class compare_fails_exception
///
/// @brief Sentinel exception to return quickly from deeply nested control paths
////////////////////////////////////////////////////////////////////////////////////////////////////
class compare_fails_exception : public virtual std::exception {
public:
enum {MAX_ERR_LEN = 4096};
/* public c'tors */
compare_fails_exception(const char* msg) {
strncpy(mywhat,msg,MAX_ERR_LEN-1);
strcat(mywhat,"\n");
}
/* public member functions */
const char* what() const throw() {
return mywhat;
}
private:
char mywhat[MAX_ERR_LEN+1];
};
// Tolerances for float/double comparisons. Deliberately coarse: dump values
// survive serialization round-trips, so exact equality would be too strict.
#define MY_FLT_EPSILON 1e-1f
#define MY_DBL_EPSILON 1e-1
////////////////////////////////////////////////////////////////////////////////////////////////////
/// @class comparer_context
///
/// @brief Record our way through the files to be compared and dump useful information if we fail.
////////////////////////////////////////////////////////////////////////////////////////////////////
// Records the position inside the two dumps being compared (a scope 'stack'
// plus per-scope repetition counters and a debug trace) and produces a
// detailed report when a mismatch is found. All reads go through this class
// so the ACTUAL and EXPECT streams always advance in lockstep.
class comparer_context {
    friend class sliced_chunk_iterator;

public:

    /* construct given two file handles to compare */
    comparer_context(FILE* actual,FILE* expect)
        : actual(actual)
        , expect(expect)
        , cnt_chunks(0)
    {
        ai_assert(actual);
        ai_assert(expect);

        // Seed the length stack with the total size of ACTUAL (start offset
        // 0) so top-level chunk iteration knows where the file ends.
        fseek(actual,0,SEEK_END);
        lengths.push(std::make_pair(static_cast<uint32_t>(ftell(actual)),0));
        fseek(actual,0,SEEK_SET);

        history.push_back(HistoryEntry("---",PerChunkCounter()));
    }

public:

    /* set new scope */
    void push_elem(const char* msg) {
        const std::string s = msg;

        // Count repeated occurrences of the same element name within the
        // current scope so failure reports can show "(Index: n)".
        PerChunkCounter::const_iterator it = history.back().second.find(s);
        if(it != history.back().second.end()) {
            ++history.back().second[s];
        }
        else history.back().second[s] = 0;

        history.push_back(HistoryEntry(s,PerChunkCounter()));
        debug_trace.push_back("PUSH " + s);
    }

    /* leave current scope */
    void pop_elem() {
        ai_assert(history.size());
        debug_trace.push_back("POP "+ history.back().first);
        history.pop_back();
    }

    /* push current chunk length and start offset on top of stack */
    void push_length(uint32_t nl, uint32_t start) {
        lengths.push(std::make_pair(nl,start));
        ++cnt_chunks;
    }

    /* pop the chunk length stack */
    void pop_length() {
        ai_assert(lengths.size());
        lengths.pop();
    }

    /* access the current chunk length */
    uint32_t get_latest_chunk_length() {
        ai_assert(lengths.size());
        return lengths.top().first;
    }

    /* access the current chunk start offset */
    uint32_t get_latest_chunk_start() {
        ai_assert(lengths.size());
        return lengths.top().second;
    }

    /* total number of chunk headers passed so far*/
    uint32_t get_num_chunks() {
        return cnt_chunks;
    }

    /* get ACTUAL file desc. != NULL */
    FILE* get_actual() const {
        return actual;
    }

    /* get EXPECT file desc. != NULL */
    FILE* get_expect() const {
        return expect;
    }

    /* compare next T from both streams, name occurs in error messages.
     * Throws compare_fails_exception (via failure()) on mismatch;
     * returns the value read from ACTUAL otherwise. */
    template<typename T> T cmp(const std::string& name) {
        T a,e;
        read(a,e);

        if(a != e) {
            std::stringstream ss;
            failure((ss<< "Expected " << e << ", but actual is " << a,
                ss.str()),name);
        }
        // std::cout << name << " " << std::hex << a << std::endl;
        return a;
    }

    /* compare next num T's from both streams, name occurs in error messages */
    template<typename T> void cmp(size_t num,const std::string& name) {
        for(size_t n = 0; n < num; ++n) {
            std::stringstream ss;
            cmp<T>((ss<<name<<"["<<n<<"]",ss.str()));
            // std::cout << name << " " << std::hex << a << std::endl;
        }
    }

    /* Bounds of an aiVector3D array (separate function
     * because partial specializations of member functions are illegal--)*/
    template<typename T> void cmp_bounds(const std::string& name) {
        // Short dumps store only min/max of an array, in that order.
        cmp<T> (name+".<minimum-value>");
        cmp<T> (name+".<maximum-value>");
    }

private:

    /* Report failure: builds the full context report and throws
     * compare_fails_exception. Never returns. */
    AI_WONT_RETURN void failure(const std::string& err, const std::string& name) AI_WONT_RETURN_SUFFIX {
        std::stringstream ss;
        throw compare_fails_exception((ss
            << "Files are different at "
            << history.back().first
            << "."
            << name
            << ".\nError is: "
            << err
            << ".\nCurrent position in scene hierarchy is "
            << print_hierarchy(),ss.str().c_str()
            ));
    }

    /** print our 'stack' */
    std::string print_hierarchy() {
        std::stringstream ss;
        ss << "\n";

        const char* last = history.back().first.c_str();
        std::string pad;

        // Walk the scope stack from the innermost element outwards,
        // indenting one step per level.
        for(ChunkHistory::reverse_iterator rev = history.rbegin(),
            end = history.rend(); rev != end; ++rev, pad += " ")
        {
            ss << pad << (*rev).first << "(Index: " << (*rev).second[last] << ")" << "\n";
            last = (*rev).first.c_str();
        }

        ss << std::endl << "Debug trace: "<< "\n";
        for (std::vector<std::string>::const_iterator it = debug_trace.begin(); it != debug_trace.end(); ++it) {
            ss << *it << "\n";
        }
        ss << std::flush;
        return ss.str();
    }

    /* read from both streams at the same time; throws via EOFActual /
     * EOFExpect if either stream runs out of data */
    template <typename T> void read(T& filla,T& fille) {
        if(1 != fread(&filla,sizeof(T),1,actual)) {
            EOFActual();
        }
        if(1 != fread(&fille,sizeof(T),1,expect)) {
            EOFExpect();
        }
    }

private:

    void EOFActual() {
        std::stringstream ss;
        throw compare_fails_exception((ss
            << "Unexpected EOF reading ACTUAL.\nCurrent position in scene hierarchy is "
            << print_hierarchy(),ss.str().c_str()
            ));
    }

    void EOFExpect() {
        std::stringstream ss;
        throw compare_fails_exception((ss
            << "Unexpected EOF reading EXPECT.\nCurrent position in scene hierarchy is "
            << print_hierarchy(),ss.str().c_str()
            ));
    }

    FILE *const actual, *const expect;

    // Element name -> number of occurrences of that name within one scope.
    typedef std::map<std::string,unsigned int> PerChunkCounter;
    typedef std::pair<std::string,PerChunkCounter> HistoryEntry;
    typedef std::deque<HistoryEntry> ChunkHistory;
    ChunkHistory history;

    std::vector<std::string> debug_trace;

    // (length, start offset) of each enclosing chunk, innermost on top.
    typedef std::stack<std::pair<uint32_t,uint32_t> > LengthStack;
    LengthStack lengths;

    uint32_t cnt_chunks;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/* specialization for aiString (it needs separate handling because its on-disk representation
* differs from its binary representation in memory and can't be treated as an array of n T's.*/
// aiString on-disk layout: two uint32 lengths (one per stream), then the raw
// character payload of each string; rebuilt into in-memory aiString form.
template <> void comparer_context :: read<aiString>(aiString& filla,aiString& fille) {
    uint32_t lena,lene;
    read(lena,lene);

    // NOTE(review): lena/lene come straight from the file and are not
    // checked against the capacity of aiString::data before the freads
    // below; a malformed dump with an oversized length could overflow the
    // buffer. TODO confirm against aiString's maximum length and add a
    // bounds check.
    if(lena && 1 != fread(&filla.data,lena,1,actual)) {
        EOFActual();
    }
    if(lene && 1 != fread(&fille.data,lene,1,expect)) {
        EOFExpect();
    }

    // Set length and terminate both strings in-place.
    fille.data[fille.length=static_cast<unsigned int>(lene)] = '\0';
    filla.data[filla.length=static_cast<unsigned int>(lena)] = '\0';
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for float, uses epsilon for comparisons*/
// Epsilon-based comparison for float fields (see MY_FLT_EPSILON); exact
// equality would be too strict for serialized values. Returns the ACTUAL
// value; throws via failure() on mismatch.
template<> float comparer_context :: cmp<float>(const std::string& name)
{
    float actual_value, expected_value;
    read(actual_value, expected_value);

    const float delta = fabs(actual_value - expected_value);
    if (delta > MY_FLT_EPSILON) {
        std::stringstream ss;
        ss << "Expected " << expected_value << ", but actual is "
           << actual_value << " (delta is " << delta << ")";
        failure(ss.str(), name);
    }
    return actual_value;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for double, uses epsilon for comparisons*/
// Epsilon-based comparison for double fields (see MY_DBL_EPSILON). Returns
// the ACTUAL value; throws via failure() on mismatch.
template<> double comparer_context :: cmp<double>(const std::string& name)
{
    double actual_value, expected_value;
    read(actual_value, expected_value);

    const double delta = fabs(actual_value - expected_value);
    if (delta > MY_DBL_EPSILON) {
        std::stringstream ss;
        ss << "Expected " << expected_value << ", but actual is "
           << actual_value << " (delta is " << delta << ")";
        failure(ss.str(), name);
    }
    return actual_value;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiVector3D */
// Compare an aiVector3D component-wise; x, y, z in dump order.
template<> aiVector3D comparer_context :: cmp<aiVector3D >(const std::string& name)
{
    aiVector3D out;
    out.x = cmp<float>(name+".x");
    out.y = cmp<float>(name+".y");
    out.z = cmp<float>(name+".z");
    return out;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiColor4D */
// Compare an aiColor4D component-wise; r, g, b, a in dump order.
template<> aiColor4D comparer_context :: cmp<aiColor4D >(const std::string& name)
{
    aiColor4D out;
    out.r = cmp<float>(name+".r");
    out.g = cmp<float>(name+".g");
    out.b = cmp<float>(name+".b");
    out.a = cmp<float>(name+".a");
    return out;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiQuaternion */
// Compare an aiQuaternion component-wise; w, x, y, z in dump order.
template<> aiQuaternion comparer_context :: cmp<aiQuaternion >(const std::string& name)
{
    const float qw = cmp<float>(name+".w");
    const float qx = cmp<float>(name+".x");
    const float qy = cmp<float>(name+".y");
    const float qz = cmp<float>(name+".z");
    return aiQuaternion(qw,qx,qy,qz);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiQuatKey */
// Compare an aiQuatKey; the timestamp precedes the rotation in the dump.
template<> aiQuatKey comparer_context :: cmp<aiQuatKey >(const std::string& name)
{
    const double key_time = cmp<double>(name+".mTime");
    const aiQuaternion key_value = cmp<aiQuaternion>(name+".mValue");
    return aiQuatKey(key_time,key_value);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiVectorKey */
// Compare an aiVectorKey; the timestamp precedes the vector in the dump.
template<> aiVectorKey comparer_context :: cmp<aiVectorKey >(const std::string& name)
{
    const double key_time = cmp<double>(name+".mTime");
    const aiVector3D key_value = cmp<aiVector3D>(name+".mValue");
    return aiVectorKey(key_time,key_value);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiMatrix4x4 */
// Compare an aiMatrix4x4 element by element, row-major, labelling each
// element ".m<row><col>" in failure reports.
template<> aiMatrix4x4 comparer_context :: cmp<aiMatrix4x4 >(const std::string& name)
{
    aiMatrix4x4 res;
    for(unsigned int row = 0; row < 4; ++row) {
        for(unsigned int col = 0; col < 4; ++col) {
            std::stringstream label;
            label << ".m" << row << col;
            res[row][col] = cmp<float>(name + label.str());
        }
    }
    return res;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Specialization for aiVertexWeight */
// Compare an aiVertexWeight; vertex index precedes the weight in the dump.
template<> aiVertexWeight comparer_context :: cmp<aiVertexWeight >(const std::string& name)
{
    const unsigned int vertex_id = cmp<unsigned int>(name+".mVertexId");
    const float weight = cmp<float>(name+".mWeight");
    return aiVertexWeight(vertex_id,weight);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// @class sliced_chunk_iterator
///
/// @brief Helper to iterate easily through corresponding chunks of two dumps simultaneously.
///
/// Not a *real* iterator, doesn't fully conform to the isocpp iterator spec
////////////////////////////////////////////////////////////////////////////////////////////////////
// Helper to iterate easily through corresponding chunks of two dumps
// simultaneously. Not a *real* iterator: it doesn't conform to the isocpp
// iterator spec, it just supports *, ++ and is_end().
class sliced_chunk_iterator {
    friend class sliced_chunk_reader;

    // `end` is the absolute file offset at which this slice stops.
    sliced_chunk_iterator(comparer_context& ctx, long end)
        : ctx(ctx)
        , endit(false)
        , next(std::numeric_limits<long>::max())
        , end(end)
    {
        load_next();
    }

public:
    ~sliced_chunk_iterator() {
        // Leave both streams positioned at the end of the slice so the
        // caller can continue reading the enclosing chunk.
        fseek(ctx.get_actual(),end,SEEK_SET);
        fseek(ctx.get_expect(),end,SEEK_SET);
    }

public:
    /* get current chunk head: (chunk id, chunk length) */
    typedef std::pair<uint32_t,uint32_t> Chunk;
    const Chunk& operator*() {
        return current;
    }

    /* get to next chunk head */
    const sliced_chunk_iterator& operator++() {
        cleanup();
        load_next();
        return *this;
    }

    /* true once the slice is exhausted */
    bool is_end() const {
        return endit;
    }

private:
    /* get to the end of *this* chunk */
    void cleanup() {
        if(next != std::numeric_limits<long>::max()) {
            fseek(ctx.get_actual(),next,SEEK_SET);
            fseek(ctx.get_expect(),next,SEEK_SET);
            ctx.pop_length();
        }
    }

    /* advance to the next chunk */
    void load_next() {
        Chunk actual;
        size_t res=0;

        const long cur = ftell(ctx.get_expect());
        if(end-cur<8) {
            // Not enough room left for another chunk header (id + length).
            current = std::make_pair(0u,0u);
            endit = true;
            return;
        }

        // Read (id,length) from both streams; each fread result occupies
        // its own bit of `res` so a partial read is detectable.
        // (Restored `&current` here - the previous text was mangled by an
        // encoding error.)
        res|=fread(&current.first,4,1,ctx.get_expect());
        res|=fread(&current.second,4,1,ctx.get_expect()) <<1u;
        res|=fread(&actual.first,4,1,ctx.get_actual()) <<2u;
        res|=fread(&actual.second,4,1,ctx.get_actual()) <<3u;

        if(res!=0xf) {
            ctx.failure("IO Error reading chunk head, dumps are malformed","<ChunkHead>");
        }

        if (current.first != actual.first) {
            std::stringstream ss;
            ctx.failure((ss
                <<"Chunk headers do not match. EXPECT: "
                << std::hex << current.first
                <<" ACTUAL: "
                << /*std::hex */actual.first,
                ss.str()),
                "<ChunkHead>");
        }

        // BUGFIX: this branch previously re-compared the chunk *ids*
        // (current.first != actual.first), so mismatching chunk lengths
        // were never reported. Compare the lengths, as the message says.
        if (current.second != actual.second) {
            std::stringstream ss;
            ctx.failure((ss
                <<"Chunk lengths do not match. EXPECT: "
                <<current.second
                <<" ACTUAL: "
                << actual.second,
                ss.str()),
                "<ChunkHead>");
        }

        // Next header sits right after this chunk's payload (8 = header).
        next = cur+current.second+8;
        ctx.push_length(current.second,cur+8);
    }

    comparer_context& ctx;
    Chunk current;
    bool endit;
    long next,end;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// @class sliced_chunk_reader
///
/// @brief Helper to iterate easily through corresponding chunks of two dumps simultaneously.
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory for sliced_chunk_iterator: begin() iterates the sub-chunks of
// the chunk currently on top of the context's length stack.
class sliced_chunk_reader {
public:
    sliced_chunk_reader(comparer_context& ctx)
        : ctx(ctx)
    {}

    ~sliced_chunk_reader() {
    }

public:
    sliced_chunk_iterator begin() const {
        // The slice ends where the current chunk ends: start + length.
        const long slice_end = ctx.get_latest_chunk_length() +
            ctx.get_latest_chunk_start();
        return sliced_chunk_iterator(ctx, slice_end);
    }

private:
    comparer_context& ctx;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// @class scoped_chunk
///
/// @brief Utility to simplify usage of comparer_context.push_elem/pop_elem
////////////////////////////////////////////////////////////////////////////////////////////////////
// RAII helper: pushes an element scope on construction and pops it on
// destruction, keeping comparer_context::push_elem/pop_elem balanced on
// every exit path (including exceptions thrown by failed comparisons).
class scoped_chunk {
public:
    scoped_chunk(comparer_context& context, const char* msg)
        : ctx(context)
    {
        ctx.push_elem(msg);
    }

    ~scoped_chunk()
    {
        ctx.pop_elem();
    }

private:
    comparer_context& ctx;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare a single aiMaterialProperty chunk. The payload layout depends on
// the property type; types not listed carry no comparable payload here.
void CompareOnTheFlyMaterialProperty(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiMaterialProperty");

    comp.cmp<aiString>("mKey");
    comp.cmp<uint32_t>("mSemantic");
    comp.cmp<uint32_t>("mIndex");

    const uint32_t length = comp.cmp<uint32_t>("mDataLength");
    const aiPropertyTypeInfo type = static_cast<aiPropertyTypeInfo>(
        comp.cmp<uint32_t>("mType"));

    if (type == aiPTI_Float) {
        comp.cmp<float>(length/4,"mData");
    }
    else if (type == aiPTI_String) {
        comp.cmp<aiString>("mData");
    }
    else if (type == aiPTI_Integer) {
        comp.cmp<uint32_t>(length/4,"mData");
    }
    else if (type == aiPTI_Buffer) {
        comp.cmp<uint8_t>(length,"mData");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiMaterial chunk: the property count followed by the embedded
// aiMaterialProperty sub-chunks.
void CompareOnTheFlyMaterial(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiMaterial");
    comp.cmp<uint32_t>("aiMaterial::mNumProperties");

    sliced_chunk_reader reader(comp);
    sliced_chunk_iterator it = reader.begin();
    while (!it.is_end()) {
        if ((*it).first == ASSBIN_CHUNK_AIMATERIALPROPERTY) {
            CompareOnTheFlyMaterialProperty(comp);
        }
        ++it;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiBone chunk. The call order mirrors the on-disk layout of
// the short dump and must not be changed.
void CompareOnTheFlyBone(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiBone");
    comp.cmp<aiString>("mName");
    comp.cmp<uint32_t>("mNumWeights");
    comp.cmp<aiMatrix4x4>("mOffsetMatrix");

    // Short dumps store only min/max bounds of the weight array.
    comp.cmp_bounds<aiVertexWeight>("mWeights");
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiNodeAnim chunk. The call order mirrors the on-disk layout
// of the short dump and must not be changed.
void CompareOnTheFlyNodeAnim(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiNodeAnim");
    comp.cmp<aiString>("mNodeName");
    comp.cmp<uint32_t>("mNumPositionKeys");
    comp.cmp<uint32_t>("mNumRotationKeys");
    comp.cmp<uint32_t>("mNumScalingKeys");
    comp.cmp<uint32_t>("mPreState");
    comp.cmp<uint32_t>("mPostState");

    // Short dumps store only min/max bounds of the key arrays.
    comp.cmp_bounds<aiVectorKey>("mPositionKeys");
    comp.cmp_bounds<aiQuatKey>("mRotationKeys");
    comp.cmp_bounds<aiVectorKey>("mScalingKeys");
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiMesh chunk: scalar counters, a bitmask of present vertex
// streams, the per-stream bounds, per-window face hashes, then embedded
// aiBone sub-chunks.
void CompareOnTheFlyMesh(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiMesh");
    comp.cmp<uint32_t>("mPrimitiveTypes");
    comp.cmp<uint32_t>("mNumVertices");
    const uint32_t nf = comp.cmp<uint32_t>("mNumFaces");
    comp.cmp<uint32_t>("mNumBones");
    comp.cmp<uint32_t>("mMaterialIndex");

    // Bitmask describing which per-vertex streams were written to the dump.
    const uint32_t present = comp.cmp<uint32_t>("<vertex-components-present>");
    if(present & ASSBIN_MESH_HAS_POSITIONS) {
        comp.cmp_bounds<aiVector3D>("mVertices");
    }

    if(present & ASSBIN_MESH_HAS_NORMALS) {
        comp.cmp_bounds<aiVector3D>("mNormals");
    }

    if(present & ASSBIN_MESH_HAS_TANGENTS_AND_BITANGENTS) {
        comp.cmp_bounds<aiVector3D>("mTangents");
        comp.cmp_bounds<aiVector3D>("mBitangents");
    }

    for(unsigned int i = 0; present & ASSBIN_MESH_HAS_COLOR(i); ++i) {
        std::stringstream ss;
        comp.cmp_bounds<aiColor4D>((ss<<"mColors["<<i<<"]",ss.str()));
    }

    for(unsigned int i = 0; present & ASSBIN_MESH_HAS_TEXCOORD(i); ++i) {
        std::stringstream ss;
        comp.cmp<uint32_t>((ss<<"mNumUVComponents["<<i<<"]",ss.str()));

        // BUGFIX: the old code reused `ss` after ss.clear(). clear() only
        // resets the stream's error flags, NOT the buffered characters, so
        // the second label came out as "mNumUVComponents[i]mTextureCoords[i]".
        // A fresh stream yields the intended "mTextureCoords[i]" label.
        std::stringstream ss2;
        comp.cmp_bounds<aiVector3D>((ss2<<"mTextureCoords["<<i<<"]",ss2.str()));
    }

    // Faces are dumped as one hash per 512-face window.
    for(unsigned int i = 0; i< ((nf+511)/512); ++i) {
        std::stringstream ss;
        comp.cmp<uint32_t>((ss<<"mFaces["<<i*512<<"-"<<std::min(static_cast<
            uint32_t>((i+1)*512),nf)<<"]",ss.str()));
    }

    sliced_chunk_reader reader(comp);
    for(sliced_chunk_iterator it = reader.begin(); !it.is_end(); ++it) {
        if ((*it).first == ASSBIN_CHUNK_AIBONE) {
            CompareOnTheFlyBone(comp);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiCamera chunk. The call order mirrors the on-disk layout of
// the short dump and must not be changed.
void CompareOnTheFlyCamera(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiCamera");
    comp.cmp<aiString>("mName");
    comp.cmp<aiVector3D>("mPosition");
    comp.cmp<aiVector3D>("mLookAt");
    comp.cmp<aiVector3D>("mUp");
    comp.cmp<float>("mHorizontalFOV");
    comp.cmp<float>("mClipPlaneNear");
    comp.cmp<float>("mClipPlaneFar");
    comp.cmp<float>("mAspect");
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiLight chunk. Which fields are present depends on the light
// source type, mirroring what the dumper writes.
void CompareOnTheFlyLight(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiLight");
    comp.cmp<aiString>("mName");

    const aiLightSourceType type = static_cast<aiLightSourceType>(
        comp.cmp<uint32_t>("mType"));

    // Directional lights carry no attenuation values in the dump.
    if(type!=aiLightSource_DIRECTIONAL) {
        comp.cmp<float>("mAttenuationConstant");
        comp.cmp<float>("mAttenuationLinear");
        comp.cmp<float>("mAttenuationQuadratic");
    }

    comp.cmp<aiVector3D>("mColorDiffuse");
    comp.cmp<aiVector3D>("mColorSpecular");
    comp.cmp<aiVector3D>("mColorAmbient");

    // Cone angles are only written for spot lights.
    if(type==aiLightSource_SPOT) {
        comp.cmp<float>("mAngleInnerCone");
        comp.cmp<float>("mAngleOuterCone");
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiAnimation chunk: header fields, then the embedded
// aiNodeAnim sub-chunks.
void CompareOnTheFlyAnimation(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiAnimation");
    comp.cmp<aiString>("mName");
    comp.cmp<double>("mDuration");
    comp.cmp<double>("mTicksPerSecond");
    comp.cmp<uint32_t>("mNumChannels");

    sliced_chunk_reader reader(comp);
    for(sliced_chunk_iterator it = reader.begin(); !it.is_end(); ++it) {
        if ((*it).first == ASSBIN_CHUNK_AINODEANIM) {
            CompareOnTheFlyNodeAnim(comp);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiTexture chunk. Only the header fields are compared here;
// no texel payload is read by this function.
void CompareOnTheFlyTexture(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiTexture");
    const uint32_t w = comp.cmp<uint32_t>("mWidth");
    const uint32_t h = comp.cmp<uint32_t>("mHeight");
    (void)w; (void)h;  // compared above; the values themselves are unused
    comp.cmp<char>("achFormatHint[0]");
    comp.cmp<char>("achFormatHint[1]");
    comp.cmp<char>("achFormatHint[2]");
    comp.cmp<char>("achFormatHint[3]");
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiNode chunk, recursing into child aiNode sub-chunks.
void CompareOnTheFlyNode(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiNode");
    comp.cmp<aiString>("mName");
    comp.cmp<aiMatrix4x4>("mTransformation");
    comp.cmp<uint32_t>("mNumChildren");

    // Read the mesh count first, then compare that many mesh indices.
    const uint32_t num_meshes = comp.cmp<uint32_t>("mNumMeshes");
    comp.cmp<uint32_t>(num_meshes,"mMeshes");

    sliced_chunk_reader reader(comp);
    sliced_chunk_iterator it = reader.begin();
    while (!it.is_end()) {
        if ((*it).first == ASSBIN_CHUNK_AINODE) {
            CompareOnTheFlyNode(comp);
        }
        ++it;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Compare an aiScene chunk: scalar counters first, then dispatch on each
// embedded sub-chunk by its chunk id.
void CompareOnTheFlyScene(comparer_context& comp) {
    scoped_chunk chunk(comp,"aiScene");
    comp.cmp<uint32_t>("mFlags");
    comp.cmp<uint32_t>("mNumMeshes");
    comp.cmp<uint32_t>("mNumMaterials");
    comp.cmp<uint32_t>("mNumAnimations");
    comp.cmp<uint32_t>("mNumTextures");
    comp.cmp<uint32_t>("mNumLights");
    comp.cmp<uint32_t>("mNumCameras");

    sliced_chunk_reader reader(comp);
    for(sliced_chunk_iterator it = reader.begin(); !it.is_end(); ++it) {
        const uint32_t head = (*it).first;
        if (head == ASSBIN_CHUNK_AIMATERIAL) {
            CompareOnTheFlyMaterial(comp);
        }
        else if (head == ASSBIN_CHUNK_AITEXTURE) {
            CompareOnTheFlyTexture(comp);
        }
        else if (head == ASSBIN_CHUNK_AIMESH) {
            CompareOnTheFlyMesh(comp);
        }
        else if (head == ASSBIN_CHUNK_AIANIMATION) {
            CompareOnTheFlyAnimation(comp);
        }
        else if (head == ASSBIN_CHUNK_AICAMERA) {
            CompareOnTheFlyCamera(comp);
        }
        else if (head == ASSBIN_CHUNK_AILIGHT) {
            CompareOnTheFlyLight(comp);
        }
        else if (head == ASSBIN_CHUNK_AINODE) {
            CompareOnTheFlyNode(comp);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Entry point of the recursive comparison: locate the aiScene chunk and
// compare it; any other top-level chunks are ignored.
void CompareOnTheFly(comparer_context& comp)
{
    sliced_chunk_reader reader(comp);
    sliced_chunk_iterator it = reader.begin();
    while (!it.is_end()) {
        if ((*it).first == ASSBIN_CHUNK_AISCENE) {
            CompareOnTheFlyScene(comp);
            break;
        }
        ++it;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Skip the fixed-size assbin header on both streams. The header contents
// themselves are not validated here, only skipped over.
void CheckHeader(comparer_context& comp)
{
    fseek(comp.get_actual(),ASSBIN_HEADER_LENGTH,SEEK_CUR);
    fseek(comp.get_expect(),ASSBIN_HEADER_LENGTH,SEEK_CUR);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// CLI entry point for `assimp cmpdump <actual> <expected>`.
// Returns 0 on success (or help), 1 on bad usage, and negative codes on
// I/O or comparison failures.
int Assimp_CompareDump (const char* const* params, unsigned int num)
{
    // --help
    // BUGFIX: the old condition dereferenced params[0] for "--help"/"-?"
    // even when num == 0 (and only honoured "-h" when num was exactly 1);
    // guard on num first and treat all three flags alike.
    if (num && (!strcmp( params[0], "-h") || !strcmp( params[0], "--help") || !strcmp( params[0], "-?") )) {
        printf("%s",AICMD_MSG_CMPDUMP_HELP);
        return 0;
    }

    // assimp cmpdump actual expected
    if (num < 2) {
        std::cout << "assimp cmpdump: Invalid number of arguments. "
            "See \'assimp cmpdump --help\'\r\n" << std::endl;
        return 1;
    }

    // Same path given twice: trivially equal, skip the comparison.
    if(!strcmp(params[0],params[1])) {
        std::cout << "assimp cmpdump: same file, same content." << std::endl;
        return 0;
    }

    // Tiny RAII wrapper so both FILE handles are closed on every exit path,
    // including the early returns and exceptions below.
    class file_ptr
    {
    public:
        file_ptr(FILE *p)
            : m_file(p)
        {}
        ~file_ptr()
        {
            if (m_file)
            {
                fclose(m_file);
                m_file = NULL;
            }
        }

        operator FILE *() { return m_file; }

    private:
        FILE *m_file;
    };

    file_ptr actual(fopen(params[0],"rb"));
    if (!actual) {
        std::cout << "assimp cmpdump: Failure reading ACTUAL data from " <<
            params[0] << std::endl;
        return -5;
    }
    file_ptr expected(fopen(params[1],"rb"));
    if (!expected) {
        std::cout << "assimp cmpdump: Failure reading EXPECT data from " <<
            params[1] << std::endl;
        return -6;
    }

    comparer_context comp(actual,expected);
    try {
        CheckHeader(comp);
        CompareOnTheFly(comp);
    }
    catch(const compare_fails_exception& ex) {
        printf("%s",ex.what());
        return -1;
    }
    catch(...) {
        // we don't bother checking too rigourously here, so
        // we might end up here ...
        std::cout << "Unknown failure, are the input files well-defined?";
        return -3;
    }

    std::cout << "Success (totally " << std::dec << comp.get_num_chunks() <<
        " chunks)" << std::endl;

    return 0;
}
| EmilNorden/candle | lib/assimp-3.2/tools/assimp_cmd/CompareDump.cpp | C++ | apache-2.0 | 29,850 |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using Android.App;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("SwitchDemo.Droid")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("SwitchDemo.Droid")]
[assembly: AssemblyCopyright("Copyright ©  2014")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Types in this assembly are not exposed to COM.
[assembly: ComVisible(false)]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]

// Add some common permissions, these can be removed if not needed
// (they are merged into the Android manifest at build time).
[assembly: UsesPermission(Android.Manifest.Permission.Internet)]
[assembly: UsesPermission(Android.Manifest.Permission.WriteExternalStorage)]
| tchenowe/xamarin-forms-book-preview-2 | Chapter15/SwitchDemo/SwitchDemo/SwitchDemo.Droid/Properties/AssemblyInfo.cs | C# | apache-2.0 | 1,282 |
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.16.0
// source: envoy/api/v2/core/event_service_config.proto
package envoy_api_v2_core
import (
_ "github.com/cncf/xds/go/udpa/annotations"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Compile-time guards emitted by protoc-gen-go: the build fails if the
// generated code and the protobuf runtime drift too far apart in version.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// [#not-implemented-hide:]
// Configuration of the event reporting service endpoint.
//
// NOTE(review): this file is generated by protoc-gen-go from
// envoy/api/v2/core/event_service_config.proto — regenerate rather than
// hand-editing.
type EventServiceConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Types that are assignable to ConfigSourceSpecifier:
	//	*EventServiceConfig_GrpcService
	ConfigSourceSpecifier isEventServiceConfig_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"`
}

func (x *EventServiceConfig) Reset() {
	*x = EventServiceConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_envoy_api_v2_core_event_service_config_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *EventServiceConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*EventServiceConfig) ProtoMessage() {}

func (x *EventServiceConfig) ProtoReflect() protoreflect.Message {
	mi := &file_envoy_api_v2_core_event_service_config_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EventServiceConfig.ProtoReflect.Descriptor instead.
func (*EventServiceConfig) Descriptor() ([]byte, []int) {
	return file_envoy_api_v2_core_event_service_config_proto_rawDescGZIP(), []int{0}
}

func (m *EventServiceConfig) GetConfigSourceSpecifier() isEventServiceConfig_ConfigSourceSpecifier {
	if m != nil {
		return m.ConfigSourceSpecifier
	}
	return nil
}

func (x *EventServiceConfig) GetGrpcService() *GrpcService {
	if x, ok := x.GetConfigSourceSpecifier().(*EventServiceConfig_GrpcService); ok {
		return x.GrpcService
	}
	return nil
}

type isEventServiceConfig_ConfigSourceSpecifier interface {
	isEventServiceConfig_ConfigSourceSpecifier()
}

type EventServiceConfig_GrpcService struct {
	// Specifies the gRPC service that hosts the event reporting service.
	GrpcService *GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3,oneof"`
}

func (*EventServiceConfig_GrpcService) isEventServiceConfig_ConfigSourceSpecifier() {}
var File_envoy_api_v2_core_event_service_config_proto protoreflect.FileDescriptor

// Serialized FileDescriptorProto for event_service_config.proto, emitted by
// protoc-gen-go. These bytes are load-bearing (parsed at init time); do not
// modify them by hand.
var file_envoy_api_v2_core_event_service_config_proto_rawDesc = []byte{
	0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63,
	0x6f, 0x72, 0x65, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
	0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11,
	0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
	0x65, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f,
	0x63, 0x6f, 0x72, 0x65, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74,
	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
	0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
	0x79, 0x0a, 0x12, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65,
	0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e,
	0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
	0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x67,
	0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f,
	0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63,
	0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x60, 0x0a, 0x1f, 0x69, 0x6f,
	0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
	0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x17, 0x45,
	0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
	0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14,
	0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
	0x65, 0x2e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x33,
}
var (
	file_envoy_api_v2_core_event_service_config_proto_rawDescOnce sync.Once
	file_envoy_api_v2_core_event_service_config_proto_rawDescData = file_envoy_api_v2_core_event_service_config_proto_rawDesc
)

// Lazily gzip-compresses the raw descriptor exactly once and returns the
// cached compressed form (used by the deprecated Descriptor() accessor).
func file_envoy_api_v2_core_event_service_config_proto_rawDescGZIP() []byte {
	file_envoy_api_v2_core_event_service_config_proto_rawDescOnce.Do(func() {
		file_envoy_api_v2_core_event_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_event_service_config_proto_rawDescData)
	})
	return file_envoy_api_v2_core_event_service_config_proto_rawDescData
}

var file_envoy_api_v2_core_event_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_envoy_api_v2_core_event_service_config_proto_goTypes = []interface{}{
	(*EventServiceConfig)(nil), // 0: envoy.api.v2.core.EventServiceConfig
	(*GrpcService)(nil),        // 1: envoy.api.v2.core.GrpcService
}
var file_envoy_api_v2_core_event_service_config_proto_depIdxs = []int32{
	1, // 0: envoy.api.v2.core.EventServiceConfig.grpc_service:type_name -> envoy.api.v2.core.GrpcService
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}
func init() { file_envoy_api_v2_core_event_service_config_proto_init() }

// Registers the file descriptor and message types with the protobuf
// runtime. Idempotent: returns immediately once the file is initialized.
func file_envoy_api_v2_core_event_service_config_proto_init() {
	if File_envoy_api_v2_core_event_service_config_proto != nil {
		return
	}
	// Dependency: the GrpcService message must be registered first.
	file_envoy_api_v2_core_grpc_service_proto_init()
	if !protoimpl.UnsafeEnabled {
		file_envoy_api_v2_core_event_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*EventServiceConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	file_envoy_api_v2_core_event_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{
		(*EventServiceConfig_GrpcService)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_envoy_api_v2_core_event_service_config_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_envoy_api_v2_core_event_service_config_proto_goTypes,
		DependencyIndexes: file_envoy_api_v2_core_event_service_config_proto_depIdxs,
		MessageInfos:      file_envoy_api_v2_core_event_service_config_proto_msgTypes,
	}.Build()
	File_envoy_api_v2_core_event_service_config_proto = out.File
	// Allow the raw tables to be garbage-collected after registration.
	file_envoy_api_v2_core_event_service_config_proto_rawDesc = nil
	file_envoy_api_v2_core_event_service_config_proto_goTypes = nil
	file_envoy_api_v2_core_event_service_config_proto_depIdxs = nil
}
| knative-sandbox/eventing-awssqs | vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.go | GO | apache-2.0 | 8,657 |
/* Generated by camel build tools - do NOT edit this file! */
package org.apache.camel.component.pgevent;
import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.spi.ExtendedPropertyConfigurerGetter;
import org.apache.camel.spi.PropertyConfigurerGetter;
import org.apache.camel.spi.ConfigurerStrategy;
import org.apache.camel.spi.GeneratedPropertyConfigurer;
import org.apache.camel.util.CaseInsensitiveMap;
import org.apache.camel.support.component.PropertyConfigurerSupport;
/**
 * Generated by camel build tools - do NOT edit this file!
 *
 * Property configurer for the PgEvent endpoint: maps option names (both the
 * camelCase form and an all-lowercase form for case-insensitive matching) to
 * typed setter/getter calls, avoiding reflection at runtime.
 */
@SuppressWarnings("unchecked")
public class PgEventEndpointConfigurer extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {

    /** Applies one endpoint option; returns false when the name is unknown. */
    @Override
    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
        PgEventEndpoint target = (PgEventEndpoint) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
        case "datasource": target.setDatasource(property(camelContext, javax.sql.DataSource.class, value)); return true;
        case "exceptionhandler":
        case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
        case "exchangepattern":
        case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
        case "lazystartproducer":
        case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
        case "pass": target.setPass(property(camelContext, java.lang.String.class, value)); return true;
        case "user": target.setUser(property(camelContext, java.lang.String.class, value)); return true;
        default: return false;
        }
    }

    /** Reports the declared Java type of an option, or null when unknown. */
    @Override
    public Class<?> getOptionType(String name, boolean ignoreCase) {
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": return boolean.class;
        case "datasource": return javax.sql.DataSource.class;
        case "exceptionhandler":
        case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
        case "exchangepattern":
        case "exchangePattern": return org.apache.camel.ExchangePattern.class;
        case "lazystartproducer":
        case "lazyStartProducer": return boolean.class;
        case "pass": return java.lang.String.class;
        case "user": return java.lang.String.class;
        default: return null;
        }
    }

    /** Reads the current value of an option from the endpoint, or null. */
    @Override
    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
        PgEventEndpoint target = (PgEventEndpoint) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": return target.isBridgeErrorHandler();
        case "datasource": return target.getDatasource();
        case "exceptionhandler":
        case "exceptionHandler": return target.getExceptionHandler();
        case "exchangepattern":
        case "exchangePattern": return target.getExchangePattern();
        case "lazystartproducer":
        case "lazyStartProducer": return target.isLazyStartProducer();
        case "pass": return target.getPass();
        case "user": return target.getUser();
        default: return null;
        }
    }
}
| nikhilvibhav/camel | components/camel-pgevent/src/generated/java/org/apache/camel/component/pgevent/PgEventEndpointConfigurer.java | Java | apache-2.0 | 3,632 |
module Puppet::Parser::Functions
  # Puppet 3.x-style parser function: ip_in_cidr(ip, cidr) -> true/false.
  newfunction(:ip_in_cidr, :type => :rvalue, :doc => <<-'ENDHEREDOC'
    Checks if an ip address is contained within a CIDR address of the form 192.168.0.1/24
    ENDHEREDOC
  ) do |args|
    require 'ipaddr'

    # Exactly two arguments: the address to test and the CIDR network.
    if args.length != 2
      raise Puppet::ParseError, "ip_in_cidr(): wrong number of arguments (#{args.length}; must be 2: ip and cidr)"
    end

    ip_arg, cidr_arg = args

    unless ip_arg.respond_to?('to_s')
      raise Puppet::ParseError, "#{ip_arg.inspect} is not a string. It looks to be a #{ip_arg.class}"
    end
    ip_string = ip_arg.to_s

    unless cidr_arg.respond_to?('to_s')
      raise Puppet::ParseError, "#{cidr_arg.inspect} is not a string. It looks to be a #{cidr_arg.class}"
    end
    cidr_string = cidr_arg.to_s

    begin
      # IPAddr#include? treats the receiver as a network and tests membership.
      IPAddr.new(cidr_string).include?(IPAddr.new(ip_string))
    rescue ArgumentError => e
      # Surface malformed addresses as parse errors, preserving the cause text.
      raise Puppet::ParseError, e
    end
  end
end
| apache/infrastructure-puppet | modules/customfact/lib/puppet/parser/functions/ip_in_cidr.rb | Ruby | apache-2.0 | 913 |
package store
import (
"errors"
"fmt"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/go-events"
"github.com/docker/swarmkit/api"
pb "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/state"
"github.com/docker/swarmkit/manager/state/watch"
"github.com/docker/swarmkit/protobuf/ptypes"
memdb "github.com/hashicorp/go-memdb"
"golang.org/x/net/context"
)
const (
	// Secondary index names shared by the object tables registered in this
	// package.
	indexID           = "id"
	indexName         = "name"
	indexServiceID    = "serviceid"
	indexNodeID       = "nodeid"
	indexSlot         = "slot"
	indexCN           = "cn"
	indexDesiredState = "desiredstate"
	indexRole         = "role"
	indexMembership   = "membership"

	// prefix is appended to an index name to request prefix matching.
	prefix = "_prefix"

	// MaxChangesPerTransaction is the number of changes after which a new
	// transaction should be started within Batch.
	MaxChangesPerTransaction = 200

	// MaxTransactionBytes is the maximum serialized transaction size.
	MaxTransactionBytes = 1.5 * 1024 * 1024
)
var (
	// ErrExist is returned by create operations if the provided ID is already
	// taken.
	ErrExist = errors.New("object already exists")

	// ErrNotExist is returned by altering operations (update, delete) if the
	// provided ID is not found.
	ErrNotExist = errors.New("object does not exist")

	// ErrNameConflict is returned by create/update if the object name is
	// already in use by another object.
	ErrNameConflict = errors.New("name conflicts with an existing object")

	// ErrInvalidFindBy is returned if an unrecognized type is passed to Find.
	ErrInvalidFindBy = errors.New("invalid find argument type")

	// ErrSequenceConflict is returned when trying to update an object
	// whose sequence information does not match the object in the store's.
	ErrSequenceConflict = errors.New("update out of sequence")

	// objectStorers holds one ObjectStoreConfig per registered object type.
	objectStorers []ObjectStoreConfig

	// schema is the shared memdb schema, populated via register.
	schema = &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{},
	}

	// errUnknownStoreAction is a sentinel returned by per-type handlers when
	// an action or event is not theirs to handle.
	errUnknownStoreAction = errors.New("unknown store action")
)
// register wires an object type into the store: it adds the type's table to
// the shared memdb schema and records its ObjectStoreConfig so save/restore
// and store-action dispatch can reach it.
func register(os ObjectStoreConfig) {
	schema.Tables[os.Name] = os.Table
	objectStorers = append(objectStorers, os)
}
// MemoryStore is a concurrency-safe, in-memory implementation of the Store
// interface.
type MemoryStore struct {
	// updateLock must be held during an update transaction.
	updateLock sync.Mutex

	memDB *memdb.MemDB // underlying indexed in-memory database
	queue *watch.Queue // publish/subscribe queue for change events

	// proposer, when non-nil, replicates changes to other cluster members
	// before they are committed locally.
	proposer state.Proposer
}
// NewMemoryStore returns an in-memory store. The argument is an optional
// Proposer which will be used to propagate changes to other members in a
// cluster.
func NewMemoryStore(proposer state.Proposer) *MemoryStore {
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		// The schema is assembled from compile-time register calls, so a
		// failure here is a programming error, not a runtime condition.
		panic(err)
	}
	return &MemoryStore{
		memDB:    db,
		queue:    watch.NewQueue(),
		proposer: proposer,
	}
}
// Close closes the memory store and frees its associated resources.
// Closing shuts down the watch queue, terminating all watchers.
func (s *MemoryStore) Close() error {
	return s.queue.Close()
}
// fromArgs converts the variadic index arguments used by memdb into a
// null-terminated byte slice suitable for an exact-match index lookup.
// It requires exactly one string argument.
func fromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		// errors.New instead of fmt.Errorf: the message has no format verbs
		// (flagged by go vet's printf check).
		return nil, errors.New("must provide only a single argument")
	}
	arg, ok := args[0].(string)
	if !ok {
		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
	}
	// Add the null character as a terminator
	return []byte(arg + "\x00"), nil
}
// prefixFromArgs builds a prefix-match argument for memdb by stripping the
// null terminator that fromArgs appends.
func prefixFromArgs(args ...interface{}) ([]byte, error) {
	terminated, err := fromArgs(args...)
	if err != nil {
		return nil, err
	}
	// Strip the null terminator, the rest is a prefix
	if n := len(terminated); n > 0 {
		return terminated[:n-1], nil
	}
	return terminated, nil
}
// ReadTx is a read transaction. Note that transaction does not imply
// any internal batching. It only means that the transaction presents a
// consistent view of the data that cannot be affected by other
// transactions.
type ReadTx interface {
	// lookup returns the stored object itself (no copy), or nil.
	lookup(table, index, id string) Object
	// get returns a copy of the object with the given ID, or nil.
	get(table, id string) Object
	// find invokes appendResult for every object matching by.
	find(table string, by By, checkType func(By) error, appendResult func(Object)) error
}
// readTx implements ReadTx on top of a read-only memdb transaction.
type readTx struct {
	memDBTx *memdb.Txn
}
// View executes a read transaction.
func (s *MemoryStore) View(cb func(ReadTx)) {
	txn := s.memDB.Txn(false)
	cb(readTx{memDBTx: txn})
	// Read-only memdb transactions are still committed to release resources.
	txn.Commit()
}
// Tx is a read/write transaction. Note that transaction does not imply
// any internal batching. The purpose of this transaction is to give the
// user a guarantee that its changes won't be visible to other transactions
// until the transaction is over.
type Tx interface {
	ReadTx
	// create inserts a new object; returns ErrExist on a duplicate ID.
	create(table string, o Object) error
	// update replaces an existing object; returns ErrNotExist when missing.
	update(table string, o Object) error
	// delete removes an object by ID; returns ErrNotExist when missing.
	delete(table, id string) error
}
// tx implements Tx on a write-enabled memdb transaction, recording every
// mutation in changelist for later replication and event publication.
type tx struct {
	readTx
	// curVersion is stamped onto changed objects; nil when changes come from
	// raft or a snapshot (see touchMeta).
	curVersion *api.Version
	changelist []state.Event
}
// ApplyStoreActions updates a store based on StoreAction messages. Object
// versions in the actions are applied as-is (the transaction carries no
// curVersion), which matches applying changes that arrive from raft.
func (s *MemoryStore) ApplyStoreActions(actions []*api.StoreAction) error {
	s.updateLock.Lock()
	// defer instead of unlocking on every exit path: the original repeated
	// the Unlock before each return, which is easy to miss on future edits.
	defer s.updateLock.Unlock()

	memDBTx := s.memDB.Txn(true)
	tx := tx{
		readTx: readTx{
			memDBTx: memDBTx,
		},
	}

	for _, sa := range actions {
		if err := applyStoreAction(&tx, sa); err != nil {
			memDBTx.Abort()
			return err
		}
	}

	memDBTx.Commit()

	// Publish the resulting events, then a commit marker, while still
	// holding the update lock so watchers observe changes in order.
	for _, c := range tx.changelist {
		s.queue.Publish(c)
	}
	if len(tx.changelist) != 0 {
		s.queue.Publish(state.EventCommit{})
	}
	return nil
}
// applyStoreAction dispatches a single StoreAction to the registered object
// store that understands it.
func applyStoreAction(tx Tx, sa *api.StoreAction) error {
	for _, os := range objectStorers {
		switch err := os.ApplyStoreAction(tx, sa); err {
		case errUnknownStoreAction:
			// Not this store's action type; try the next one.
		default:
			// Either success (nil) or a real failure — stop here.
			return err
		}
	}
	return errors.New("unrecognized action type")
}
// update runs cb inside a write transaction. When a proposer is supplied,
// the resulting changes are proposed to the cluster and committed locally
// only once the proposal is accepted; otherwise they commit directly.
func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error {
	s.updateLock.Lock()
	memDBTx := s.memDB.Txn(true)
	var curVersion *api.Version
	if proposer != nil {
		curVersion = proposer.GetVersion()
	}
	var tx tx
	tx.init(memDBTx, curVersion)
	err := cb(&tx)
	if err == nil {
		if proposer == nil {
			// No consensus layer: commit immediately.
			memDBTx.Commit()
		} else {
			var sa []*api.StoreAction
			sa, err = tx.changelistStoreActions()
			if err == nil {
				if len(sa) != 0 {
					// The commit callback runs only on successful proposal.
					err = proposer.ProposeValue(context.Background(), sa, func() {
						memDBTx.Commit()
					})
				} else {
					// Nothing changed; commit trivially without proposing.
					memDBTx.Commit()
				}
			}
		}
	}
	if err == nil {
		// Publish events after commit, followed by a commit marker.
		for _, c := range tx.changelist {
			s.queue.Publish(c)
		}
		if len(tx.changelist) != 0 {
			s.queue.Publish(state.EventCommit{})
		}
	} else {
		memDBTx.Abort()
	}
	s.updateLock.Unlock()
	return err
}
// updateLocal applies a change to the local store only, bypassing the
// proposer; used by Restore when loading snapshot data.
func (s *MemoryStore) updateLocal(cb func(Tx) error) error {
	return s.update(nil, cb)
}
// Update executes a read/write transaction. When the store was created with
// a proposer, changes are replicated to the cluster before committing.
func (s *MemoryStore) Update(cb func(Tx) error) error {
	return s.update(s.proposer, cb)
}
// Batch provides a mechanism to batch updates to a store.
type Batch struct {
	tx    tx           // currently open write transaction
	store *MemoryStore // store the batch applies to
	// applied counts the times Update has run successfully
	applied int
	// committed is the number of times Update had run successfully as of
	// the time pending changes were committed.
	committed int
	// transactionSizeEstimate is the running count of the size of the
	// current transaction.
	transactionSizeEstimate int
	// changelistLen is the last known length of the transaction's
	// changelist.
	changelistLen int
	// err records a commit failure; once set, the batch refuses more work.
	err error
}
// Update adds a single change to a batch. Each call to Update is atomic, but
// different calls to Update may be spread across multiple transactions to
// circumvent transaction size limits.
func (batch *Batch) Update(cb func(Tx) error) error {
	// A previous commit failure poisons the batch.
	if batch.err != nil {
		return batch.err
	}
	if err := cb(&batch.tx); err != nil {
		return err
	}
	batch.applied++
	// Fold the serialized size of any new changelist entries into the
	// running estimate so we know when the transaction nears the limit.
	for batch.changelistLen < len(batch.tx.changelist) {
		sa, err := newStoreAction(batch.tx.changelist[batch.changelistLen])
		if err != nil {
			return err
		}
		batch.transactionSizeEstimate += sa.Size()
		batch.changelistLen++
	}
	// Split the batch when it has too many changes or is at roughly 3/4 of
	// the maximum serialized transaction size.
	if batch.changelistLen >= MaxChangesPerTransaction || batch.transactionSizeEstimate >= (MaxTransactionBytes*3)/4 {
		if err := batch.commit(); err != nil {
			return err
		}
		// Yield the update lock
		batch.store.updateLock.Unlock()
		runtime.Gosched()
		batch.store.updateLock.Lock()
		batch.newTx()
	}
	return nil
}
// newTx opens a fresh write transaction for the batch and resets the
// per-transaction size counters.
func (batch *Batch) newTx() {
	var curVersion *api.Version
	if p := batch.store.proposer; p != nil {
		curVersion = p.GetVersion()
	}
	batch.tx.init(batch.store.memDB.Txn(true), curVersion)
	batch.transactionSizeEstimate = 0
	batch.changelistLen = 0
}
// commit commits the batch's current transaction — proposing the changes to
// the cluster first when a proposer is configured — and publishes the
// resulting events. A failure is recorded in batch.err.
func (batch *Batch) commit() error {
	if batch.store.proposer != nil {
		var sa []*api.StoreAction
		sa, batch.err = batch.tx.changelistStoreActions()
		if batch.err == nil {
			if len(sa) != 0 {
				// Commit only after the proposal is accepted.
				batch.err = batch.store.proposer.ProposeValue(context.Background(), sa, func() {
					batch.tx.memDBTx.Commit()
				})
			} else {
				batch.tx.memDBTx.Commit()
			}
		}
	} else {
		batch.tx.memDBTx.Commit()
	}
	if batch.err != nil {
		batch.tx.memDBTx.Abort()
		return batch.err
	}
	// Everything applied so far is now durable.
	batch.committed = batch.applied
	for _, c := range batch.tx.changelist {
		batch.store.queue.Publish(c)
	}
	if len(batch.tx.changelist) != 0 {
		batch.store.queue.Publish(state.EventCommit{})
	}
	return nil
}
// Batch performs one or more transactions that allow reads and writes
// It invokes a callback that is passed a Batch object. The callback may
// call batch.Update for each change it wants to make as part of the
// batch. The changes in the batch may be split over multiple
// transactions if necessary to keep transactions below the size limit.
// Batch holds a lock over the state, but will yield this lock every
// time it creates a new transaction to allow other writers to proceed.
// Thus, unrelated changes to the state may occur between calls to
// batch.Update.
//
// This method allows the caller to iterate over a data set and apply
// changes in sequence without holding the store write lock for an
// excessive time, or producing a transaction that exceeds the maximum
// size.
//
// Batch returns the number of calls to batch.Update whose changes were
// successfully committed to the store.
func (s *MemoryStore) Batch(cb func(*Batch) error) (int, error) {
	s.updateLock.Lock()
	batch := Batch{
		store: s,
	}
	batch.newTx()
	if err := cb(&batch); err != nil {
		// Abort only the open transaction; changes committed by earlier
		// splits remain applied, hence returning batch.committed.
		batch.tx.memDBTx.Abort()
		s.updateLock.Unlock()
		return batch.committed, err
	}
	err := batch.commit()
	s.updateLock.Unlock()
	return batch.committed, err
}
// init points the transaction at a memdb transaction and clears its
// changelist. curVersion may be nil when applying external changes.
func (tx *tx) init(memDBTx *memdb.Txn, curVersion *api.Version) {
	tx.changelist = nil
	tx.memDBTx = memDBTx
	tx.curVersion = curVersion
}
// newStoreAction serializes an event into a StoreAction by asking each
// registered object store in turn.
func newStoreAction(c state.Event) (*api.StoreAction, error) {
	for _, os := range objectStorers {
		sa, err := os.NewStoreAction(c)
		switch err {
		case nil:
			return &sa, nil
		case errUnknownStoreAction:
			// Not this store's event type; keep looking.
		default:
			return nil, err
		}
	}
	return nil, errors.New("unrecognized event type")
}
// changelistStoreActions serializes the transaction's changelist into
// StoreAction messages for replication. Returns a nil slice when the
// changelist is empty (callers test len(sa) != 0).
func (tx tx) changelistStoreActions() ([]*api.StoreAction, error) {
	var actions []*api.StoreAction
	for _, c := range tx.changelist {
		sa, err := newStoreAction(c)
		if err != nil {
			return nil, err
		}
		actions = append(actions, sa)
	}
	return actions, nil
}
// lookup is an internal typed wrapper around memdb: it returns the stored
// object (no copy) matching id on the given index, or nil on error/miss.
func (tx readTx) lookup(table, index, id string) Object {
	match, err := tx.memDBTx.First(table, index, id)
	if err != nil || match == nil {
		return nil
	}
	return match.(Object)
}
// create adds a new object to the store.
// Returns ErrExist if the ID is already taken.
func (tx *tx) create(table string, o Object) error {
	if tx.lookup(table, indexID, o.ID()) != nil {
		return ErrExist
	}
	// Insert a copy so the stored object can't be mutated through the
	// caller's reference. (Renamed from "copy", which shadowed the builtin.)
	clone := o.Copy()
	meta := clone.Meta()
	if err := touchMeta(&meta, tx.curVersion); err != nil {
		return err
	}
	clone.SetMeta(meta)
	err := tx.memDBTx.Insert(table, clone)
	if err == nil {
		tx.changelist = append(tx.changelist, clone.EventCreate())
		// Reflect the stamped meta back onto the caller's object.
		o.SetMeta(meta)
	}
	return err
}
// update updates an existing object in the store.
// Returns ErrNotExist if the object doesn't exist, and ErrSequenceConflict
// when version checking applies and the stored version differs.
func (tx *tx) update(table string, o Object) error {
	oldN := tx.lookup(table, indexID, o.ID())
	if oldN == nil {
		return ErrNotExist
	}
	if tx.curVersion != nil {
		// lookup already returns Object, so the former oldN.(Object)
		// assertion was redundant and has been dropped.
		if oldN.Meta().Version != o.Meta().Version {
			return ErrSequenceConflict
		}
	}
	// Insert a copy so the stored object can't be mutated through the
	// caller's reference. (Renamed from "copy", which shadowed the builtin.)
	clone := o.Copy()
	meta := clone.Meta()
	if err := touchMeta(&meta, tx.curVersion); err != nil {
		return err
	}
	clone.SetMeta(meta)
	err := tx.memDBTx.Insert(table, clone)
	if err == nil {
		tx.changelist = append(tx.changelist, clone.EventUpdate())
		// Reflect the stamped meta back onto the caller's object.
		o.SetMeta(meta)
	}
	return err
}
// delete removes an object from the store.
// Returns ErrNotExist if the object doesn't exist.
func (tx *tx) delete(table, id string) error {
	existing := tx.lookup(table, indexID, id)
	if existing == nil {
		return ErrNotExist
	}
	if err := tx.memDBTx.Delete(table, existing); err != nil {
		return err
	}
	tx.changelist = append(tx.changelist, existing.EventDelete())
	return nil
}
// get looks up an object by ID and returns a defensive copy.
// Returns nil if the object doesn't exist.
func (tx readTx) get(table, id string) Object {
	if o := tx.lookup(table, indexID, id); o != nil {
		return o.Copy()
	}
	return nil
}
// findIterators returns a slice of iterators. The union of items from these
// iterators provides the result of the query.
func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([]memdb.ResultIterator, error) {
	// Validate the selector type against this table up front, except for the
	// generic selectors that apply to every table.
	switch by.(type) {
	case byAll, orCombinator: // generic types
	default: // all other types
		if err := checkType(by); err != nil {
			return nil, err
		}
	}
	switch v := by.(type) {
	case byAll:
		it, err := tx.memDBTx.Get(table, indexID)
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case orCombinator:
		// Union: recursively collect iterators for each sub-selector; the
		// caller (find) de-duplicates results by object ID.
		var iters []memdb.ResultIterator
		for _, subBy := range v.bys {
			it, err := tx.findIterators(table, subBy, checkType)
			if err != nil {
				return nil, err
			}
			iters = append(iters, it...)
		}
		return iters, nil
	case byName:
		// Name index stores lowercased values, so normalize the query.
		it, err := tx.memDBTx.Get(table, indexName, strings.ToLower(string(v)))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byCN:
		it, err := tx.memDBTx.Get(table, indexCN, string(v))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byIDPrefix:
		it, err := tx.memDBTx.Get(table, indexID+prefix, string(v))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byNamePrefix:
		it, err := tx.memDBTx.Get(table, indexName+prefix, strings.ToLower(string(v)))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byNode:
		it, err := tx.memDBTx.Get(table, indexNodeID, string(v))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byService:
		it, err := tx.memDBTx.Get(table, indexServiceID, string(v))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case bySlot:
		// Composite key: serviceID and slot joined by a null separator,
		// mirroring the index encoding.
		it, err := tx.memDBTx.Get(table, indexSlot, v.serviceID+"\x00"+strconv.FormatUint(uint64(v.slot), 10))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byDesiredState:
		it, err := tx.memDBTx.Get(table, indexDesiredState, strconv.FormatInt(int64(v), 10))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byRole:
		it, err := tx.memDBTx.Get(table, indexRole, strconv.FormatInt(int64(v), 10))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	case byMembership:
		it, err := tx.memDBTx.Get(table, indexMembership, strconv.FormatInt(int64(v), 10))
		if err != nil {
			return nil, err
		}
		return []memdb.ResultIterator{it}, nil
	default:
		return nil, ErrInvalidFindBy
	}
}
// find selects a set of objects and calls a callback for each matching
// object.
func (tx readTx) find(table string, by By, checkType func(By) error, appendResult func(Object)) error {
	// fromResultIterators drains each iterator, de-duplicating by object ID:
	// an orCombinator query may yield the same object from several iterators.
	fromResultIterators := func(its ...memdb.ResultIterator) {
		ids := make(map[string]struct{})
		for _, it := range its {
			for {
				obj := it.Next()
				if obj == nil {
					break
				}
				o := obj.(Object)
				id := o.ID()
				if _, exists := ids[id]; !exists {
					// Hand out a copy so callers can't mutate stored state.
					appendResult(o.Copy())
					ids[id] = struct{}{}
				}
			}
		}
	}
	iters, err := tx.findIterators(table, by, checkType)
	if err != nil {
		return err
	}
	fromResultIterators(iters...)
	return nil
}
// Save serializes the data in the store by letting each registered object
// store write its objects into a single snapshot message.
func (s *MemoryStore) Save(tx ReadTx) (*pb.StoreSnapshot, error) {
	snapshot := pb.StoreSnapshot{}
	for _, objectStore := range objectStorers {
		if err := objectStore.Save(tx, &snapshot); err != nil {
			return nil, err
		}
	}
	return &snapshot, nil
}
// Restore sets the contents of the store to the serialized data in the
// argument. The change is applied locally only (no proposer): restored data
// keeps its existing versions/timestamps, as touchMeta skips nil versions.
func (s *MemoryStore) Restore(snapshot *pb.StoreSnapshot) error {
	restoreAll := func(tx Tx) error {
		for _, objectStore := range objectStorers {
			if err := objectStore.Restore(tx, snapshot); err != nil {
				return err
			}
		}
		return nil
	}
	return s.updateLocal(restoreAll)
}
// WatchQueue returns the publish/subscribe queue on which store change
// events (and EventCommit markers) are published.
func (s *MemoryStore) WatchQueue() *watch.Queue {
	return s.queue
}
// ViewAndWatch calls a callback which can observe the state of this
// MemoryStore. It also returns a channel that will return further events from
// this point so the snapshot can be kept up to date. The watch channel must be
// released with watch.StopWatch when it is no longer needed. The channel is
// guaranteed to get all events after the moment of the snapshot, and only
// those events.
func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...state.Event) (watch chan events.Event, cancel func(), err error) {
	// Using Update to lock the store and guarantee consistency between
	// the watcher and the state seen by the callback. snapshotReadTx
	// exposes this Tx as a ReadTx so the callback can't modify it.
	err = store.Update(func(tx Tx) error {
		if err := cb(tx); err != nil {
			return err
		}
		watch, cancel = state.Watch(store.WatchQueue(), specifiers...)
		return nil
	})
	if watch != nil && err != nil {
		// The update failed after the watch was registered; tear the watch
		// down so the caller never receives a live channel with an error.
		cancel()
		cancel = nil
		watch = nil
	}
	return
}
// touchMeta updates an object's timestamps when necessary and bumps the version
// if provided.
func touchMeta(meta *api.Meta, version *api.Version) error {
	// Skip meta update if version is not defined as it means we're applying
	// from raft or restoring from a snapshot.
	if version == nil {
		return nil
	}
	now, err := ptypes.TimestampProto(time.Now())
	if err != nil {
		return err
	}
	meta.Version = *version
	// Update CreatedAt if not defined
	if meta.CreatedAt == nil {
		meta.CreatedAt = now
	}
	meta.UpdatedAt = now
	return nil
}
| shakamunyi/docker | vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go | GO | apache-2.0 | 18,356 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.testing;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Collection;
import java.util.Locale;
import javax.servlet.ServletOutputStream;
import javax.servlet.WriteListener;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
/**
 * Minimal in-memory {@link HttpServletResponse} for unit tests: captures the
 * status code, headers (via {@link HeaderContainer}) and body text, and
 * throws {@link UnsupportedOperationException} for everything the tests do
 * not exercise.
 */
public class FakeHttpServletResponse extends HeaderContainer
    implements HttpServletResponse {

  // Body sink: printWriter -> servletOutputStream -> stringWriter.
  // NOTE(review): PrintWriter buffers internally, so text written through
  // getWriter() may not appear in getBody() until flushed — confirm callers
  // flush or write via getOutputStream().
  private final StringWriter stringWriter = new StringWriter();
  private final ServletOutputStream servletOutputStream =
      new StringServletOutputStream(stringWriter);
  private final PrintWriter printWriter = new PrintWriter(servletOutputStream);

  private int status = HttpServletResponse.SC_OK;

  @Override
  public int getStatus() {
    return status;
  }

  /** Returns everything written to the response body so far. */
  public String getBody() {
    return stringWriter.toString();
  }

  @Override
  public Collection<String> getHeaders(String name) {
    return getHeaders().get(name);
  }

  @Override
  public Collection<String> getHeaderNames() {
    return getHeaders().keySet();
  }

  /////////////////////////////////////////////////////////////////////////////
  //
  // HttpServletResponse methods.
  //
  /////////////////////////////////////////////////////////////////////////////

  @Override
  public void addCookie(Cookie cookie) {
    throw new UnsupportedOperationException();
  }

  @Override
  public String encodeURL(String s) {
    throw new UnsupportedOperationException();
  }

  @Override
  public String encodeRedirectURL(String s) {
    throw new UnsupportedOperationException();
  }

  @Override
  public String encodeUrl(String s) {
    throw new UnsupportedOperationException();
  }

  @Override
  public String encodeRedirectUrl(String s) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void sendError(int i, String s) {
    throw new UnsupportedOperationException();
  }

  // Unlike a real container, only records the status; no error body is set.
  @Override
  public void sendError(int i) {
    setStatus(i);
  }

  // Uses 303 See Other (not 302) plus a Location header.
  @Override
  public void sendRedirect(String s) {
    setStatus(SC_SEE_OTHER);
    setHeader("Location", s);
  }

  @Override
  public void setStatus(int i) {
    this.status = i;
  }

  @Override
  public void setStatus(int i, String s) {
    throw new UnsupportedOperationException();
  }

  @Override
  public String getCharacterEncoding() {
    throw new UnsupportedOperationException();
  }

  @Override
  public String getContentType() {
    return getHeader("Content-Type");
  }

  @Override
  public ServletOutputStream getOutputStream() {
    return servletOutputStream;
  }

  @Override
  public PrintWriter getWriter() {
    return printWriter;
  }

  // NOTE(review): appends to the existing content-type; if no content type
  // was set, getHeader presumably returns null and this yields
  // "null; charset=..." — confirm HeaderContainer semantics.
  @Override
  public void setCharacterEncoding(String s) {
    String type = getHeader("content-type");
    setHeader("content-type", type + "; charset=" + s);
  }

  @Override
  public void setContentLength(int i) {
    setIntHeader("content-length", i);
  }

  // NOTE(review): narrows long to int; fine for test-sized payloads.
  @Override
  public void setContentLengthLong(long len) {
    setIntHeader("content-length", (int) len);
  }

  @Override
  public void setContentType(String type) {
    setHeader("content-type", type);
  }

  @Override
  public void setBufferSize(int i) {
    throw new UnsupportedOperationException();
  }

  @Override
  public int getBufferSize() {
    throw new UnsupportedOperationException();
  }

  @Override
  public void flushBuffer() {
    // no-op
  }

  @Override
  public void resetBuffer() {
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean isCommitted() {
    throw new UnsupportedOperationException();
  }

  // Only clears headers; status and any body written so far are kept.
  @Override
  public void reset() {
    getHeaders().clear();
  }

  @Override
  public void setLocale(Locale locale) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Locale getLocale() {
    throw new UnsupportedOperationException();
  }

  /** ServletOutputStream that forwards every byte to a StringWriter. */
  private static class StringServletOutputStream extends ServletOutputStream {

    private final PrintWriter printWriter;

    private StringServletOutputStream(StringWriter stringWriter) {
      this.printWriter = new PrintWriter(stringWriter);
    }

    @Override
    public void write(int i) {
      printWriter.write(i);
    }

    @Override
    public boolean isReady() {
      return true;
    }

    @Override
    public void setWriteListener(WriteListener writeListener) {
      throw new UnsupportedOperationException();
    }
  }
}
| twalpole/selenium | java/server/test/org/openqa/testing/FakeHttpServletResponse.java | Java | apache-2.0 | 5,143 |
/*
* Copyright 2018 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.ssl;
import io.netty.internal.tcnative.SSL;
import io.netty.util.AbstractReferenceCounted;
import io.netty.util.IllegalReferenceCountException;
import io.netty.util.ResourceLeakDetector;
import io.netty.util.ResourceLeakDetectorFactory;
import io.netty.util.ResourceLeakTracker;
import java.security.cert.X509Certificate;
/**
 * {@link OpenSslKeyMaterial} backed by native OpenSSL structures: raw
 * pointers to an X.509 certificate chain and a private key. The native
 * memory is released through reference counting, and a
 * {@link ResourceLeakDetector} reports instances dropped without release.
 */
final class DefaultOpenSslKeyMaterial extends AbstractReferenceCounted implements OpenSslKeyMaterial {

    private static final ResourceLeakDetector<DefaultOpenSslKeyMaterial> leakDetector =
            ResourceLeakDetectorFactory.instance().newResourceLeakDetector(DefaultOpenSslKeyMaterial.class);
    private final ResourceLeakTracker<DefaultOpenSslKeyMaterial> leak;
    private final X509Certificate[] x509CertificateChain;
    // Native pointers; zeroed after deallocate() frees them.
    private long chain;
    private long privateKey;

    DefaultOpenSslKeyMaterial(long chain, long privateKey, X509Certificate[] x509CertificateChain) {
        this.chain = chain;
        this.privateKey = privateKey;
        this.x509CertificateChain = x509CertificateChain;
        leak = leakDetector.track(this);
    }

    @Override
    public X509Certificate[] certificateChain() {
        // Defensive copy so callers can't mutate the cached chain.
        return x509CertificateChain.clone();
    }

    @Override
    public long certificateChainAddress() {
        // Guard against handing out a pointer after the material was freed.
        if (refCnt() <= 0) {
            throw new IllegalReferenceCountException();
        }
        return chain;
    }

    @Override
    public long privateKeyAddress() {
        // Guard against handing out a pointer after the material was freed.
        if (refCnt() <= 0) {
            throw new IllegalReferenceCountException();
        }
        return privateKey;
    }

    @Override
    protected void deallocate() {
        // Free the native structures and zero the pointers so later reads of
        // the fields can't dereference freed memory.
        SSL.freeX509Chain(chain);
        chain = 0;
        SSL.freePrivateKey(privateKey);
        privateKey = 0;
        if (leak != null) {
            boolean closed = leak.close(this);
            assert closed;
        }
    }

    // The overrides below record leak-tracker activity on every ref-count
    // operation so leak reports show where the object was last touched.

    @Override
    public DefaultOpenSslKeyMaterial retain() {
        if (leak != null) {
            leak.record();
        }
        super.retain();
        return this;
    }

    @Override
    public DefaultOpenSslKeyMaterial retain(int increment) {
        if (leak != null) {
            leak.record();
        }
        super.retain(increment);
        return this;
    }

    @Override
    public DefaultOpenSslKeyMaterial touch() {
        if (leak != null) {
            leak.record();
        }
        super.touch();
        return this;
    }

    @Override
    public DefaultOpenSslKeyMaterial touch(Object hint) {
        if (leak != null) {
            leak.record(hint);
        }
        return this;
    }

    @Override
    public boolean release() {
        if (leak != null) {
            leak.record();
        }
        return super.release();
    }

    @Override
    public boolean release(int decrement) {
        if (leak != null) {
            leak.record();
        }
        return super.release(decrement);
    }
}
| doom369/netty | handler/src/main/java/io/netty/handler/ssl/DefaultOpenSslKeyMaterial.java | Java | apache-2.0 | 3,553 |
"""Test the Advantage Air Sensor Platform."""
from datetime import timedelta
from json import loads
from homeassistant.components.advantage_air.const import DOMAIN as ADVANTAGE_AIR_DOMAIN
from homeassistant.components.advantage_air.sensor import (
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
ADVANTAGE_AIR_SET_COUNTDOWN_VALUE,
)
from homeassistant.config_entries import RELOAD_AFTER_UPDATE_DELAY
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt
from tests.common import async_fire_time_changed
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_sensor_platform(hass, aioclient_mock):
    """Test sensor platform.

    Exercises the Advantage Air time-to-on/off, vent, signal and
    temperature sensors against a mocked API, including the custom
    set-time-to service and enabling a default-disabled entity.
    """
    aioclient_mock.get(
        TEST_SYSTEM_URL,
        text=TEST_SYSTEM_DATA,
    )
    aioclient_mock.get(
        TEST_SET_URL,
        text=TEST_SET_RESPONSE,
    )
    await add_mock_config(hass)
    registry = er.async_get(hass)

    # Setup performs exactly one /getSystemData fetch.
    assert len(aioclient_mock.mock_calls) == 1

    # Test First TimeToOn Sensor
    entity_id = "sensor.ac_one_time_to_on"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 0

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-timetoOn"

    value = 20
    await hass.services.async_call(
        ADVANTAGE_AIR_DOMAIN,
        ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
        {ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
        blocking=True,
    )
    # Each service call issues a /setAircon GET followed by a
    # /getSystemData refresh, so the mock-call count grows by two.
    assert len(aioclient_mock.mock_calls) == 3
    assert aioclient_mock.mock_calls[-2][0] == "GET"
    assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
    data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
    assert data["ac1"]["info"]["countDownToOn"] == value
    assert aioclient_mock.mock_calls[-1][0] == "GET"
    assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"

    # Test First TimeToOff Sensor
    entity_id = "sensor.ac_one_time_to_off"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 10

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-timetoOff"

    value = 0
    await hass.services.async_call(
        ADVANTAGE_AIR_DOMAIN,
        ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
        {ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
        blocking=True,
    )
    assert len(aioclient_mock.mock_calls) == 5
    assert aioclient_mock.mock_calls[-2][0] == "GET"
    assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
    data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
    assert data["ac1"]["info"]["countDownToOff"] == value
    assert aioclient_mock.mock_calls[-1][0] == "GET"
    assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"

    # Test First Zone Vent Sensor
    entity_id = "sensor.zone_open_with_sensor_vent"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 100

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-vent"

    # Test Second Zone Vent Sensor
    entity_id = "sensor.zone_closed_with_sensor_vent"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 0

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z02-vent"

    # Test First Zone Signal Sensor
    entity_id = "sensor.zone_open_with_sensor_signal"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 40

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-signal"

    # Test Second Zone Signal Sensor
    entity_id = "sensor.zone_closed_with_sensor_signal"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 10

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z02-signal"

    # Test First Zone Temp Sensor (disabled by default)
    entity_id = "sensor.zone_open_with_sensor_temperature"
    assert not hass.states.get(entity_id)

    # Re-enable the entity, then advance time past the reload delay so the
    # config entry is reloaded and the entity actually appears.
    registry.async_update_entity(entity_id=entity_id, disabled_by=None)
    await hass.async_block_till_done()

    async_fire_time_changed(
        hass,
        dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
    )
    await hass.async_block_till_done()

    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 25

    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-temp"
| aronsky/home-assistant | tests/components/advantage_air/test_sensor.py | Python | apache-2.0 | 4,781 |
using DynamicVsStaticCode.WinPhone.Resources;
namespace DynamicVsStaticCode.WinPhone
{
/// <summary>
/// Provides access to string resources.
/// </summary>
public class LocalizedStrings
{
private static AppResources _localizedResources = new AppResources();
public AppResources LocalizedResources { get { return _localizedResources; } }
}
}
| YOTOV-LIMITED/xamarin-forms-book-preview-2 | Chapter11/DynamicVsStaticCode/DynamicVsStaticCode/DynamicVsStaticCode.WinPhone/LocalizedStrings.cs | C# | apache-2.0 | 387 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Navigation;
using Microsoft.Phone.Controls;
using Microsoft.Phone.Shell;
namespace SearchBarDemo.WinPhone
{
    /// <summary>
    /// Windows Phone host page that bootstraps the Xamarin.Forms app.
    /// </summary>
    public partial class MainPage : global::Xamarin.Forms.Platform.WinPhone.FormsApplicationPage
    {
        public MainPage()
        {
            InitializeComponent();
            SupportedOrientations = SupportedPageOrientation.PortraitOrLandscape;

            // Forms.Init must run before LoadApplication — do not reorder.
            global::Xamarin.Forms.Forms.Init();
            LoadApplication(new SearchBarDemo.App());
        }
    }
}
| YOTOV-LIMITED/xamarin-forms-book-preview-2 | Chapter15/SearchBarDemo/SearchBarDemo/SearchBarDemo.WinPhone/MainPage.xaml.cs | C# | apache-2.0 | 648 |
package com.box.androidsdk.content.models;
import com.eclipsesource.json.JsonObject;
import com.eclipsesource.json.JsonValue;
import java.util.Map;
/**
* Class that represents a collection on Box.
*/
public class BoxCollection extends BoxEntity {

    public static final String TYPE = "collection";

    public static final String FIELD_NAME = "name";
    public static final String FIELD_COLLECTION_TYPE = "collection_type";

    /**
     * Constructs an empty BoxCollection object.
     */
    public BoxCollection() {
        super();
    }

    /**
     * Constructs a BoxCollection with the provided map values
     *
     * @param map - map of keys and values of the object
     */
    public BoxCollection(Map<String, Object> map) {
        super(map);
    }

    /**
     * Gets the name of the collection.
     *
     * @return the name of the collection.
     */
    public String getName() {
        return (String) mProperties.get(FIELD_NAME);
    }

    /**
     * Gets the type of the collection. Currently only "favorites" is supported.
     *
     * @return type of collection.
     */
    public String getCollectionType() {
        return (String) mProperties.get(FIELD_COLLECTION_TYPE);
    }

    @Override
    protected void parseJSONMember(JsonObject.Member member) {
        String name = member.getName();
        JsonValue value = member.getValue();
        if (FIELD_NAME.equals(name)) {
            mProperties.put(FIELD_NAME, value.asString());
        } else if (FIELD_COLLECTION_TYPE.equals(name)) {
            mProperties.put(FIELD_COLLECTION_TYPE, value.asString());
        } else {
            // Unknown members are handled by the base entity parser.
            super.parseJSONMember(member);
        }
    }
}
| MariusVolkhart/box-android-sdk | box-content-sdk/src/main/java/com/box/androidsdk/content/models/BoxCollection.java | Java | apache-2.0 | 1,723 |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
//
// NOTE: this needs to be tested in e2e since it uses iptables for everything.
//
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
v1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"k8s.io/kubernetes/pkg/proxy/metaproxier"
"k8s.io/kubernetes/pkg/proxy/metrics"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
"k8s.io/kubernetes/pkg/util/async"
"k8s.io/kubernetes/pkg/util/conntrack"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
)
// Well-known iptables chain names installed and managed by this proxier.
const (
	// the services chain
	kubeServicesChain utiliptables.Chain = "KUBE-SERVICES"

	// the external services chain
	kubeExternalServicesChain utiliptables.Chain = "KUBE-EXTERNAL-SERVICES"

	// the nodeports chain
	kubeNodePortsChain utiliptables.Chain = "KUBE-NODEPORTS"

	// the kubernetes postrouting chain
	kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"

	// KubeMarkMasqChain is the mark-for-masquerade chain
	KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"

	// KubeMarkDropChain is the mark-for-drop chain
	KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"

	// the kubernetes forward chain
	kubeForwardChain utiliptables.Chain = "KUBE-FORWARD"

	// kube proxy canary chain is used for monitoring rule reload
	kubeProxyCanaryChain utiliptables.Chain = "KUBE-PROXY-CANARY"
)
// KernelCompatTester tests whether the required kernel capabilities are
// present to run the iptables proxier. Implemented by
// LinuxKernelCompatTester below; an interface so tests can stub it.
type KernelCompatTester interface {
	IsCompatible() error
}
// CanUseIPTablesProxier returns true if we should use the iptables Proxier
// instead of the "classic" userspace Proxier. The boolean is true exactly
// when the kernel compatibility check succeeds; on failure the check's
// error is returned alongside false.
func CanUseIPTablesProxier(kcompat KernelCompatTester) (bool, error) {
	err := kcompat.IsCompatible()
	return err == nil, err
}
// Compile-time assertion that LinuxKernelCompatTester satisfies the interface.
var _ KernelCompatTester = LinuxKernelCompatTester{}

// LinuxKernelCompatTester is the Linux implementation of KernelCompatTester
type LinuxKernelCompatTester struct{}

// IsCompatible checks for the required sysctls. We don't care about the value, just
// that it exists. If this Proxier is chosen, we'll initialize it as we
// need.
func (lkct LinuxKernelCompatTester) IsCompatible() error {
	_, err := utilsysctl.New().GetSysctl(sysctlRouteLocalnet)
	return err
}

// sysctl keys read/written by this proxier.
const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet"
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
// internal struct for string service information
type serviceInfo struct {
	*proxy.BaseServiceInfo
	// The following fields are computed and stored for performance reasons.
	serviceNameString        string             // "namespace/name:port" form, cached
	servicePortChainName     utiliptables.Chain // KUBE-SVC-* chain for this port
	serviceFirewallChainName utiliptables.Chain // KUBE-FW-* chain for this port
	serviceLBChainName       utiliptables.Chain // KUBE-XLB-* chain for this port
}

// returns a new proxy.ServicePort which abstracts a serviceInfo
func newServiceInfo(port *v1.ServicePort, service *v1.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
	info := &serviceInfo{BaseServiceInfo: baseInfo}

	// Store the following for performance reasons.
	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: port.Name}
	protocol := strings.ToLower(string(info.Protocol()))
	info.serviceNameString = svcPortName.String()
	info.servicePortChainName = servicePortChainName(info.serviceNameString, protocol)
	info.serviceFirewallChainName = serviceFirewallChainName(info.serviceNameString, protocol)
	info.serviceLBChainName = serviceLBChainName(info.serviceNameString, protocol)

	return info
}
// internal struct for endpoints information
type endpointsInfo struct {
	*proxy.BaseEndpointInfo
	// The following fields we lazily compute and store here for performance
	// reasons. If the protocol is the same as you expect it to be, then the
	// chainName can be reused, otherwise it should be recomputed.
	protocol  string
	chainName utiliptables.Chain
}

// returns a new proxy.Endpoint which abstracts a endpointsInfo
func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint {
	return &endpointsInfo{BaseEndpointInfo: baseInfo}
}

// Equal overrides the Equal() function implemented by proxy.BaseEndpointInfo.
// It also compares the cached protocol/chainName so that a cache change is
// treated as a difference.
func (e *endpointsInfo) Equal(other proxy.Endpoint) bool {
	o, ok := other.(*endpointsInfo)
	if !ok {
		klog.ErrorS(nil, "Failed to cast endpointsInfo")
		return false
	}
	return e.Endpoint == o.Endpoint &&
		e.IsLocal == o.IsLocal &&
		e.protocol == o.protocol &&
		e.chainName == o.chainName
}

// Returns the endpoint chain name for a given endpointsInfo, recomputing
// and re-caching it only when the requested protocol differs from the one
// the cached name was built for.
func (e *endpointsInfo) endpointChain(svcNameString, protocol string) utiliptables.Chain {
	if e.protocol != protocol {
		e.protocol = protocol
		e.chainName = servicePortEndpointChainName(svcNameString, protocol, e.Endpoint)
	}
	return e.chainName
}
// Proxier is an iptables based proxy for connections between a localhost:lport
// and services that provide the actual backends.
type Proxier struct {
	// endpointsChanges and serviceChanges contains all changes to endpoints and
	// services that happened since iptables was synced. For a single object,
	// changes are accumulated, i.e. previous is state from before all of them,
	// current is state after applying all of those.
	endpointsChanges *proxy.EndpointChangeTracker
	serviceChanges   *proxy.ServiceChangeTracker

	mu           sync.Mutex // protects the following fields
	serviceMap   proxy.ServiceMap
	endpointsMap proxy.EndpointsMap
	portsMap     map[utilnet.LocalPort]utilnet.Closeable
	nodeLabels   map[string]string
	// endpointSlicesSynced, and servicesSynced are set to true
	// when corresponding objects are synced after startup. This is used to avoid
	// updating iptables with some partial data after kube-proxy restart.
	endpointSlicesSynced bool
	servicesSynced       bool
	// initialized is accessed atomically; see setInitialized/isInitialized.
	initialized int32
	syncRunner  *async.BoundedFrequencyRunner // governs calls to syncProxyRules
	syncPeriod  time.Duration

	// These are effectively const and do not need the mutex to be held.
	iptables       utiliptables.Interface
	masqueradeAll  bool
	masqueradeMark string
	exec           utilexec.Interface
	localDetector  proxyutiliptables.LocalTrafficDetector
	hostname       string
	nodeIP         net.IP
	portMapper     utilnet.PortOpener
	recorder       events.EventRecorder

	serviceHealthServer healthcheck.ServiceHealthServer
	healthzServer       healthcheck.ProxierHealthUpdater

	// Since converting probabilities (floats) to strings is expensive
	// and we are using only probabilities in the format of 1/n, we are
	// precomputing some number of those and cache for future reuse.
	precomputedProbabilities []string

	// The following buffers are used to reuse memory and avoid allocations
	// that are significantly impacting performance.
	iptablesData             *bytes.Buffer
	existingFilterChainsData *bytes.Buffer
	filterChains             *bytes.Buffer
	filterRules              *bytes.Buffer
	natChains                *bytes.Buffer
	natRules                 *bytes.Buffer

	// endpointChainsNumber is the total amount of endpointChains across all
	// services that we will generate (it is computed at the beginning of
	// syncProxyRules method). If that is large enough, comments in some
	// iptable rules are dropped to improve performance.
	endpointChainsNumber int

	// Values are as a parameter to select the interfaces where nodeport works.
	nodePortAddresses []string
	// networkInterfacer defines an interface for several net library functions.
	// Inject for test purpose.
	networkInterfacer utilproxy.NetworkInterfacer
}

// Proxier implements proxy.Provider
var _ proxy.Provider = &Proxier{}
// NewProxier returns a new Proxier given an iptables Interface instance.
// Because of the iptables logic, it is assumed that there is only a single Proxier active on a machine.
// An error will be returned if iptables fails to update or acquire the initial lock.
// Once a proxier is created, it will keep iptables up to date in the background and
// will not terminate if a particular iptables call fails.
func NewProxier(ipt utiliptables.Interface,
	sysctl utilsysctl.Interface,
	exec utilexec.Interface,
	syncPeriod time.Duration,
	minSyncPeriod time.Duration,
	masqueradeAll bool,
	masqueradeBit int,
	localDetector proxyutiliptables.LocalTrafficDetector,
	hostname string,
	nodeIP net.IP,
	recorder events.EventRecorder,
	healthzServer healthcheck.ProxierHealthUpdater,
	nodePortAddresses []string,
) (*Proxier, error) {
	// Set the route_localnet sysctl we need for
	if err := utilproxy.EnsureSysctl(sysctl, sysctlRouteLocalnet, 1); err != nil {
		return nil, err
	}

	// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers
	// are connected to a Linux bridge (but not SDN bridges). Until most
	// plugins handle this, log when config is missing
	if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 {
		klog.InfoS("Missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended")
	}

	// Generate the masquerade mark to use for SNAT rules.
	masqueradeValue := 1 << uint(masqueradeBit)
	masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
	klog.V(2).InfoS("Using iptables mark for masquerade", "ipFamily", ipt.Protocol(), "mark", masqueradeMark)

	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder)

	// The proxier is single-family; derive the family from the iptables handle.
	ipFamily := v1.IPv4Protocol
	if ipt.IsIPv6() {
		ipFamily = v1.IPv6Protocol
	}

	// Keep only the nodeport CIDRs matching this proxier's IP family.
	ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses)
	nodePortAddresses = ipFamilyMap[ipFamily]
	// Log the IPs not matching the ipFamily
	if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(ipFamily)]; ok && len(ips) > 0 {
		klog.InfoS("Found node IPs of the wrong family", "ipFamily", ipFamily, "ips", strings.Join(ips, ","))
	}

	proxier := &Proxier{
		portsMap:                 make(map[utilnet.LocalPort]utilnet.Closeable),
		serviceMap:               make(proxy.ServiceMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointChangeTracker(hostname, newEndpointInfo, ipFamily, recorder, nil),
		syncPeriod:               syncPeriod,
		iptables:                 ipt,
		masqueradeAll:            masqueradeAll,
		masqueradeMark:           masqueradeMark,
		exec:                     exec,
		localDetector:            localDetector,
		hostname:                 hostname,
		nodeIP:                   nodeIP,
		portMapper:               &utilnet.ListenPortOpener,
		recorder:                 recorder,
		serviceHealthServer:      serviceHealthServer,
		healthzServer:            healthzServer,
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             bytes.NewBuffer(nil),
		filterRules:              bytes.NewBuffer(nil),
		natChains:                bytes.NewBuffer(nil),
		natRules:                 bytes.NewBuffer(nil),
		nodePortAddresses:        nodePortAddresses,
		networkInterfacer:        utilproxy.RealNetwork{},
	}

	burstSyncs := 2
	klog.V(2).InfoS("Iptables sync params", "ipFamily", ipt.Protocol(), "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs)
	// We pass syncPeriod to ipt.Monitor, which will call us only if it needs to.
	// We need to pass *some* maxInterval to NewBoundedFrequencyRunner anyway though.
	// time.Hour is arbitrary.
	proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, time.Hour, burstSyncs)

	// Monitor the canary chain across tables so that an external flush of
	// our rules triggers a resync.
	go ipt.Monitor(kubeProxyCanaryChain, []utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter},
		proxier.syncProxyRules, syncPeriod, wait.NeverStop)

	if ipt.HasRandomFully() {
		klog.V(2).InfoS("Iptables supports --random-fully", "ipFamily", ipt.Protocol())
	} else {
		klog.V(2).InfoS("Iptables does not support --random-fully", "ipFamily", ipt.Protocol())
	}

	return proxier, nil
}
// NewDualStackProxier creates a MetaProxier instance, with IPv4 and IPv6 proxies.
// Index 0 of the array arguments is the IPv4 side, index 1 the IPv6 side;
// nodePortAddresses is split per family before being handed to each proxier.
func NewDualStackProxier(
	ipt [2]utiliptables.Interface,
	sysctl utilsysctl.Interface,
	exec utilexec.Interface,
	syncPeriod time.Duration,
	minSyncPeriod time.Duration,
	masqueradeAll bool,
	masqueradeBit int,
	localDetectors [2]proxyutiliptables.LocalTrafficDetector,
	hostname string,
	nodeIP [2]net.IP,
	recorder events.EventRecorder,
	healthzServer healthcheck.ProxierHealthUpdater,
	nodePortAddresses []string,
) (proxy.Provider, error) {
	// Create an ipv4 instance of the single-stack proxier
	ipFamilyMap := utilproxy.MapCIDRsByIPFamily(nodePortAddresses)
	ipv4Proxier, err := NewProxier(ipt[0], sysctl,
		exec, syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, localDetectors[0], hostname,
		nodeIP[0], recorder, healthzServer, ipFamilyMap[v1.IPv4Protocol])
	if err != nil {
		return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err)
	}

	ipv6Proxier, err := NewProxier(ipt[1], sysctl,
		exec, syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit, localDetectors[1], hostname,
		nodeIP[1], recorder, healthzServer, ipFamilyMap[v1.IPv6Protocol])
	if err != nil {
		return nil, fmt.Errorf("unable to create ipv6 proxier: %v", err)
	}
	return metaproxier.NewMetaProxier(ipv4Proxier, ipv6Proxier), nil
}
// iptablesJumpChain describes one jump rule that links a proxier-owned
// chain (dstChain) into a built-in chain (srcChain) of a given table.
type iptablesJumpChain struct {
	table     utiliptables.Table
	dstChain  utiliptables.Chain
	srcChain  utiliptables.Chain
	comment   string
	extraArgs []string // extra match arguments inserted before the comment/jump
}

// iptablesJumpChains are the jump rules this proxier installs.
var iptablesJumpChains = []iptablesJumpChain{
	{utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainInput, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
	{utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainForward, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
	{utiliptables.TableFilter, kubeNodePortsChain, utiliptables.ChainInput, "kubernetes health check service ports", nil},
	{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainForward, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
	{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
	{utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil},
	{utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil},
	{utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainPrerouting, "kubernetes service portals", nil},
	{utiliptables.TableNAT, kubePostroutingChain, utiliptables.ChainPostrouting, "kubernetes postrouting rules", nil},
}

// iptablesEnsureChains are chains we create but do not link from here.
var iptablesEnsureChains = []struct {
	table utiliptables.Table
	chain utiliptables.Chain
}{
	{utiliptables.TableNAT, KubeMarkDropChain},
}

// iptablesCleanupOnlyChains are no longer installed but must still be
// removed by CleanupLeftovers for upgrades from older releases.
var iptablesCleanupOnlyChains = []iptablesJumpChain{
	// Present in kube 1.13 - 1.19. Removed by #95252 in favor of adding reject rules for incoming/forwarding packets to kubeExternalServicesChain
	{utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}},
}
// CleanupLeftovers removes all iptables rules and chains created by the Proxier
// It returns true if an error was encountered. Errors are logged.
func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
	// Unlink our chains
	for _, jump := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) {
		args := append(jump.extraArgs,
			"-m", "comment", "--comment", jump.comment,
			"-j", string(jump.dstChain),
		)
		if err := ipt.DeleteRule(jump.table, jump.srcChain, args...); err != nil {
			// A missing rule is fine (already cleaned up); anything else is real.
			if !utiliptables.IsNotFoundError(err) {
				klog.ErrorS(err, "Error removing pure-iptables proxy rule")
				encounteredError = true
			}
		}
	}

	// Flush and remove all of our "-t nat" chains.
	iptablesData := bytes.NewBuffer(nil)
	if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil {
		klog.ErrorS(err, "Failed to execute iptables-save", "table", utiliptables.TableNAT)
		encounteredError = true
	} else {
		existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
		natChains := bytes.NewBuffer(nil)
		natRules := bytes.NewBuffer(nil)
		utilproxy.WriteLine(natChains, "*nat")
		// Start with chains we know we need to remove.
		for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain} {
			if _, found := existingNATChains[chain]; found {
				chainString := string(chain)
				utilproxy.WriteBytesLine(natChains, existingNATChains[chain]) // flush
				utilproxy.WriteLine(natRules, "-X", chainString)              // delete
			}
		}
		// Hunt for service and endpoint chains.
		for chain := range existingNATChains {
			chainString := string(chain)
			if strings.HasPrefix(chainString, "KUBE-SVC-") || strings.HasPrefix(chainString, "KUBE-SEP-") || strings.HasPrefix(chainString, "KUBE-FW-") || strings.HasPrefix(chainString, "KUBE-XLB-") {
				utilproxy.WriteBytesLine(natChains, existingNATChains[chain]) // flush
				utilproxy.WriteLine(natRules, "-X", chainString)              // delete
			}
		}
		utilproxy.WriteLine(natRules, "COMMIT")
		natLines := append(natChains.Bytes(), natRules.Bytes()...)
		// Write it.
		err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
		if err != nil {
			klog.ErrorS(err, "Failed to execute iptables-restore", "table", utiliptables.TableNAT)
			metrics.IptablesRestoreFailuresTotal.Inc()
			encounteredError = true
		}
	}

	// Flush and remove all of our "-t filter" chains.
	iptablesData.Reset()
	if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil {
		klog.ErrorS(err, "Failed to execute iptables-save", "table", utiliptables.TableFilter)
		encounteredError = true
	} else {
		existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes())
		filterChains := bytes.NewBuffer(nil)
		filterRules := bytes.NewBuffer(nil)
		utilproxy.WriteLine(filterChains, "*filter")
		for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
			if _, found := existingFilterChains[chain]; found {
				chainString := string(chain)
				utilproxy.WriteBytesLine(filterChains, existingFilterChains[chain])
				utilproxy.WriteLine(filterRules, "-X", chainString)
			}
		}
		utilproxy.WriteLine(filterRules, "COMMIT")
		filterLines := append(filterChains.Bytes(), filterRules.Bytes()...)
		// Write it.
		if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil {
			klog.ErrorS(err, "Failed to execute iptables-restore", "table", utiliptables.TableFilter)
			metrics.IptablesRestoreFailuresTotal.Inc()
			encounteredError = true
		}
	}
	return encounteredError
}
// computeProbability returns 1/n formatted with ten digits after the
// decimal point (the form consumed by the probability cache below).
func computeProbability(n int) string {
	p := 1.0 / float64(n)
	return fmt.Sprintf("%0.10f", p)
}
// This assumes proxier.mu is held
// precomputeProbabilities extends the probability cache up to and
// including numberOfPrecomputed entries. Index 0 is a "<bad value>"
// sentinel so that probability(i) aligns with 1/i.
func (proxier *Proxier) precomputeProbabilities(numberOfPrecomputed int) {
	if len(proxier.precomputedProbabilities) == 0 {
		proxier.precomputedProbabilities = append(proxier.precomputedProbabilities, "<bad value>")
	}
	for i := len(proxier.precomputedProbabilities); i <= numberOfPrecomputed; i++ {
		proxier.precomputedProbabilities = append(proxier.precomputedProbabilities, computeProbability(i))
	}
}

// This assumes proxier.mu is held
// probability returns the cached string form of 1/n, growing the cache
// on demand.
func (proxier *Proxier) probability(n int) string {
	if n >= len(proxier.precomputedProbabilities) {
		proxier.precomputeProbabilities(n)
	}
	return proxier.precomputedProbabilities[n]
}
// Sync is called to synchronize the proxier state to iptables as soon as possible.
func (proxier *Proxier) Sync() {
	if proxier.healthzServer != nil {
		proxier.healthzServer.QueuedUpdate()
	}
	metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime()
	proxier.syncRunner.Run()
}

// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
	// Update healthz timestamp at beginning in case Sync() never succeeds.
	if proxier.healthzServer != nil {
		proxier.healthzServer.Updated()
	}

	// synthesize "last change queued" time as the informers are syncing.
	metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime()
	proxier.syncRunner.Loop(wait.NeverStop)
}
// setInitialized atomically records whether the proxier has received the
// initial full state (see OnServiceSynced/OnEndpointSlicesSynced).
func (proxier *Proxier) setInitialized(value bool) {
	var flag int32
	if value {
		flag = 1
	}
	atomic.StoreInt32(&proxier.initialized, flag)
}

// isInitialized atomically reports whether setInitialized(true) has run.
func (proxier *Proxier) isInitialized() bool {
	return atomic.LoadInt32(&proxier.initialized) > 0
}
// OnServiceAdd is called whenever creation of new service object
// is observed. Implemented as an update from nil.
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
	proxier.OnServiceUpdate(nil, service)
}

// OnServiceUpdate is called whenever modification of an existing
// service object is observed. A sync is only queued when the change
// tracker reports a real difference and initial state has been received.
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
	if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnServiceDelete is called whenever deletion of an existing service
// object is observed. Implemented as an update to nil.
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
	proxier.OnServiceUpdate(service, nil)
}

// OnServiceSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnServiceSynced() {
	proxier.mu.Lock()
	proxier.servicesSynced = true
	// Initialized only once BOTH services and endpoint slices have synced.
	proxier.setInitialized(proxier.endpointSlicesSynced)
	proxier.mu.Unlock()

	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}
// iptables proxier only uses EndpointSlice, the following methods
// exist to implement the Proxier interface but are noops

// OnEndpointsAdd is called whenever creation of new endpoints object
// is observed.
func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) {}

// OnEndpointsUpdate is called whenever modification of an existing
// endpoints object is observed.
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {}

// OnEndpointsDelete is called whenever deletion of an existing endpoints
// object is observed.
func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) {}

// OnEndpointsSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnEndpointsSynced() {}

// OnEndpointSliceAdd is called whenever creation of a new endpoint slice object
// is observed.
func (proxier *Proxier) OnEndpointSliceAdd(endpointSlice *discovery.EndpointSlice) {
	if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnEndpointSliceUpdate is called whenever modification of an existing endpoint
// slice object is observed.
func (proxier *Proxier) OnEndpointSliceUpdate(_, endpointSlice *discovery.EndpointSlice) {
	if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnEndpointSliceDelete is called whenever deletion of an existing endpoint slice
// object is observed. The trailing "true" marks the slice as removed.
func (proxier *Proxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointSlice) {
	if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, true) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnEndpointSlicesSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnEndpointSlicesSynced() {
	proxier.mu.Lock()
	proxier.endpointSlicesSynced = true
	// Initialized only once BOTH services and endpoint slices have synced.
	proxier.setInitialized(proxier.servicesSynced)
	proxier.mu.Unlock()

	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}
// updateNodeLabels copies node.Labels into proxier.nodeLabels under the
// mutex and resyncs iptables. It returns without syncing when the event is
// for a different node or the labels are unchanged.
// NOTE(review): extracted because OnNodeAdd and OnNodeUpdate previously
// carried byte-identical copies of this logic.
func (proxier *Proxier) updateNodeLabels(node *v1.Node) {
	if node.Name != proxier.hostname {
		klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node",
			"eventNode", node.Name, "currentNode", proxier.hostname)
		return
	}

	if reflect.DeepEqual(proxier.nodeLabels, node.Labels) {
		return
	}

	proxier.mu.Lock()
	proxier.nodeLabels = map[string]string{}
	for k, v := range node.Labels {
		proxier.nodeLabels[k] = v
	}
	proxier.mu.Unlock()
	klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels)

	proxier.syncProxyRules()
}

// OnNodeAdd is called whenever creation of new node object
// is observed.
func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
	proxier.updateNodeLabels(node)
}

// OnNodeUpdate is called whenever modification of an existing
// node object is observed.
func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
	proxier.updateNodeLabels(node)
}
// OnNodeDelete is called whenever deletion of an existing node
// object is observed. Clears the cached node labels and resyncs the rules.
func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
	if node.Name != proxier.hostname {
		klog.ErrorS(nil, "Received a watch event for a node that doesn't match the current node",
			"eventNode", node.Name, "currentNode", proxier.hostname)
		return
	}

	// Drop the cached labels under the lock, then resync without holding it
	// (syncProxyRules takes proxier.mu itself).
	func() {
		proxier.mu.Lock()
		defer proxier.mu.Unlock()
		proxier.nodeLabels = nil
	}()

	proxier.syncProxyRules()
}
// OnNodeSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnNodeSynced() {
	// Intentionally a no-op: node sync completion does not trigger a rule
	// resync here (services/endpoints sync handle that).
}
// portProtoHash returns a stable 16-character identifier for the given
// ServicePortName and protocol: the SHA-256 of their concatenation, base32
// encoded and truncated to 16 characters. Truncation keeps derived iptables
// chain names within the 28-character limit while staying readable.
func portProtoHash(servicePortName string, protocol string) string {
	sum := sha256.Sum256([]byte(servicePortName + protocol))
	return base32.StdEncoding.EncodeToString(sum[:])[:16]
}
// servicePortChainName returns the iptables chain used for a service port:
// the "KUBE-SVC-" prefix followed by the truncated sha256/base32 hash of the
// ServicePortName and protocol.
func servicePortChainName(servicePortName string, protocol string) utiliptables.Chain {
	hash := portProtoHash(servicePortName, protocol)
	return utiliptables.Chain("KUBE-SVC-" + hash)
}
// serviceFirewallChainName returns the iptables firewall chain for a service
// port: the "KUBE-FW-" prefix followed by the truncated sha256/base32 hash of
// the ServicePortName and protocol.
func serviceFirewallChainName(servicePortName string, protocol string) utiliptables.Chain {
	hash := portProtoHash(servicePortName, protocol)
	return utiliptables.Chain("KUBE-FW-" + hash)
}
// serviceLBChainName returns the iptables local-balancing ("XLB") chain for a
// service port: the "KUBE-XLB-" prefix followed by the truncated sha256/base32
// hash of the ServicePortName and protocol. Hashing keeps chain names within
// the iptables 28-character limit.
func serviceLBChainName(servicePortName string, protocol string) utiliptables.Chain {
	hash := portProtoHash(servicePortName, protocol)
	return utiliptables.Chain("KUBE-XLB-" + hash)
}
// servicePortEndpointChainName is the per-endpoint analogue of
// servicePortChainName: it hashes the endpoint address into the name as well,
// producing a "KUBE-SEP-" chain unique to this (service, protocol, endpoint).
func servicePortEndpointChainName(servicePortName string, protocol string, endpoint string) utiliptables.Chain {
	sum := sha256.Sum256([]byte(servicePortName + protocol + endpoint))
	name := "KUBE-SEP-" + base32.StdEncoding.EncodeToString(sum[:])[:16]
	return utiliptables.Chain(name)
}
// After a UDP or SCTP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
// risk sending more traffic to it, all of which will be lost.
// This assumes the proxier mutex is held
// TODO: move it to util
//
// For each removed (service, endpoint) pair this clears, in order: NodePort
// NAT entries (if the service has a NodePort), clusterIP NAT entries,
// externalIP NAT entries, and load-balancer ingress IP NAT entries. Errors
// are logged but do not abort processing of the remaining entries.
func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint) {
	for _, epSvcPair := range connectionMap {
		// Only act on services we still know about, and only for protocols
		// whose conntrack entries need explicit clearing (per IsClearConntrackNeeded).
		if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && conntrack.IsClearConntrackNeeded(svcInfo.Protocol()) {
			endpointIP := utilproxy.IPPart(epSvcPair.Endpoint)
			nodePort := svcInfo.NodePort()
			svcProto := svcInfo.Protocol()
			var err error
			// Clear entries created via the service's NodePort, if any.
			if nodePort != 0 {
				err = conntrack.ClearEntriesForPortNAT(proxier.exec, endpointIP, nodePort, svcProto)
				if err != nil {
					klog.ErrorS(err, "Failed to delete nodeport-related endpoint connections", "servicePortName", epSvcPair.ServicePortName.String())
				}
			}
			// Clear entries NATed from the cluster IP to this endpoint.
			err = conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIP().String(), endpointIP, svcProto)
			if err != nil {
				klog.ErrorS(err, "Failed to delete endpoint connections", "servicePortName", epSvcPair.ServicePortName.String())
			}
			// Clear entries NATed from each externalIP to this endpoint.
			for _, extIP := range svcInfo.ExternalIPStrings() {
				err := conntrack.ClearEntriesForNAT(proxier.exec, extIP, endpointIP, svcProto)
				if err != nil {
					klog.ErrorS(err, "Failed to delete endpoint connections for externalIP", "servicePortName", epSvcPair.ServicePortName.String(), "externalIP", extIP)
				}
			}
			// Clear entries NATed from each load balancer ingress IP to this endpoint.
			for _, lbIP := range svcInfo.LoadBalancerIPStrings() {
				err := conntrack.ClearEntriesForNAT(proxier.exec, lbIP, endpointIP, svcProto)
				if err != nil {
					klog.ErrorS(err, "Failed to delete endpoint connections for LoadBalancerIP", "servicePortName", epSvcPair.ServicePortName.String(), "loadBalancerIP", lbIP)
				}
			}
		}
	}
}
// endpointChainsNumberThreshold is the number of endpoint chains beyond which
// per-rule "-m comment" matches are dropped to keep the iptables payload small.
const endpointChainsNumberThreshold = 1000

// appendServiceCommentLocked appends an iptables comment match for svcName to
// args, unless the total endpoint-chain count exceeds
// endpointChainsNumberThreshold, in which case args is returned unchanged.
// Dropping the comments can shrink the ruleset by 40%+ with many endpoints.
// Assumes proxier.mu is held.
func (proxier *Proxier) appendServiceCommentLocked(args []string, svcName string) []string {
	if proxier.endpointChainsNumber <= endpointChainsNumberThreshold {
		return append(args, "-m", "comment", "--comment", svcName)
	}
	return args
}
// This is where all of the iptables-save/restore calls happen.
// The only other iptables rules are those that are setup in iptablesInit()
// This assumes proxier.mu is NOT held
func (proxier *Proxier) syncProxyRules() {
proxier.mu.Lock()
defer proxier.mu.Unlock()
// don't sync rules till we've received services and endpoints
if !proxier.isInitialized() {
klog.V(2).InfoS("Not syncing iptables until Services and Endpoints have been received from master")
return
}
// Keep track of how long syncs take.
start := time.Now()
defer func() {
metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
klog.V(2).InfoS("SyncProxyRules complete", "elapsed", time.Since(start))
}()
// We assume that if this was called, we really want to sync them,
// even if nothing changed in the meantime. In other words, callers are
// responsible for detecting no-op changes and not calling this function.
serviceUpdateResult := proxier.serviceMap.Update(proxier.serviceChanges)
endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)
// We need to detect stale connections to UDP Services so we
// can clean dangling conntrack entries that can blackhole traffic.
conntrackCleanupServiceIPs := serviceUpdateResult.UDPStaleClusterIP
conntrackCleanupServiceNodePorts := sets.NewInt()
// merge stale services gathered from updateEndpointsMap
// an UDP service that changes from 0 to non-0 endpoints is considered stale.
for _, svcPortName := range endpointUpdateResult.StaleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && conntrack.IsClearConntrackNeeded(svcInfo.Protocol()) {
klog.V(2).InfoS("Stale service", "protocol", strings.ToLower(string(svcInfo.Protocol())), "svcPortName", svcPortName.String(), "clusterIP", svcInfo.ClusterIP().String())
conntrackCleanupServiceIPs.Insert(svcInfo.ClusterIP().String())
for _, extIP := range svcInfo.ExternalIPStrings() {
conntrackCleanupServiceIPs.Insert(extIP)
}
nodePort := svcInfo.NodePort()
if svcInfo.Protocol() == v1.ProtocolUDP && nodePort != 0 {
klog.V(2).Infof("Stale %s service NodePort %v -> %d", strings.ToLower(string(svcInfo.Protocol())), svcPortName, nodePort)
conntrackCleanupServiceNodePorts.Insert(nodePort)
}
}
}
klog.V(2).InfoS("Syncing iptables rules")
success := false
defer func() {
if !success {
klog.InfoS("Sync failed", "retryingTime", proxier.syncPeriod)
proxier.syncRunner.RetryAfter(proxier.syncPeriod)
}
}()
// Create and link the kube chains.
for _, jump := range iptablesJumpChains {
if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil {
klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain)
return
}
args := append(jump.extraArgs,
"-m", "comment", "--comment", jump.comment,
"-j", string(jump.dstChain),
)
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil {
klog.ErrorS(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain)
return
}
}
// ensure KUBE-MARK-DROP chain exist but do not change any rules
for _, ch := range iptablesEnsureChains {
if _, err := proxier.iptables.EnsureChain(ch.table, ch.chain); err != nil {
klog.ErrorS(err, "Failed to ensure chain exists", "table", ch.table, "chain", ch.chain)
return
}
}
//
// Below this point we will not return until we try to write the iptables rules.
//
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingFilterChains := make(map[utiliptables.Chain][]byte)
proxier.existingFilterChainsData.Reset()
err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData)
if err != nil { // if we failed to get any rules
klog.ErrorS(err, "Failed to execute iptables-save, syncing all rules")
} else { // otherwise parse the output
existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.existingFilterChainsData.Bytes())
}
// IMPORTANT: existingNATChains may share memory with proxier.iptablesData.
existingNATChains := make(map[utiliptables.Chain][]byte)
proxier.iptablesData.Reset()
err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData)
if err != nil { // if we failed to get any rules
klog.ErrorS(err, "Failed to execute iptables-save, syncing all rules")
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes())
}
// Reset all buffers used later.
// This is to avoid memory reallocations and thus improve performance.
proxier.filterChains.Reset()
proxier.filterRules.Reset()
proxier.natChains.Reset()
proxier.natRules.Reset()
// Write table headers.
utilproxy.WriteLine(proxier.filterChains, "*filter")
utilproxy.WriteLine(proxier.natChains, "*nat")
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
if chain, ok := existingFilterChains[chainName]; ok {
utilproxy.WriteBytesLine(proxier.filterChains, chain)
} else {
utilproxy.WriteLine(proxier.filterChains, utiliptables.MakeChainLine(chainName))
}
}
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
if chain, ok := existingNATChains[chainName]; ok {
utilproxy.WriteBytesLine(proxier.natChains, chain)
} else {
utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(chainName))
}
}
// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
utilproxy.WriteLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "RETURN",
}...)
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
utilproxy.WriteLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
// XOR proxier.masqueradeMark to unset it
"-j", "MARK", "--xor-mark", proxier.masqueradeMark,
}...)
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
masqRule = append(masqRule, "--random-fully")
}
utilproxy.WriteLine(proxier.natRules, masqRule...)
// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
utilproxy.WriteLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--or-mark", proxier.masqueradeMark,
}...)
// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
// Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[utilnet.LocalPort]utilnet.Closeable{}
// We are creating those slices ones here to avoid memory reallocations
// in every loop. Note that reuse the memory, instead of doing:
// slice = <some new slice>
// you should always do one of the below:
// slice = slice[:0] // and then append to it
// slice = append(slice[:0], ...)
endpoints := make([]*endpointsInfo, 0)
endpointChains := make([]utiliptables.Chain, 0)
readyEndpoints := make([]*endpointsInfo, 0)
readyEndpointChains := make([]utiliptables.Chain, 0)
localReadyEndpointChains := make([]utiliptables.Chain, 0)
localServingTerminatingEndpointChains := make([]utiliptables.Chain, 0)
// To avoid growing this slice, we arbitrarily set its size to 64,
// there is never more than that many arguments for a single line.
// Note that even if we go over 64, it will still be correct - it
// is just for efficiency, not correctness.
args := make([]string, 64)
// Compute total number of endpoint chains across all services.
proxier.endpointChainsNumber = 0
for svcName := range proxier.serviceMap {
proxier.endpointChainsNumber += len(proxier.endpointsMap[svcName])
}
localAddrSet := utilproxy.GetLocalAddrSet()
nodeAddresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer)
if err != nil {
klog.ErrorS(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses)
}
// Build rules for each service.
for svcName, svc := range proxier.serviceMap {
svcInfo, ok := svc.(*serviceInfo)
if !ok {
klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String())
continue
}
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := utilnet.IPv4
if isIPv6 {
localPortIPFamily = utilnet.IPv6
}
protocol := strings.ToLower(string(svcInfo.Protocol()))
svcNameString := svcInfo.serviceNameString
allEndpoints := proxier.endpointsMap[svcName]
// Filtering for topology aware endpoints. This function will only
// filter endpoints if appropriate feature gates are enabled and the
// Service does not have conflicting configuration such as
// externalTrafficPolicy=Local.
allEndpoints = proxy.FilterEndpoints(allEndpoints, svcInfo, proxier.nodeLabels)
hasEndpoints := len(allEndpoints) > 0
svcChain := svcInfo.servicePortChainName
if hasEndpoints {
// Create the per-service chain, retaining counters if possible.
if chain, ok := existingNATChains[svcChain]; ok {
utilproxy.WriteBytesLine(proxier.natChains, chain)
} else {
utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(svcChain))
}
activeNATChains[svcChain] = true
}
svcXlbChain := svcInfo.serviceLBChainName
if svcInfo.NodeLocalExternal() {
// Only for services request OnlyLocal traffic
// create the per-service LB chain, retaining counters if possible.
if lbChain, ok := existingNATChains[svcXlbChain]; ok {
utilproxy.WriteBytesLine(proxier.natChains, lbChain)
} else {
utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(svcXlbChain))
}
activeNATChains[svcXlbChain] = true
}
// Capture the clusterIP.
if hasEndpoints {
args = append(args[:0],
"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(svcInfo.ClusterIP()),
"--dport", strconv.Itoa(svcInfo.Port()),
)
if proxier.masqueradeAll {
utilproxy.WriteRuleLine(proxier.natRules, string(svcChain), append(args, "-j", string(KubeMarkMasqChain))...)
} else if proxier.localDetector.IsImplemented() {
// This masquerades off-cluster traffic to a service VIP. The idea
// is that you can establish a static route for your Service range,
// routing to any node, and that node will bridge into the Service
// for you. Since that might bounce off-node, we masquerade here.
// If/when we support "Local" policy for VIPs, we should update this.
utilproxy.WriteRuleLine(proxier.natRules, string(svcChain), proxier.localDetector.JumpIfNotLocal(args, string(KubeMarkMasqChain))...)
}
utilproxy.WriteRuleLine(proxier.natRules, string(kubeServicesChain), append(args, "-j", string(svcChain))...)
} else {
// No endpoints.
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(svcInfo.ClusterIP()),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT",
)
}
// Capture externalIPs.
for _, externalIP := range svcInfo.ExternalIPStrings() {
// If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) {
lp := utilnet.LocalPort{
Description: "externalIP for " + svcNameString,
IP: externalIP,
IPFamily: localPortIPFamily,
Port: svcInfo.Port(),
Protocol: utilnet.Protocol(svcInfo.Protocol()),
}
if proxier.portsMap[lp] != nil {
klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
msg := fmt.Sprintf("can't open port %s, skipping it", lp.String())
proxier.recorder.Eventf(
&v1.ObjectReference{
Kind: "Node",
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
Namespace: "",
}, nil, v1.EventTypeWarning, err.Error(), "SyncProxyRules", msg)
klog.ErrorS(err, "can't open port, skipping it", "port", lp.String())
continue
}
klog.V(2).InfoS("Opened local port", "port", lp.String())
replacementPortsMap[lp] = socket
}
}
if hasEndpoints {
args = append(args[:0],
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)),
"--dport", strconv.Itoa(svcInfo.Port()),
)
destChain := svcXlbChain
// We have to SNAT packets to external IPs if externalTrafficPolicy is cluster
// and the traffic is NOT Local. Local traffic coming from Pods and Nodes will
// be always forwarded to the corresponding Service, so no need to SNAT
// If we can't differentiate the local traffic we always SNAT.
if !svcInfo.NodeLocalExternal() {
destChain = svcChain
// This masquerades off-cluster traffic to a External IP.
if proxier.localDetector.IsImplemented() {
utilproxy.WriteRuleLine(proxier.natRules, string(svcChain), proxier.localDetector.JumpIfNotLocal(args, string(KubeMarkMasqChain))...)
} else {
utilproxy.WriteRuleLine(proxier.natRules, string(svcChain), append(args, "-j", string(KubeMarkMasqChain))...)
}
}
// Send traffic bound for external IPs to the service chain.
utilproxy.WriteRuleLine(proxier.natRules, string(kubeServicesChain), append(args, "-j", string(destChain))...)
} else {
// No endpoints.
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT",
)
}
}
// Capture load-balancer ingress.
fwChain := svcInfo.serviceFirewallChainName
for _, ingress := range svcInfo.LoadBalancerIPStrings() {
if ingress != "" {
if hasEndpoints {
// create service firewall chain
if chain, ok := existingNATChains[fwChain]; ok {
utilproxy.WriteBytesLine(proxier.natChains, chain)
} else {
utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(fwChain))
}
activeNATChains[fwChain] = true
// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
// This currently works for loadbalancers that preserves source ips.
// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(ingress)),
"--dport", strconv.Itoa(svcInfo.Port()),
)
// jump to service firewall chain
utilproxy.WriteLine(proxier.natRules, append(args, "-j", string(fwChain))...)
args = append(args[:0],
"-A", string(fwChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
)
// Each source match rule in the FW chain may jump to either the SVC or the XLB chain
chosenChain := svcXlbChain
// If we are proxying globally, we need to masquerade in case we cross nodes.
// If we are proxying only locally, we can retain the source IP.
if !svcInfo.NodeLocalExternal() {
utilproxy.WriteLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
chosenChain = svcChain
}
if len(svcInfo.LoadBalancerSourceRanges()) == 0 {
// allow all sources, so jump directly to the KUBE-SVC or KUBE-XLB chain
utilproxy.WriteLine(proxier.natRules, append(args, "-j", string(chosenChain))...)
} else {
// firewall filter based on each source range
allowFromNode := false
for _, src := range svcInfo.LoadBalancerSourceRanges() {
utilproxy.WriteLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
_, cidr, err := net.ParseCIDR(src)
if err != nil {
klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
} else if cidr.Contains(proxier.nodeIP) {
allowFromNode = true
}
}
// generally, ip route rule was added to intercept request to loadbalancer vip from the
// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
// Need to add the following rule to allow request on host.
if allowFromNode {
utilproxy.WriteLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress)), "-j", string(chosenChain))...)
}
}
// If the packet was able to reach the end of firewall chain, then it did not get DNATed.
// It means the packet cannot go thru the firewall, then mark it for DROP
utilproxy.WriteLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...)
} else {
// No endpoints.
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(ingress)),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT",
)
}
}
}
// Capture nodeports. If we had more than 2 rules it might be
// worthwhile to make a new per-service chain for nodeport rules, but
// with just 2 rules it ends up being a waste and a cognitive burden.
if svcInfo.NodePort() != 0 {
// Hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if len(nodeAddresses) == 0 {
continue
}
lps := make([]utilnet.LocalPort, 0)
for address := range nodeAddresses {
lp := utilnet.LocalPort{
Description: "nodePort for " + svcNameString,
IP: address,
IPFamily: localPortIPFamily,
Port: svcInfo.NodePort(),
Protocol: utilnet.Protocol(svcInfo.Protocol()),
}
if utilproxy.IsZeroCIDR(address) {
// Empty IP address means all
lp.IP = ""
lps = append(lps, lp)
// If we encounter a zero CIDR, then there is no point in processing the rest of the addresses.
break
}
lps = append(lps, lp)
}
// For ports on node IPs, open the actual port and hold it.
for _, lp := range lps {
if proxier.portsMap[lp] != nil {
klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else if svcInfo.Protocol() != v1.ProtocolSCTP {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
msg := fmt.Sprintf("can't open port %s, skipping it", lp.String())
proxier.recorder.Eventf(
&v1.ObjectReference{
Kind: "Node",
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
Namespace: "",
}, nil, v1.EventTypeWarning, err.Error(), "SyncProxyRules", msg)
klog.ErrorS(err, "can't open port, skipping it", "port", lp.String())
continue
}
klog.V(2).InfoS("Opened local port", "port", lp.String())
replacementPortsMap[lp] = socket
}
}
if hasEndpoints {
args = append(args[:0],
"-m", "comment", "--comment", svcNameString,
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.NodePort()),
)
if !svcInfo.NodeLocalExternal() {
// Nodeports need SNAT, unless they're local.
utilproxy.WriteRuleLine(proxier.natRules, string(svcChain), append(args, "-j", string(KubeMarkMasqChain))...)
// Jump to the service chain.
utilproxy.WriteRuleLine(proxier.natRules, string(kubeNodePortsChain), append(args, "-j", string(svcChain))...)
} else {
// TODO: Make all nodePorts jump to the firewall chain.
// Currently we only create it for loadbalancers (#33586).
// Fix localhost martian source error
loopback := "127.0.0.0/8"
if isIPv6 {
loopback = "::1/128"
}
utilproxy.WriteRuleLine(proxier.natRules, string(kubeNodePortsChain), append(args, "-s", loopback, "-j", string(KubeMarkMasqChain))...)
utilproxy.WriteRuleLine(proxier.natRules, string(kubeNodePortsChain), append(args, "-j", string(svcXlbChain))...)
}
} else {
// No endpoints.
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", "addrtype", "--dst-type", "LOCAL",
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.NodePort()),
"-j", "REJECT",
)
}
}
// Capture healthCheckNodePorts.
if svcInfo.HealthCheckNodePort() != 0 {
// no matter if node has local endpoints, healthCheckNodePorts
// need to add a rule to accept the incoming connection
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s health check node port"`, svcNameString),
"-m", "tcp", "-p", "tcp",
"--dport", strconv.Itoa(svcInfo.HealthCheckNodePort()),
"-j", "ACCEPT",
)
}
if !hasEndpoints {
continue
}
// Generate the per-endpoint chains. We do this in multiple passes so we
// can group rules together.
// These two slices parallel each other - keep in sync
endpoints = endpoints[:0]
endpointChains = endpointChains[:0]
var endpointChain utiliptables.Chain
for _, ep := range allEndpoints {
epInfo, ok := ep.(*endpointsInfo)
if !ok {
klog.ErrorS(err, "Failed to cast endpointsInfo", "endpointsInfo", ep.String())
continue
}
endpoints = append(endpoints, epInfo)
endpointChain = epInfo.endpointChain(svcNameString, protocol)
endpointChains = append(endpointChains, endpointChain)
// Create the endpoint chain, retaining counters if possible.
if chain, ok := existingNATChains[endpointChain]; ok {
utilproxy.WriteBytesLine(proxier.natChains, chain)
} else {
utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(endpointChain))
}
activeNATChains[endpointChain] = true
}
// First write session affinity rules, if applicable.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
for _, endpointChain := range endpointChains {
args = append(args[:0],
"-A", string(svcChain),
)
args = proxier.appendServiceCommentLocked(args, svcNameString)
args = append(args,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.StickyMaxAgeSeconds()), "--reap",
"-j", string(endpointChain),
)
utilproxy.WriteLine(proxier.natRules, args...)
}
}
// Firstly, categorize each endpoint into three buckets:
// 1. all endpoints that are ready and NOT terminating.
// 2. all endpoints that are local, ready and NOT terminating, and externalTrafficPolicy=Local
// 3. all endpoints that are local, serving and terminating, and externalTrafficPolicy=Local
readyEndpointChains = readyEndpointChains[:0]
readyEndpoints := readyEndpoints[:0]
localReadyEndpointChains := localReadyEndpointChains[:0]
localServingTerminatingEndpointChains := localServingTerminatingEndpointChains[:0]
for i, endpointChain := range endpointChains {
if endpoints[i].Ready {
readyEndpointChains = append(readyEndpointChains, endpointChain)
readyEndpoints = append(readyEndpoints, endpoints[i])
}
if svc.NodeLocalExternal() && endpoints[i].IsLocal {
if endpoints[i].Ready {
localReadyEndpointChains = append(localReadyEndpointChains, endpointChain)
} else if endpoints[i].Serving && endpoints[i].Terminating {
localServingTerminatingEndpointChains = append(localServingTerminatingEndpointChains, endpointChain)
}
}
}
// Now write loadbalancing & DNAT rules.
numReadyEndpoints := len(readyEndpointChains)
for i, endpointChain := range readyEndpointChains {
epIP := readyEndpoints[i].IP()
if epIP == "" {
// Error parsing this endpoint has been logged. Skip to next endpoint.
continue
}
// Balancing rules in the per-service chain.
args = append(args[:0], "-A", string(svcChain))
args = proxier.appendServiceCommentLocked(args, svcNameString)
if i < (numReadyEndpoints - 1) {
// Each rule is a probabilistic match.
args = append(args,
"-m", "statistic",
"--mode", "random",
"--probability", proxier.probability(numReadyEndpoints-i))
}
// The final (or only if n == 1) rule is a guaranteed match.
args = append(args, "-j", string(endpointChain))
utilproxy.WriteLine(proxier.natRules, args...)
}
// Every endpoint gets a chain, regardless of its state. This is required later since we may
// want to jump to endpoint chains that are terminating.
for i, endpointChain := range endpointChains {
epIP := endpoints[i].IP()
if epIP == "" {
// Error parsing this endpoint has been logged. Skip to next endpoint.
continue
}
// Rules in the per-endpoint chain.
args = append(args[:0], "-A", string(endpointChain))
args = proxier.appendServiceCommentLocked(args, svcNameString)
// Handle traffic that loops back to the originator with SNAT.
utilproxy.WriteLine(proxier.natRules, append(args,
"-s", utilproxy.ToCIDR(net.ParseIP(epIP)),
"-j", string(KubeMarkMasqChain))...)
// Update client-affinity lists.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
}
// DNAT to final destination.
args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", endpoints[i].Endpoint)
utilproxy.WriteLine(proxier.natRules, args...)
}
// The logic below this applies only if this service is marked as OnlyLocal
if !svcInfo.NodeLocalExternal() {
continue
}
// First rule in the chain redirects all pod -> external VIP traffic to the
// Service's ClusterIP instead. This happens whether or not we have local
// endpoints; only if localDetector is implemented
if proxier.localDetector.IsImplemented() {
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
`"Redirect pods trying to reach external loadbalancer VIP to clusterIP"`,
)
utilproxy.WriteLine(proxier.natRules, proxier.localDetector.JumpIfLocal(args, string(svcChain))...)
}
// Next, redirect all src-type=LOCAL -> LB IP to the service chain for externalTrafficPolicy=Local
// This allows traffic originating from the host to be redirected to the service correctly,
// otherwise traffic to LB IPs are dropped if there are no local endpoints.
args = append(args[:0], "-A", string(svcXlbChain))
utilproxy.WriteLine(proxier.natRules, append(args,
"-m", "comment", "--comment", fmt.Sprintf(`"masquerade LOCAL traffic for %s LB IP"`, svcNameString),
"-m", "addrtype", "--src-type", "LOCAL", "-j", string(KubeMarkMasqChain))...)
utilproxy.WriteLine(proxier.natRules, append(args,
"-m", "comment", "--comment", fmt.Sprintf(`"route LOCAL traffic for %s LB IP to service chain"`, svcNameString),
"-m", "addrtype", "--src-type", "LOCAL", "-j", string(svcChain))...)
// Prefer local ready endpoint chains, but fall back to ready terminating if none exist
localEndpointChains := localReadyEndpointChains
if utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) && len(localEndpointChains) == 0 {
localEndpointChains = localServingTerminatingEndpointChains
}
numLocalEndpoints := len(localEndpointChains)
if numLocalEndpoints == 0 {
// Blackhole all traffic since there are no local endpoints
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"%s has no local endpoints"`, svcNameString),
"-j",
string(KubeMarkDropChain),
)
utilproxy.WriteLine(proxier.natRules, args...)
} else {
// First write session affinity rules only over local endpoints, if applicable.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
for _, endpointChain := range localEndpointChains {
utilproxy.WriteLine(proxier.natRules,
"-A", string(svcXlbChain),
"-m", "comment", "--comment", svcNameString,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.StickyMaxAgeSeconds()), "--reap",
"-j", string(endpointChain))
}
}
// Setup probability filter rules only over local endpoints
for i, endpointChain := range localEndpointChains {
// Balancing rules in the per-service chain.
args = append(args[:0],
"-A", string(svcXlbChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"Balancing rule %d for %s"`, i, svcNameString),
)
if i < (numLocalEndpoints - 1) {
// Each rule is a probabilistic match.
args = append(args,
"-m", "statistic",
"--mode", "random",
"--probability", proxier.probability(numLocalEndpoints-i))
}
// The final (or only if n == 1) rule is a guaranteed match.
args = append(args, "-j", string(endpointChain))
utilproxy.WriteLine(proxier.natRules, args...)
}
}
}
// Delete chains no longer in use.
for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !strings.HasPrefix(chainString, "KUBE-SVC-") && !strings.HasPrefix(chainString, "KUBE-SEP-") && !strings.HasPrefix(chainString, "KUBE-FW-") && !strings.HasPrefix(chainString, "KUBE-XLB-") {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
utilproxy.WriteBytesLine(proxier.natChains, existingNATChains[chain])
utilproxy.WriteLine(proxier.natRules, "-X", chainString)
}
}
// Finally, tail-call to the nodeports chain. This needs to be after all
// other service portal rules.
isIPv6 := proxier.iptables.IsIPv6()
for address := range nodeAddresses {
// TODO(thockin, m1093782566): If/when we have dual-stack support we will want to distinguish v4 from v6 zero-CIDRs.
if utilproxy.IsZeroCIDR(address) {
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`,
"-m", "addrtype", "--dst-type", "LOCAL",
"-j", string(kubeNodePortsChain))
utilproxy.WriteLine(proxier.natRules, args...)
// Nothing else matters after the zero CIDR.
break
}
// Ignore IP addresses with incorrect version
if isIPv6 && !utilnet.IsIPv6String(address) || !isIPv6 && utilnet.IsIPv6String(address) {
klog.ErrorS(nil, "IP has incorrect IP version", "ip", address)
continue
}
// create nodeport rules for each IP one by one
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`,
"-d", address,
"-j", string(kubeNodePortsChain))
utilproxy.WriteLine(proxier.natRules, args...)
}
// Drop the packets in INVALID state, which would potentially cause
// unexpected connection reset.
// https://github.com/kubernetes/kubernetes/issues/74839
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "conntrack",
"--ctstate", "INVALID",
"-j", "DROP",
)
// If the masqueradeMark has been added then we want to forward that same
// traffic, this allows NodePort traffic to be forwarded even if the default
// FORWARD policy is not accept.
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "ACCEPT",
)
// The following two rules ensure the traffic after the initial packet
// accepted by the "kubernetes forwarding rules" rule above will be
// accepted.
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding conntrack pod source rule"`,
"-m", "conntrack",
"--ctstate", "RELATED,ESTABLISHED",
"-j", "ACCEPT",
)
utilproxy.WriteLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding conntrack pod destination rule"`,
"-m", "conntrack",
"--ctstate", "RELATED,ESTABLISHED",
"-j", "ACCEPT",
)
// Write the end-of-table markers.
utilproxy.WriteLine(proxier.filterRules, "COMMIT")
utilproxy.WriteLine(proxier.natRules, "COMMIT")
// Sync rules.
// NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table
proxier.iptablesData.Reset()
proxier.iptablesData.Write(proxier.filterChains.Bytes())
proxier.iptablesData.Write(proxier.filterRules.Bytes())
proxier.iptablesData.Write(proxier.natChains.Bytes())
proxier.iptablesData.Write(proxier.natRules.Bytes())
numberFilterIptablesRules := utilproxy.CountBytesLines(proxier.filterRules.Bytes())
metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(numberFilterIptablesRules))
numberNatIptablesRules := utilproxy.CountBytesLines(proxier.natRules.Bytes())
metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(numberNatIptablesRules))
klog.V(5).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes())
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
klog.ErrorS(err, "Failed to execute iptables-restore")
metrics.IptablesRestoreFailuresTotal.Inc()
// Revert new local ports.
klog.V(2).InfoS("Closing local ports after iptables-restore failure")
utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap)
return
}
success = true
for name, lastChangeTriggerTimes := range endpointUpdateResult.LastChangeTriggerTimes {
for _, lastChangeTriggerTime := range lastChangeTriggerTimes {
latency := metrics.SinceInSeconds(lastChangeTriggerTime)
metrics.NetworkProgrammingLatency.Observe(latency)
klog.V(4).InfoS("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency)
}
}
// Close old local ports and save new ones.
for k, v := range proxier.portsMap {
if replacementPortsMap[k] == nil {
v.Close()
}
}
proxier.portsMap = replacementPortsMap
if proxier.healthzServer != nil {
proxier.healthzServer.Updated()
}
metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime()
// Update service healthchecks. The endpoints list might include services that are
// not "OnlyLocal", but the services list will not, and the serviceHealthServer
// will just drop those endpoints.
if err := proxier.serviceHealthServer.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil {
klog.ErrorS(err, "Error syncing healthcheck services")
}
if err := proxier.serviceHealthServer.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil {
klog.ErrorS(err, "Error syncing healthcheck endpoints")
}
// Finish housekeeping.
// Clear stale conntrack entries for UDP Services, this has to be done AFTER the iptables rules are programmed.
// TODO: these could be made more consistent.
klog.V(4).InfoS("Deleting conntrack stale entries for Services", "ips", conntrackCleanupServiceIPs.UnsortedList())
for _, svcIP := range conntrackCleanupServiceIPs.UnsortedList() {
if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
klog.ErrorS(err, "Failed to delete stale service connections", "ip", svcIP)
}
}
klog.V(4).InfoS("Deleting conntrack stale entries for Services", "nodeports", conntrackCleanupServiceNodePorts.UnsortedList())
for _, nodePort := range conntrackCleanupServiceNodePorts.UnsortedList() {
err := conntrack.ClearEntriesForPort(proxier.exec, nodePort, isIPv6, v1.ProtocolUDP)
if err != nil {
klog.ErrorS(err, "Failed to clear udp conntrack", "port", nodePort)
}
}
klog.V(4).InfoS("Deleting stale endpoint connections", "endpoints", endpointUpdateResult.StaleEndpoints)
proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints)
}
| kubernetes-incubator/ocid | vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go | GO | apache-2.0 | 69,679 |
package com.cardpay.pccredit.product.constant;
import java.util.HashMap;
import java.util.Map;
import com.cardpay.pccredit.common.Dictionary;
/**
 * Static lookup of product filter columns to the dictionary value lists
 * used to populate their selectable options.
 */
public class DictTypeConstant {

	/** Lookup table: filter column identifier -> dictionary value list. */
	public static Map<String, Object> TypeMap = new HashMap<String, Object>();

	static {
		registerDictionaries();
	}

	/** Populates {@link #TypeMap} with one entry per supported filter column. */
	private static void registerDictionaries() {
		TypeMap.put(ProductFilterColumn.TITLE, Dictionary.titleList);
		TypeMap.put(ProductFilterColumn.POSITIO, Dictionary.positioList);
		TypeMap.put(ProductFilterColumn.DEGREE_EDUCATION, Dictionary.degreeeducationList);
		TypeMap.put(ProductFilterColumn.RESIDENTIAL_PROPERTIE, Dictionary.residentialPropertieList);
		TypeMap.put(ProductFilterColumn.UNIT_NATURE, Dictionary.unitPropertisList);
		TypeMap.put(ProductFilterColumn.INDUSTRY_TYPE, Dictionary.industryTypeList);
	}
}
| zhuyuanyan/PCCredit_TY | src/java/com/cardpay/pccredit/product/constant/DictTypeConstant.java | Java | apache-2.0 | 746 |
/*******************************************************************************
* Copyright 2014 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
/**
* Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
* University
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
**/
/**
* @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
*/
package org.openflow.protocol.action;
import org.jboss.netty.buffer.ChannelBuffer;
/**
* Represents an ofp_action_strip_vlan
*
* @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
*/
public class OFActionStripVirtualLan extends OFAction {
    /** Wire length of this action in bytes: the common action header plus 4 padding bytes. */
    public static int MINIMUM_LENGTH = 8;

    /** Creates a STRIP_VLAN action with the fixed minimum wire length. */
    public OFActionStripVirtualLan() {
        super();
        super.setType(OFActionType.STRIP_VLAN);
        super.setLength((short) OFActionStripVirtualLan.MINIMUM_LENGTH);
    }

    @Override
    public void readFrom(final ChannelBuffer data) {
        super.readFrom(data);
        // Consume the 4 padding bytes that follow the common action header.
        data.readInt();
    }

    @Override
    public void writeTo(final ChannelBuffer data) {
        super.writeTo(data);
        // Emit the 4 zero padding bytes required by the wire format.
        data.writeInt(0);
    }

    @Override
    public String toString() {
        // A StringBuilder is unnecessary for a single value; String.valueOf
        // also handles a null type safely, matching StringBuilder.append.
        return String.valueOf(this.type);
    }
}
| fnkhan/New | src/main/java/org/openflow/protocol/action/OFActionStripVirtualLan.java | Java | apache-2.0 | 2,493 |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"net/http"
"time"
computealpha "google.golang.org/api/compute/v0.alpha"
"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
gcecloud "k8s.io/legacy-cloud-providers/gce"
"github.com/onsi/ginkgo"
)
// GCE-only alpha suite exercising GCP Network Tiers on LoadBalancer Services:
// create a standard-tier LB, switch it to the premium (default) tier, then
// re-create it on the standard tier with a user-reserved static IP.
var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
	f := framework.NewDefaultFramework("services")

	var cs clientset.Interface
	// Names of load balancers created by the tests; torn down in AfterEach.
	serviceLBNames := []string{}

	ginkgo.BeforeEach(func() {
		// This test suite requires the GCE environment.
		framework.SkipUnlessProviderIs("gce")
		cs = f.ClientSet
	})

	ginkgo.AfterEach(func() {
		if ginkgo.CurrentGinkgoTestDescription().Failed {
			e2eservice.DescribeSvc(f.Namespace.Name)
		}
		for _, lb := range serviceLBNames {
			framework.Logf("cleaning gce resource for %s", lb)
			framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
		}
		//reset serviceLBNames
		serviceLBNames = []string{}
	})

	ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() {
		lagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
		createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)

		svcName := "net-tiers-svc"
		ns := f.Namespace.Name
		jig := e2eservice.NewTestJig(cs, ns, svcName)

		ginkgo.By("creating a pod to be part of the service " + svcName)
		_, err := jig.Run(nil)
		framework.ExpectNoError(err)

		// Test 1: create a standard tiered LB for the Service.
		ginkgo.By("creating a Service of type LoadBalancer using the standard network tier")
		svc, err := jig.CreateTCPService(func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
		})
		framework.ExpectNoError(err)
		// Verify that service has been updated properly.
		svcTier, err := gcecloud.GetServiceNetworkTier(svc)
		framework.ExpectNoError(err)
		framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
		// Record the LB name for test cleanup.
		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
		// Wait and verify the LB.
		ingressIP := waitAndVerifyLBWithTier(jig, "", createTimeout, lagTimeout)

		// Test 2: re-create a LB of a different tier for the updated Service.
		ginkgo.By("updating the Service to use the premium (default) tier")
		svc, err = jig.UpdateService(func(svc *v1.Service) {
			clearNetworkTier(svc)
		})
		framework.ExpectNoError(err)
		// Verify that service has been updated properly.
		svcTier, err = gcecloud.GetServiceNetworkTier(svc)
		framework.ExpectNoError(err)
		framework.ExpectEqual(svcTier, cloud.NetworkTierDefault)
		// Wait until the ingress IP changes. Each tier has its own pool of
		// IPs, so changing tiers implies changing IPs.
		ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)

		// Test 3: create a standard-tiered LB with a user-requested IP.
		ginkgo.By("reserving a static IP for the load balancer")
		requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID)
		gceCloud, err := gce.GetGCECloud()
		framework.ExpectNoError(err)
		requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
		framework.ExpectNoError(err, "failed to reserve a STANDARD tiered address")
		defer func() {
			if requestedAddrName != "" {
				// Release GCE static address - this is not kube-managed and will not be automatically released.
				if err := gceCloud.DeleteRegionAddress(requestedAddrName, gceCloud.Region()); err != nil {
					framework.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
				}
			}
		}()
		framework.ExpectNoError(err)
		framework.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)

		ginkgo.By("updating the Service to use the standard tier with a requested IP")
		svc, err = jig.UpdateService(func(svc *v1.Service) {
			svc.Spec.LoadBalancerIP = requestedIP
			setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
		})
		framework.ExpectNoError(err)
		// Verify that service has been updated properly.
		framework.ExpectEqual(svc.Spec.LoadBalancerIP, requestedIP)
		svcTier, err = gcecloud.GetServiceNetworkTier(svc)
		framework.ExpectNoError(err)
		framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
		// Wait until the ingress IP changes and verifies the LB.
		ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
	})
})
// waitAndVerifyLBWithTier waits until the Service exposes an ingress IP that
// differs from existingIP (any IP if existingIP is empty), verifies that the
// load balancer is reachable and that its provisioned network tier matches
// the tier requested on the Service, and returns the new ingress IP.
func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, existingIP string, waitTimeout, checkTimeout time.Duration) string {
	// An empty existingIP means "wait for any ingress IP to show up";
	// otherwise wait for the ingress IP to change to a different address.
	svc, err := jig.WaitForNewIngressIP(existingIP, waitTimeout)
	framework.ExpectNoError(err)

	port := int(svc.Spec.Ports[0].Port)
	ingress := &svc.Status.LoadBalancer.Ingress[0]
	ip := e2eservice.GetIngressPoint(ingress)

	ginkgo.By("running sanity and reachability checks")
	if svc.Spec.LoadBalancerIP != "" {
		// A user-requested IP must be honored verbatim.
		framework.ExpectEqual(ip, svc.Spec.LoadBalancerIP)
	}
	// A recycled IP can keep returning lingering 404s for a while after the
	// old LB is long gone; tolerate and retry until the new LB is fully
	// established, since this feature is still Alpha in GCP.
	e2eservice.TestReachableHTTPWithRetriableErrorCodes(ip, port, []int{http.StatusNotFound}, checkTimeout)

	// The provisioned forwarding rule's tier must match the Service's
	// requested tier.
	wantTier, err := gcecloud.GetServiceNetworkTier(svc)
	framework.ExpectNoError(err)
	gotTier, err := getLBNetworkTierByIP(ip)
	framework.ExpectNoError(err, "failed to get the network tier of the load balancer")
	framework.ExpectEqual(gotTier, wantTier)

	return ip
}
// getLBNetworkTierByIP resolves the network tier of the GCE forwarding rule
// whose IP address equals ip, polling briefly to tolerate transient flakes.
func getLBNetworkTierByIP(ip string) (cloud.NetworkTier, error) {
	var fwdRule *computealpha.ForwardingRule
	// Retry a few times so a transient API error does not fail the lookup.
	pollErr := wait.PollImmediate(5*time.Second, 15*time.Second, func() (bool, error) {
		r, err := getGCEForwardingRuleByIP(ip)
		if err != nil {
			return false, err
		}
		fwdRule = r
		return true, nil
	})
	if pollErr != nil {
		return "", pollErr
	}
	return cloud.NetworkTierGCEValueToType(fwdRule.NetworkTier), nil
}
// getGCEForwardingRuleByIP returns the alpha regional forwarding rule whose
// IPAddress matches ip, or an error if no such rule exists in the region.
func getGCEForwardingRuleByIP(ip string) (*computealpha.ForwardingRule, error) {
	// Named gceCloud (not "cloud") to avoid shadowing the imported cloud package.
	gceCloud, err := gce.GetGCECloud()
	if err != nil {
		return nil, err
	}
	rules, err := gceCloud.ListAlphaRegionForwardingRules(gceCloud.Region())
	if err != nil {
		return nil, err
	}
	for _, r := range rules {
		if r.IPAddress == ip {
			return r, nil
		}
	}
	return nil, fmt.Errorf("forwarding rule with ip %q not found", ip)
}
// setNetworkTier annotates svc with the requested GCE network tier,
// initializing the annotation map when necessary.
func setNetworkTier(svc *v1.Service, tier string) {
	if svc.ObjectMeta.Annotations == nil {
		svc.ObjectMeta.Annotations = map[string]string{}
	}
	svc.ObjectMeta.Annotations[gcecloud.NetworkTierAnnotationKey] = tier
}
// clearNetworkTier removes the GCE network tier annotation from svc, if any.
func clearNetworkTier(svc *v1.Service) {
	if svc.ObjectMeta.Annotations != nil {
		delete(svc.ObjectMeta.Annotations, gcecloud.NetworkTierAnnotationKey)
	}
}
// reserveAlphaRegionalAddress reserves a static regional IP address with the
// given name and network tier, then returns the reserved address string.
// The parameter is named gceCloud (the original used "cloud") so it no longer
// shadows the imported cloud package referenced in this same signature;
// Go parameter names are not part of the call interface, so callers are
// unaffected.
// TODO: add retries if this turns out to be flaky.
// TODO(#51665): remove this helper function once Network Tiers becomes beta.
func reserveAlphaRegionalAddress(gceCloud *gcecloud.Cloud, name string, netTier cloud.NetworkTier) (string, error) {
	alphaAddr := &computealpha.Address{
		Name:        name,
		NetworkTier: netTier.ToGCEValue(),
	}

	if err := gceCloud.ReserveAlphaRegionAddress(alphaAddr, gceCloud.Region()); err != nil {
		return "", err
	}

	addr, err := gceCloud.GetRegionAddress(name, gceCloud.Region())
	if err != nil {
		return "", err
	}

	return addr.Address, nil
}
| pweil-/origin | vendor/k8s.io/kubernetes/test/e2e/network/network_tiers.go | GO | apache-2.0 | 8,670 |
const test = require('ava');
const {replaceUrls, toInaboxDocument} = require('../app-utils');
// "minified" mode: cdn.ampproject.org runtime and extension URLs are
// rewritten to the compiled binaries served from the local /dist directory.
test('replaceUrls("minified", ...)', async (t) => {
  const mode = 'minified';
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/v0.js"></script>'
    ),
    '<script src="/dist/v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/shadow-v0.js"></script>'
    ),
    '<script src="/dist/shadow-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/amp4ads-v0.js"></script>'
    ),
    '<script src="/dist/amp4ads-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/video-iframe-integration-v0.js"></script>'
    ),
    '<script src="/dist/video-iframe-integration-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-whatever-1.0.css" />'
    ),
    '<link rel="stylesheet" href="/dist/v0/amp-whatever-1.0.css" />'
  );
  // Whole-document rewrite: extension scripts keep their plain .js names in
  // minified mode (contrast with .max.js in "default" mode).
  t.is(
    replaceUrls(
      mode,
      `
      <head>
        <script src="https://cdn.ampproject.org/v0.js"></script>
        <script src="https://cdn.ampproject.org/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-foo-1.0.css" />
      </head>
      `
    ),
    `
      <head>
        <script src="/dist/v0.js"></script>
        <script src="/dist/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="/dist/v0/amp-foo-1.0.css" />
      </head>
      `
  );
});

// Same as above, but a non-empty hostName argument is prepended to every
// rewritten /dist URL (used when the dev server is reached cross-origin).
test('replaceUrls("minified", ..., hostName)', async (t) => {
  const mode = 'minified';
  const hostName = 'https://foo.bar';
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/shadow-v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/shadow-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/amp4ads-v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/amp4ads-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/video-iframe-integration-v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/video-iframe-integration-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-whatever-1.0.css" />',
      hostName
    ),
    '<link rel="stylesheet" href="https://foo.bar/dist/v0/amp-whatever-1.0.css" />'
  );
  t.is(
    replaceUrls(
      mode,
      `
      <head>
        <script src="https://cdn.ampproject.org/v0.js"></script>
        <script src="https://cdn.ampproject.org/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-foo-1.0.css" />
      </head>
      `,
      hostName
    ),
    `
      <head>
        <script src="https://foo.bar/dist/v0.js"></script>
        <script src="https://foo.bar/dist/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://foo.bar/dist/v0/amp-foo-1.0.css" />
      </head>
      `
  );
});
// "default" (unminified) mode: runtime binaries map to their development
// names (amp.js, amp-shadow.js, amp-inabox.js, video-iframe-integration.js)
// and extension scripts map to their .max.js builds.
test('replaceUrls("default", ...)', async (t) => {
  const mode = 'default';
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/v0.js"></script>'
    ),
    '<script src="/dist/amp.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/shadow-v0.js"></script>'
    ),
    '<script src="/dist/amp-shadow.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/amp4ads-v0.js"></script>'
    ),
    '<script src="/dist/amp-inabox.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/video-iframe-integration-v0.js"></script>'
    ),
    '<script src="/dist/video-iframe-integration.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-whatever-1.0.css" />'
    ),
    '<link rel="stylesheet" href="/dist/v0/amp-whatever-1.0.css" />'
  );
  t.is(
    replaceUrls(
      mode,
      `
      <head>
        <script src="https://cdn.ampproject.org/v0.js"></script>
        <script src="https://cdn.ampproject.org/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-foo-1.0.css" />
      </head>
      `
    ),
    `
      <head>
        <script src="/dist/amp.js"></script>
        <script src="/dist/v0/amp-foo-0.1.max.js"></script>
        <link rel="stylesheet" href="/dist/v0/amp-foo-1.0.css" />
      </head>
      `
  );
});

// Same as above, but every rewritten /dist URL is prefixed with hostName.
test('replaceUrls("default", ..., hostName)', async (t) => {
  const mode = 'default';
  const hostName = 'https://foo.bar';
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/amp.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/shadow-v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/amp-shadow.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/amp4ads-v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/amp-inabox.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/video-iframe-integration-v0.js"></script>',
      hostName
    ),
    '<script src="https://foo.bar/dist/video-iframe-integration.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-whatever-1.0.css" />',
      hostName
    ),
    '<link rel="stylesheet" href="https://foo.bar/dist/v0/amp-whatever-1.0.css" />'
  );
  t.is(
    replaceUrls(
      mode,
      `
      <head>
        <script src="https://cdn.ampproject.org/v0.js"></script>
        <script src="https://cdn.ampproject.org/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-foo-1.0.css" />
      </head>
      `,
      hostName
    ),
    `
      <head>
        <script src="https://foo.bar/dist/amp.js"></script>
        <script src="https://foo.bar/dist/v0/amp-foo-0.1.max.js"></script>
        <link rel="stylesheet" href="https://foo.bar/dist/v0/amp-foo-1.0.css" />
      </head>
      `
  );
});
// RTV mode (a 15-digit version string): URLs stay on the CDN but are pinned
// under the /rtv/<version>/ path.
test('replaceUrls(rtv, ...)', async (t) => {
  const mode = '123456789012345';
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/v0.js"></script>'
    ),
    '<script src="https://cdn.ampproject.org/rtv/123456789012345/v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/shadow-v0.js"></script>'
    ),
    '<script src="https://cdn.ampproject.org/rtv/123456789012345/shadow-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/amp4ads-v0.js"></script>'
    ),
    '<script src="https://cdn.ampproject.org/rtv/123456789012345/amp4ads-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<script src="https://cdn.ampproject.org/video-iframe-integration-v0.js"></script>'
    ),
    '<script src="https://cdn.ampproject.org/rtv/123456789012345/video-iframe-integration-v0.js"></script>'
  );
  t.is(
    replaceUrls(
      mode,
      '<link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-whatever-1.0.css" />'
    ),
    '<link rel="stylesheet" href="https://cdn.ampproject.org/rtv/123456789012345/v0/amp-whatever-1.0.css" />'
  );
  t.is(
    replaceUrls(
      mode,
      `
      <head>
        <script src="https://cdn.ampproject.org/v0.js"></script>
        <script src="https://cdn.ampproject.org/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://cdn.ampproject.org/v0/amp-foo-1.0.css" />
      </head>
      `
    ),
    `
      <head>
        <script src="https://cdn.ampproject.org/rtv/123456789012345/v0.js"></script>
        <script src="https://cdn.ampproject.org/rtv/123456789012345/v0/amp-foo-0.1.js"></script>
        <link rel="stylesheet" href="https://cdn.ampproject.org/rtv/123456789012345/v0/amp-foo-1.0.css" />
      </head>
      `
  );
});

// toInaboxDocument flips the <html amp> attribute to amp4ads and swaps v0.js
// for amp4ads-v0.js, while extension script URLs are left untouched.
test('toInaboxDocument(...)', async (t) => {
  t.is(
    toInaboxDocument(
      `<html amp>
        <head>
          <script src="https://cdn.ampproject.org/v0.js"></script>
          <script src="https://cdn.ampproject.org/v0/amp-video-0.1.js"></script>
        </head>
      </html>`
    ),
    `<html amp4ads>
        <head>
          <script src="https://cdn.ampproject.org/amp4ads-v0.js"></script>
          <script src="https://cdn.ampproject.org/v0/amp-video-0.1.js"></script>
        </head>
      </html>`
  );
});
// Composition checks: the amp4ads runtime URL produced by toInaboxDocument is
// still rewritten correctly by replaceUrls in each of the three modes.
test('replaceUrls("minified", toInaboxDocument(...))', async (t) => {
  const mode = 'minified';
  const hostName = '';
  t.is(
    replaceUrls(
      mode,
      toInaboxDocument(
        '<script src="https://cdn.ampproject.org/v0.js"></script>'
      ),
      hostName
    ),
    '<script src="/dist/amp4ads-v0.js"></script>'
  );
});

test('replaceUrls("default", toInaboxDocument(...))', async (t) => {
  const mode = 'default';
  const hostName = '';
  t.is(
    replaceUrls(
      mode,
      toInaboxDocument(
        '<script src="https://cdn.ampproject.org/v0.js"></script>'
      ),
      hostName
    ),
    '<script src="/dist/amp-inabox.js"></script>'
  );
});

test('replaceUrls(rtv, toInaboxDocument(...))', async (t) => {
  const mode = '123456789012345';
  const hostName = '';
  t.is(
    replaceUrls(
      mode,
      toInaboxDocument(
        '<script src="https://cdn.ampproject.org/v0.js"></script>'
      ),
      hostName
    ),
    '<script src="https://cdn.ampproject.org/rtv/123456789012345/amp4ads-v0.js"></script>'
  );
});
| honeybadgerdontcare/amphtml | build-system/server/test/app-utils.test.js | JavaScript | apache-2.0 | 10,266 |
/**
*
* Copyright (c) Microsoft and contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
package com.microsoft.windowsazure.management.websites.models;
import com.microsoft.windowsazure.core.OperationResponse;
/**
* The Create Web Site operation response.
*/
public class WebSiteCreateResponse extends OperationResponse {
    // Backing field for the created web site details; optional, so it may be null.
    private WebSite site;

    /**
     * Optional. Details of the created web site.
     * @return The WebSite value.
     */
    public WebSite getWebSite() {
        return site;
    }

    /**
     * Optional. Details of the created web site.
     * @param webSiteValue The WebSite value.
     */
    public void setWebSite(final WebSite webSiteValue) {
        site = webSiteValue;
    }
}
| flydream2046/azure-sdk-for-java | service-management/azure-svc-mgmt-websites/src/main/java/com/microsoft/windowsazure/management/websites/models/WebSiteCreateResponse.java | Java | apache-2.0 | 1,447 |
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @codekit-prepend 'third_party/signals.min.js'
// @codekit-prepend 'third_party/requestAnimationFrame.js'
// Global namespace object; the modules appended below (via CodeKit) attach
// their helpers and components to it.
var CDS = {};
// @codekit-append 'helper/event-publisher.js'
// @codekit-append 'helper/util.js'
// @codekit-append 'helper/history.js'
// @codekit-append 'helper/analytics.js'
// @codekit-append 'helper/theme.js'
// @codekit-append 'helper/video-embedder.js'
// @codekit-append 'components/button.js'
// @codekit-append 'components/card.js'
// @codekit-append 'components/cards.js'
// @codekit-append 'components/toast.js'
// @codekit-append 'components/masthead.js'
// @codekit-append 'components/schedule.js'
// @codekit-append 'bootstrap.js'
| andreimuntean/devsummit | src/static/scripts/cds.js | JavaScript | apache-2.0 | 1,278 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hive.hcatalog.templeton;
/**
 * ExecBean - The results of an exec call: captured output streams and the
 * process exit code. Fields are public for bean-style JSON serialization.
 */
public class ExecBean {
  /** Standard output captured from the program. */
  public String stdout;
  /** Standard error captured from the program. */
  public String stderr;
  /** Exit code returned by the program. */
  public int exitcode;

  /** No-arg constructor required for bean (de)serialization. */
  public ExecBean() {}

  /**
   * Create a new ExecBean.
   *
   * @param stdout standard output of the program.
   * @param stderr error output of the program.
   * @param exitcode exit code of the program.
   */
  public ExecBean(String stdout, String stderr, int exitcode) {
    this.stdout = stdout;
    this.stderr = stderr;
    this.exitcode = exitcode;
  }

  @Override
  public String toString() {
    return String.format("ExecBean(stdout=%s, stderr=%s, exitcode=%s)",
             stdout, stderr, exitcode);
  }
}
| vineetgarg02/hive | hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecBean.java | Java | apache-2.0 | 1,535 |
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.flex.forks.batik.ext.awt.image;
import org.apache.flex.forks.batik.ext.awt.image.renderable.Filter;
import org.apache.flex.forks.batik.util.ParsedURL;
import org.apache.flex.forks.batik.util.SoftReferenceCache;
/**
 * This class manages a cache of soft references to Images that
 * we have already loaded.
 *
 * <p>
 * Adding an image is two fold. First you add the ParsedURL, this lets
 * the cache know that someone is working on this ParsedURL. Then when
 * the completed RenderedImage is ready you put it into the cache.
 * </p>
 * <p>
 * If someone requests a ParsedURL after it has been added but before it has
 * been put they will be blocked until the put.
 * </p>
 *
 * @author <a href="mailto:thomas.deweese@kodak.com">Thomas DeWeese</a>
 * @version $Id: URLImageCache.java 475477 2006-11-15 22:44:28Z cam $
 */
public class URLImageCache extends SoftReferenceCache {
    /** JVM-wide shared cache instance, returned by {@link #getDefaultCache()}. */
    static URLImageCache theCache = new URLImageCache();
    public static URLImageCache getDefaultCache() { return theCache; }
    /**
     * Let people create their own caches.
     */
    public URLImageCache() { }
    /**
     * Check if <tt>request(url)</tt> will return with a Filter
     * (not putting you on the hook for it). Note that it is possible
     * that this will return true but between this call and the call
     * to request the soft-reference will be cleared. So it
     * is still possible for request to return NULL, just much less
     * likely (you can always call 'clear' in that case).
     */
    public synchronized boolean isPresent(ParsedURL purl) {
        return super.isPresentImpl(purl);
    }
    /**
     * Check if <tt>request(url)</tt> will return immediately with the
     * Filter. Note that it is possible that this will return
     * true but between this call and the call to request the
     * soft-reference will be cleared.
     */
    public synchronized boolean isDone(ParsedURL purl) {
        return super.isDoneImpl(purl);
    }
    /**
     * If this returns null then you are now 'on the hook'
     * to put the Filter associated with ParsedURL into the
     * cache. */
    public synchronized Filter request(ParsedURL purl) {
        return (Filter)super.requestImpl(purl);
    }
    /**
     * Clear the entry for ParsedURL.
     * This is the easiest way to 'get off the hook'
     * if you didn't intend to get on it.
     */
    public synchronized void clear(ParsedURL purl) {
        super.clearImpl(purl);
    }
    /**
     * Associate bi with purl. bi is only referenced through
     * a soft reference so don't rely on the cache to keep it
     * around. If the map no longer contains our purl it was
     * probably cleared or flushed since we were put on the hook
     * for it, so in that case we will do nothing.
     */
    public synchronized void put(ParsedURL purl, Filter filt) {
        super.putImpl(purl, filt);
    }
}
| adufilie/flex-sdk | modules/thirdparty/batik/sources/org/apache/flex/forks/batik/ext/awt/image/URLImageCache.java | Java | apache-2.0 | 3,727 |
<?php
/**
* @link http://www.yiiframework.com/
* @copyright Copyright (c) 2008 Yii Software LLC
* @license http://www.yiiframework.com/license/
*/
namespace yii\web;
use Yii;
use yii\base\Component;
use yii\base\InvalidConfigException;
use yii\base\InvalidParamException;
/**
* Session provides session data management and the related configurations.
*
* Session is a Web application component that can be accessed via `Yii::$app->session`.
*
* To start the session, call [[open()]]; To complete and send out session data, call [[close()]];
* To destroy the session, call [[destroy()]].
*
* Session can be used like an array to set and get session data. For example,
*
* ~~~
* $session = new Session;
* $session->open();
* $value1 = $session['name1']; // get session variable 'name1'
* $value2 = $session['name2']; // get session variable 'name2'
* foreach ($session as $name => $value) // traverse all session variables
* $session['name3'] = $value3; // set session variable 'name3'
* ~~~
*
* Session can be extended to support customized session storage.
* To do so, override [[useCustomStorage]] so that it returns true, and
* override these methods with the actual logic about using custom storage:
* [[openSession()]], [[closeSession()]], [[readSession()]], [[writeSession()]],
* [[destroySession()]] and [[gcSession()]].
*
* Session also supports a special type of session data, called *flash messages*.
* A flash message is available only in the current request and the next request.
* After that, it will be deleted automatically. Flash messages are particularly
* useful for displaying confirmation messages. To use flash messages, simply
* call methods such as [[setFlash()]], [[getFlash()]].
*
* @property array $allFlashes Flash messages (key => message). This property is read-only.
* @property array $cookieParams The session cookie parameters. This property is read-only.
* @property integer $count The number of session variables. This property is read-only.
* @property string $flash The key identifying the flash message. Note that flash messages and normal session
* variables share the same name space. If you have a normal session variable using the same name, its value will
* be overwritten by this method. This property is write-only.
* @property float $gCProbability The probability (percentage) that the GC (garbage collection) process is
* started on every session initialization, defaults to 1 meaning 1% chance.
* @property boolean $hasSessionId Whether the current request has sent the session ID.
* @property string $id The current session ID.
* @property boolean $isActive Whether the session has started. This property is read-only.
* @property SessionIterator $iterator An iterator for traversing the session variables. This property is
* read-only.
* @property string $name The current session name.
* @property string $savePath The current session save path, defaults to '/tmp'.
* @property integer $timeout The number of seconds after which data will be seen as 'garbage' and cleaned up.
* The default value is 1440 seconds (or the value of "session.gc_maxlifetime" set in php.ini).
* @property boolean|null $useCookies The value indicating whether cookies should be used to store session
* IDs.
* @property boolean $useCustomStorage Whether to use custom storage. This property is read-only.
* @property boolean $useTransparentSessionID Whether transparent sid support is enabled or not, defaults to
* false.
*
* @author Qiang Xue <qiang.xue@gmail.com>
* @since 2.0
*/
class Session extends Component implements \IteratorAggregate, \ArrayAccess, \Countable
{
    /**
     * @var string the name of the session variable that stores the flash message data.
     */
    public $flashParam = '__flash';
    /**
     * @var \SessionHandlerInterface|array an object implementing the SessionHandlerInterface or a configuration array. If set, will be used to provide persistency instead of built-in methods.
     */
    public $handler;
    /**
     * @var array parameter-value pairs to override default session cookie parameters that are used for session_set_cookie_params() function
     * Array may have the following possible keys: 'lifetime', 'path', 'domain', 'secure', 'httponly'
     * @see http://www.php.net/manual/en/function.session-set-cookie-params.php
     */
    private $_cookieParams = ['httponly' => true];
    /**
     * Initializes the application component.
     * This method is required by IApplicationComponent and is invoked by application.
     */
    public function init()
    {
        parent::init();
        // Ensure session data is flushed to storage even when close() is never
        // called explicitly during the request.
        register_shutdown_function([$this, 'close']);
    }
    /**
     * Returns a value indicating whether to use custom session storage.
     * This method should be overridden to return true by child classes that implement custom session storage.
     * To implement custom session storage, override these methods: [[openSession()]], [[closeSession()]],
     * [[readSession()]], [[writeSession()]], [[destroySession()]] and [[gcSession()]].
     * @return boolean whether to use custom storage.
     */
    public function getUseCustomStorage()
    {
        return false;
    }
    /**
     * Starts the session.
     */
    public function open()
    {
        if ($this->getIsActive()) {
            return;
        }
        // Order matters: the save handler and cookie parameters must be
        // configured before session_start() is invoked.
        $this->registerSessionHandler();
        $this->setCookieParamsInternal();
        // @ suppresses the PHP warning on failure; the failure is logged below instead.
        @session_start();
        if ($this->getIsActive()) {
            Yii::info('Session started', __METHOD__);
            $this->updateFlashCounters();
        } else {
            $error = error_get_last();
            $message = isset($error['message']) ? $error['message'] : 'Failed to start session.';
            Yii::error($message, __METHOD__);
        }
    }
    /**
     * Registers session handler.
     * @throws \yii\base\InvalidConfigException
     */
    protected function registerSessionHandler()
    {
        if ($this->handler !== null) {
            if (!is_object($this->handler)) {
                // Lazily instantiate the handler from its configuration array.
                $this->handler = Yii::createObject($this->handler);
            }
            if (!$this->handler instanceof \SessionHandlerInterface) {
                throw new InvalidConfigException('"' . get_class($this) . '::handler" must implement the SessionHandlerInterface.');
            }
            @session_set_save_handler($this->handler, false);
        } elseif ($this->getUseCustomStorage()) {
            // Wire the overridable *Session() methods as the PHP save handler callbacks.
            @session_set_save_handler(
                [$this, 'openSession'],
                [$this, 'closeSession'],
                [$this, 'readSession'],
                [$this, 'writeSession'],
                [$this, 'destroySession'],
                [$this, 'gcSession']
            );
        }
    }
    /**
     * Ends the current session and store session data.
     */
    public function close()
    {
        if ($this->getIsActive()) {
            @session_write_close();
        }
    }
    /**
     * Frees all session variables and destroys all data registered to a session.
     */
    public function destroy()
    {
        if ($this->getIsActive()) {
            @session_unset();
            @session_destroy();
        }
    }
/**
* @return boolean whether the session has started
*/
public function getIsActive()
{
return session_status() == PHP_SESSION_ACTIVE;
}
private $_hasSessionId;
/**
* Returns a value indicating whether the current request has sent the session ID.
* The default implementation will check cookie and $_GET using the session name.
* If you send session ID via other ways, you may need to override this method
* or call [[setHasSessionId()]] to explicitly set whether the session ID is sent.
* @return boolean whether the current request has sent the session ID.
*/
public function getHasSessionId()
{
if ($this->_hasSessionId === null) {
$name = $this->getName();
$request = Yii::$app->getRequest();
if (ini_get('session.use_cookies') && !empty($_COOKIE[$name])) {
$this->_hasSessionId = true;
} elseif (!ini_get('use_only_cookies') && ini_get('use_trans_sid')) {
$this->_hasSessionId = $request->get($name) !== null;
} else {
$this->_hasSessionId = false;
}
}
return $this->_hasSessionId;
}
    /**
     * Sets the value indicating whether the current request has sent the session ID.
     * This method is provided so that you can override the default way of determining
     * whether the session ID is sent.
     * @param boolean $value whether the current request has sent the session ID.
     */
    public function setHasSessionId($value)
    {
        $this->_hasSessionId = $value;
    }
    /**
     * @return string the current session ID
     */
    public function getId()
    {
        return session_id();
    }
    /**
     * @param string $value the session ID for the current session
     */
    public function setId($value)
    {
        session_id($value);
    }
    /**
     * Updates the current session ID with a newly generated one.
     * Please refer to <http://php.net/session_regenerate_id> for more details.
     * @param boolean $deleteOldSession Whether to delete the old associated session file or not.
     */
    public function regenerateID($deleteOldSession = false)
    {
        // add @ to inhibit possible warning due to race condition
        // https://github.com/yiisoft/yii2/pull/1812
        @session_regenerate_id($deleteOldSession);
    }
    /**
     * @return string the current session name
     */
    public function getName()
    {
        return session_name();
    }
    /**
     * @param string $value the session name for the current session, must be an alphanumeric string.
     * It defaults to "PHPSESSID".
     */
    public function setName($value)
    {
        session_name($value);
    }
    /**
     * @return string the current session save path, defaults to '/tmp'.
     */
    public function getSavePath()
    {
        return session_save_path();
    }
    /**
     * @param string $value the current session save path. This can be either a directory name or a path alias.
     * @throws InvalidParamException if the path is not a valid directory
     */
    public function setSavePath($value)
    {
        $path = Yii::getAlias($value);
        if (is_dir($path)) {
            session_save_path($path);
        } else {
            throw new InvalidParamException("Session save path is not a valid directory: $value");
        }
    }
    /**
     * @return array the session cookie parameters.
     * @see http://us2.php.net/manual/en/function.session-get-cookie-params.php
     */
    public function getCookieParams()
    {
        // Configured overrides (keys lower-cased) win over the PHP runtime defaults.
        return array_merge(session_get_cookie_params(), array_change_key_case($this->_cookieParams));
    }
    /**
     * Sets the session cookie parameters.
     * The cookie parameters passed to this method will be merged with the result
     * of `session_get_cookie_params()`.
     * @param array $value cookie parameters, valid keys include: `lifetime`, `path`, `domain`, `secure` and `httponly`.
     * @throws InvalidParamException if the parameters are incomplete.
     * @see http://us2.php.net/manual/en/function.session-set-cookie-params.php
     */
    public function setCookieParams(array $value)
    {
        $this->_cookieParams = $value;
    }
/**
* Sets the session cookie parameters.
* This method is called by [[open()]] when it is about to open the session.
* @throws InvalidParamException if the parameters are incomplete.
* @see http://us2.php.net/manual/en/function.session-set-cookie-params.php
*/
private function setCookieParamsInternal()
{
$data = $this->getCookieParams();
extract($data);
if (isset($lifetime, $path, $domain, $secure, $httponly)) {
session_set_cookie_params($lifetime, $path, $domain, $secure, $httponly);
} else {
throw new InvalidParamException('Please make sure cookieParams contains these elements: lifetime, path, domain, secure and httponly.');
}
}
    /**
     * Returns the value indicating whether cookies should be used to store session IDs.
     * @return boolean|null the value indicating whether cookies should be used to store session IDs.
     * @see setUseCookies()
     */
    public function getUseCookies()
    {
        if (ini_get('session.use_cookies') === '0') {
            return false;
        } elseif (ini_get('session.use_only_cookies') === '1') {
            return true;
        } else {
            // Cookies allowed but not required: other transports (e.g. GET) may be used.
            return null;
        }
    }
    /**
     * Sets the value indicating whether cookies should be used to store session IDs.
     * Three states are possible:
     *
     * - true: cookies and only cookies will be used to store session IDs.
     * - false: cookies will not be used to store session IDs.
     * - null: if possible, cookies will be used to store session IDs; if not, other mechanisms will be used (e.g. GET parameter)
     *
     * @param boolean|null $value the value indicating whether cookies should be used to store session IDs.
     */
    public function setUseCookies($value)
    {
        if ($value === false) {
            ini_set('session.use_cookies', '0');
            ini_set('session.use_only_cookies', '0');
        } elseif ($value === true) {
            ini_set('session.use_cookies', '1');
            ini_set('session.use_only_cookies', '1');
        } else {
            ini_set('session.use_cookies', '1');
            ini_set('session.use_only_cookies', '0');
        }
    }
    /**
     * @return float the probability (percentage) that the GC (garbage collection) process is started on every session initialization, defaults to 1 meaning 1% chance.
     */
    public function getGCProbability()
    {
        return (float) (ini_get('session.gc_probability') / ini_get('session.gc_divisor') * 100);
    }
    /**
     * @param float $value the probability (percentage) that the GC (garbage collection) process is started on every session initialization.
     * @throws InvalidParamException if the value is not between 0 and 100.
     */
    public function setGCProbability($value)
    {
        if ($value >= 0 && $value <= 100) {
            // percent * 21474837 / 2147483647 ≈ percent * 0.01
            ini_set('session.gc_probability', floor($value * 21474836.47));
            ini_set('session.gc_divisor', 2147483647);
        } else {
            throw new InvalidParamException('GCProbability must be a value between 0 and 100.');
        }
    }
    /**
     * @return boolean whether transparent sid support is enabled or not, defaults to false.
     */
    public function getUseTransparentSessionID()
    {
        return ini_get('session.use_trans_sid') == 1;
    }
    /**
     * @param boolean $value whether transparent sid support is enabled or not.
     */
    public function setUseTransparentSessionID($value)
    {
        ini_set('session.use_trans_sid', $value ? '1' : '0');
    }
    /**
     * @return integer the number of seconds after which data will be seen as 'garbage' and cleaned up.
     * The default value is 1440 seconds (or the value of "session.gc_maxlifetime" set in php.ini).
     */
    public function getTimeout()
    {
        return (int) ini_get('session.gc_maxlifetime');
    }
    /**
     * @param integer $value the number of seconds after which data will be seen as 'garbage' and cleaned up
     */
    public function setTimeout($value)
    {
        ini_set('session.gc_maxlifetime', $value);
    }
    // The six methods below are no-op defaults for the PHP save-handler
    // callbacks wired up by registerSessionHandler(); subclasses implementing
    // custom storage override them (see getUseCustomStorage()).
    /**
     * Session open handler.
     * This method should be overridden if [[useCustomStorage]] returns true.
     * Do not call this method directly.
     * @param string $savePath session save path
     * @param string $sessionName session name
     * @return boolean whether session is opened successfully
     */
    public function openSession($savePath, $sessionName)
    {
        return true;
    }
    /**
     * Session close handler.
     * This method should be overridden if [[useCustomStorage]] returns true.
     * Do not call this method directly.
     * @return boolean whether session is closed successfully
     */
    public function closeSession()
    {
        return true;
    }
    /**
     * Session read handler.
     * This method should be overridden if [[useCustomStorage]] returns true.
     * Do not call this method directly.
     * @param string $id session ID
     * @return string the session data
     */
    public function readSession($id)
    {
        return '';
    }
    /**
     * Session write handler.
     * This method should be overridden if [[useCustomStorage]] returns true.
     * Do not call this method directly.
     * @param string $id session ID
     * @param string $data session data
     * @return boolean whether session write is successful
     */
    public function writeSession($id, $data)
    {
        return true;
    }
    /**
     * Session destroy handler.
     * This method should be overridden if [[useCustomStorage]] returns true.
     * Do not call this method directly.
     * @param string $id session ID
     * @return boolean whether session is destroyed successfully
     */
    public function destroySession($id)
    {
        return true;
    }
    /**
     * Session GC (garbage collection) handler.
     * This method should be overridden if [[useCustomStorage]] returns true.
     * Do not call this method directly.
     * @param integer $maxLifetime the number of seconds after which data will be seen as 'garbage' and cleaned up.
     * @return boolean whether session is GCed successfully
     */
    public function gcSession($maxLifetime)
    {
        return true;
    }
    /**
     * Returns an iterator for traversing the session variables.
     * This method is required by the interface IteratorAggregate.
     * @return SessionIterator an iterator for traversing the session variables.
     */
    public function getIterator()
    {
        // Accessors lazily open the session so $_SESSION is populated.
        $this->open();
        return new SessionIterator;
    }
    /**
     * Returns the number of items in the session.
     * @return integer the number of session variables
     */
    public function getCount()
    {
        $this->open();
        return count($_SESSION);
    }
    /**
     * Returns the number of items in the session.
     * This method is required by Countable interface.
     * @return integer number of items in the session.
     */
    public function count()
    {
        return $this->getCount();
    }
    /**
     * Returns the session variable value with the session variable name.
     * If the session variable does not exist, the `$defaultValue` will be returned.
     * @param string $key the session variable name
     * @param mixed $defaultValue the default value to be returned when the session variable does not exist.
     * @return mixed the session variable value, or $defaultValue if the session variable does not exist.
     */
    public function get($key, $defaultValue = null)
    {
        $this->open();
        return isset($_SESSION[$key]) ? $_SESSION[$key] : $defaultValue;
    }
    /**
     * Adds a session variable.
     * If the specified name already exists, the old value will be overwritten.
     * @param string $key session variable name
     * @param mixed $value session variable value
     */
    public function set($key, $value)
    {
        $this->open();
        $_SESSION[$key] = $value;
    }
    /**
     * Removes a session variable.
     * @param string $key the name of the session variable to be removed
     * @return mixed the removed value, null if no such session variable.
     */
    public function remove($key)
    {
        $this->open();
        if (isset($_SESSION[$key])) {
            $value = $_SESSION[$key];
            unset($_SESSION[$key]);
            return $value;
        } else {
            return null;
        }
    }
    /**
     * Removes all session variables
     */
    public function removeAll()
    {
        $this->open();
        foreach (array_keys($_SESSION) as $key) {
            unset($_SESSION[$key]);
        }
    }
    /**
     * @param mixed $key session variable name
     * @return boolean whether there is the named session variable
     */
    public function has($key)
    {
        $this->open();
        return isset($_SESSION[$key]);
    }
    /**
     * Updates the counters for flash messages and removes outdated flash messages.
     * This method should only be called once in [[init()]].
     */
    protected function updateFlashCounters()
    {
        // Counter semantics: -1 = keep until accessed, 0 = set this request,
        // >0 = already survived one request and is now stale.
        $counters = $this->get($this->flashParam, []);
        if (is_array($counters)) {
            foreach ($counters as $key => $count) {
                if ($count > 0) {
                    // Stale flash from a previous request: drop it.
                    unset($counters[$key], $_SESSION[$key]);
                } elseif ($count == 0) {
                    // Set last request: age it so it is removed next time.
                    $counters[$key]++;
                }
            }
            $_SESSION[$this->flashParam] = $counters;
        } else {
            // fix the unexpected problem that flashParam doesn't return an array
            unset($_SESSION[$this->flashParam]);
        }
    }
    /**
     * Returns a flash message.
     * @param string $key the key identifying the flash message
     * @param mixed $defaultValue value to be returned if the flash message does not exist.
     * @param boolean $delete whether to delete this flash message right after this method is called.
     * If false, the flash message will be automatically deleted in the next request.
     * @return mixed the flash message
     * @see setFlash()
     * @see hasFlash()
     * @see getAllFlashes()
     * @see removeFlash()
     */
    public function getFlash($key, $defaultValue = null, $delete = false)
    {
        $counters = $this->get($this->flashParam, []);
        if (isset($counters[$key])) {
            $value = $this->get($key, $defaultValue);
            if ($delete) {
                $this->removeFlash($key);
            } elseif ($counters[$key] < 0) {
                // mark for deletion in the next request
                $counters[$key] = 1;
                $_SESSION[$this->flashParam] = $counters;
            }
            return $value;
        } else {
            return $defaultValue;
        }
    }
    /**
     * Returns all flash messages.
     *
     * You may use this method to display all the flash messages in a view file:
     *
     * ```php
     * <?php
     * foreach(Yii::$app->session->getAllFlashes() as $key => $message) {
     *     echo '<div class="alert alert-' . $key . '">' . $message . '</div>';
     * } ?>
     * ```
     *
     * With the above code you can use the [bootstrap alert][] classes such as `success`, `info`, `danger`
     * as the flash message key to influence the color of the div.
     *
     * [bootstrap alert]: http://getbootstrap.com/components/#alerts
     *
     * @param boolean $delete whether to delete the flash messages right after this method is called.
     * If false, the flash messages will be automatically deleted in the next request.
     * @return array flash messages (key => message).
     * @see setFlash()
     * @see getFlash()
     * @see hasFlash()
     * @see removeFlash()
     */
    public function getAllFlashes($delete = false)
    {
        $counters = $this->get($this->flashParam, []);
        $flashes = [];
        foreach (array_keys($counters) as $key) {
            if (array_key_exists($key, $_SESSION)) {
                $flashes[$key] = $_SESSION[$key];
                if ($delete) {
                    unset($counters[$key], $_SESSION[$key]);
                } elseif ($counters[$key] < 0) {
                    // mark for deletion in the next request
                    $counters[$key] = 1;
                }
            } else {
                // Counter exists without a value: clean up the orphaned entry.
                unset($counters[$key]);
            }
        }
        $_SESSION[$this->flashParam] = $counters;
        return $flashes;
    }
    /**
     * Sets a flash message.
     * A flash message will be automatically deleted after it is accessed in a request and the deletion will happen
     * in the next request.
     * If there is already an existing flash message with the same key, it will be overwritten by the new one.
     * @param string $key the key identifying the flash message. Note that flash messages
     * and normal session variables share the same name space. If you have a normal
     * session variable using the same name, its value will be overwritten by this method.
     * @param mixed $value flash message
     * @param boolean $removeAfterAccess whether the flash message should be automatically removed only if
     * it is accessed. If false, the flash message will be automatically removed after the next request,
     * regardless if it is accessed or not. If true (default value), the flash message will remain until after
     * it is accessed.
     * @see getFlash()
     * @see removeFlash()
     */
    public function setFlash($key, $value = true, $removeAfterAccess = true)
    {
        $counters = $this->get($this->flashParam, []);
        $counters[$key] = $removeAfterAccess ? -1 : 0;
        $_SESSION[$key] = $value;
        $_SESSION[$this->flashParam] = $counters;
    }
    /**
     * Adds a flash message.
     * If there are existing flash messages with the same key, the new one will be appended to the existing message array.
     * @param string $key the key identifying the flash message.
     * @param mixed $value flash message
     * @param boolean $removeAfterAccess whether the flash message should be automatically removed only if
     * it is accessed. If false, the flash message will be automatically removed after the next request,
     * regardless if it is accessed or not. If true (default value), the flash message will remain until after
     * it is accessed.
     * @see getFlash()
     * @see removeFlash()
     */
    public function addFlash($key, $value = true, $removeAfterAccess = true)
    {
        $counters = $this->get($this->flashParam, []);
        $counters[$key] = $removeAfterAccess ? -1 : 0;
        $_SESSION[$this->flashParam] = $counters;
        if (empty($_SESSION[$key])) {
            $_SESSION[$key] = [$value];
        } else {
            if (is_array($_SESSION[$key])) {
                $_SESSION[$key][] = $value;
            } else {
                // Promote an existing scalar flash to an array before appending.
                $_SESSION[$key] = [$_SESSION[$key], $value];
            }
        }
    }
    /**
     * Removes a flash message.
     * @param string $key the key identifying the flash message. Note that flash messages
     * and normal session variables share the same name space. If you have a normal
     * session variable using the same name, it will be removed by this method.
     * @return mixed the removed flash message. Null if the flash message does not exist.
     * @see getFlash()
     * @see setFlash()
     * @see removeAllFlashes()
     */
    public function removeFlash($key)
    {
        $counters = $this->get($this->flashParam, []);
        // Only treat the value as a flash if both the value and its counter exist.
        $value = isset($_SESSION[$key], $counters[$key]) ? $_SESSION[$key] : null;
        unset($counters[$key], $_SESSION[$key]);
        $_SESSION[$this->flashParam] = $counters;
        return $value;
    }
    /**
     * Removes all flash messages.
     * Note that flash messages and normal session variables share the same name space.
     * If you have a normal session variable using the same name, it will be removed
     * by this method.
     * @see getFlash()
     * @see setFlash()
     * @see removeFlash()
     */
    public function removeAllFlashes()
    {
        $counters = $this->get($this->flashParam, []);
        foreach (array_keys($counters) as $key) {
            unset($_SESSION[$key]);
        }
        unset($_SESSION[$this->flashParam]);
    }
    /**
     * Returns a value indicating whether there are flash messages associated with the specified key.
     * @param string $key key identifying the flash message type
     * @return boolean whether any flash messages exist under specified key
     */
    public function hasFlash($key)
    {
        return $this->getFlash($key) !== null;
    }
    /**
     * This method is required by the interface ArrayAccess.
     * @param mixed $offset the offset to check on
     * @return boolean
     */
    public function offsetExists($offset)
    {
        $this->open();
        return isset($_SESSION[$offset]);
    }
    /**
     * This method is required by the interface ArrayAccess.
     * @param integer $offset the offset to retrieve element.
     * @return mixed the element at the offset, null if no element is found at the offset
     */
    public function offsetGet($offset)
    {
        $this->open();
        return isset($_SESSION[$offset]) ? $_SESSION[$offset] : null;
    }
    /**
     * This method is required by the interface ArrayAccess.
     * @param integer $offset the offset to set element
     * @param mixed $item the element value
     */
    public function offsetSet($offset, $item)
    {
        $this->open();
        $_SESSION[$offset] = $item;
    }
    /**
     * This method is required by the interface ArrayAccess.
     * @param mixed $offset the offset to unset element
     */
    public function offsetUnset($offset)
    {
        $this->open();
        unset($_SESSION[$offset]);
    }
}
| eggao/fis-yii2-demo | vendor/yiisoft/yii2/web/Session.php | PHP | bsd-2-clause | 29,566 |
using System;
using System.Collections.Generic;
using System.IO;
using System.Web;
using Orchard.ContentManagement;
using Orchard.ContentManagement.MetaData.Models;
using Orchard.MediaLibrary.Factories;
using Orchard.MediaLibrary.Models;
namespace Orchard.MediaLibrary.Services {
public interface IMediaLibraryService : IDependency {
IEnumerable<ContentTypeDefinition> GetMediaTypes();
IContentQuery<MediaPart, MediaPartRecord> GetMediaContentItems(VersionOptions versionOptions = null);
IEnumerable<MediaPart> GetMediaContentItems(string folderPath, int skip, int count, string order, string mediaType, VersionOptions versionOptions = null);
IEnumerable<MediaPart> GetMediaContentItems(int skip, int count, string order, string mediaType, VersionOptions versionOptions = null);
int GetMediaContentItemsCount(string folderPath, string mediaType, VersionOptions versionOptions = null);
int GetMediaContentItemsCount(string mediaType, VersionOptions versionOptions = null);
MediaPart ImportMedia(string relativePath, string filename);
MediaPart ImportMedia(string relativePath, string filename, string contentType);
MediaPart ImportMedia(Stream stream, string relativePath, string filename);
MediaPart ImportMedia(Stream stream, string relativePath, string filename, string contentType);
IMediaFactory GetMediaFactory(Stream stream, string mimeType, string contentType);
/// <summary>
/// Creates a unique filename to prevent filename collisions.
/// </summary>
/// <param name="folderPath">The relative where collisions will be checked.</param>
/// <param name="filename">The desired filename.</param>
/// <returns>A string representing a unique filename.</returns>
string GetUniqueFilename(string folderPath, string filename);
/// <summary>
/// Returns the public URL for a media file.
/// </summary>
/// <param name="mediaPath">The relative path of the media folder containing the media.</param>
/// <param name="fileName">The media file name.</param>
/// <returns>The public URL for the media.</returns>
string GetMediaPublicUrl(string mediaPath, string fileName);
IMediaFolder GetRootMediaFolder();
/// <summary>
/// Retrieves the media folders within a given relative path.
/// </summary>
/// <param name="relativePath">The path where to retrieve the media folder from. null means root.</param>
/// <returns>The media folder in the given path.</returns>
IEnumerable<IMediaFolder> GetMediaFolders(string relativePath);
/// <summary>
/// Retrieves the media files within a given relative path.
/// </summary>
/// <param name="relativePath">The path where to retrieve the media files from. null means root.</param>
/// <returns>The media files in the given path.</returns>
IEnumerable<MediaFile> GetMediaFiles(string relativePath);
/// <summary>
/// Creates a media folder.
/// </summary>
/// <param name="relativePath">The path where to create the new folder. null means root.</param>
/// <param name="folderName">The name of the folder to be created.</param>
void CreateFolder(string relativePath, string folderName);
/// <summary>
/// Deletes a media folder.
/// </summary>
/// <param name="folderPath">The path to the folder to be deleted.</param>
void DeleteFolder(string folderPath);
/// <summary>
/// Renames a media folder.
/// </summary>
/// <param name="folderPath">The path to the folder to be renamed.</param>
/// <param name="newFolderName">The new folder name.</param>
void RenameFolder(string folderPath, string newFolderName);
/// <summary>
/// Deletes a media file.
/// </summary>
/// <param name="folderPath">The folder path.</param>
/// <param name="fileName">The file name.</param>
void DeleteFile(string folderPath, string fileName);
/// <summary>
/// Renames a media file.
/// </summary>
/// <param name="folderPath">The path to the file's parent folder.</param>
/// <param name="currentFileName">The current file name.</param>
/// <param name="newFileName">The new file name.</param>
void RenameFile(string folderPath, string currentFileName, string newFileName);
/// <summary>
/// Moves a media file.
/// </summary>
/// <param name="currentPath">The path to the file's parent folder.</param>
/// <param name="filename">The file name.</param>
/// <param name="newPath">The path where the file will be moved to.</param>
/// <param name="newFilename">The new file name.</param>
void MoveFile(string currentPath, string filename, string newPath, string newFilename);
/// <summary>
/// Moves a media file.
/// </summary>
/// <param name="currentPath">The path to the file's parent folder.</param>
/// <param name="filename">The file name.</param>
/// <param name="duplicatePath">The path where the file will be copied to.</param>
/// <param name="duplicateFilename">The new file name.</param>
void CopyFile(string currentPath, string filename, string duplicatePath, string duplicateFilename);
/// <summary>
/// Uploads a media file based on a posted file.
/// </summary>
/// <param name="folderPath">The path to the folder where to upload the file.</param>
/// <param name="postedFile">The file to upload.</param>
/// <returns>The path to the uploaded file.</returns>
string UploadMediaFile(string folderPath, HttpPostedFileBase postedFile);
/// <summary>
/// Uploads a media file based on an array of bytes.
/// </summary>
/// <param name="folderPath">The path to the folder where to upload the file.</param>
/// <param name="fileName">The file name.</param>
/// <param name="bytes">The array of bytes with the file's contents.</param>
/// <returns>The path to the uploaded file.</returns>
string UploadMediaFile(string folderPath, string fileName, byte[] bytes);
/// <summary>
/// Uploads a media file based on a stream.
/// </summary>
/// <param name="folderPath">The folder path to where to upload the file.</param>
/// <param name="fileName">The file name.</param>
/// <param name="inputStream">The stream with the file's contents.</param>
/// <returns>The path to the uploaded file.</returns>
string UploadMediaFile(string folderPath, string fileName, Stream inputStream);
}
public static class MediaLibrayServiceExtensions {
    /// <summary>
    /// Determines whether the current user may manage the given media folder.
    /// Management is allowed when no per-user root folder is configured
    /// (i.e. the user has access to the whole hierarchy) or when the folder
    /// lies under the user's personal storage folder.
    /// </summary>
    public static bool CanManageMediaFolder(this IMediaLibraryService service, string folderPath) {
        var personalRoot = service.GetRootMediaFolder();

        // The trailing separator prevents a prefix like "foo" from
        // incorrectly matching a sibling folder named "foobar".
        return personalRoot == null
            || (folderPath + "\\").StartsWith(personalRoot.MediaPath + "\\", StringComparison.OrdinalIgnoreCase);
    }

    /// <summary>
    /// Prefixes <paramref name="folderPath"/> with the user's root media
    /// folder when one is configured; otherwise returns the path unchanged.
    /// A null <paramref name="folderPath"/> resolves to the root itself.
    /// </summary>
    public static string GetRootedFolderPath(this IMediaLibraryService service, string folderPath) {
        var personalRoot = service.GetRootMediaFolder();
        return personalRoot == null
            ? folderPath
            : Path.Combine(personalRoot.MediaPath, folderPath ?? "");
    }
}
} | qt1/Orchard | src/Orchard.Web/Modules/Orchard.MediaLibrary/Services/IMediaLibraryService.cs | C# | bsd-3-clause | 7,917 |
""" This provides some useful code used by other modules. This is not to be
used by the end user which is why it is hidden. """
import string, sys
class LinkError(Exception):
    """Raised when an extension module was found but failed to load/link."""
    pass


def refine_import_err(mod_name, extension_name, exc):
    """ Checks to see if the ImportError was because the library
    itself was not there or because there was a link error.  If there
    was a link error it raises a LinkError if not it does nothing.

    Keyword arguments
    -----------------

     - mod_name : The name of the Python module that was imported.

     - extension_name : The name of the extension module that is to be
       imported by the module having mod_name.

     - exc : The exception raised when the module called mod_name was
       imported.

    To see example usage look at __init__.py.
    """
    # Drop the half-imported module so that a subsequent import retries
    # cleanly instead of returning the broken module object.
    try:
        del sys.modules['vtk.%s' % mod_name]
    except KeyError:
        pass
    # If the error message does not mention the extension module itself, the
    # extension was found but failed to link -- report that distinctly.
    # (``not in`` replaces the Python-2-only ``string.find(...) == -1`` and
    # the old ``raise Class, arg`` syntax, keeping the module importable on
    # both Python 2 and Python 3.)
    if extension_name not in str(exc):
        raise LinkError(str(exc))
| b3c/VTK-5.8 | Wrapping/Python/vtk/__helper.py | Python | bsd-3-clause | 981 |
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
This package provides *convenience classes* for each of six different kinds
of polynomials:
======================== ================
**Name** **Provides**
======================== ================
`~polynomial.Polynomial` Power series
`~chebyshev.Chebyshev` Chebyshev series
`~legendre.Legendre` Legendre series
`~laguerre.Laguerre` Laguerre series
`~hermite.Hermite` Hermite series
`~hermite_e.HermiteE` HermiteE series
======================== ================
These *convenience classes* provide a consistent interface for creating,
manipulating, and fitting data with polynomials of different bases.
The convenience classes are the preferred interface for the `~numpy.polynomial`
package, and are available from the ``numpy.polynomial`` namespace.
This eliminates the need to navigate to the corresponding submodules, e.g.
``np.polynomial.Polynomial`` or ``np.polynomial.Chebyshev`` instead of
``np.polynomial.polynomial.Polynomial`` or
``np.polynomial.chebyshev.Chebyshev``, respectively.
The classes provide a more consistent and concise interface than the
type-specific functions defined in the submodules for each type of polynomial.
For example, to fit a Chebyshev polynomial with degree ``1`` to data given
by arrays ``xdata`` and ``ydata``, the
`~chebyshev.Chebyshev.fit` class method::
>>> from numpy.polynomial import Chebyshev
>>> c = Chebyshev.fit(xdata, ydata, deg=1)
is preferred over the `chebyshev.chebfit` function from the
``np.polynomial.chebyshev`` module::
>>> from numpy.polynomial.chebyshev import chebfit
>>> c = chebfit(xdata, ydata, deg=1)
See :doc:`routines.polynomials.classes` for more details.
Convenience Classes
===================
The following lists the various constants and methods common to all of
the classes representing the various kinds of polynomials. In the following,
the term ``Poly`` represents any one of the convenience classes (e.g.
`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.)
while the lowercase ``p`` represents an **instance** of a polynomial class.
Constants
---------
- ``Poly.domain`` -- Default domain
- ``Poly.window`` -- Default window
- ``Poly.basis_name`` -- String used to represent the basis
- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed
- ``Poly.nickname`` -- String used in printing
Creation
--------
Methods for creating polynomial instances.
- ``Poly.basis(degree)`` -- Basis polynomial of given degree
- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x``
- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients
determined by the least-squares fit to the data ``x``, ``y``
- ``Poly.fromroots(roots)`` -- ``p`` with specified roots
- ``p.copy()`` -- Create a copy of ``p``
Conversion
----------
Methods for converting a polynomial instance of one kind to another.
- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly``
- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map
between ``domain`` and ``window``
Calculus
--------
- ``p.deriv()`` -- Take the derivative of ``p``
- ``p.integ()`` -- Integrate ``p``
Validation
----------
- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match
- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match
- ``Poly.has_sametype(p1, p2)`` -- Check if types match
- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match
Misc
----
- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain``
- ``p.mapparms()`` -- Return the parameters for the linear mapping between
``domain`` and ``window``.
- ``p.roots()`` -- Return the roots of `p`.
- ``p.trim()`` -- Remove trailing coefficients.
- ``p.cutdeg(degree)`` -- Truncate p to given degree
- ``p.truncate(size)`` -- Truncate p to given size
"""
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
# Public names re-exported by ``numpy.polynomial``: the print-style helper,
# the six polynomial submodules, and their convenience classes.
__all__ = [
    "set_default_printstyle",
    "polynomial", "Polynomial",
    "chebyshev", "Chebyshev",
    "legendre", "Legendre",
    "hermite", "Hermite",
    "hermite_e", "HermiteE",
    "laguerre", "Laguerre",
]
def set_default_printstyle(style):
    """
    Choose the default string representation used for polynomials.

    Parameters
    ----------
    style : str
        Either ``'unicode'`` or ``'ascii'``.  With ``'unicode'`` the
        repr/str output uses unicode super- and subscripts (e.g.
        ``2.0·x¹``); with ``'ascii'`` it falls back to plain text
        (e.g. ``2.0 x**1``).

    Raises
    ------
    ValueError
        If `style` is neither ``'unicode'`` nor ``'ascii'``.

    Notes
    -----
    The initial default depends on the platform: 'unicode' is used on
    Unix-based systems and 'ascii' on Windows, based on default font
    support for the unicode superscript and subscript ranges.  Explicit
    format strings (e.g. ``f"{p:unicode}"``) always override this
    package-level default.
    """
    if style not in ('unicode', 'ascii'):
        raise ValueError(
            f"Unsupported format string '{style}'. Valid options are 'ascii' "
            f"and 'unicode'"
        )
    from ._polybase import ABCPolyBase
    ABCPolyBase._use_unicode = style == 'unicode'
# Standard per-module numpy test entry point (``np.polynomial.test()``);
# the class itself is deleted so it does not leak into the public namespace.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| simongibbons/numpy | numpy/polynomial/__init__.py | Python | bsd-3-clause | 6,788 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/controller/oom_intervention_impl.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "base/bind.h"
#include "base/debug/crash_logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_gc_for_context_dispose.h"
#include "third_party/blink/renderer/controller/crash_memory_metrics_reporter_impl.h"
#include "third_party/blink/renderer/core/frame/local_dom_window.h"
#include "third_party/blink/renderer/core/frame/local_frame.h"
#include "third_party/blink/renderer/core/loader/frame_load_request.h"
#include "third_party/blink/renderer/core/page/page.h"
#include "third_party/blink/renderer/platform/bindings/v8_per_isolate_data.h"
namespace blink {
namespace {
// Lifecycle of the OOM intervention within this renderer process; recorded
// in a crash key so crash reports show whether an intervention was active.
enum class OomInterventionState {
  // Initial value for a variable.
  None,
  // Before the intervention has been triggered.
  Before,
  // While the intervention is active.
  During,
  // After the intervention has triggered at least once.
  After
};

// Records |next_state| in the "oom_intervention_state" crash key.  The state
// lives in a function-local static, so it is shared process-wide.
void UpdateStateCrashKey(OomInterventionState next_state) {
  static OomInterventionState current_state = OomInterventionState::None;
  // Once an intervention is triggered, the state shall never go back to the
  // Before state.
  if (next_state == OomInterventionState::Before &&
      current_state != OomInterventionState::None)
    return;

  if (current_state == next_state)
    return;
  current_state = next_state;

  static auto* crash_key = base::debug::AllocateCrashKeyString(
      "oom_intervention_state", base::debug::CrashKeySize::Size32);
  switch (current_state) {
    case OomInterventionState::None:
      base::debug::SetCrashKeyString(crash_key, "none");
      break;
    case OomInterventionState::Before:
      base::debug::SetCrashKeyString(crash_key, "before");
      break;
    case OomInterventionState::During:
      base::debug::SetCrashKeyString(crash_key, "during");
      break;
    case OomInterventionState::After:
      base::debug::SetCrashKeyString(crash_key, "after");
      break;
  }
}

// Navigates every *local* child frame tagged as an advertisement to
// about:blank, releasing the memory held by the ad's document.
void NavigateLocalAdsFrames(LocalFrame* frame) {
  // This navigates all the frames detected as an advertisement to about:blank.
  DCHECK(frame);
  for (Frame* child = frame->Tree().FirstChild(); child;
       child = child->Tree().TraverseNext(frame)) {
    if (auto* child_local_frame = DynamicTo<LocalFrame>(child)) {
      if (child_local_frame->IsAdSubframe()) {
        FrameLoadRequest request(frame->DomWindow(),
                                 ResourceRequest(BlankURL()));
        child_local_frame->Navigate(request, WebFrameLoadType::kStandard);
      }
    }
    // TODO(yuzus): Once AdsTracker for remote frames is implemented and OOPIF
    // is enabled on low-end devices, navigate remote ads as well.
  }
}

// Lazily-created process-wide singleton backing the mojo interface.
OomInterventionImpl& GetOomIntervention() {
  DEFINE_STATIC_LOCAL(OomInterventionImpl, oom_intervention, ());
  return oom_intervention;
}
} // namespace
// static
// Browser-side entry point for connecting the mojo interface; every
// connection is routed to the single process-wide instance.
void OomInterventionImpl::BindReceiver(
    mojo::PendingReceiver<mojom::blink::OomIntervention> receiver) {
  GetOomIntervention().Bind(std::move(receiver));
}

OomInterventionImpl::OomInterventionImpl()
    : delayed_report_timer_(Thread::MainThread()->GetTaskRunner(),
                            this,
                            &OomInterventionImpl::TimerFiredUMAReport) {
  UpdateStateCrashKey(OomInterventionState::Before);
}

OomInterventionImpl::~OomInterventionImpl() {
  UpdateStateCrashKey(OomInterventionState::After);
  MemoryUsageMonitorInstance().RemoveObserver(this);
}

void OomInterventionImpl::Bind(
    mojo::PendingReceiver<mojom::blink::OomIntervention> receiver) {
  // This interface can be bound multiple times; however, there should never
  // be multiple callers bound at a time, so drop any previous connection.
  Reset();
  receiver_.Bind(std::move(receiver));
  // Disconnection means the user closed the dialog without activating the OOM
  // intervention.
  receiver_.set_disconnect_handler(
      base::BindOnce(&OomInterventionImpl::Reset, base::Unretained(this)));
}

// Drops the current connection, releases the page pauser (if any) and stops
// observing memory pings.
void OomInterventionImpl::Reset() {
  receiver_.reset();
  host_.reset();
  pauser_.reset();
  MemoryUsageMonitorInstance().RemoveObserver(this);
}

// Stores the browser-supplied thresholds and feature flags, then starts
// observing the memory monitor; Check() runs on each subsequent ping.
void OomInterventionImpl::StartDetection(
    mojo::PendingRemote<mojom::blink::OomInterventionHost> host,
    mojom::blink::DetectionArgsPtr detection_args,
    bool renderer_pause_enabled,
    bool navigate_ads_enabled,
    bool purge_v8_memory_enabled) {
  host_.Bind(std::move(host));

  detection_args_ = std::move(detection_args);
  renderer_pause_enabled_ = renderer_pause_enabled;
  navigate_ads_enabled_ = navigate_ads_enabled;
  purge_v8_memory_enabled_ = purge_v8_memory_enabled;

  MemoryUsageMonitorInstance().AddObserver(this);
}

// Indirection point for obtaining the monitor -- presumably overridable so
// tests can substitute a mock; confirm against the header.
MemoryUsageMonitor& OomInterventionImpl::MemoryUsageMonitorInstance() {
  return MemoryUsageMonitor::Instance();
}

void OomInterventionImpl::OnMemoryPing(MemoryUsage usage) {
  // Ignore pings without process memory usage information.
  if (std::isnan(usage.private_footprint_bytes) ||
      std::isnan(usage.swap_bytes) || std::isnan(usage.vm_size_bytes))
    return;
  Check(usage);
}
// Compares the latest memory usage against the browser-provided thresholds.
// Each threshold is consulted only when it is > 0 (thresholds are in bytes,
// the metrics in KB); crossing any single one triggers the intervention.
void OomInterventionImpl::Check(MemoryUsage usage) {
  DCHECK(host_);

  OomInterventionMetrics current_memory =
      CrashMemoryMetricsReporterImpl::MemoryUsageToMetrics(usage);

  bool oom_detected = false;

  oom_detected |= detection_args_->blink_workload_threshold > 0 &&
                  current_memory.current_blink_usage_kb * 1024 >
                      detection_args_->blink_workload_threshold;
  oom_detected |= detection_args_->private_footprint_threshold > 0 &&
                  current_memory.current_private_footprint_kb * 1024 >
                      detection_args_->private_footprint_threshold;
  oom_detected |=
      detection_args_->swap_threshold > 0 &&
      current_memory.current_swap_kb * 1024 > detection_args_->swap_threshold;
  // NOTE: the "thresold" spelling comes from the mojom field name.
  oom_detected |= detection_args_->virtual_memory_thresold > 0 &&
                  current_memory.current_vm_size_kb * 1024 >
                      detection_args_->virtual_memory_thresold;

  // Report memory stats every second to send UMA.
  ReportMemoryStats(current_memory);

  if (oom_detected) {
    UpdateStateCrashKey(OomInterventionState::During);

    UMA_HISTOGRAM_MEMORY_MB(
        "Memory.Experimental.OomIntervention.V8UsageBefore",
        base::saturated_cast<int>(usage.v8_bytes / 1024 / 1024));

    // Optionally reclaim memory: blank out ad frames and/or forcibly purge
    // V8 memory in every local frame of every ordinary page.
    if (navigate_ads_enabled_ || purge_v8_memory_enabled_) {
      for (const auto& page : Page::OrdinaryPages()) {
        for (Frame* frame = page->MainFrame(); frame;
             frame = frame->Tree().TraverseNext()) {
          auto* local_frame = DynamicTo<LocalFrame>(frame);
          if (!local_frame)
            continue;
          if (navigate_ads_enabled_)
            NavigateLocalAdsFrames(local_frame);
          if (purge_v8_memory_enabled_)
            local_frame->ForciblyPurgeV8Memory();
        }
      }
    }

    if (renderer_pause_enabled_) {
      // The ScopedPagePauser is destroyed when the intervention is declined
      // and mojo strong binding is disconnected.
      pauser_ = std::make_unique<ScopedPagePauser>();
    }

    host_->OnHighMemoryUsage();
    MemoryUsageMonitorInstance().RemoveObserver(this);
    // Send memory pressure notification to trigger GC.
    Thread::MainThread()->GetTaskRunner()->PostTask(FROM_HERE,
                                                    base::BindOnce(&TriggerGC));
    // Notify V8GCForContextDispose that page navigation gc is needed when
    // intervention runs, as it indicates that memory usage is high.
    V8GCForContextDispose::Instance().SetForcePageNavigationGC();

    // Report the memory impact of intervention after 10, 20, 30 seconds.
    metrics_at_intervention_ = current_memory;
    number_of_report_needed_ = 3;
    delayed_report_timer_.StartRepeating(base::Seconds(10), FROM_HERE);
  }
}
// Emits the current blink usage, private memory footprint, swap footprint
// and VM size (all converted from KB to MB) to their UMA histograms.
void OomInterventionImpl::ReportMemoryStats(
    OomInterventionMetrics& current_memory) {
  UMA_HISTOGRAM_MEMORY_MB(
      "Memory.Experimental.OomIntervention.RendererBlinkUsage",
      base::saturated_cast<base::Histogram::Sample>(
          current_memory.current_blink_usage_kb / 1024));
  UMA_HISTOGRAM_MEMORY_LARGE_MB(
      "Memory.Experimental.OomIntervention."
      "RendererPrivateMemoryFootprint",
      base::saturated_cast<base::Histogram::Sample>(
          current_memory.current_private_footprint_kb / 1024));
  UMA_HISTOGRAM_MEMORY_MB(
      "Memory.Experimental.OomIntervention.RendererSwapFootprint",
      base::saturated_cast<base::Histogram::Sample>(
          current_memory.current_swap_kb / 1024));
  UMA_HISTOGRAM_MEMORY_LARGE_MB(
      "Memory.Experimental.OomIntervention.RendererVmSize",
      base::saturated_cast<base::Histogram::Sample>(
          current_memory.current_vm_size_kb / 1024));
}
// Converts a before/after pair of KB readings into a signed "memory freed"
// UMA sample in MB, clamped to the +/-500 MB range the sparse histograms
// expect. Positive values mean usage went down after the intervention.
int ToMemoryUsageDeltaSample(uint64_t after_kb, uint64_t before_kb) {
  const int freed_mb = (base::saturated_cast<int>(before_kb) -
                        base::saturated_cast<int>(after_kb)) /
                       1024;
  return std::clamp(freed_mb, -500, 500);
}
// Fires 10, 20 and 30 seconds after the intervention. The countdown
// |number_of_report_needed_| goes 3 -> 2 -> 1; the repeating timer is
// stopped on the final (30s) report.
void OomInterventionImpl::TimerFiredUMAReport(TimerBase*) {
  MemoryUsage usage = MemoryUsageMonitorInstance().GetCurrentMemoryUsage();
  OomInterventionMetrics current_memory =
      CrashMemoryMetricsReporterImpl::MemoryUsageToMetrics(usage);
  // Deltas are "memory freed since the intervention", clamped to +/-500 MB.
  int blink_usage_delta =
      ToMemoryUsageDeltaSample(current_memory.current_blink_usage_kb,
                               metrics_at_intervention_.current_blink_usage_kb);
  int private_footprint_delta = ToMemoryUsageDeltaSample(
      current_memory.current_private_footprint_kb,
      metrics_at_intervention_.current_private_footprint_kb);
  int v8_usage_mb = base::saturated_cast<int>(usage.v8_bytes / 1024 / 1024);
  switch (number_of_report_needed_--) {
    case 3:
      UMA_HISTOGRAM_MEMORY_MB(
          "Memory.Experimental.OomIntervention.V8UsageAfter10secs",
          v8_usage_mb);
      base::UmaHistogramSparse(
          "Memory.Experimental.OomIntervention.ReducedBlinkUsageAfter10secs2",
          blink_usage_delta);
      base::UmaHistogramSparse(
          "Memory.Experimental.OomIntervention.ReducedRendererPMFAfter10secs2",
          private_footprint_delta);
      break;
    case 2:
      UMA_HISTOGRAM_MEMORY_MB(
          "Memory.Experimental.OomIntervention.V8UsageAfter20secs",
          v8_usage_mb);
      base::UmaHistogramSparse(
          "Memory.Experimental.OomIntervention.ReducedBlinkUsageAfter20secs2",
          blink_usage_delta);
      base::UmaHistogramSparse(
          "Memory.Experimental.OomIntervention.ReducedRendererPMFAfter20secs2",
          private_footprint_delta);
      break;
    case 1:
      UMA_HISTOGRAM_MEMORY_MB(
          "Memory.Experimental.OomIntervention.V8UsageAfter30secs",
          v8_usage_mb);
      base::UmaHistogramSparse(
          "Memory.Experimental.OomIntervention.ReducedBlinkUsageAfter30secs2",
          blink_usage_delta);
      base::UmaHistogramSparse(
          "Memory.Experimental.OomIntervention.ReducedRendererPMFAfter30secs2",
          private_footprint_delta);
      delayed_report_timer_.Stop();
      break;
  }
}
// Signals critical memory pressure to the main-thread V8 isolate, which
// prompts it to garbage-collect aggressively.
void OomInterventionImpl::TriggerGC() {
  V8PerIsolateData::MainThreadIsolate()->MemoryPressureNotification(
      v8::MemoryPressureLevel::kCritical);
}
} // namespace blink
| scheib/chromium | third_party/blink/renderer/controller/oom_intervention_impl.cc | C++ | bsd-3-clause | 11,560 |
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "modules/navigatorconnect/ServicePort.h"
#include "bindings/core/v8/ScriptValue.h"
#include "bindings/core/v8/SerializedScriptValueFactory.h"
#include "core/dom/MessagePort.h"
#include "modules/navigatorconnect/ServicePortCollection.h"
#include "public/platform/Platform.h"
#include "public/platform/modules/navigator_services/WebServicePortProvider.h"
namespace blink {
// Wraps an embedder-provided WebServicePort in a wrapper owned by
// |collection|.
ServicePort* ServicePort::create(ServicePortCollection* collection, const WebServicePort& port)
{
    return new ServicePort(collection, port);
}

ServicePort::~ServicePort()
{
}

// URL this port is connected to, as supplied by the embedder.
String ServicePort::targetURL() const
{
    return m_port.targetUrl.string();
}

String ServicePort::name() const
{
    return m_port.name;
}

// Deserializes the data attached to the port for the given script state;
// returns JS null when no data was attached.
ScriptValue ServicePort::data(ScriptState* scriptState) const
{
    if (!m_serializedData)
        return ScriptValue::createNull(scriptState);
    return ScriptValue(scriptState, m_serializedData->deserialize(scriptState->isolate()));
}

// Disentangles any transferred ports, serializes |message| to its wire
// format and forwards both to the embedder-side port identified by
// m_port.id. Silently does nothing once the port has been closed
// (m_collection is null).
void ServicePort::postMessage(ExecutionContext* executionContext, PassRefPtr<SerializedScriptValue> message, const MessagePortArray* ports, ExceptionState& exceptionState)
{
    OwnPtr<MessagePortChannelArray> channels;
    if (ports) {
        channels = MessagePort::disentanglePorts(executionContext, ports, exceptionState);
        if (exceptionState.hadException())
            return;
    }
    WebString messageString = message->toWireString();
    OwnPtr<WebMessagePortChannelArray> webChannels = MessagePort::toWebMessagePortChannelArray(channels.release());
    if (m_collection) {
        WebServicePortProvider* provider = m_collection->provider();
        provider->postMessage(m_port.id, messageString, webChannels.leakPtr());
    }
}

// Closing is idempotent: subsequent calls are no-ops.
void ServicePort::close()
{
    // TODO(mek): Figure out if this should throw instead of just quietly fail.
    if (!m_isOpen)
        return;
    m_collection->closePort(this);
    m_collection = nullptr;
    m_isOpen = false;
}

DEFINE_TRACE(ServicePort)
{
    visitor->trace(m_collection);
}

ServicePort::ServicePort(ServicePortCollection* collection, const WebServicePort& port)
    : m_isOpen(true), m_port(port), m_collection(collection)
{
    // Only keep a serialized value around when the embedder attached data;
    // actual deserialization to a V8 value happens lazily in data().
    if (!m_port.data.isEmpty()) {
        m_serializedData = SerializedScriptValueFactory::instance().createFromWire(m_port.data);
    }
}
} // namespace blink
| js0701/chromium-crosswalk | third_party/WebKit/Source/modules/navigatorconnect/ServicePort.cpp | C++ | bsd-3-clause | 2,473 |
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "services/device/geolocation/wifi_data_provider.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/threading/thread_task_runner_handle.h"
namespace device {
// Captures the task runner of the constructing ("client") thread; all
// callbacks are later dispatched back onto it.
WifiDataProvider::WifiDataProvider()
    : client_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
  DCHECK(client_task_runner_);
}

WifiDataProvider::~WifiDataProvider() = default;

// Registers |callback| (not owned) to be notified of wifi data updates.
void WifiDataProvider::AddCallback(WifiDataUpdateCallback* callback) {
  callbacks_.insert(callback);
}

// Returns true when |callback| was registered and has now been removed.
bool WifiDataProvider::RemoveCallback(WifiDataUpdateCallback* callback) {
  return callbacks_.erase(callback) == 1;
}

bool WifiDataProvider::has_callbacks() const {
  return !callbacks_.empty();
}

// Bounces callback dispatch onto the client thread -- presumably invoked
// from whichever thread produced the wifi data.
void WifiDataProvider::RunCallbacks() {
  client_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&WifiDataProvider::DoRunCallbacks, this));
}

bool WifiDataProvider::CalledOnClientThread() const {
  return client_task_runner()->BelongsToCurrentThread();
}

void WifiDataProvider::DoRunCallbacks() {
  // It's possible that all the callbacks went away whilst this task was
  // pending. This is fine; the loop will be a no-op.
  CallbackSet::const_iterator iter = callbacks_.begin();
  while (iter != callbacks_.end()) {
    WifiDataUpdateCallback* callback = *iter;
    ++iter;  // Advance iter before running, in case callback unregisters.
    callback->Run();
  }
}
} // namespace device
| scheib/chromium | services/device/geolocation/wifi_data_provider.cc | C++ | bsd-3-clause | 1,569 |
package hello;
import static hello.Helper.getQueries;
import static hello.Helper.randomWorld;
import static hello.Helper.sendException;
import static hello.Helper.sendJson;
import com.mongodb.async.client.MongoCollection;
import com.mongodb.async.client.MongoDatabase;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.UpdateOneModel;
import com.mongodb.client.model.Updates;
import com.mongodb.client.model.WriteModel;
import io.undertow.server.HttpHandler;
import io.undertow.server.HttpServerExchange;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.bson.Document;
import org.bson.conversions.Bson;
/**
 * Handles the updates test using MongoDB with an asynchronous API.
 *
 * <p>Each request reads {@code n} random worlds, assigns each a fresh random
 * number, persists the changes with one bulk write, then serializes the
 * updated worlds as the JSON response.
 */
final class UpdatesMongoAsyncHandler implements HttpHandler {
  private final MongoCollection<Document> worldCollection;

  UpdatesMongoAsyncHandler(MongoDatabase db) {
    worldCollection = db.getCollection("world");
  }

  @Override
  public void handleRequest(HttpServerExchange exchange) {
    int queries = getQueries(exchange);
    // The whole pipeline is asynchronous; the exchange is completed from the
    // future's completion callback (success or failure).
    nUpdatedWorlds(queries).whenComplete(
        (worlds, exception) -> {
          if (exception != null) {
            sendException(exchange, exception);
          } else {
            sendJson(exchange, worlds);
          }
        });
  }

  /**
   * Fetches {@code n} random worlds, assigns each a new random number, and
   * persists all changes in a single bulk write. The returned future
   * completes with the updated worlds once the bulk write finishes.
   */
  private CompletableFuture<World[]> nUpdatedWorlds(int n) {
    return nWorlds(n).thenCompose(
        worlds -> {
          List<WriteModel<Document>> writes = new ArrayList<>(worlds.length);
          for (World world : worlds) {
            world.randomNumber = randomWorld();
            Bson filter = Filters.eq(world.id);
            Bson update = Updates.set("randomNumber", world.randomNumber);
            writes.add(new UpdateOneModel<>(filter, update));
          }
          CompletableFuture<World[]> next = new CompletableFuture<>();
          worldCollection.bulkWrite(
              writes,
              (result, exception) -> {
                if (exception != null) {
                  next.completeExceptionally(exception);
                } else {
                  next.complete(worlds);
                }
              });
          return next;
        });
  }

  /**
   * Starts {@code n} independent single-world lookups and joins them into
   * one future that completes when every lookup has completed.
   */
  private CompletableFuture<World[]> nWorlds(int n) {
    @SuppressWarnings("unchecked")
    CompletableFuture<World>[] futures = new CompletableFuture[n];
    for (int i = 0; i < futures.length; i++) {
      futures[i] = oneWorld();
    }
    return CompletableFuture.allOf(futures).thenApply(
        nil -> {
          World[] worlds = new World[futures.length];
          for (int i = 0; i < futures.length; i++) {
            // join() cannot block here: allOf guarantees completion.
            worlds[i] = futures[i].join();
          }
          return worlds;
        });
  }

  /** Asynchronously looks up a single world document by a random id. */
  private CompletableFuture<World> oneWorld() {
    CompletableFuture<World> future = new CompletableFuture<>();
    worldCollection
        .find(Filters.eq(randomWorld()))
        .map(Helper::mongoDocumentToWorld)
        .first(
            (world, exception) -> {
              if (exception != null) {
                future.completeExceptionally(exception);
              } else {
                future.complete(world);
              }
            });
    return future;
  }
}
| k-r-g/FrameworkBenchmarks | frameworks/Java/undertow/src/main/java/hello/UpdatesMongoAsyncHandler.java | Java | bsd-3-clause | 3,233 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
(async function() {
  TestRunner.addResult(`Tests Summary view of detailed heap snapshots. The "Show All" button must show all nodes.\n`);
  await TestRunner.loadTestModule('heap_profiler_test_runner');
  await TestRunner.showPanel('heap_profiler');

  // Number of "A" instances baked into the test snapshot; the assertions
  // below compare row counts against this value.
  var instanceCount = 25;
  function createHeapSnapshot() {
    return HeapProfilerTestRunner.createHeapSnapshot(instanceCount);
  }

  HeapProfilerTestRunner.runHeapSnapshotTestSuite([function testShowAll(next) {
    HeapProfilerTestRunner.takeAndOpenSnapshot(createHeapSnapshot, step1);

    // Switch the opened snapshot to the Summary view.
    function step1() {
      HeapProfilerTestRunner.switchToView('Summary', step2);
    }

    // Expand the constructor row for class "A".
    function step2() {
      var row = HeapProfilerTestRunner.findRow('A');
      TestRunner.assertEquals(true, !!row, '"A" row');
      HeapProfilerTestRunner.expandRow(row, step3);
    }

    // Verify the instance count, check that the "Show All" button advertises
    // the number of rows not yet populated, then click it.
    function step3(row) {
      var count = row.data['count'];
      TestRunner.assertEquals(instanceCount.toString(), count);
      var buttonsNode = HeapProfilerTestRunner.findButtonsNode(row);
      TestRunner.assertEquals(true, !!buttonsNode, 'buttons node');
      var words = buttonsNode.showAll.textContent.split(' ');
      for (var i = 0; i < words.length; ++i) {
        var maybeNumber = parseInt(words[i], 10);
        if (!isNaN(maybeNumber))
          TestRunner.assertEquals(
              instanceCount - row.dataGrid.defaultPopulateCount(), maybeNumber, buttonsNode.showAll.textContent);
      }
      HeapProfilerTestRunner.clickShowMoreButton('showAll', buttonsNode, step4);
    }

    // After "Show All": every instance row is present and the button is gone.
    function step4(row) {
      var rowsShown = HeapProfilerTestRunner.countDataRows(row);
      TestRunner.assertEquals(instanceCount, rowsShown, 'after showAll click');
      var buttonsNode = HeapProfilerTestRunner.findButtonsNode(row);
      TestRunner.assertEquals(false, !!buttonsNode, 'buttons node found when all instances are shown!');
      setTimeout(next, 0);
    }
  }]);
})();
| chromium/chromium | third_party/blink/web_tests/http/tests/devtools/profiler/heap-snapshot-summary-show-all.js | JavaScript | bsd-3-clause | 2,080 |
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
using System;
using Windows.Media.Playback;
namespace SDKTemplate.Logging
{
public class MediaPlayerLogger : IDisposable
{
#region Common pattern for all loggers
private LogView logView;
private MediaPlayer mediaPlayer;
/// <summary>
/// Attaches this logger to <paramref name="_mediaPlayer"/>, writing event
/// traces to <paramref name="_logView"/>. Both arguments must be non-null.
/// </summary>
public MediaPlayerLogger(LogView _logView, MediaPlayer _mediaPlayer)
{
    // Validate both dependencies before subscribing to any events.
    if (_logView == null)
        throw new ArgumentNullException(nameof(_logView));
    this.logView = _logView;

    if (_mediaPlayer == null)
        throw new ArgumentNullException(nameof(_mediaPlayer));
    this.mediaPlayer = _mediaPlayer;

    this.RegisterForMediaPlayerEvents();
}

/// <summary>
/// Detaches all event handlers and drops the references so the logger no
/// longer keeps the player or the log view alive.
/// </summary>
public void Dispose()
{
    UnregisterForMediaPlayerEvents();
    this.logView = null;
    this.mediaPlayer = null;
}
#endregion
#region MediaPlayer Event Handlers
private void RegisterForMediaPlayerEvents()
{
// Player Events
mediaPlayer.SourceChanged += MediaPlayer_SourceChanged;
mediaPlayer.MediaOpened += MediaPlayer_MediaOpened;
mediaPlayer.MediaEnded += MediaPlayer_MediaEnded;
mediaPlayer.MediaFailed += MediaPlayer_MediaFailed;
mediaPlayer.VolumeChanged += MediaPlayer_VolumeChanged;
mediaPlayer.IsMutedChanged += MediaPlayer_IsMutedChanged;
// NOTE: There are a number of deprecated events on MediaPlayer.
// Please use the equivalent events on the MediaPlayer.PlaybackSession as shown below.
// PlaybackSession Events
mediaPlayer.PlaybackSession.BufferedRangesChanged += MediaPlayer_PlaybackSession_BufferedRangesChanged;
mediaPlayer.PlaybackSession.BufferingEnded += MediaPlayer_PlaybackSession_BufferingEnded;
mediaPlayer.PlaybackSession.BufferingProgressChanged += MediaPlayer_PlaybackSession_BufferingProgressChanged;
mediaPlayer.PlaybackSession.BufferingStarted += MediaPlayer_PlaybackSession_BufferingStarted;
mediaPlayer.PlaybackSession.DownloadProgressChanged += MediaPlayer_PlaybackSession_DownloadProgressChanged;
mediaPlayer.PlaybackSession.NaturalDurationChanged += MediaPlayer_PlaybackSession_NaturalDurationChanged;
mediaPlayer.PlaybackSession.NaturalVideoSizeChanged += MediaPlayer_PlaybackSession_NaturalVideoSizeChanged;
mediaPlayer.PlaybackSession.PlaybackRateChanged += MediaPlayer_PlaybackSession_PlaybackRateChanged;
mediaPlayer.PlaybackSession.PlaybackStateChanged += MediaPlayer_PlaybackSession_PlaybackStateChanged;
mediaPlayer.PlaybackSession.PlayedRangesChanged += MediaPlayer_PlaybackSession_PlayedRangesChanged;
mediaPlayer.PlaybackSession.PositionChanged += MediaPlayer_PlaybackSession_PositionChanged;
mediaPlayer.PlaybackSession.SeekableRangesChanged += MediaPlayer_PlaybackSession_SeekableRangesChanged;
mediaPlayer.PlaybackSession.SeekCompleted += MediaPlayer_PlaybackSession_SeekCompleted;
mediaPlayer.PlaybackSession.SupportedPlaybackRatesChanged += MediaPlayer_PlaybackSession_SupportedPlaybackRatesChanged;
}
/// <summary>
/// Detaches every handler attached in RegisterForMediaPlayerEvents().
/// No-op when the player reference has already been cleared.
/// </summary>
private void UnregisterForMediaPlayerEvents()
{
    if (mediaPlayer == null)
    {
        return;
    }

    // Player Events
    mediaPlayer.SourceChanged -= MediaPlayer_SourceChanged;
    mediaPlayer.MediaOpened -= MediaPlayer_MediaOpened;
    mediaPlayer.MediaEnded -= MediaPlayer_MediaEnded;
    mediaPlayer.MediaFailed -= MediaPlayer_MediaFailed;
    mediaPlayer.VolumeChanged -= MediaPlayer_VolumeChanged;
    mediaPlayer.IsMutedChanged -= MediaPlayer_IsMutedChanged;

    // PlaybackSession Events
    mediaPlayer.PlaybackSession.BufferedRangesChanged -= MediaPlayer_PlaybackSession_BufferedRangesChanged;
    mediaPlayer.PlaybackSession.BufferingEnded -= MediaPlayer_PlaybackSession_BufferingEnded;
    mediaPlayer.PlaybackSession.BufferingProgressChanged -= MediaPlayer_PlaybackSession_BufferingProgressChanged;
    mediaPlayer.PlaybackSession.BufferingStarted -= MediaPlayer_PlaybackSession_BufferingStarted;
    mediaPlayer.PlaybackSession.DownloadProgressChanged -= MediaPlayer_PlaybackSession_DownloadProgressChanged;
    mediaPlayer.PlaybackSession.NaturalDurationChanged -= MediaPlayer_PlaybackSession_NaturalDurationChanged;
    mediaPlayer.PlaybackSession.NaturalVideoSizeChanged -= MediaPlayer_PlaybackSession_NaturalVideoSizeChanged;
    mediaPlayer.PlaybackSession.PlaybackRateChanged -= MediaPlayer_PlaybackSession_PlaybackRateChanged;
    mediaPlayer.PlaybackSession.PlaybackStateChanged -= MediaPlayer_PlaybackSession_PlaybackStateChanged;
    mediaPlayer.PlaybackSession.PlayedRangesChanged -= MediaPlayer_PlaybackSession_PlayedRangesChanged;
    mediaPlayer.PlaybackSession.PositionChanged -= MediaPlayer_PlaybackSession_PositionChanged;
    mediaPlayer.PlaybackSession.SeekableRangesChanged -= MediaPlayer_PlaybackSession_SeekableRangesChanged;
    mediaPlayer.PlaybackSession.SeekCompleted -= MediaPlayer_PlaybackSession_SeekCompleted;
    // BUGFIX: this line previously used `+=`, which RE-subscribed the handler
    // during teardown instead of removing it, keeping the logger (and its
    // LogView) alive via the event delegate after Dispose().
    mediaPlayer.PlaybackSession.SupportedPlaybackRatesChanged -= MediaPlayer_PlaybackSession_SupportedPlaybackRatesChanged;
}
private void MediaPlayer_SourceChanged(MediaPlayer sender, object args)
{
logView.Log("MediaPlayer_SourceChanged", LogViewLoggingLevel.Information);
}
private void MediaPlayer_MediaOpened(MediaPlayer sender, object args)
{
logView.Log($"MediaPlayer_MediaOpened, Duration: {sender.PlaybackSession.NaturalDuration}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_MediaEnded(MediaPlayer sender, object args)
{
logView.Log("MediaPlayer_MediaEnded", LogViewLoggingLevel.Information);
}
private void MediaPlayer_MediaFailed(MediaPlayer sender, MediaPlayerFailedEventArgs args)
{
logView.Log($"MediaPlayer_MediaFailed Error: {args.Error}, ErrorMessage: {args.ErrorMessage}, ExtendedError.Message:{args.ExtendedErrorCode.Message}, ExtendedError.HResult:{args.ExtendedErrorCode.HResult.ToString("X8")},", LogViewLoggingLevel.Critical);
}
private void MediaPlayer_VolumeChanged(MediaPlayer sender, object args)
{
logView.Log($"MediaPlayer_VolumeChanged, Volume: {sender.Volume}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_IsMutedChanged(MediaPlayer sender, object args)
{
logView.Log($"MediaPlayer_IsMutedChanged, IsMuted={sender.IsMuted}", LogViewLoggingLevel.Information);
}
// PlaybackSession Events
private void MediaPlayer_PlaybackSession_BufferedRangesChanged(MediaPlaybackSession sender, object args)
{
var rangeList = sender.GetBufferedRanges();
string ranges = "";
foreach (var range in rangeList)
{
ranges += $"[{range.Start},{range.End}],";
}
ranges = ranges.TrimEnd(',');
logView.Log($"MediaPlayer_PlaybackSession_BufferedRangesChanged: {ranges}", LogViewLoggingLevel.Verbose);
}
private void MediaPlayer_PlaybackSession_BufferingEnded(MediaPlaybackSession sender, object args)
{
logView.Log("PlaybackSession_BufferingEnded", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_BufferingProgressChanged(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_BufferingProgressChanged, BufferingProgress: {sender.BufferingProgress}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_BufferingStarted(MediaPlaybackSession sender, object args)
{
logView.Log("PlaybackSession_BufferingStarted", LogViewLoggingLevel.Warning);
}
private void MediaPlayer_PlaybackSession_DownloadProgressChanged(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_DownloadProgressChanged, DownloadProgress: {sender.DownloadProgress}", LogViewLoggingLevel.Verbose);
}
private void MediaPlayer_PlaybackSession_NaturalDurationChanged(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_NaturalDurationChanged, NaturalDuration: {sender.NaturalDuration}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_NaturalVideoSizeChanged(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_NaturalVideoSizeChanged, NaturalVideoWidth: {sender.NaturalVideoWidth}, NaturalVideoHeight: {sender.NaturalVideoHeight}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_PlaybackRateChanged(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_PlaybackRateChanged, PlaybackRate: {sender.PlaybackRate}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_PlaybackStateChanged(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_PlaybackStateChanged, PlaybackState: {sender.PlaybackState}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_PlayedRangesChanged(MediaPlaybackSession sender, object args)
{
var rangeList = sender.GetPlayedRanges();
string ranges = "";
foreach (var range in rangeList)
{
ranges += $"[{range.Start},{range.End}],";
}
ranges = ranges.TrimEnd(',');
logView.Log($"MediaPlayer_PlaybackSession_PlayedRangesChanged: {ranges}", LogViewLoggingLevel.Verbose);
}
public bool logPositionChanged { get; set; } = false;
private void MediaPlayer_PlaybackSession_PositionChanged(MediaPlaybackSession sender, object args)
{
if(logPositionChanged)
logView.Log("PlaybackSession_PositionChanged, Position: " + sender.Position, LogViewLoggingLevel.Verbose);
}
private void MediaPlayer_PlaybackSession_SeekableRangesChanged(MediaPlaybackSession sender, object args)
{
var rangeList = sender.GetSeekableRanges();
string ranges = "";
foreach (var range in rangeList)
{
ranges += $"[{range.Start},{range.End}],";
}
ranges = ranges.TrimEnd(',');
logView.Log($"MediaPlayer_PlaybackSession_SeekableRangesChanged: {ranges}", LogViewLoggingLevel.Verbose);
}
private void MediaPlayer_PlaybackSession_SeekCompleted(MediaPlaybackSession sender, object args)
{
logView.Log($"PlaybackSession_SeekCompleted, Position: {sender.Position}", LogViewLoggingLevel.Information);
}
private void MediaPlayer_PlaybackSession_SupportedPlaybackRatesChanged(MediaPlaybackSession sender, object args)
{
double minRate, maxRate;
GetMinMaxRates(sender, out minRate, out maxRate);
logView.Log($"MediaPlayer_PlaybackSession_SupportedPlaybackRatesChanged, MinRate: {minRate}, MaxRate: {maxRate}", LogViewLoggingLevel.Information);
}
/// <summary>
/// Probes the session for the widest supported reverse (min) and forward (max)
/// playback rates by geometric search: starting at +/-256x, the candidate is
/// halved until IsSupportedPlaybackRateRange() accepts it.
/// minRate becomes 0 when no rate down to -0.25x is supported (no reverse
/// playback); maxRate falls back to 1 when nothing above normal speed works.
/// </summary>
/// <remarks>
/// BUGFIX: the original used a `for` loop whose iterator (`rate /= 2`) still
/// ran after the accepting iteration, so the reported value was half the rate
/// that was actually supported (e.g. -256 supported was reported as -128).
/// The `while` form below reports the accepted candidate itself.
/// </remarks>
private static void GetMinMaxRates(MediaPlaybackSession sender, out double minRate, out double maxRate)
{
    minRate = -256.0;
    while (!sender.IsSupportedPlaybackRateRange(minRate, 1))
    {
        minRate = minRate / 2.0;
        if (-0.25 < minRate)
        {
            // Nothing supported down to -0.25x: report "no reverse playback".
            minRate = 0;
            break;
        }
    }

    maxRate = 256.0;
    while (!sender.IsSupportedPlaybackRateRange(1, maxRate))
    {
        maxRate = maxRate / 2.0;
        if (maxRate < 1.0)
        {
            // Nothing supported above normal speed: fall back to 1x.
            maxRate = 1;
            break;
        }
    }
}
#endregion
}
}
| oldnewthing/Windows-universal-samples | SharedContent/cs/Logging/MediaPlayerLogger.cs | C# | mit | 12,947 |
'use strict';

// BEM tech module that only delegates: the real implementation lives in
// level-proto.js, resolved here to an absolute path for the tech loader.
exports.baseTechPath = require.resolve('./level-proto.js');
| bem-archive/bem-tools | lib/techs/v2/tech-docs.js | JavaScript | mit | 75 |
using System;
using System.Collections.Generic;
namespace BackEnd.Data
{
/// <summary>
/// Back-end entity for a conference attendee. Extends the shared
/// ConferenceDTO.Attendee with the navigation collection linking an
/// attendee to sessions via SessionAttendee join rows.
/// </summary>
public class Attendee : ConferenceDTO.Attendee
{
    // `virtual` allows ORM proxy overrides (e.g. EF lazy loading) —
    // NOTE(review): actual lazy-loading depends on provider config; confirm.
    public virtual ICollection<SessionAttendee> SessionsAttendees { get; set; }
}
} | csharpfritz/aspnetcore-app-workshop | save-points/6-Deployment-docker/ConferencePlanner/BackEnd/Data/Attendee.cs | C# | mit | 225 |
import merge from "ember-data/system/merge";
import RootState from "ember-data/system/model/states";
import Relationships from "ember-data/system/relationships/state/create";
import Snapshot from "ember-data/system/snapshot";
import EmptyObject from "ember-data/system/empty-object";
var Promise = Ember.RSVP.Promise;
var get = Ember.get;
var set = Ember.set;
// Memoization caches for state-name parsing. Dotted state names (e.g.
// "loaded.updated.inFlight") are parsed repeatedly, so results are cached
// per name. EmptyObject (prototype-less) avoids Object.prototype key clashes.
var _extractPivotNameCache = new EmptyObject();
var _splitOnDotCache = new EmptyObject();

// Splits a dotted state name into its segments, caching the array per name.
function splitOnDot(name) {
  var segments = _splitOnDotCache[name];
  if (!segments) {
    segments = _splitOnDotCache[name] = name.split('.');
  }
  return segments;
}

// Returns the first segment ("pivot") of a dotted state name, cached.
function extractPivotName(name) {
  var pivot = _extractPivotNameCache[name];
  if (!pivot) {
    pivot = _extractPivotNameCache[name] = splitOnDot(name)[0];
  }
  return pivot;
}
// Builds a getter that proxies `key` to the internal model's current state
// object (the flags defined on RootState, e.g. isEmpty/isLoading below).
// `get` here is Ember.get, bound at module scope.
function retrieveFromCurrentState(key) {
  return function() {
    return get(this.currentState, key);
  };
}
var guid = 0;
/**
`InternalModel` is the Model class that we use internally inside Ember Data to represent models.
Internal ED methods should only deal with `InternalModel` objects. It is a fast, plain Javascript class.
We expose `DS.Model` to application code, by materializing a `DS.Model` from `InternalModel` lazily, as
a performance optimization.
`InternalModel` should never be exposed to application code. At the boundaries of the system, in places
like `find`, `push`, etc. we convert between Models and InternalModels.
We need to make sure that the properties from `InternalModel` are correctly exposed/proxied on `Model`
if they are needed.
@class InternalModel
*/
export default function InternalModel(type, id, store, container, data) {
  this.type = type;
  this.id = id;
  this.store = store;
  this.container = container;
  // Attributes last acknowledged by the adapter — the "clean" state that
  // rollbackAttributes() reverts to (see _changedKeys docs further down).
  this._data = data || new EmptyObject();
  this.modelName = type.modelName;
  this.dataHasInitialized = false;
  //Look into making this lazy
  this._deferredTriggers = [];
  // Local, unsaved attribute changes; take priority over _data.
  this._attributes = new EmptyObject();
  // Attribute changes captured by an in-flight save (flushChangedAttributes).
  this._inFlightAttributes = new EmptyObject();
  this._relationships = new Relationships(this);
  this._recordArrays = undefined;
  this.currentState = RootState.empty;
  this.isReloading = false;
  this.isError = false;
  this.error = null;
  // Unique identity for Ember's guidFor()-based bookkeeping.
  this[Ember.GUID_KEY] = guid++ + 'internal-model';
  /*
    implicit relationships are relationship which have not been declared but the inverse side exists on
    another record somewhere
    For example if there was

    ```app/models/comment.js
    import DS from 'ember-data';
    export default DS.Model.extend({
      name: DS.attr()
    })
    ```

    but there is also

    ```app/models/post.js
    import DS from 'ember-data';
    export default DS.Model.extend({
      name: DS.attr(),
      comments: DS.hasMany('comment')
    })
    ```

    would have a implicit post relationship in order to be do things like remove ourselves from the post
    when we are deleted
  */
  this._implicitRelationships = new EmptyObject();
}
InternalModel.prototype = {
isEmpty: retrieveFromCurrentState('isEmpty'),
isLoading: retrieveFromCurrentState('isLoading'),
isLoaded: retrieveFromCurrentState('isLoaded'),
hasDirtyAttributes: retrieveFromCurrentState('hasDirtyAttributes'),
isSaving: retrieveFromCurrentState('isSaving'),
isDeleted: retrieveFromCurrentState('isDeleted'),
isNew: retrieveFromCurrentState('isNew'),
isValid: retrieveFromCurrentState('isValid'),
dirtyType: retrieveFromCurrentState('dirtyType'),
constructor: InternalModel,
materializeRecord: function() {
Ember.assert("Materialized " + this.modelName + " record with id:" + this.id + "more than once", this.record === null || this.record === undefined);
// lookupFactory should really return an object that creates
// instances with the injections applied
this.record = this.type._create({
store: this.store,
container: this.container,
_internalModel: this,
currentState: get(this, 'currentState'),
isError: this.isError,
adapterError: this.error
});
this._triggerDeferredTriggers();
},
recordObjectWillDestroy: function() {
this.record = null;
},
deleteRecord: function() {
this.send('deleteRecord');
},
save: function(options) {
var promiseLabel = "DS: Model#save " + this;
var resolver = Ember.RSVP.defer(promiseLabel);
this.store.scheduleSave(this, resolver, options);
return resolver.promise;
},
startedReloading: function() {
this.isReloading = true;
if (this.record) {
set(this.record, 'isReloading', true);
}
},
finishedReloading: function() {
this.isReloading = false;
if (this.record) {
set(this.record, 'isReloading', false);
}
},
reload: function() {
this.startedReloading();
var record = this;
var promiseLabel = "DS: Model#reload of " + this;
return new Promise(function(resolve) {
record.send('reloadRecord', resolve);
}, promiseLabel).then(function() {
record.didCleanError();
return record;
}, function(error) {
record.didError(error);
throw error;
}, "DS: Model#reload complete, update flags").finally(function () {
record.finishedReloading();
record.updateRecordArrays();
});
},
getRecord: function() {
if (!this.record) {
this.materializeRecord();
}
return this.record;
},
unloadRecord: function() {
this.send('unloadRecord');
},
eachRelationship: function(callback, binding) {
return this.type.eachRelationship(callback, binding);
},
eachAttribute: function(callback, binding) {
return this.type.eachAttribute(callback, binding);
},
inverseFor: function(key) {
return this.type.inverseFor(key);
},
setupData: function(data) {
var changedKeys = this._changedKeys(data.attributes);
merge(this._data, data.attributes);
this.pushedData();
if (this.record) {
this.record._notifyProperties(changedKeys);
}
this.didInitalizeData();
},
becameReady: function() {
Ember.run.schedule('actions', this.store.recordArrayManager, this.store.recordArrayManager.recordWasLoaded, this);
},
didInitalizeData: function() {
if (!this.dataHasInitialized) {
this.becameReady();
this.dataHasInitialized = true;
}
},
destroy: function() {
if (this.record) {
return this.record.destroy();
}
},
/**
@method createSnapshot
@private
*/
createSnapshot: function(options) {
var adapterOptions = options && options.adapterOptions;
var snapshot = new Snapshot(this);
snapshot.adapterOptions = adapterOptions;
return snapshot;
},
/**
@method loadingData
@private
@param {Promise} promise
*/
loadingData: function(promise) {
this.send('loadingData', promise);
},
/**
@method loadedData
@private
*/
loadedData: function() {
this.send('loadedData');
this.didInitalizeData();
},
/**
@method notFound
@private
*/
notFound: function() {
this.send('notFound');
},
/**
@method pushedData
@private
*/
pushedData: function() {
this.send('pushedData');
},
flushChangedAttributes: function() {
this._inFlightAttributes = this._attributes;
this._attributes = new EmptyObject();
},
/**
@method adapterWillCommit
@private
*/
adapterWillCommit: function() {
this.send('willCommit');
},
/**
@method adapterDidDirty
@private
*/
adapterDidDirty: function() {
this.send('becomeDirty');
this.updateRecordArraysLater();
},
/**
@method send
@private
@param {String} name
@param {Object} context
*/
send: function(name, context) {
var currentState = get(this, 'currentState');
if (!currentState[name]) {
this._unhandledEvent(currentState, name, context);
}
return currentState[name](this, context);
},
notifyHasManyAdded: function(key, record, idx) {
if (this.record) {
this.record.notifyHasManyAdded(key, record, idx);
}
},
notifyHasManyRemoved: function(key, record, idx) {
if (this.record) {
this.record.notifyHasManyRemoved(key, record, idx);
}
},
notifyBelongsToChanged: function(key, record) {
if (this.record) {
this.record.notifyBelongsToChanged(key, record);
}
},
notifyPropertyChange: function(key) {
if (this.record) {
this.record.notifyPropertyChange(key);
}
},
rollbackAttributes: function() {
var dirtyKeys = Object.keys(this._attributes);
this._attributes = new EmptyObject();
if (get(this, 'isError')) {
this._inFlightAttributes = new EmptyObject();
this.didCleanError();
}
//Eventually rollback will always work for relationships
//For now we support it only out of deleted state, because we
//have an explicit way of knowing when the server acked the relationship change
if (this.isDeleted()) {
//TODO: Should probably move this to the state machine somehow
this.becameReady();
}
if (this.isNew()) {
this.clearRelationships();
}
if (this.isValid()) {
this._inFlightAttributes = new EmptyObject();
}
this.send('rolledBack');
this.record._notifyProperties(dirtyKeys);
},
/**
@method transitionTo
@private
@param {String} name
*/
transitionTo: function(name) {
// POSSIBLE TODO: Remove this code and replace with
// always having direct reference to state objects
var pivotName = extractPivotName(name);
var currentState = get(this, 'currentState');
var state = currentState;
do {
if (state.exit) { state.exit(this); }
state = state.parentState;
} while (!state.hasOwnProperty(pivotName));
var path = splitOnDot(name);
var setups = [];
var enters = [];
var i, l;
for (i=0, l=path.length; i<l; i++) {
state = state[path[i]];
if (state.enter) { enters.push(state); }
if (state.setup) { setups.push(state); }
}
for (i=0, l=enters.length; i<l; i++) {
enters[i].enter(this);
}
set(this, 'currentState', state);
//TODO Consider whether this is the best approach for keeping these two in sync
if (this.record) {
set(this.record, 'currentState', state);
}
for (i=0, l=setups.length; i<l; i++) {
setups[i].setup(this);
}
this.updateRecordArraysLater();
},
_unhandledEvent: function(state, name, context) {
var errorMessage = "Attempted to handle event `" + name + "` ";
errorMessage += "on " + String(this) + " while in state ";
errorMessage += state.stateName + ". ";
if (context !== undefined) {
errorMessage += "Called with " + Ember.inspect(context) + ".";
}
throw new Ember.Error(errorMessage);
},
triggerLater: function() {
var length = arguments.length;
var args = new Array(length);
for (var i = 0; i < length; i++) {
args[i] = arguments[i];
}
if (this._deferredTriggers.push(args) !== 1) {
return;
}
Ember.run.scheduleOnce('actions', this, '_triggerDeferredTriggers');
},
_triggerDeferredTriggers: function() {
//TODO: Before 1.0 we want to remove all the events that happen on the pre materialized record,
//but for now, we queue up all the events triggered before the record was materialized, and flush
//them once we have the record
if (!this.record) {
return;
}
for (var i=0, l= this._deferredTriggers.length; i<l; i++) {
this.record.trigger.apply(this.record, this._deferredTriggers[i]);
}
this._deferredTriggers.length = 0;
},
/**
@method clearRelationships
@private
*/
clearRelationships: function() {
this.eachRelationship((name, relationship) => {
if (this._relationships.has(name)) {
var rel = this._relationships.get(name);
rel.clear();
rel.destroy();
}
});
Object.keys(this._implicitRelationships).forEach((key) => {
this._implicitRelationships[key].clear();
this._implicitRelationships[key].destroy();
});
},
/**
When a find request is triggered on the store, the user can optionally pass in
attributes and relationships to be preloaded. These are meant to behave as if they
came back from the server, except the user obtained them out of band and is informing
the store of their existence. The most common use case is for supporting client side
nested URLs, such as `/posts/1/comments/2` so the user can do
`store.find('comment', 2, {post:1})` without having to fetch the post.
Preloaded data can be attributes and relationships passed in either as IDs or as actual
models.
@method _preloadData
@private
@param {Object} preload
*/
_preloadData: function(preload) {
//TODO(Igor) consider the polymorphic case
Object.keys(preload).forEach((key) => {
var preloadValue = get(preload, key);
var relationshipMeta = this.type.metaForProperty(key);
if (relationshipMeta.isRelationship) {
this._preloadRelationship(key, preloadValue);
} else {
this._data[key] = preloadValue;
}
});
},
_preloadRelationship: function(key, preloadValue) {
var relationshipMeta = this.type.metaForProperty(key);
var type = relationshipMeta.type;
if (relationshipMeta.kind === 'hasMany') {
this._preloadHasMany(key, preloadValue, type);
} else {
this._preloadBelongsTo(key, preloadValue, type);
}
},
_preloadHasMany: function(key, preloadValue, type) {
Ember.assert("You need to pass in an array to set a hasMany property on a record", Ember.isArray(preloadValue));
var internalModel = this;
var recordsToSet = preloadValue.map((recordToPush) => {
return internalModel._convertStringOrNumberIntoInternalModel(recordToPush, type);
});
//We use the pathway of setting the hasMany as if it came from the adapter
//because the user told us that they know this relationships exists already
this._relationships.get(key).updateRecordsFromAdapter(recordsToSet);
},
_preloadBelongsTo: function(key, preloadValue, type) {
var recordToSet = this._convertStringOrNumberIntoInternalModel(preloadValue, type);
//We use the pathway of setting the hasMany as if it came from the adapter
//because the user told us that they know this relationships exists already
this._relationships.get(key).setRecord(recordToSet);
},
_convertStringOrNumberIntoInternalModel: function(value, type) {
if (typeof value === 'string' || typeof value === 'number') {
return this.store._internalModelForId(type, value);
}
if (value._internalModel) {
return value._internalModel;
}
return value;
},
/**
@method updateRecordArrays
@private
*/
updateRecordArrays: function() {
this._updatingRecordArraysLater = false;
this.store.dataWasUpdated(this.type, this);
},
setId: function(id) {
Ember.assert('A record\'s id cannot be changed once it is in the loaded state', this.id === null || this.id === id || this.isNew());
this.id = id;
},
didError: function(error) {
this.error = error;
this.isError = true;
if (this.record) {
this.record.setProperties({
isError: true,
adapterError: error
});
}
},
didCleanError: function() {
this.error = null;
this.isError = false;
if (this.record) {
this.record.setProperties({
isError: false,
adapterError: null
});
}
},
/**
If the adapter did not return a hash in response to a commit,
merge the changed attributes and relationships into the existing
saved data.
@method adapterDidCommit
*/
adapterDidCommit: function(data) {
if (data) {
data = data.attributes;
}
this.didCleanError();
var changedKeys = this._changedKeys(data);
merge(this._data, this._inFlightAttributes);
if (data) {
merge(this._data, data);
}
this._inFlightAttributes = new EmptyObject();
this.send('didCommit');
this.updateRecordArraysLater();
if (!data) { return; }
this.record._notifyProperties(changedKeys);
},
/**
@method updateRecordArraysLater
@private
*/
updateRecordArraysLater: function() {
// quick hack (something like this could be pushed into run.once
if (this._updatingRecordArraysLater) { return; }
this._updatingRecordArraysLater = true;
Ember.run.schedule('actions', this, this.updateRecordArrays);
},
addErrorMessageToAttribute: function(attribute, message) {
var record = this.getRecord();
get(record, 'errors').add(attribute, message);
},
removeErrorMessageFromAttribute: function(attribute) {
var record = this.getRecord();
get(record, 'errors').remove(attribute);
},
clearErrorMessages: function() {
var record = this.getRecord();
get(record, 'errors').clear();
},
// FOR USE DURING COMMIT PROCESS
/**
@method adapterDidInvalidate
@private
*/
adapterDidInvalidate: function(errors) {
var attribute;
for (attribute in errors) {
if (errors.hasOwnProperty(attribute)) {
this.addErrorMessageToAttribute(attribute, errors[attribute]);
}
}
this._saveWasRejected();
},
/**
@method adapterDidError
@private
*/
adapterDidError: function(error) {
this.send('becameError');
this.didError(error);
this._saveWasRejected();
},
_saveWasRejected: function() {
var keys = Object.keys(this._inFlightAttributes);
for (var i=0; i < keys.length; i++) {
if (this._attributes[keys[i]] === undefined) {
this._attributes[keys[i]] = this._inFlightAttributes[keys[i]];
}
}
this._inFlightAttributes = new EmptyObject();
},
/**
Ember Data has 3 buckets for storing the value of an attribute on an internalModel.
`_data` holds all of the attributes that have been acknowledged by
a backend via the adapter. When rollbackAttributes is called on a model all
attributes will revert to the record's state in `_data`.
`_attributes` holds any change the user has made to an attribute
that has not been acknowledged by the adapter. Any values in
`_attributes` are have priority over values in `_data`.
`_inFlightAttributes`. When a record is being synced with the
backend the values in `_attributes` are copied to
`_inFlightAttributes`. This way if the backend acknowledges the
save but does not return the new state Ember Data can copy the
values from `_inFlightAttributes` to `_data`. Without having to
worry about changes made to `_attributes` while the save was
happenign.
Changed keys builds a list of all of the values that may have been
changed by the backend after a successful save.
It does this by iterating over each key, value pair in the payload
returned from the server after a save. If the `key` is found in
`_attributes` then the user has a local changed to the attribute
that has not been synced with the server and the key is not
included in the list of changed keys.
If the value, for a key differs from the value in what Ember Data
believes to be the truth about the backend state (A merger of the
`_data` and `_inFlightAttributes` objects where
`_inFlightAttributes` has priority) then that means the backend
has updated the value and the key is added to the list of changed
keys.
@method _changedKeys
@private
*/
_changedKeys: function(updates) {
var changedKeys = [];
if (updates) {
var original, i, value, key;
var keys = Object.keys(updates);
var length = keys.length;
original = merge(new EmptyObject(), this._data);
original = merge(original, this._inFlightAttributes);
for (i = 0; i < length; i++) {
key = keys[i];
value = updates[key];
// A value in _attributes means the user has a local change to
// this attributes. We never override this value when merging
// updates from the backend so we should not sent a change
// notification if the server value differs from the original.
if (this._attributes[key] !== undefined) {
continue;
}
if (!Ember.isEqual(original[key], value)) {
changedKeys.push(key);
}
}
}
return changedKeys;
},
toString: function() {
if (this.record) {
return this.record.toString();
} else {
return `<${this.modelName}:${this.id}>`;
}
}
};
| Kuzirashi/data | packages/ember-data/lib/system/model/internal-model.js | JavaScript | mit | 20,528 |
<?php
namespace N98\Magento\Command;
use Symfony\Component\Console\Command\Command;
/**
 * Implemented by objects that need a reference to the Symfony Console
 * command they are attached to (injected after construction).
 */
interface CommandAware
{
    /**
     * Supplies the console command instance to the implementing object.
     *
     * @param Command $command the command this object is bound to
     * @return void
     */
    public function setCommand(Command $command);
}
| pocallaghan/n98-magerun | src/N98/Magento/Command/CommandAware.php | PHP | mit | 231 |
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Bond.Expressions
{
using System.Linq.Expressions;
/// <summary>
/// Helpers for creating loop expression trees.
/// </summary>
internal static class ControlExpression
{
    /// <summary>
    /// Builds a while loop: <paramref name="whileCondition"/> is evaluated before
    /// each iteration; the loop exits via an auto-generated "end" label once it
    /// is false.
    /// </summary>
    public static Expression While(Expression whileCondition, Expression body)
    {
        return While(whileCondition, body, Expression.Label("end"));
    }

    /// <summary>
    /// While-loop variant with a caller-supplied break label, so the body
    /// itself may jump to <paramref name="breakLabel"/> to exit early.
    /// </summary>
    public static Expression While(Expression whileCondition, Expression body, LabelTarget breakLabel)
    {
        return Expression.Loop(
            PrunedExpression.IfThenElse(
                whileCondition,
                body,
                Expression.Break(breakLabel)),
            breakLabel);
    }

    /// <summary>
    /// Builds a do/while loop: the body always runs once, then repeats while
    /// <paramref name="condition"/> remains true.
    /// </summary>
    public static Expression DoWhile(Expression body, Expression condition)
    {
        return DoWhile(body, condition, Expression.Label("end"));
    }

    /// <summary>
    /// Do/while variant with a caller-supplied break label.
    /// </summary>
    public static Expression DoWhile(Expression body, Expression condition, LabelTarget breakLabel)
    {
        return Expression.Loop(
            Expression.Block(
                body,
                Expression.IfThen(Expression.Not(condition), Expression.Break(breakLabel))),
            breakLabel);
    }
}
}
| jdubrule/bond | cs/src/core/expressions/ControlExpression.cs | C# | mit | 1,408 |
<?php
/**
* @license GPL 2 (http://www.gnu.org/licenses/gpl.html)
*
* @author ilker rifat kapaç <irifat@gmail.com>
*/
// Turkish localization of the LDAP bind-password setting label; the value is
// user-facing text and must stay in Turkish.
$lang['bindpw'] = 'Üstteki kullanıcının şifresi';
| duangao/duangao.github.io | lib/plugins/authldap/lang/tr/settings.php | PHP | mit | 197 |
# frozen_string_literal: true

module Gitlab
  module Metrics
    # Rack middleware for tracking Rails requests.
    #
    # Every request is wrapped in a metrics Transaction that records the
    # request URI/method and — when ActionController handled the request —
    # the "Controller#action" pair. The transaction is always finished,
    # even when the wrapped app raises, so partial metrics are submitted.
    class RackMiddleware
      # Rack env key under which ActionController stores the controller
      # instance handling the current request.
      CONTROLLER_KEY = 'action_controller.instance'.freeze

      # app - the next Rack application in the middleware stack.
      def initialize(app)
        @app = app
      end

      # Rack entry point.
      #
      # env - A Hash containing Rack environment details.
      #
      # Returns the response triple produced by the wrapped application.
      def call(env)
        trans = transaction_from_env(env)

        begin
          # The value of this begin/ensure expression (the app response)
          # is what `call` returns; an explicit temp variable is not needed.
          trans.run { @app.call(env) }
        ensure
          # Even in the event of an error we want to submit any metrics we
          # might've gathered up to this point.
          tag_controller(trans, env) if env[CONTROLLER_KEY]
          trans.finish
        end
      end

      # Builds a Transaction pre-populated with request details.
      def transaction_from_env(env)
        trans = Transaction.new
        trans.set(:request_uri, env['REQUEST_URI'])
        trans.set(:request_method, env['REQUEST_METHOD'])

        trans
      end

      # Tags the transaction with "Controller#action" for the request.
      def tag_controller(trans, env)
        controller = env[CONTROLLER_KEY]
        trans.action = "#{controller.class.name}##{controller.action_name}"
      end
    end
  end
end
| jrjang/gitlab-ce | lib/gitlab/metrics/rack_middleware.rb | Ruby | mit | 1,094 |
/*
* Copyright 2012-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.gradle.repackage;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.gradle.api.Action;
import org.gradle.api.DefaultTask;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.bundling.Jar;
import org.springframework.boot.gradle.SpringBootPluginExtension;
import org.springframework.boot.loader.tools.DefaultLaunchScript;
import org.springframework.boot.loader.tools.LaunchScript;
import org.springframework.boot.loader.tools.Repackager;
import org.springframework.util.FileCopyUtils;
/**
* Repackage task.
*
* @author Phillip Webb
* @author Janne Valkealahti
* @author Andy Wilkinson
*/
public class RepackageTask extends DefaultTask {

	/** Warn when scanning for the main class takes longer than this (ms). */
	private static final long FIND_WARNING_TIMEOUT = TimeUnit.SECONDS.toMillis(10);

	// Name of a custom configuration whose dependencies should be nested
	// in the archive instead of the defaults.
	private String customConfiguration;

	// The Jar task instance (or its name) whose archive this task repackages.
	private Object withJarTask;

	// Explicit main class; when null it is resolved from project properties,
	// the Spring Boot extension, or the 'run' task (see setMainClass below).
	private String mainClass;

	// Artifact classifier of the repackaged archive.
	private String classifier;

	// Optional alternative output location; when set the archive is copied
	// there before repackaging.
	private File outputFile;

	public void setCustomConfiguration(String customConfiguration) {
		this.customConfiguration = customConfiguration;
	}

	public Object getWithJarTask() {
		return this.withJarTask;
	}

	public void setWithJarTask(Object withJarTask) {
		this.withJarTask = withJarTask;
	}

	public void setMainClass(String mainClass) {
		this.mainClass = mainClass;
	}

	public String getMainClass() {
		return this.mainClass;
	}

	public String getClassifier() {
		return this.classifier;
	}

	public void setClassifier(String classifier) {
		this.classifier = classifier;
	}

	void setOutputFile(File file) {
		this.outputFile = file;
	}

	/**
	 * Task action: applies a {@link RepackageAction} to every {@link Jar} task
	 * of the project, repackaging the matching archives in place.
	 */
	@TaskAction
	public void repackage() {
		Project project = getProject();
		SpringBootPluginExtension extension = project.getExtensions()
				.getByType(SpringBootPluginExtension.class);
		ProjectLibraries libraries = getLibraries();
		project.getTasks().withType(Jar.class, new RepackageAction(extension, libraries));
	}

	/**
	 * Returns the libraries to nest inside the archive. The task-level
	 * custom configuration takes precedence over the extension's setting.
	 */
	public ProjectLibraries getLibraries() {
		Project project = getProject();
		SpringBootPluginExtension extension = project.getExtensions()
				.getByType(SpringBootPluginExtension.class);
		ProjectLibraries libraries = new ProjectLibraries(project, extension);
		if (extension.getProvidedConfiguration() != null) {
			libraries.setProvidedConfigurationName(extension.getProvidedConfiguration());
		}
		if (this.customConfiguration != null) {
			libraries.setCustomConfigurationName(this.customConfiguration);
		}
		else if (extension.getCustomConfiguration() != null) {
			libraries.setCustomConfigurationName(extension.getCustomConfiguration());
		}
		return libraries;
	}

	/**
	 * Action to repackage JARs.
	 */
	private class RepackageAction implements Action<Jar> {

		private final SpringBootPluginExtension extension;

		private final ProjectLibraries libraries;

		RepackageAction(SpringBootPluginExtension extension, ProjectLibraries libraries) {
			this.extension = extension;
			this.libraries = libraries;
		}

		@Override
		public void execute(Jar jarTask) {
			// Skip when the repackage task itself is disabled or when this
			// Jar task is not the one this repackage task is bound to.
			if (!RepackageTask.this.isEnabled()) {
				getLogger().info("Repackage disabled");
				return;
			}
			Object withJarTask = RepackageTask.this.withJarTask;
			if (!isTaskMatch(jarTask, withJarTask)) {
				getLogger().info(
						"Jar task not repackaged (didn't match withJarTask): " + jarTask);
				return;
			}
			File file = jarTask.getArchivePath();
			if (file.exists()) {
				repackage(file);
			}
		}

		// Decides whether the given Jar task should be repackaged by this
		// task. With no explicit withJarTask, only the unclassified archive
		// is taken — and only if no other repackage task claimed it.
		private boolean isTaskMatch(Jar task, Object withJarTask) {
			if (withJarTask == null) {
				if ("".equals(task.getClassifier())) {
					Set<Object> tasksWithCustomRepackaging = new HashSet<Object>();
					for (RepackageTask repackageTask : RepackageTask.this.getProject()
							.getTasks().withType(RepackageTask.class)) {
						if (repackageTask.getWithJarTask() != null) {
							tasksWithCustomRepackaging
									.add(repackageTask.getWithJarTask());
						}
					}
					return !tasksWithCustomRepackaging.contains(task);
				}
				return false;
			}
			// Match either by task identity or by task name.
			return task.equals(withJarTask) || task.getName().equals(withJarTask);
		}

		// Performs the actual repackaging, optionally copying the archive
		// to the configured output file first.
		private void repackage(File file) {
			File outputFile = RepackageTask.this.outputFile;
			if (outputFile != null && !file.equals(outputFile)) {
				copy(file, outputFile);
				file = outputFile;
			}
			Repackager repackager = new LoggingRepackager(file);
			setMainClass(repackager);
			if (this.extension.convertLayout() != null) {
				repackager.setLayout(this.extension.convertLayout());
			}
			repackager.setBackupSource(this.extension.isBackupSource());
			try {
				LaunchScript launchScript = getLaunchScript();
				repackager.repackage(file, this.libraries, launchScript);
			}
			catch (IOException ex) {
				throw new IllegalStateException(ex.getMessage(), ex);
			}
		}

		private void copy(File source, File dest) {
			try {
				FileCopyUtils.copy(source, dest);
			}
			catch (IOException ex) {
				throw new IllegalStateException(ex.getMessage(), ex);
			}
		}

		// Resolves the main class, in increasing priority: project property
		// 'mainClassName' (or the 'ext' property of the same name), then the
		// 'run' task's 'main' property, then the extension, then the task's
		// own mainClass setting.
		private void setMainClass(Repackager repackager) {
			String mainClass;
			if (getProject().hasProperty("mainClassName")) {
				mainClass = (String) getProject().property("mainClassName");
			}
			else {
				ExtraPropertiesExtension extraProperties = (ExtraPropertiesExtension) getProject()
						.getExtensions().getByName("ext");
				mainClass = (String) extraProperties.get("mainClassName");
			}
			if (RepackageTask.this.mainClass != null) {
				mainClass = RepackageTask.this.mainClass;
			}
			else if (this.extension.getMainClass() != null) {
				mainClass = this.extension.getMainClass();
			}
			else {
				Task runTask = getProject().getTasks().findByName("run");
				if (runTask != null && runTask.hasProperty("main")) {
					mainClass = (String) getProject().getTasks().getByName("run")
							.property("main");
				}
			}
			getLogger().info("Setting mainClass: " + mainClass);
			repackager.setMainClass(mainClass);
		}

		// Returns the embedded launch script when the archive should be
		// directly executable, otherwise null.
		private LaunchScript getLaunchScript() throws IOException {
			if (this.extension.isExecutable()
					|| this.extension.getEmbeddedLaunchScript() != null) {
				return new DefaultLaunchScript(this.extension.getEmbeddedLaunchScript(),
						this.extension.getEmbeddedLaunchScriptProperties());
			}
			return null;
		}

	}

	/**
	 * {@link Repackager} that also logs when searching takes too long.
	 */
	private class LoggingRepackager extends Repackager {

		LoggingRepackager(File source) {
			super(source);
		}

		@Override
		protected String findMainMethod(java.util.jar.JarFile source) throws IOException {
			long startTime = System.currentTimeMillis();
			try {
				return super.findMainMethod(source);
			}
			finally {
				// Scanning every class for a main method can be slow on big
				// archives; nudge the user towards configuring it explicitly.
				long duration = System.currentTimeMillis() - startTime;
				if (duration > FIND_WARNING_TIMEOUT) {
					getLogger().warn("Searching for the main-class is taking "
							+ "some time, consider using setting "
							+ "'springBoot.mainClass'");
				}
			}
		}

	}

}
| rokn/Count_Words_2015 | testing/spring-boot-master/spring-boot-tools/spring-boot-gradle-plugin/src/main/java/org/springframework/boot/gradle/repackage/RepackageTask.java | Java | mit | 7,655 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package contains various command line wrappers to programs used in
pymatgen that do not have Python equivalents.
"""
| gVallverdu/pymatgen | pymatgen/command_line/__init__.py | Python | mit | 237 |
package main
import (
"fmt"
"log"
"net/http"
"gopkg.in/jcelliott/turnpike.v1"
)
// main wires up a turnpike WAMP endpoint at /ws, serves static files from
// ./web at the root, and blocks serving HTTP on port 8080.
func main() {
	server := turnpike.NewServer()

	http.Handle("/ws", server.Handler)
	http.Handle("/", http.FileServer(http.Dir("web")))

	fmt.Println("Listening on port 8080")

	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal("ListenAndServe:", err)
	}
}
| DaniloMoura1/turnpike | examples/hello/server.go | GO | mit | 351 |
# Fact: lsbdistid
#
# Purpose: Return Linux Standard Base information for the host.
#
# Resolution:
# Uses the lsbdistid key of the os structured fact, which itself
# uses the `lsb_release` system command.
#
# Caveats:
# Only works on Linux (and the kfreebsd derivative) systems.
# Requires the `lsb_release` program, which may not be installed by default.
# Also is as only as accurate as that program outputs.
Facter.add(:lsbdistid) do
  # Only resolve when the structured os fact actually carries LSB data.
  confine do
    !Facter.value(:os)["lsb"].nil?
  end
  # Use the same symbol form of the fact name as the confine block above
  # (Facter.value normalizes string and symbol names identically).
  setcode { Facter.value(:os)["lsb"]["distid"] }
end
| thejonanshow/my-boxen | vendor/bundle/ruby/2.3.0/gems/facter-2.5.1-universal-darwin/lib/facter/lsbdistid.rb | Ruby | mit | 558 |
import { sendMail } from '../functions/sendMail';
import { unsubscribe } from '../functions/unsubscribe';

// Public mailer API: bundles the mail-sending and unsubscribe helpers
// into a single namespace object for server-side consumers.
export const Mailer = {
	sendMail,
	unsubscribe,
};
| pkgodara/Rocket.Chat | packages/rocketchat-mail-messages/server/lib/Mailer.js | JavaScript | mit | 159 |
/*
* The MIT License
*
* Copyright 2018 Victor Martinez.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.cli;
import hudson.matrix.Axis;
import hudson.matrix.AxisList;
import hudson.matrix.MatrixProject;
import hudson.maven.MavenModuleSet;
import hudson.model.DirectlyModifiableView;
import hudson.model.FreeStyleProject;
import hudson.model.Label;
import hudson.model.ListView;
import hudson.model.labels.LabelExpression;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.jvnet.hudson.test.Issue;
import org.jvnet.hudson.test.JenkinsRule;
import org.jvnet.hudson.test.MockFolder;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.isEmptyString;
import static org.hamcrest.Matchers.not;
public class ListJobsCommandTest {

	@Rule public JenkinsRule j = new JenkinsRule();

	private CLICommand listJobsCommand;
	private CLICommandInvoker command;

	@Before public void setUp() {
		listJobsCommand = new ListJobsCommand();
		command = new CLICommandInvoker(j, listJobsCommand);
	}

	// A recursive view lists the items added to it (a folder and a job here)
	// but must not descend into the folder's own children.
	// The unused locals exist for their side effect of creating the items.
	@Test public void getAllJobsFromView() throws Exception {
		MockFolder folder = j.createFolder("Folder");
		MockFolder nestedFolder = folder.createProject(MockFolder.class, "NestedFolder");
		FreeStyleProject job = folder.createProject(FreeStyleProject.class, "job");
		FreeStyleProject nestedJob = nestedFolder.createProject(FreeStyleProject.class, "nestedJob");

		ListView view = new ListView("OuterFolder");
		view.setRecurse(true);
		j.jenkins.addView(view);

		((DirectlyModifiableView) j.jenkins.getView("OuterFolder")).add(folder);
		((DirectlyModifiableView) j.jenkins.getView("OuterFolder")).add(job);

		CLICommandInvoker.Result result = command.invokeWithArgs("OuterFolder");
		assertThat(result, CLICommandInvoker.Matcher.succeeded());
		assertThat(result.stdout(), containsString("Folder"));
		assertThat(result.stdout(), containsString("job"));
		assertThat(result.stdout(), not(containsString("nestedJob")));
	}

	// Listing a folder shows its direct children (jobs and sub-folders)
	// but not the sub-folders' contents.
	@Issue("JENKINS-48220")
	@Test public void getAllJobsFromFolder() throws Exception {
		MockFolder folder = j.createFolder("Folder");
		MockFolder nestedFolder = folder.createProject(MockFolder.class, "NestedFolder");

		FreeStyleProject job = folder.createProject(FreeStyleProject.class, "job");
		FreeStyleProject nestedJob = nestedFolder.createProject(FreeStyleProject.class, "nestedJob");

		CLICommandInvoker.Result result = command.invokeWithArgs("Folder");
		assertThat(result, CLICommandInvoker.Matcher.succeeded());
		assertThat(result.stdout(), containsString("job"));
		assertThat(result.stdout(), containsString("NestedFolder"));
		assertThat(result.stdout(), not(containsString("nestedJob")));
	}

	// A matrix (multi-configuration) project inside a folder must be listed
	// like any other job type.
	@Issue("JENKINS-18393")
	@Test public void getAllJobsFromFolderWithMatrixProject() throws Exception {
		MockFolder folder = j.createFolder("Folder");

		FreeStyleProject job1 = folder.createProject(FreeStyleProject.class, "job1");
		FreeStyleProject job2 = folder.createProject(FreeStyleProject.class, "job2");
		MatrixProject matrixProject = folder.createProject(MatrixProject.class, "mp");

		matrixProject.setDisplayName("downstream");
		matrixProject.setAxes(new AxisList(
				new Axis("axis", "a", "b")
		));

		Label label = LabelExpression.get("aws-linux-dummy");
		matrixProject.setAssignedLabel(label);

		CLICommandInvoker.Result result = command.invokeWithArgs("Folder");
		assertThat(result, CLICommandInvoker.Matcher.succeeded());
		assertThat(result.stdout(), containsString("job1"));
		assertThat(result.stdout(), containsString("job2"));
		assertThat(result.stdout(), containsString("mp"));
	}

	// A Maven module set inside a folder must also be listed.
	@Issue("JENKINS-18393")
	@Test public void getAllJobsFromFolderWithMavenModuleSet() throws Exception {
		MockFolder folder = j.createFolder("Folder");

		FreeStyleProject job1 = folder.createProject(FreeStyleProject.class, "job1");
		FreeStyleProject job2 = folder.createProject(FreeStyleProject.class, "job2");
		MavenModuleSet mavenProject = folder.createProject(MavenModuleSet.class, "mvn");

		CLICommandInvoker.Result result = command.invokeWithArgs("Folder");
		assertThat(result, CLICommandInvoker.Matcher.succeeded());
		assertThat(result.stdout(), containsString("job1"));
		assertThat(result.stdout(), containsString("job2"));
		assertThat(result.stdout(), containsString("mvn"));
	}

	// Querying a name that exists neither as a view nor as an item group
	// must fail with exit code 3 and print nothing on stdout. The created
	// project "mp" is deliberately unrelated to the queried name.
	@Issue("JENKINS-18393")
	@Test public void failForMatrixProject() throws Exception {
		MatrixProject matrixProject = j.createProject(MatrixProject.class, "mp");

		CLICommandInvoker.Result result = command.invokeWithArgs("MatrixJob");
		assertThat(result, CLICommandInvoker.Matcher.failedWith(3));
		assertThat(result.stdout(), isEmptyString());
		assertThat(result.stderr(), containsString("No view or item group with the given name 'MatrixJob' found."));
	}

}
| oleg-nenashev/jenkins | test/src/test/java/hudson/cli/ListJobsCommandTest.java | Java | mit | 6,254 |
/* flatpickr v4.2.1, @license MIT */
(function (global, factory) {
    // UMD wrapper: CommonJS exports, AMD define, or a browser global (global.sv).
    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
    typeof define === 'function' && define.amd ? define(['exports'], factory) :
    (factory((global.sv = {})));
}(this, (function (exports) { 'use strict';

    // Register against an existing flatpickr instance when one is present on
    // window; otherwise fall back to a minimal stub carrying only l10ns.
    var fp = typeof window !== "undefined" && window.flatpickr !== undefined
        ? window.flatpickr
        : {
            l10ns: {},
        };
    // Swedish (sv) locale data.
    var Swedish = {
        firstDayOfWeek: 1, // weeks start on Monday
        weekAbbreviation: "v",
        weekdays: {
            shorthand: ["Sön", "Mån", "Tis", "Ons", "Tor", "Fre", "Lör"],
            longhand: [
                "Söndag",
                "Måndag",
                "Tisdag",
                "Onsdag",
                "Torsdag",
                "Fredag",
                "Lördag",
            ],
        },
        months: {
            shorthand: [
                "Jan",
                "Feb",
                "Mar",
                "Apr",
                "Maj",
                "Jun",
                "Jul",
                "Aug",
                "Sep",
                "Okt",
                "Nov",
                "Dec",
            ],
            longhand: [
                "Januari",
                "Februari",
                "Mars",
                "April",
                "Maj",
                "Juni",
                "Juli",
                "Augusti",
                "September",
                "Oktober",
                "November",
                "December",
            ],
        },
        // Swedish ordinal dates are written with a period suffix.
        ordinal: function () {
            return ".";
        },
    };
    fp.l10ns.sv = Swedish;
    var sv = fp.l10ns;

    exports.Swedish = Swedish;
    exports['default'] = sv;

    Object.defineProperty(exports, '__esModule', { value: true });

})));
| extend1994/cdnjs | ajax/libs/flatpickr/4.2.1/l10n/sv.js | JavaScript | mit | 1,639 |
require 'spec_helper'
require 'command_helper'
require 'shellwords'
# Integration specs for the mina CLI: --verbose must make the generated
# deploy script echo each command, while the default mode stays quiet.
describe "Invoking the 'mina' command in a project" do
  before :each do
    # Every example runs from the fixture project directory.
    Dir.chdir root('test_env')
  end

  it 'should echo commands in verbose mode' do
    mina 'deploy', '--verbose', '--simulate'
    # Verbose scripts echo each command, shell-escaped.
    expect(stdout).to include %[echo #{Shellwords.escape('$ git')}]
  end

  it 'should not echo commands when not in verbose mode' do
    mina 'deploy', '--simulate'
    expect(stdout).not_to include %[echo #{Shellwords.escape('$ git')}]
  end
end
| ianks/mina | spec/commands/verbose_spec.rb | Ruby | mit | 523 |
##
# This file is part of WhatWeb and may be subject to
# redistribution and commercial restrictions. Please see the WhatWeb
# web site for more information on licensing and terms of use.
# http://www.morningstarsecurity.com/research/whatweb
##
Plugin.define "Citrix-NetScaler" do
author "Aung Khant <http://yehg.net/>" # 2011-02-04
version "0.1"
description "Citrix NetScaler - http://www.citrix.com/netscaler"

# Passive detection based on the Via header added by NetScaler's cache.
def passive
	m = []
	m << {:name=>"http via" } if @headers["via"] =~ /NS\-CACHE/i

	if @headers["via"] =~ /NS\-CACHE\-(\d{1,4}\.\d{1,4}):/i
		# String#scan returns an array of capture arrays ([["9.3"]]).
		# On Ruby >= 1.9 Array#to_s renders inspect-style output, so the
		# original `version.to_s` produced '[["9.3"]]'; extract the first
		# capture explicitly instead.
		version = @headers["via"].scan(/NS\-CACHE\-(\d{1,4}\.\d{1,4})/i).flatten.first
		m << {:version=>version.to_s}
	end

	m
end

end
| tempbottle/WhatWeb | plugins/citrix-netscaler.rb | Ruby | gpl-2.0 | 714 |
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "glk/glulx/glulx.h"
namespace Glk {
namespace Glulx {
#define IFFID(c1, c2, c3, c4) MKTAG(c1, c2, c3, c4)
// Set up the undo chain and (when SERIALIZE_CACHE_RAM is defined) cache the
// writable portion of the game file so saves don't re-read it from disk.
// Returns false if an allocation or the initial game-file read fails.
bool Glulx::init_serial() {
	undo_chain_num = 0;
	undo_chain_size = max_undo_level;
	undo_chain = (unsigned char **)glulx_malloc(sizeof(unsigned char *) * undo_chain_size);
	if (!undo_chain)
		return false;

#ifdef SERIALIZE_CACHE_RAM
	{
		uint len = (endmem - ramstart);
		uint res;
		// The cache holds len bytes; the previous code multiplied by
		// sizeof(unsigned char *), over-allocating roughly eightfold.
		ramcache = (unsigned char *)glulx_malloc(sizeof(unsigned char) * len);
		if (!ramcache)
			return false;
		_gameFile.seek(gamefile_start + ramstart);
		res = _gameFile.read(ramcache, len);
		if (res != len)
			return false;
	}
#endif /* SERIALIZE_CACHE_RAM */
	return true;
}
// Tear-down counterpart of init_serial(): frees every stored undo snapshot,
// the chain array itself, and (if built) the RAM cache.
void Glulx::final_serial() {
	if (undo_chain) {
		int ix;
		// Only the first undo_chain_num slots hold live snapshots.
		for (ix = 0; ix < undo_chain_num; ix++) {
			glulx_free(undo_chain[ix]);
		}
		glulx_free(undo_chain);
	}
	undo_chain = nullptr;
	undo_chain_size = 0;
	undo_chain_num = 0;

#ifdef SERIALIZE_CACHE_RAM
	if (ramcache) {
		glulx_free(ramcache);
		ramcache = nullptr;
	}
#endif /* SERIALIZE_CACHE_RAM */
}
// Snapshot the current VM state (memory, heap, stack) into an in-memory
// buffer and push it onto the front of the undo chain, evicting the oldest
// snapshot when the chain is full. Returns 0 on success, nonzero on failure.
uint Glulx::perform_saveundo() {
	dest_t dest;
	uint res;
	uint memstart = 0, memlen = 0, heapstart = 0, heaplen = 0;
	uint stackstart = 0, stacklen = 0;

	/* The format for undo-saves is simpler than for saves on disk. We
	   just have a memory chunk, a heap chunk, and a stack chunk, in
	   that order. We skip the IFF chunk headers (although the size
	   fields are still there.) We also don't bother with IFF's 16-bit
	   alignment. */

	if (undo_chain_size == 0)
		return 1;

	dest._isMem = true;

	res = 0;
	// Each chunk is preceded by a placeholder length word which is
	// back-patched below once the chunk's actual size is known.
	if (res == 0) {
		res = write_long(&dest, 0); /* space for chunk length */
	}
	if (res == 0) {
		memstart = dest._pos;
		res = write_memstate(&dest);
		memlen = dest._pos - memstart;
	}
	if (res == 0) {
		res = write_long(&dest, 0); /* space for chunk length */
	}
	if (res == 0) {
		heapstart = dest._pos;
		res = write_heapstate(&dest, false);
		heaplen = dest._pos - heapstart;
	}
	if (res == 0) {
		res = write_long(&dest, 0); /* space for chunk length */
	}
	if (res == 0) {
		stackstart = dest._pos;
		res = write_stackstate(&dest, false);
		stacklen = dest._pos - stackstart;
	}

	if (res == 0) {
		/* Trim it down to the perfect size. */
		dest._ptr = (byte *)glulx_realloc(dest._ptr, dest._pos);
		if (!dest._ptr)
			res = 1;
	}
	// Back-patch the three length words now that the sizes are known.
	if (res == 0) {
		res = reposition_write(&dest, memstart - 4);
	}
	if (res == 0) {
		res = write_long(&dest, memlen);
	}
	if (res == 0) {
		res = reposition_write(&dest, heapstart - 4);
	}
	if (res == 0) {
		res = write_long(&dest, heaplen);
	}
	if (res == 0) {
		res = reposition_write(&dest, stackstart - 4);
	}
	if (res == 0) {
		res = write_long(&dest, stacklen);
	}

	if (res == 0) {
		/* It worked. */
		// Evict the oldest snapshot if full, shift the chain down one
		// slot, and store the new snapshot at the front.
		if (undo_chain_num >= undo_chain_size) {
			glulx_free(undo_chain[undo_chain_num - 1]);
			undo_chain[undo_chain_num - 1] = nullptr;
		}
		if (undo_chain_size > 1)
			memmove(undo_chain + 1, undo_chain,
			        (undo_chain_size - 1) * sizeof(unsigned char *));
		undo_chain[0] = dest._ptr;
		if (undo_chain_num < undo_chain_size)
			undo_chain_num += 1;
		dest._ptr = nullptr;
	} else {
		/* It didn't work. */
		if (dest._ptr) {
			glulx_free(dest._ptr);
			dest._ptr = nullptr;
		}
	}

	return res;
}
// Restore the most recent undo snapshot (memory, heap, stack, in that
// order) and pop it off the chain. Returns 0 on success, nonzero when no
// snapshot is available or restoration fails.
uint Glulx::perform_restoreundo() {
	dest_t dest;
	uint res, val = 0;
	uint heapsumlen = 0;
	uint *heapsumarr = nullptr;

	/* If profiling is enabled and active then fail. */
#if VM_PROFILING
	if (profile_profiling_active())
		return 1;
#endif /* VM_PROFILING */

	if (undo_chain_size == 0 || undo_chain_num == 0)
		return 1;

	dest._isMem = true;
	dest._ptr = undo_chain[0];

	res = 0;
	// Each chunk is preceded by its length word (see perform_saveundo).
	if (res == 0) {
		res = read_long(&dest, &val);
	}
	if (res == 0) {
		res = read_memstate(&dest, val);
	}
	if (res == 0) {
		res = read_long(&dest, &val);
	}
	if (res == 0) {
		res = read_heapstate(&dest, val, false, &heapsumlen, &heapsumarr);
	}
	if (res == 0) {
		res = read_long(&dest, &val);
	}
	if (res == 0) {
		res = read_stackstate(&dest, val, false);
	}
	/* ### really, many of the failure modes of those calls ought to
	   cause fatal errors. The stack or main memory may be damaged now. */

	if (res == 0) {
		if (heapsumarr)
			res = heap_apply_summary(heapsumlen, heapsumarr);
	}

	if (res == 0) {
		/* It worked. */
		// Pop the consumed snapshot off the front of the chain.
		if (undo_chain_size > 1)
			memmove(undo_chain, undo_chain + 1,
			        (undo_chain_size - 1) * sizeof(unsigned char *));
		undo_chain_num -= 1;
		glulx_free(dest._ptr);
		dest._ptr = nullptr;
	} else {
		/* It didn't work. */
		dest._ptr = nullptr;
	}

	return res;
}
// Restore a saved game from the given stream. A stream that the Quetzal
// reader cannot open yields kNoError with no chunks loaded, matching the
// original behavior.
Common::Error Glulx::readSaveData(Common::SeekableReadStream *rs) {
	QuetzalReader reader;
	if (!reader.open(rs))
		return Common::kNoError;

	// Load in the savegame chunks
	return loadGameChunks(reader).getCode();
}
// Serialize the current game state to the given stream. The chunk set is
// fully built first and only flushed to the stream when every chunk was
// written successfully.
Common::Error Glulx::writeGameData(Common::WriteStream *ws) {
	QuetzalWriter writer;

	Common::ErrorCode ec = saveGameChunks(writer).getCode();
	if (ec == Common::kNoError)
		writer.save(ws, _savegameDescription);

	return ec;
}
// Read the Quetzal chunks of a save file and rebuild the VM state from
// them. IFhd must match this game's header; CMem, MAll and Stks restore
// memory, heap and stack respectively. Unknown chunks are skipped.
Common::Error Glulx::loadGameChunks(QuetzalReader &quetzal) {
	uint res = 0;
	uint heapsumlen = 0;
	uint *heapsumarr = nullptr;

	for (QuetzalReader::Iterator it = quetzal.begin();
		it != quetzal.end() && !res; ++it) {
		Common::SeekableReadStream *rs = it.getStream();
		dest_t dest;
		dest._src = rs;

		switch ((*it)._id) {
		case ID_IFhd:
			// The save's copy of the first 128 header bytes must match
			// the loaded game, otherwise the save belongs elsewhere.
			for (int ix = 0; ix < 128 && !res; ix++) {
				byte v = rs->readByte();
				if (Mem1(ix) != v)
					// ### non-matching header
					res = 1;
			}
			break;

		case ID_CMem:
			// Compressed main-memory image.
			res = read_memstate(&dest, rs->size());
			break;

		case MKTAG('M', 'A', 'l', 'l'):
			// Heap allocation summary (portable, big-endian).
			res = read_heapstate(&dest, rs->size(), true, &heapsumlen, &heapsumarr);
			break;

		case ID_Stks:
			// VM stack (portable, big-endian).
			res = read_stackstate(&dest, rs->size(), true);
			break;

		default:
			break;
		}

		delete rs;
	}

	if (!res) {
		if (heapsumarr) {
			/* The summary might have come from any interpreter, so it could
			   be out of order. We'll sort it. */
			glulx_sort(heapsumarr + 2, (heapsumlen - 2) / 2, 2 * sizeof(uint), &sort_heap_summary);
			res = heap_apply_summary(heapsumlen, heapsumarr);
		}
	}

	return res ? Common::kReadingFailed : Common::kNoError;
}
// Write the four Quetzal chunks describing the current VM state. Later
// chunks are skipped as soon as an earlier one fails.
Common::Error Glulx::saveGameChunks(QuetzalWriter &quetzal) {
	uint err = 0;

	// IFhd: the first 128 bytes of the game header, used on restore to
	// verify the save belongs to this game.
	{
		Common::WriteStream &ws = quetzal.add(ID_IFhd);
		for (int ix = 0; ix < 128; ix++)
			ws.writeByte(Mem1(ix));
	}

	// CMem: compressed main-memory image.
	{
		Common::WriteStream &ws = quetzal.add(ID_CMem);
		dest_t dest;
		dest._dest = &ws;
		err = write_memstate(&dest);
	}

	// MAll: heap allocation summary, in portable byte order.
	if (!err) {
		Common::WriteStream &ws = quetzal.add(MKTAG('M', 'A', 'l', 'l'));
		dest_t dest;
		dest._dest = &ws;
		err = write_heapstate(&dest, true);
	}

	// Stks: the VM stack, converted frame by frame to big-endian.
	if (!err) {
		Common::WriteStream &ws = quetzal.add(ID_Stks);
		dest_t dest;
		dest._dest = &ws;
		err = write_stackstate(&dest, true);
	}

	// All done
	return err ? Common::kUnknownError : Common::kNoError;
}
// Move the write cursor of an in-memory destination (used to back-patch
// the chunk-length words). Seeking a stream destination is a fatal error.
int Glulx::reposition_write(dest_t *dest, uint pos) {
	if (!dest->_isMem)
		error("Seeking a WriteStream isn't allowed");

	dest->_pos = pos;
	return 0;
}
// Append len bytes at the current cursor. In-memory destinations grow
// their backing buffer on demand (required size plus 1KB of slack to
// limit reallocations); stream destinations write straight through.
// Returns 0 on success, 1 on allocation failure.
int Glulx::write_buffer(dest_t *dest, const byte *ptr, uint len) {
	if (dest->_isMem) {
		if (dest->_pos + len > dest->_size) {
			dest->_size = dest->_pos + len + 1024;
			if (!dest->_ptr) {
				dest->_ptr = (byte *)glulx_malloc(dest->_size);
			} else {
				dest->_ptr = (byte *)glulx_realloc(dest->_ptr, dest->_size);
			}
			if (!dest->_ptr)
				return 1;
		}
		memcpy(dest->_ptr + dest->_pos, ptr, len);
	} else {
		dest->_dest->write(ptr, len);
	}

	dest->_pos += len;
	return 0;
}
// Read len bytes from the current cursor into ptr. Memory sources are a
// straight copy; for stream sources a short read signals failure.
// Returns 0 on success, 1 on a short stream read.
int Glulx::read_buffer(dest_t *dest, byte *ptr, uint len) {
	if (dest->_isMem) {
		memcpy(ptr, dest->_ptr + dest->_pos, len);
	} else {
		if (dest->_src->read(ptr, len) != len)
			return 1;
	}

	dest->_pos += len;
	return 0;
}
// Serialize a 32-bit value in big-endian order (the Glulx/Quetzal byte
// order used by the Write4 macro).
int Glulx::write_long(dest_t *dest, uint val) {
	unsigned char buf[4];
	Write4(buf, val);
	return write_buffer(dest, buf, 4);
}
// Serialize a 16-bit value in big-endian order.
int Glulx::write_short(dest_t *dest, uint16 val) {
	unsigned char buf[2];
	Write2(buf, val);
	return write_buffer(dest, buf, 2);
}
// Serialize a single byte.
int Glulx::write_byte(dest_t *dest, byte val) {
	return write_buffer(dest, &val, 1);
}
// Deserialize a big-endian 32-bit value; returns nonzero on a short read.
int Glulx::read_long(dest_t *dest, uint *val) {
	unsigned char buf[4];
	int res = read_buffer(dest, buf, 4);
	if (res)
		return res;
	*val = Read4(buf);
	return 0;
}
// Deserialize a big-endian 16-bit value; returns nonzero on a short read.
int Glulx::read_short(dest_t *dest, uint16 *val) {
	unsigned char buf[2];
	int res = read_buffer(dest, buf, 2);
	if (res)
		return res;
	*val = Read2(buf);
	return 0;
}
// Deserialize a single byte; returns nonzero on a short read.
int Glulx::read_byte(dest_t *dest, byte *val) {
	return read_buffer(dest, val, 1);
}
// Write the CMem chunk: the current memory size followed by RAM encoded
// as (byte XOR original-game-file-byte) with runs of zero bytes
// run-length encoded as {0x00, runlen-1} pairs (runs capped at 256).
// A trailing all-zero run is deliberately omitted.
uint Glulx::write_memstate(dest_t *dest) {
	uint res, pos;
	int val;
	int runlen;
	unsigned char ch;
#ifdef SERIALIZE_CACHE_RAM
	uint cachepos;
#endif /* SERIALIZE_CACHE_RAM */

	res = write_long(dest, endmem);
	if (res)
		return res;

	runlen = 0;
#ifdef SERIALIZE_CACHE_RAM
	cachepos = 0;
#else /* SERIALIZE_CACHE_RAM */
	_gameFile.seek(gamefile_start + ramstart);
#endif /* SERIALIZE_CACHE_RAM */

	for (pos = ramstart; pos < endmem; pos++) {
		ch = Mem1(pos);
		// Within the original file's extent, store only the difference
		// from the pristine byte, so unchanged memory compresses to runs
		// of zeros.
		if (pos < endgamefile) {
#ifdef SERIALIZE_CACHE_RAM
			val = ramcache[cachepos];
			cachepos++;
#else /* SERIALIZE_CACHE_RAM */
			val = glk_get_char_stream(gamefile);
			if (val == -1) {
				fatal_error("The game file ended unexpectedly while saving.");
			}
#endif /* SERIALIZE_CACHE_RAM */
			ch ^= (unsigned char)val;
		}
		if (ch == 0) {
			runlen++;
		} else {
			/* Write any run we've got. */
			while (runlen) {
				if (runlen >= 0x100)
					val = 0x100;
				else
					val = runlen;
				res = write_byte(dest, 0);
				if (res)
					return res;
				res = write_byte(dest, (val - 1));
				if (res)
					return res;
				runlen -= val;
			}
			/* Write the byte we got. */
			res = write_byte(dest, ch);
			if (res)
				return res;
		}
	}
	/* It's possible we've got a run left over, but we don't write it. */

	return 0;
}
// Read a CMem chunk (see write_memstate for the XOR + zero-run encoding),
// resize memory to the stored size, and reconstruct RAM by XORing the
// stored deltas back against the pristine game-file bytes. Bytes past the
// end of the chunk decode as an implicit zero run; protected memory is
// left untouched. Returns 0 on success.
uint Glulx::read_memstate(dest_t *dest, uint chunklen) {
	uint chunkend = dest->_pos + chunklen;
	uint newlen;
	uint res, pos;
	int val;
	int runlen;
	unsigned char ch, ch2;
#ifdef SERIALIZE_CACHE_RAM
	uint cachepos;
#endif /* SERIALIZE_CACHE_RAM */

	heap_clear();

	res = read_long(dest, &newlen);
	if (res)
		return res;

	res = change_memsize(newlen, false);
	if (res)
		return res;

	runlen = 0;
#ifdef SERIALIZE_CACHE_RAM
	cachepos = 0;
#else /* SERIALIZE_CACHE_RAM */
	_gameFile.seek(gamefile_start + ramstart);
#endif /* SERIALIZE_CACHE_RAM */

	for (pos = ramstart; pos < endmem; pos++) {
		if (pos < endgamefile) {
#ifdef SERIALIZE_CACHE_RAM
			val = ramcache[cachepos];
			cachepos++;
#else /* SERIALIZE_CACHE_RAM */
			// Bug fix: the read was previously nested inside the EOF
			// branch, leaving val uninitialized on the normal path.
			// Fail hard on EOF, then read unconditionally (mirroring
			// the write path in write_memstate).
			if (_gameFile.pos() >= _gameFile.size())
				fatal_error("The game file ended unexpectedly while restoring.");
			val = _gameFile.readByte();
#endif /* SERIALIZE_CACHE_RAM */
			ch = (unsigned char)val;
		} else {
			ch = 0;
		}

		if (dest->_pos >= chunkend) {
			/* we're into the final, unstored run. */
		} else if (runlen) {
			runlen--;
		} else {
			res = read_byte(dest, &ch2);
			if (res)
				return res;
			if (ch2 == 0) {
				// A zero marker is followed by (runlength - 1).
				res = read_byte(dest, &ch2);
				if (res)
					return res;
				runlen = (uint)ch2;
			} else {
				ch ^= ch2;
			}
		}

		// Never overwrite the protected memory range.
		if (pos >= protectstart && pos < protectend)
			continue;

		MemW1(pos, ch);
	}

	return 0;
}
// Write the heap's allocation summary. A null summary means the heap is
// inactive, which serializes as an empty chunk. Returns 0 on success.
uint Glulx::write_heapstate(dest_t *dest, int portable) {
	uint sumlen;
	uint *sumarray;

	uint res = heap_get_summary(&sumlen, &sumarray);
	if (res)
		return res;

	if (!sumarray)
		return 0; /* no heap */

	res = write_heapstate_sub(sumlen, sumarray, dest, portable);

	glulx_free(sumarray);
	return res;
}
// Serialize the heap summary array. Undo snapshots stay inside this
// session, so the non-portable form dumps native-endian memory as-is;
// portable saves write each word big-endian.
uint Glulx::write_heapstate_sub(uint sumlen, uint *sumarray, dest_t *dest, int portable)  {
	if (!portable)
		return write_buffer(dest, (const byte *)sumarray, sumlen * sizeof(uint));

	for (uint lx = 0; lx < sumlen; lx++) {
		uint res = write_long(dest, sumarray[lx]);
		if (res)
			return res;
	}

	return 0;
}
int Glulx::sort_heap_summary(const void *p1, const void *p2) {
uint v1 = *(const uint *)p1;
uint v2 = *(const uint *)p2;
if (v1 < v2)
return -1;
if (v1 > v2)
return 1;
return 0;
}
// Read a heap summary chunk into a newly allocated array, returned via
// *sumlen / *summary (caller owns the memory). An empty chunk means "no
// heap" and leaves both outputs zeroed. Non-portable data is a raw memory
// image; portable data is a sequence of big-endian words.
uint Glulx::read_heapstate(dest_t *dest, uint chunklen, int portable, uint *sumlen, uint **summary) {
	uint res, count, lx;
	uint *arr;

	*sumlen = 0;
	*summary = nullptr;

	if (chunklen == 0)
		return 0; /* no heap */

	if (!portable) {
		count = chunklen / sizeof(uint);
		arr = (uint *)glulx_malloc(chunklen);
		if (!arr)
			return 1;
		res = read_buffer(dest, (byte *)arr, chunklen);
		if (res)
			return res;
		*sumlen = count;
		*summary = arr;
		return 0;
	}

	// Portable form: each entry occupies 4 bytes on disk.
	count = chunklen / 4;
	arr = (uint *)glulx_malloc(count * sizeof(uint));
	if (!arr)
		return 1;

	for (lx = 0; lx < count; lx++) {
		res = read_long(dest, arr + lx);
		if (res)
			return res;
	}

	*sumlen = count;
	*summary = arr;
	return 0;
}
// Write the Stks chunk. The non-portable (undo) form dumps the stack as
// raw native memory; the portable form walks the call frames bottom-up
// and writes each frame's header, locals-format list, padded locals and
// pushed values in big-endian order. Returns 0 on success.
uint Glulx::write_stackstate(dest_t *dest, int portable) {
	uint res;
	uint lx;
	uint lastframe;

	/* If we're storing for the purpose of undo, we don't need to do any
	   byte-swapping, because the result will only be used by this session. */
	if (!portable) {
		res = write_buffer(dest, stack, stackptr);
		if (res)
			return res;
		return 0;
	}

	/* Write a portable stack image. To do this, we have to write stack
	   frames in order, bottom to top. Remember that the last word of
	   every stack frame is a pointer to the beginning of that stack frame.
	   (This includes the last frame, because the save opcode pushes on
	   a call stub before it calls perform_save().) */

	lastframe = (uint)(-1);
	while (1) {
		uint frameend, frm, frm2, frm3;
		unsigned char loctype, loccount;
		uint numlocals, frlen, locpos;

		/* Find the next stack frame (after the one in lastframe). Sadly,
		   this requires searching the stack from the top down. We have to
		   do this for *every* frame, which takes N^2 time overall. But
		   save routines usually aren't nested very deep.
		   If it becomes a practical problem, we can build a stack-frame
		   array, which requires dynamic allocation. */
		for (frm = stackptr, frameend = stackptr;
				frm != 0 && (frm2 = Stk4(frm - 4)) != lastframe;
				frameend = frm, frm = frm2) { };

		/* Write out the frame. */
		frm2 = frm;

		// Frame header: total frame length, then offset of the locals.
		frlen = Stk4(frm2);
		frm2 += 4;
		res = write_long(dest, frlen);
		if (res)
			return res;
		locpos = Stk4(frm2);
		frm2 += 4;
		res = write_long(dest, locpos);
		if (res)
			return res;

		frm3 = frm2;

		// Copy the locals-format list (pairs of type/count bytes,
		// terminated by a 0/0 pair).
		numlocals = 0;
		while (1) {
			loctype = Stk1(frm2);
			frm2 += 1;
			loccount = Stk1(frm2);
			frm2 += 1;
			res = write_byte(dest, loctype);
			if (res)
				return res;
			res = write_byte(dest, loccount);
			if (res)
				return res;
			if (loctype == 0 && loccount == 0)
				break;
			numlocals++;
		}

		// The format list is padded to a 4-byte boundary with an extra
		// 0/0 pair when it held an even number of entries.
		if ((numlocals & 1) == 0) {
			res = write_byte(dest, 0);
			if (res)
				return res;
			res = write_byte(dest, 0);
			if (res)
				return res;
			frm2 += 2;
		}

		if (frm2 != frm + locpos)
			fatal_error("Inconsistent stack frame during save.");

		/* Write out the locals. */
		for (lx = 0; lx < numlocals; lx++) {
			loctype = Stk1(frm3);
			frm3 += 1;
			loccount = Stk1(frm3);
			frm3 += 1;
			if (loctype == 0 && loccount == 0)
				break;

			/* Put in up to 0, 1, or 3 bytes of padding, depending on loctype. */
			while (frm2 & (loctype - 1)) {
				res = write_byte(dest, 0);
				if (res)
					return res;
				frm2 += 1;
			}

			/* Put in this set of locals. */
			switch (loctype) {
			case 1:
				do {
					res = write_byte(dest, Stk1(frm2));
					if (res)
						return res;
					frm2 += 1;
					loccount--;
				} while (loccount);
				break;

			case 2:
				do {
					res = write_short(dest, Stk2(frm2));
					if (res)
						return res;
					frm2 += 2;
					loccount--;
				} while (loccount);
				break;

			case 4:
				do {
					res = write_long(dest, Stk4(frm2));
					if (res)
						return res;
					frm2 += 4;
					loccount--;
				} while (loccount);
				break;

			}
		}

		if (frm2 != frm + frlen)
			fatal_error("Inconsistent stack frame during save.");

		// Values pushed after the frame body (including the call stub).
		while (frm2 < frameend) {
			res = write_long(dest, Stk4(frm2));
			if (res)
				return res;
			frm2 += 4;
		}

		/* Go on to the next frame. */
		if (frameend == stackptr)
			break; /* All done. */
		lastframe = frm;
	}

	return 0;
}
// Read the Stks chunk back into the VM stack. The non-portable (undo)
// form is a raw memory copy; the portable form is loaded as one block and
// then converted in place, frame by frame from the top down, from
// big-endian to the native stack representation. Returns 0 on success,
// 1 when the image is too large or internally inconsistent.
uint Glulx::read_stackstate(dest_t *dest, uint chunklen, int portable) {
	uint res;
	uint frameend, frm, frm2, frm3, locpos, frlen, numlocals;

	if (chunklen > stacksize)
		return 1;

	// Reset all stack registers; the image defines the whole stack.
	stackptr = chunklen;
	frameptr = 0;
	valstackbase = 0;
	localsbase = 0;

	if (!portable) {
		res = read_buffer(dest, stack, stackptr);
		if (res)
			return res;
		return 0;
	}

	/* This isn't going to be pleasant; we're going to read the data in
	   as a block, and then convert it in-place. */
	res = read_buffer(dest, stack, stackptr);
	if (res)
		return res;

	frameend = stackptr;
	while (frameend != 0) {
		/* Read the beginning-of-frame pointer. Remember, right now, the
		   whole frame is stored big-endian. So we have to read with the
		   Read*() macros, and then write with the StkW*() macros. */
		frm = Read4(stack + (frameend - 4));

		frm2 = frm;

		// Convert the frame header: frame length, then locals offset.
		frlen = Read4(stack + frm2);
		StkW4(frm2, frlen);
		frm2 += 4;
		locpos = Read4(stack + frm2);
		StkW4(frm2, locpos);
		frm2 += 4;

		/* The locals-format list is in bytes, so we don't have to convert it. */
		frm3 = frm2;
		frm2 = frm + locpos;

		numlocals = 0;

		while (1) {
			unsigned char loctype, loccount;
			loctype = Read1(stack + frm3);
			frm3 += 1;
			loccount = Read1(stack + frm3);
			frm3 += 1;

			if (loctype == 0 && loccount == 0)
				break;

			/* Skip up to 0, 1, or 3 bytes of padding, depending on loctype. */
			while (frm2 & (loctype - 1)) {
				StkW1(frm2, 0);
				frm2++;
			}

			/* Convert this set of locals. */
			switch (loctype) {
			case 1:
				do {
					/* Don't need to convert bytes. */
					frm2 += 1;
					loccount--;
				} while (loccount);
				break;

			case 2:
				do {
					uint16 loc = Read2(stack + frm2);
					StkW2(frm2, loc);
					frm2 += 2;
					loccount--;
				} while (loccount);
				break;

			case 4:
				do {
					uint loc = Read4(stack + frm2);
					StkW4(frm2, loc);
					frm2 += 4;
					loccount--;
				} while (loccount);
				break;

			}

			numlocals++;
		}

		// Mirror the writer: an even entry count implies an extra 0/0
		// padding pair in the format list.
		if ((numlocals & 1) == 0) {
			StkW1(frm3, 0);
			frm3++;
			StkW1(frm3, 0);
			frm3++;
		}

		if (frm3 != frm + locpos) {
			return 1;
		}

		// Zero any tail padding up to the 4-byte frame boundary.
		while (frm2 & 3) {
			StkW1(frm2, 0);
			frm2++;
		}

		if (frm2 != frm + frlen) {
			return 1;
		}

		/* Now, the values pushed on the stack after the call frame itself.
		   This includes the stub. */
		while (frm2 < frameend) {
			uint loc = Read4(stack + frm2);
			StkW4(frm2, loc);
			frm2 += 4;
		}

		frameend = frm;
	}

	return 0;
}
/* Recompute the game file's checksum and compare it against the value
 * stored in the header. Returns 0 if the image verifies, 1 on any
 * length, read, or checksum mismatch. */
uint Glulx::perform_verify() {
	uint fileLen = gamefile_len;

	/* A valid image is at least 256 bytes long and a multiple of 256. */
	if (fileLen < 256 || (fileLen & 0xFF) != 0)
		return 1;

	_gameFile.seek(gamefile_start);

	uint storedChecksum = 0;
	uint computedSum = 0;

	/* Walk the whole file one big-endian 32-bit word at a time. Header
	 * word 3 holds the declared file length and word 8 holds the stored
	 * checksum; every word except the checksum itself contributes to
	 * the running sum (which wraps modulo 2^32). */
	for (uint pos = 0; pos < fileLen / 4; pos++) {
		unsigned char buf[4];
		if (_gameFile.read(buf, 4) != 4)
			return 1;
		uint word = Read4(buf);

		if (pos == 3 && word != fileLen)
			return 1;

		if (pos == 8)
			storedChecksum = word;
		else
			computedSum += word;
	}

	return (computedSum == storedChecksum) ? 0 : 1;
}
} // End of namespace Glulx
} // End of namespace Glk
| somaen/scummvm | engines/glk/glulx/serial.cpp | C++ | gpl-2.0 | 20,624 |
<?php defined("SYSPATH") or die("No direct script access.");
/**
* Gallery - a web based photo album viewer and editor
* Copyright (C) 2000-2013 Bharat Mediratta
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
 * Form input that renders the "uploadify" multi-file upload widget for
 * adding items to a Gallery album (see the form_uploadify.html view).
 */
class Form_Uploadify_Core extends Form_Input {
  // Default input attributes inherited from Form_Input; "album" and
  // "script_data" are filled in via the methods below.
  protected $data = array(
    "name" => false,
    "type" => "UNKNOWN",
    "url" => "",
    "text" => "");

  /**
   * @param string $name  input name
   *
   * Seeds script_data with the session id, user agent and CSRF token;
   * these are forwarded to the uploader script, which does not share the
   * browser's normal cookie/session context (NOTE(review): inferred from
   * the values passed -- confirm against the uploadify view/handler).
   */
  public function __construct($name) {
    parent::__construct($name);
    $this->data["script_data"] = array(
      "g3sid" => Session::instance()->id(),
      "user_agent" => Input::instance()->server("HTTP_USER_AGENT"),
      "csrf" => access::csrf_token());
  }

  /**
   * Set the target album that uploads will be added to.
   * @return Form_Uploadify_Core  $this, for chaining
   */
  public function album(Item_Model $album) {
    $this->data["album"] = $album;
    return $this;
  }

  /**
   * Add or override one key/value pair passed through to the upload script.
   */
  public function script_data($key, $value) {
    $this->data["script_data"][$key] = $value;
  }

  /**
   * Render the widget view. The effective per-file size limit is the
   * smaller of PHP's upload_max_filesize and the graphics toolkit's
   * maximum processable file size.
   */
  public function render() {
    $v = new View("form_uploadify.html");
    $v->album = $this->data["album"];
    $v->script_data = $this->data["script_data"];
    $v->simultaneous_upload_limit = module::get_var("gallery", "simultaneous_upload_limit");
    $v->movies_allowed = movie::allow_uploads();
    $v->extensions = legal_file::get_filters();
    // Suhosin-encrypted sessions break the Flash uploader's session
    // handoff; the view uses this flag to warn/adjust.
    $v->suhosin_session_encrypt = (bool) ini_get("suhosin.session.encrypt");
    list ($toolkit_max_filesize_bytes, $toolkit_max_filesize) = graphics::max_filesize();
    $upload_max_filesize = trim(ini_get("upload_max_filesize"));
    $upload_max_filesize_bytes = num::convert_to_bytes($upload_max_filesize);
    if ($upload_max_filesize_bytes < $toolkit_max_filesize_bytes) {
      $v->size_limit_bytes = $upload_max_filesize_bytes;
      $v->size_limit = $upload_max_filesize;
    } else {
      $v->size_limit_bytes = $toolkit_max_filesize_bytes;
      $v->size_limit = $toolkit_max_filesize;
    }
    return $v;
  }

  // Uploads are validated server-side by the upload handler, not here.
  public function validate() {
    return true;
  }
}
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Validate
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id$
*/
/**
* @see Zend_Validate_Barcode_AdapterInterface
*/
//require_once 'Zend/Validate/Barcode/AdapterInterface.php';
/**
* @category Zend
* @package Zend_Validate
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
abstract class Zend_Validate_Barcode_AdapterAbstract
    implements Zend_Validate_Barcode_AdapterInterface
{
    /**
     * Allowed barcode lengths
     * @var integer|array|string
     */
    protected $_length;

    /**
     * Allowed barcode characters
     * @var string
     */
    protected $_characters;

    /**
     * Callback to checksum function
     * @var string|array
     */
    protected $_checksum;

    /**
     * Is a checksum value included?
     * @var boolean
     */
    protected $_hasChecksum = true;

    /**
     * Checks the length of a barcode
     *
     * $_length may be a fixed integer, -1 (any length), 'even'/'odd',
     * or an array of allowed lengths (which may itself contain -1).
     *
     * @param string $value The barcode to check for proper length
     * @return boolean
     */
    public function checkLength($value)
    {
        if (!is_string($value)) {
            return false;
        }

        $fixum  = strlen($value);
        $found  = false;
        $length = $this->getLength();
        if (is_array($length)) {
            // Use a distinct loop variable: the previous implementation
            // reused $value here, clobbering the barcode parameter while
            // scanning the allowed-length list.
            foreach ($length as $allowed) {
                if ($fixum == $allowed) {
                    $found = true;
                }

                if ($allowed == -1) {
                    $found = true;
                }
            }
        } elseif ($fixum == $length) {
            $found = true;
        } elseif ($length == -1) {
            $found = true;
        } elseif ($length == 'even') {
            $count = $fixum % 2;
            $found = ($count == 0) ? true : false;
        } elseif ($length == 'odd') {
            $count = $fixum % 2;
            $found = ($count == 1) ? true : false;
        }

        return $found;
    }

    /**
     * Checks for allowed characters within the barcode
     *
     * @param string $value The barcode to check for allowed characters
     * @return boolean
     */
    public function checkChars($value)
    {
        if (!is_string($value)) {
            return false;
        }

        $characters = $this->getCharacters();
        // The integer 128 means "the full low-ASCII range" (e.g. Code 128);
        // otherwise $_characters is a literal string of allowed characters.
        if ($characters == 128) {
            for ($x = 0; $x < 128; ++$x) {
                $value = str_replace(chr($x), '', $value);
            }
        } else {
            $chars = str_split($characters);
            foreach ($chars as $char) {
                $value = str_replace($char, '', $value);
            }
        }

        // Anything left over was not in the allowed set.
        if (strlen($value) > 0) {
            return false;
        }

        return true;
    }

    /**
     * Validates the checksum
     *
     * Dispatches to the adapter's configured checksum method (one of the
     * protected _* implementations below).
     *
     * @param string $value The barcode to check the checksum for
     * @return boolean
     */
    public function checksum($value)
    {
        $checksum = $this->getChecksum();
        if (!empty($checksum)) {
            if (method_exists($this, $checksum)) {
                return call_user_func(array($this, $checksum), $value);
            }
        }

        return false;
    }

    /**
     * Returns the allowed barcode length
     *
     * @return integer|array|string
     */
    public function getLength()
    {
        return $this->_length;
    }

    /**
     * Returns the allowed characters
     *
     * @return integer|string
     */
    public function getCharacters()
    {
        return $this->_characters;
    }

    /**
     * Returns the checksum function name
     *
     * @return string|array
     */
    public function getChecksum()
    {
        return $this->_checksum;
    }

    /**
     * Returns if barcode uses checksum
     *
     * @return boolean
     */
    public function getCheck()
    {
        return $this->_hasChecksum;
    }

    /**
     * Sets the checksum validation
     *
     * @param boolean $check
     * @return Zend_Validate_Barcode_AdapterAbstract
     */
    public function setCheck($check)
    {
        $this->_hasChecksum = (boolean) $check;
        return $this;
    }

    /**
     * Validates the checksum (Modulo 10)
     * GTIN implementation factor 3
     *
     * @param string $value The barcode to validate
     * @return boolean
     */
    protected function _gtin($value)
    {
        $barcode = substr($value, 0, -1);
        $sum     = 0;
        $length  = strlen($barcode) - 1;

        // Weights 3/1 alternate starting from the rightmost data digit.
        for ($i = 0; $i <= $length; $i++) {
            if (($i % 2) === 0) {
                $sum += $barcode[$length - $i] * 3;
            } else {
                $sum += $barcode[$length - $i];
            }
        }

        $calc     = $sum % 10;
        $checksum = ($calc === 0) ? 0 : (10 - $calc);
        if ($value[$length + 1] != $checksum) {
            return false;
        }

        return true;
    }

    /**
     * Validates the checksum (Modulo 10)
     * IDENTCODE implementation factors 9 and 4
     *
     * @param string $value The barcode to validate
     * @return boolean
     */
    protected function _identcode($value)
    {
        $barcode = substr($value, 0, -1);
        $sum     = 0;
        $length  = strlen($value) - 2;

        for ($i = 0; $i <= $length; $i++) {
            if (($i % 2) === 0) {
                $sum += $barcode[$length - $i] * 4;
            } else {
                $sum += $barcode[$length - $i] * 9;
            }
        }

        $calc     = $sum % 10;
        $checksum = ($calc === 0) ? 0 : (10 - $calc);
        if ($value[$length + 1] != $checksum) {
            return false;
        }

        return true;
    }

    /**
     * Validates the checksum (Modulo 10)
     * CODE25 implementation factor 3
     *
     * @param string $value The barcode to validate
     * @return boolean
     */
    protected function _code25($value)
    {
        $barcode = substr($value, 0, -1);
        $sum     = 0;
        $length  = strlen($barcode) - 1;

        // Unlike _gtin, the 3/1 weights run left-to-right here.
        for ($i = 0; $i <= $length; $i++) {
            if (($i % 2) === 0) {
                $sum += $barcode[$i] * 3;
            } else {
                $sum += $barcode[$i];
            }
        }

        $calc     = $sum % 10;
        $checksum = ($calc === 0) ? 0 : (10 - $calc);
        if ($value[$length + 1] != $checksum) {
            return false;
        }

        return true;
    }

    /**
     * Validates the checksum ()
     * POSTNET implementation
     *
     * @param string $value The barcode to validate
     * @return boolean
     */
    protected function _postnet($value)
    {
        $checksum = substr($value, -1, 1);
        $values   = str_split(substr($value, 0, -1));

        $check = 0;
        foreach($values as $row) {
            $check += $row;
        }

        $check %= 10;
        $check = 10 - $check;
        // When the digit sum is already a multiple of 10 the check digit is
        // 0, not 10. Without this, valid barcodes whose digits sum to a
        // multiple of 10 were rejected (inconsistent with the modulo-10
        // handling in _gtin/_identcode/_code25 above).
        if ($check == 10) {
            $check = 0;
        }
        if ($check == $checksum) {
            return true;
        }

        return false;
    }
}
| default1406/PhyLab | wecenter/system/Zend/Validate/Barcode/AdapterAbstract.php | PHP | gpl-2.0 | 7,539 |
<?php
/**
 * @package     Joomla.Administrator
 * @subpackage  com_messages
 *
 * @copyright   Copyright (C) 2005 - 2015 Open Source Matters, Inc. All rights reserved.
 * @license     GNU General Public License version 2 or later; see LICENSE.txt
 */

defined('_JEXEC') or die;

// Admin list view for private messages: search/state filter toolbar plus a
// sortable table of messages. (Comments are kept inside the PHP prologue
// only; adding markup comments would alter the rendered output.)

// Include the component HTML helpers.
JHtml::addIncludePath(JPATH_COMPONENT . '/helpers/html');

// Behaviors: tooltips, multi-select checkbox handling, "chosen" selects.
JHtml::_('bootstrap.tooltip');
JHtml::_('behavior.multiselect');
JHtml::_('formbehavior.chosen', 'select');

$user      = JFactory::getUser();
// Current list ordering column/direction, escaped for output in the
// sortable column headers and the hidden form fields below.
$listOrder = $this->escape($this->state->get('list.ordering'));
$listDirn  = $this->escape($this->state->get('list.direction'));

// Widen modal dialogs on desktop-width viewports.
JFactory::getDocument()->addStyleDeclaration(
	"
	@media (min-width: 768px) {
		div.modal {
			left: none;
			width: 500px;
			margin-left: -250px;
		}
	}
	"
);
?>
<form action="<?php echo JRoute::_('index.php?option=com_messages&view=messages'); ?>" method="post" name="adminForm" id="adminForm">
<?php if (!empty( $this->sidebar)) : ?>
	<div id="j-sidebar-container" class="span2">
		<?php echo $this->sidebar; ?>
	</div>
	<div id="j-main-container" class="span10">
<?php else : ?>
	<div id="j-main-container">
<?php endif;?>
		<div id="filter-bar" class="btn-toolbar">
			<div class="filter-search btn-group pull-left">
				<input type="text" name="filter_search" id="filter_search" placeholder="<?php echo JText::_('JSEARCH_FILTER'); ?>" value="<?php echo $this->escape($this->state->get('filter.search')); ?>" class="hasTooltip" title="<?php echo JHtml::tooltipText('COM_MESSAGES_SEARCH_IN_SUBJECT'); ?>" />
			</div>
			<div class="btn-group pull-left">
				<button type="submit" class="btn hasTooltip" title="<?php echo JHtml::tooltipText('JSEARCH_FILTER_SUBMIT'); ?>"><span class="icon-search"></span></button>
				<button type="button" class="btn hasTooltip" title="<?php echo JHtml::tooltipText('JSEARCH_FILTER_CLEAR'); ?>" onclick="document.getElementById('filter_search').value='';this.form.submit();"><span class="icon-remove"></span></button>
			</div>
			<div class="btn-group pull-left hidden-phone">
				<select name="filter_state" onchange="this.form.submit()">
					<option value=""><?php echo JText::_('JOPTION_SELECT_PUBLISHED');?></option>
					<?php echo JHtml::_('select.options', MessagesHelper::getStateOptions(), 'value', 'text', $this->state->get('filter.state'));?>
				</select>
			</div>
		</div>
		<div class="clearfix"> </div>
		<?php if (empty($this->items)) : ?>
			<div class="alert alert-no-items">
				<?php echo JText::_('JGLOBAL_NO_MATCHING_RESULTS'); ?>
			</div>
		<?php else : ?>
			<table class="table table-striped">
				<thead>
					<tr>
						<th width="20" class="center">
							<?php echo JHtml::_('grid.checkall'); ?>
						</th>
						<th class="title">
							<?php echo JHtml::_('grid.sort', 'COM_MESSAGES_HEADING_SUBJECT', 'a.subject', $listDirn, $listOrder); ?>
						</th>
						<th width="5%">
							<?php echo JHtml::_('grid.sort', 'COM_MESSAGES_HEADING_READ', 'a.state', $listDirn, $listOrder); ?>
						</th>
						<th width="15%">
							<?php echo JHtml::_('grid.sort', 'COM_MESSAGES_HEADING_FROM', 'a.user_id_from', $listDirn, $listOrder); ?>
						</th>
						<th width="20%" class="nowrap hidden-phone">
							<?php echo JHtml::_('grid.sort', 'JDATE', 'a.date_time', $listDirn, $listOrder); ?>
						</th>
					</tr>
				</thead>
				<tfoot>
					<tr>
						<td colspan="6">
							<?php echo $this->pagination->getListFooter(); ?>
						</td>
					</tr>
				</tfoot>
				<tbody>
				<?php foreach ($this->items as $i => $item) :
					$canChange = $user->authorise('core.edit.state', 'com_messages');
					?>
					<tr class="row<?php echo $i % 2; ?>">
						<td>
							<?php echo JHtml::_('grid.id', $i, $item->message_id); ?>
						</td>
						<td>
							<a href="<?php echo JRoute::_('index.php?option=com_messages&view=message&message_id=' . (int) $item->message_id); ?>">
								<?php echo $this->escape($item->subject); ?></a>
						</td>
						<td class="center">
							<?php echo JHtml::_('messages.status', $i, $item->state, $canChange); ?>
						</td>
						<td>
							<?php echo $item->user_from; ?>
						</td>
						<td class="hidden-phone">
							<?php echo JHtml::_('date', $item->date_time, JText::_('DATE_FORMAT_LC2')); ?>
						</td>
					</tr>
				<?php endforeach; ?>
				</tbody>
			</table>
		<?php endif; ?>

		<div>
			<input type="hidden" name="task" value="" />
			<input type="hidden" name="boxchecked" value="0" />
			<input type="hidden" name="filter_order" value="<?php echo $listOrder; ?>" />
			<input type="hidden" name="filter_order_Dir" value="<?php echo $listDirn; ?>" />
			<?php echo JHtml::_('form.token'); ?>
		</div>
	</div>
</form>
| renekorss/joomla-cms | administrator/components/com_messages/views/messages/tmpl/default.php | PHP | gpl-2.0 | 4,720 |
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This file is available under and governed by the GNU General Public
* License version 2 only, as published by the Free Software Foundation.
* However, the following notice accompanied the original version of this
* file:
*
* ASM: a very small and fast Java bytecode manipulation framework
* Copyright (c) 2000-2011 INRIA, France Telecom
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package jdk.internal.org.objectweb.asm.commons;
import jdk.internal.org.objectweb.asm.Handle;
import jdk.internal.org.objectweb.asm.Type;
import jdk.internal.org.objectweb.asm.signature.SignatureReader;
import jdk.internal.org.objectweb.asm.signature.SignatureVisitor;
import jdk.internal.org.objectweb.asm.signature.SignatureWriter;
/**
* A class responsible for remapping types and names. Subclasses can override
* the following methods:
*
* <ul>
* <li>{@link #map(String)} - map type</li>
* <li>{@link #mapFieldName(String, String, String)} - map field name</li>
* <li>{@link #mapMethodName(String, String, String)} - map method name</li>
* </ul>
*
* @author Eugene Kuleshov
*/
public abstract class Remapper {

    /**
     * Maps a type descriptor (e.g. {@code "Ljava/lang/String;"} or
     * {@code "[I"}) to its remapped form, recursing through array element
     * types and rewriting object internal names via {@link #map(String)}.
     * Primitive descriptors are returned unchanged.
     */
    public String mapDesc(String desc) {
        Type t = Type.getType(desc);
        switch (t.getSort()) {
        case Type.ARRAY:
            String s = mapDesc(t.getElementType().getDescriptor());
            for (int i = 0; i < t.getDimensions(); ++i) {
                s = '[' + s;
            }
            return s;
        case Type.OBJECT:
            String newType = map(t.getInternalName());
            if (newType != null) {
                return 'L' + newType + ';';
            }
        }
        // Primitive, or map() declined to rename: keep the original.
        return desc;
    }

    // Remaps a Type value (array, object or method type); returns the
    // original Type unchanged for primitives or when map() returns null.
    private Type mapType(Type t) {
        switch (t.getSort()) {
        case Type.ARRAY:
            String s = mapDesc(t.getElementType().getDescriptor());
            for (int i = 0; i < t.getDimensions(); ++i) {
                s = '[' + s;
            }
            return Type.getType(s);
        case Type.OBJECT:
            s = map(t.getInternalName());
            return s != null ? Type.getObjectType(s) : t;
        case Type.METHOD:
            return Type.getMethodType(mapMethodDesc(t.getDescriptor()));
        }
        return t;
    }

    /**
     * Maps an internal type name (e.g. {@code "java/lang/String"});
     * {@code null} maps to {@code null}.
     */
    public String mapType(String type) {
        if (type == null) {
            return null;
        }
        return mapType(Type.getObjectType(type)).getInternalName();
    }

    /**
     * Maps an array of internal type names. Returns the input array
     * unchanged (same reference) if no entry required remapping; the
     * output array is only allocated on the first renamed entry.
     */
    public String[] mapTypes(String[] types) {
        String[] newTypes = null;
        boolean needMapping = false;
        for (int i = 0; i < types.length; i++) {
            String type = types[i];
            String newType = map(type);
            if (newType != null && newTypes == null) {
                newTypes = new String[types.length];
                if (i > 0) {
                    System.arraycopy(types, 0, newTypes, 0, i);
                }
                needMapping = true;
            }
            if (needMapping) {
                newTypes[i] = newType == null ? type : newType;
            }
        }
        return needMapping ? newTypes : types;
    }

    /**
     * Maps a method descriptor, remapping each argument type and the
     * return type. {@code "()V"} is returned as-is as a fast path.
     */
    public String mapMethodDesc(String desc) {
        if ("()V".equals(desc)) {
            return desc;
        }

        Type[] args = Type.getArgumentTypes(desc);
        StringBuilder sb = new StringBuilder("(");
        for (int i = 0; i < args.length; i++) {
            sb.append(mapDesc(args[i].getDescriptor()));
        }
        Type returnType = Type.getReturnType(desc);
        if (returnType == Type.VOID_TYPE) {
            sb.append(")V");
            return sb.toString();
        }
        sb.append(')').append(mapDesc(returnType.getDescriptor()));
        return sb.toString();
    }

    /**
     * Maps a constant-pool value: {@link Type} and {@link Handle} values
     * are remapped (owner, name and descriptor for handles); everything
     * else (numbers, strings, ...) is returned unchanged.
     */
    public Object mapValue(Object value) {
        if (value instanceof Type) {
            return mapType((Type) value);
        }
        if (value instanceof Handle) {
            Handle h = (Handle) value;
            return new Handle(h.getTag(), mapType(h.getOwner()), mapMethodName(
                    h.getOwner(), h.getName(), h.getDesc()),
                    mapMethodDesc(h.getDesc()), h.isInterface());
        }
        return value;
    }

    /**
     * @param signature
     *            signature for mapper
     * @param typeSignature
     *            true if signature is a FieldTypeSignature, such as the
     *            signature parameter of the ClassVisitor.visitField or
     *            MethodVisitor.visitLocalVariable methods
     * @return signature rewritten as a string
     */
    public String mapSignature(String signature, boolean typeSignature) {
        if (signature == null) {
            return null;
        }
        SignatureReader r = new SignatureReader(signature);
        SignatureWriter w = new SignatureWriter();
        SignatureVisitor a = createSignatureRemapper(w);
        if (typeSignature) {
            r.acceptType(a);
        } else {
            r.accept(a);
        }
        return w.toString();
    }

    /**
     * @deprecated use {@link #createSignatureRemapper} instead.
     */
    @Deprecated
    protected SignatureVisitor createRemappingSignatureAdapter(
            SignatureVisitor v) {
        return new SignatureRemapper(v, this);
    }

    // Delegates to the deprecated hook so existing subclasses that
    // override createRemappingSignatureAdapter keep working.
    protected SignatureVisitor createSignatureRemapper(
            SignatureVisitor v) {
        return createRemappingSignatureAdapter(v);
    }

    /**
     * Map method name to the new name. Subclasses can override.
     *
     * @param owner
     *            owner of the method.
     * @param name
     *            name of the method.
     * @param desc
     *            descriptor of the method.
     * @return new name of the method
     */
    public String mapMethodName(String owner, String name, String desc) {
        return name;
    }

    /**
     * Map invokedynamic method name to the new name. Subclasses can override.
     *
     * @param name
     *            name of the invokedynamic.
     * @param desc
     *            descriptor of the invokedynamic.
     * @return new invokdynamic name.
     */
    public String mapInvokeDynamicMethodName(String name, String desc) {
        return name;
    }

    /**
     * Map field name to the new name. Subclasses can override.
     *
     * @param owner
     *            owner of the field.
     * @param name
     *            name of the field
     * @param desc
     *            descriptor of the field
     * @return new name of the field.
     */
    public String mapFieldName(String owner, String name, String desc) {
        return name;
    }

    /**
     * Map type name to the new name. Subclasses can override.
     *
     * @param typeName
     *            the type name
     * @return new name, default implementation is the identity.
     */
    public String map(String typeName) {
        return typeName;
    }
}
| FauxFaux/jdk9-jdk | src/java.base/share/classes/jdk/internal/org/objectweb/asm/commons/Remapper.java | Java | gpl-2.0 | 9,445 |
# Factory for Katello::ContentViewEnvironment test records.
# Sequences generate unique name/label pairs ("name1"/"label1", ...) so
# multiple instances can be built without violating uniqueness constraints.
FactoryBot.define do
  factory :katello_content_view_environment, :class => Katello::ContentViewEnvironment do
    sequence(:name) { |n| "name#{n}" }
    sequence(:label) { |n| "label#{n}" }
  end
end
| pmoravec/katello | test/factories/content_view_environment_factory.rb | Ruby | gpl-2.0 | 201 |
// Build don't link:
// Origin: Jakub Jelinek <jakub@redhat.com>
// Regression test: the invalid cast in foo::x() must produce a diagnosis
// (parse error) rather than an ICE. The "ERROR" marker must stay on the
// same line as the invalid cast for the test harness to match it.

namespace N
{
class X;
template <class T>
class Y
{
public:
inline Y () {}
inline operator const Y<X> & () const
{
return *reinterpret_cast<const Y<X> *>(this);
}
};
}
class bar
{
public:
inline bar () {}
inline bar (const ::N::Y< ::N::X>& a);
};
class foo
{
bool b;
public:
foo();
void x () throw(bar);
};
void foo::x() throw(bar)
{
if (!b) throw bar (static_cast<::N::X*>(this)); // ERROR - parse error
}
// version 1.0 - original version
// Attaches a Farbtastic color picker to every ".colorpickerField" input.
// Fields whose id contains "__i__" are skipped — presumably the unsaved
// widget-template placeholder (TODO confirm against the widget markup).
// Each field is paired with its ".s2_colorpicker" element by matching the
// first 20 characters of their ids.
jQuery(document).ready(function () {
    var version = jQuery.fn.jquery.split('.');
    if (parseFloat(version[1]) < 7) {
        // use .live as we are on jQuery prior to 1.7
        jQuery('.colorpickerField').live('click', function () {
            if (jQuery(this).attr('id').search("__i__") === -1) {
                var picker,
                    field = jQuery(this).attr('id').substr(0, 20);
                // Hide all pickers, then find the one matching this field.
                jQuery('.s2_colorpicker').hide();
                jQuery('.s2_colorpicker').each(function () {
                    if (jQuery(this).attr('id').search(field) !== -1) {
                        picker = jQuery(this).attr('id');
                    }
                });
                jQuery.farbtastic('#' + picker).linkTo(this);
                jQuery('#' + picker).slideDown();
            }
        });
    } else {
        // use .on as we are using jQuery 1.7 and up where .live is deprecated
        jQuery(document).on('focus', '.colorpickerField', function () {
            if (jQuery(this).is('.s2_initialised') || this.id.search('__i__') !== -1) {
                return; // exit early, already initialized or not activated
            }
            // Mark the field so focus/blur handlers are bound only once.
            jQuery(this).addClass('s2_initialised');
            var picker,
                field = jQuery(this).attr('id').substr(0, 20);
            jQuery('.s2_colorpicker').each(function () {
                if (jQuery(this).attr('id').search(field) !== -1) {
                    picker = jQuery(this).attr('id');
                    return false; // stop looping
                }
            });
            // Show the picker on focus, hide it again on blur.
            jQuery(this).on('focusin', function (event) {
                jQuery('.s2_colorpicker').hide();
                jQuery.farbtastic('#' + picker).linkTo(this);
                jQuery('#' + picker).slideDown();
            });
            jQuery(this).on('focusout', function (event) {
                jQuery('#' + picker).slideUp();
            });
            jQuery(this).trigger('focus'); // retrigger focus event for plugin to work
        });
    }
});
# -*- coding: utf-8 -*-
#
# synapsecollection.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Example script to show some of the possibilities of the SynapseCollection class. We
connect neurons, and get the SynapseCollection with a GetConnections call. To get
a better understanding of the connections, we plot the weights between the
source and targets.
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
def makeMatrix(sources, targets, weights):
    """
    Return a dense matrix of summed weights between source and target node_ids.

    Parameters
    ----------
    sources, targets : sequences of non-negative ints
        Parallel lists of source/target node ids (must be non-empty).
    weights : sequence of numbers
        Weight of each (source, target) connection.

    Returns
    -------
    numpy.ndarray of shape (max(sources)+1, max(targets)+1) where entry
    [s, t] is the sum of the weights of all (s, t) pairs.
    """
    srcs = np.asarray(sources, dtype=int)
    tgts = np.asarray(targets, dtype=int)
    aa = np.zeros((srcs.max() + 1, tgts.max() + 1))
    # np.add.at performs unbuffered in-place accumulation, so repeated
    # (src, trg) pairs sum correctly — same semantics as the element-wise
    # "+=" loop, without the Python-level iteration.
    np.add.at(aa, (srcs, tgts), weights)
    return aa
def plotMatrix(srcs, tgts, weights, title, pos):
    """
    Plots weight matrix.

    Draws the source/target weight matrix (built via ``makeMatrix``) into
    subplot position ``pos`` of the current figure, with axes labelled by
    node id and a colorbar for the weight scale.
    """
    plt.subplot(pos)
    plt.matshow(makeMatrix(srcs, tgts, weights), fignum=False)
    # Half-unit margins centre the matrix cells on the integer node ids.
    plt.xlim([min(tgts)-0.5, max(tgts)+0.5])
    plt.xlabel('target')
    # y limits are given max-first so sources increase downwards, matching
    # matshow's row-major, top-down orientation.
    plt.ylim([max(srcs)+0.5, min(srcs)-0.5])
    plt.ylabel('source')
    plt.title(title)
    plt.colorbar(fraction=0.046, pad=0.04)
"""
Start with a simple, one_to_one example.
We create the neurons, connect them, and get the connections. From this we can
get the connected sources, targets, and weights. The corresponding matrix will
be the identity matrix, as we have a one_to_one connection.
"""
nest.ResetKernel()
nrns = nest.Create('iaf_psc_alpha', 10)
nest.Connect(nrns, nrns, 'one_to_one')
conns = nest.GetConnections(nrns, nrns) # This returns a SynapseCollection
# We can get desired information of the SynapseCollection with simple get() call.
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
# Plot the matrix consisting of the weights between the sources and targets
plt.figure(figsize=(12, 10))
plotMatrix(srcs, tgts, weights, 'Uniform weight', 121)
"""
Add some weights to the connections, and plot the updated weight matrix.
"""
# We can set data of the connections with a simple set() call.
w = [{'weight': x*1.0} for x in range(1, 11)]
conns.set(w)
weights = conns.weight
plotMatrix(srcs, tgts, weights, 'Set weight', 122)
"""
We can also plot an all_to_all connection, with uniformly distributed weights,
and different number of sources and targets.
"""
nest.ResetKernel()
pre = nest.Create('iaf_psc_alpha', 10)
post = nest.Create('iaf_psc_delta', 5)
nest.Connect(pre, post,
syn_spec={'weight':
{'distribution': 'uniform', 'low': 0.5, 'high': 4.5}})
# Get a SynapseCollection with all connections
conns = nest.GetConnections()
srcs = conns.source
tgts = conns.target
weights = conns.weight
plt.figure(figsize=(12, 10))
plotMatrix(srcs, tgts, weights, 'All to all connection', 111)
"""
Lastly, we'll do an exmple that is a bit more complex. We connect different
neurons with different rules, synapse models and weight distributions, and get
different SynapseCollections by calling GetConnections with different inputs.
"""
nest.ResetKernel()
nrns = nest.Create('iaf_psc_alpha', 15)
nest.Connect(nrns[:5], nrns[:5],
'one_to_one',
{'synapse_model': 'stdp_synapse',
'weight': {'distribution': 'normal', 'mu': 5.0, 'sigma': 2.0}})
nest.Connect(nrns[:10], nrns[5:12],
{'rule': 'pairwise_bernoulli', 'p': 0.4},
{'weight': 4.0})
nest.Connect(nrns[5:10], nrns[:5],
{'rule': 'fixed_total_number', 'N': 5},
{'weight': 3.0})
nest.Connect(nrns[10:], nrns[:12],
'all_to_all',
{'synapse_model': 'stdp_synapse',
'weight': {'distribution': 'uniform', 'low': 1., 'high': 5.}})
nest.Connect(nrns, nrns[12:],
{'rule': 'fixed_indegree', 'indegree': 3})
# First get a SynapseCollection consisting of all the connections
conns = nest.GetConnections()
srcs = conns.source
tgts = conns.target
weights = conns.weight
plt.figure(figsize=(14, 12))
plotMatrix(list(srcs), list(tgts), weights, 'All connections', 221)
# Get SynapseCollection consisting of a subset of connections
conns = nest.GetConnections(nrns[:10], nrns[:10])
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
plotMatrix(srcs, tgts, weights, 'Connections of the first ten neurons', 222)
# Get SynapseCollection consisting of just the stdp_synapses
conns = nest.GetConnections(synapse_model='stdp_synapse')
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
plotMatrix(srcs, tgts, weights, 'Connections with stdp_synapse', 223)
# Get SynapseCollection consisting of the fixed_total_number connections, but set
# weight before plotting
conns = nest.GetConnections(nrns[5:10], nrns[:5])
w = [{'weight': x*1.0} for x in range(1, 6)]
conns.set(w)
g = conns.get(['source', 'target', 'weight'])
srcs = g['source']
tgts = g['target']
weights = g['weight']
plotMatrix(srcs, tgts, weights, 'fixed_total_number, set weight', 224)
plt.show()
| SepehrMN/nest-simulator | pynest/examples/synapsecollection.py | Python | gpl-2.0 | 5,672 |
// PR c++/20148
// { dg-options "-fshow-column -ansi -pedantic-errors -Wno-long-long" }
// Statement-expressions ("({...})") are a GNU extension; with
// -pedantic-errors G++ must reject their use in a condition and report the
// correct column. NOTE(review): the dg-error directives below use column
// prefixes and ".-1" relative line references, so no lines may be inserted
// between an error line and its directive.

void foo()
{
  if (({int c[2];})) ; // { dg-error "7:ISO C.. forbids" "7" }
  // { dg-error "7:could not convert" "17" { target *-*-* } .-1 }
}

void bar()
{
  if (({})); // { dg-error "7:ISO C.. forbids" "7" }
  // { dg-error "11:could not convert" "11" { target *-*-* } .-1 }
}
| Gurgel100/gcc | gcc/testsuite/g++.dg/parse/error26.C | C++ | gpl-2.0 | 369 |
/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2011 Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Manuel Requena <manuel.requena@cttc.es>
*/
#include "ns3/simulator.h"
#include "ns3/log.h"
#include "ns3/lte-rlc-header.h"
#include "ns3/lte-rlc-um.h"
#include "ns3/lte-rlc-sdu-status-tag.h"
#include "ns3/lte-rlc-tag.h"
// Logging component for this translation unit.
NS_LOG_COMPONENT_DEFINE ("LteRlcUm");

namespace ns3 {

// Register LteRlcUm with the ns-3 type/attribute system (see GetTypeId).
NS_OBJECT_ENSURE_REGISTERED (LteRlcUm);
// Default constructor.
// Initial state: empty transmission buffer capped at 2 MB, transmit SN and
// all UM receive state variables (VR(UR), VR(UX), VR(UH)) at 0, and a
// reordering window of 512 (half of the 1024-value space of the 10-bit SN).
// Reassembly starts waiting for a PDU whose first byte begins an SDU.
LteRlcUm::LteRlcUm ()
  : m_maxTxBufferSize (2 * 1024 * 1024), // TxBuffer capacity in bytes (also exposed as an attribute)
    m_txBufferSize (0),                  // current TxBuffer occupancy in bytes
    m_sequenceNumber (0),                // SN for the next transmitted UMD PDU
    m_vrUr (0),                          // VR(UR): earliest SN still considered for reordering
    m_vrUx (0),                          // VR(UX): SN following the PDU that triggered t-Reordering
    m_vrUh (0),                          // VR(UH): highest received SN + 1
    m_windowSize (512),                  // UM_Window_Size
    m_expectedSeqNumber (0)              // next SN expected by the reassembly process
{
  NS_LOG_FUNCTION (this);
  m_reassemblingState = WAITING_S0_FULL;
}
// Destructor. Actual cleanup (timer cancellation) happens in DoDispose,
// following the ns-3 Object lifecycle.
LteRlcUm::~LteRlcUm ()
{
  NS_LOG_FUNCTION (this);
}
// ns-3 type registration.  Exposes the transmission-buffer capacity as the
// "MaxTxBufferSize" attribute (default 2 MB, matching the constructor).
TypeId
LteRlcUm::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::LteRlcUm")
    .SetParent<LteRlc> ()
    .AddConstructor<LteRlcUm> ()
    .AddAttribute ("MaxTxBufferSize",
                   "Maximum Size of the Transmission Buffer (in Bytes)",
                   UintegerValue (2 * 1024 * 1024),
                   MakeUintegerAccessor (&LteRlcUm::m_maxTxBufferSize),
                   MakeUintegerChecker<uint32_t> ())
    ;
  return tid;
}
// Object teardown: cancel both pending timers before chaining up to the
// base-class dispose, so no scheduled event fires on a disposed entity.
void
LteRlcUm::DoDispose ()
{
  NS_LOG_FUNCTION (this);
  m_reorderingTimer.Cancel ();
  m_rbsTimer.Cancel ();

  LteRlc::DoDispose ();
}
/**
 * RLC SAP
 */

// Entry point for a PDCP PDU handed down by the upper layer.  The SDU is
// enqueued in m_txBuffer — tagged with its arrival time (for head-of-line
// delay accounting) and FULL_SDU status — unless the buffer cap would be
// exceeded, in which case the whole SDU is dropped.  In both cases a buffer
// status report is sent to the MAC and the periodic report timer is
// cancelled (DoNotifyTxOpportunity re-arms it while data remains queued).
void
LteRlcUm::DoTransmitPdcpPdu (Ptr<Packet> p)
{
  NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid << p->GetSize ());

  if (m_txBufferSize + p->GetSize () <= m_maxTxBufferSize)
    {
      /** Store arrival time */
      RlcTag timeTag (Simulator::Now ());
      p->AddPacketTag (timeTag);

      /** Store PDCP PDU */
      LteRlcSduStatusTag tag;
      tag.SetStatus (LteRlcSduStatusTag::FULL_SDU);
      p->AddPacketTag (tag);

      NS_LOG_LOGIC ("Tx Buffer: New packet added");
      m_txBuffer.push_back (p);
      m_txBufferSize += p->GetSize ();
      NS_LOG_LOGIC ("NumOfBuffers = " << m_txBuffer.size() );
      NS_LOG_LOGIC ("txBufferSize = " << m_txBufferSize);
    }
  else
    {
      // Discard full RLC SDU
      NS_LOG_LOGIC ("TxBuffer is full. RLC SDU discarded");
      NS_LOG_LOGIC ("MaxTxBufferSize = " << m_maxTxBufferSize);
      NS_LOG_LOGIC ("txBufferSize = " << m_txBufferSize);
      NS_LOG_LOGIC ("packet size = " << p->GetSize ());
    }

  /** Report Buffer Status */
  DoReportBufferStatus ();
  m_rbsTimer.Cancel ();
}
/**
 * MAC SAP
 */

// Invoked by the MAC when a transmission opportunity of 'bytes' bytes is
// granted on HARQ process 'harqId' / transport layer 'layer'.  Builds exactly
// one UMD PDU: SDUs (or one SDU segment) are popped from m_txBuffer until the
// opportunity budget is filled, E/LI header fields are pushed accordingly,
// the RLC header (with framing info derived from the first/last element's
// segmentation status) is prepended, and the PDU is handed to the MAC.
// Re-arms the buffer-status-report timer if data remains queued.
void
LteRlcUm::DoNotifyTxOpportunity (uint32_t bytes, uint8_t layer, uint8_t harqId)
{
  NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid << bytes);

  if (bytes <= 2)
    {
      // Stingy MAC: Header fix part is 2 bytes, we need more bytes for the data
      NS_LOG_LOGIC ("TX opportunity too small = " << bytes);
      return;
    }

  Ptr<Packet> packet = Create<Packet> ();
  LteRlcHeader rlcHeader;

  // Build Data field
  uint32_t nextSegmentSize = bytes - 2; // payload budget = opportunity minus 2-byte fixed header
  uint32_t nextSegmentId = 1;
  uint32_t dataFieldTotalSize = 0;
  uint32_t dataFieldAddedSize = 0;
  std::vector < Ptr<Packet> > dataField;

  // Remove the first packet from the transmission buffer.
  // If only a segment of the packet is taken, then the remaining is given back later
  if ( m_txBuffer.size () == 0 )
    {
      NS_LOG_LOGIC ("No data pending");
      return;
    }

  NS_LOG_LOGIC ("SDUs in TxBuffer = " << m_txBuffer.size ());
  NS_LOG_LOGIC ("First SDU buffer = " << *(m_txBuffer.begin()));
  NS_LOG_LOGIC ("First SDU size = " << (*(m_txBuffer.begin()))->GetSize ());
  NS_LOG_LOGIC ("Next segment size = " << nextSegmentSize);
  NS_LOG_LOGIC ("Remove SDU from TxBuffer");
  Ptr<Packet> firstSegment = (*(m_txBuffer.begin ()))->Copy ();
  m_txBufferSize -= (*(m_txBuffer.begin()))->GetSize ();
  NS_LOG_LOGIC ("txBufferSize = " << m_txBufferSize );
  m_txBuffer.erase (m_txBuffer.begin ());

  // Fill the Data field.  Each iteration handles one of three cases:
  //  (1) the SDU must be segmented (too big for the budget or > 2047 B),
  //  (2) the SDU fits and closes the PDU (budget nearly spent or buffer empty),
  //  (3) the SDU fits and another can follow (push an E bit + LI for it).
  while ( firstSegment && (firstSegment->GetSize () > 0) && (nextSegmentSize > 0) )
    {
      NS_LOG_LOGIC ("WHILE ( firstSegment && firstSegment->GetSize > 0 && nextSegmentSize > 0 )");
      NS_LOG_LOGIC (" firstSegment size = " << firstSegment->GetSize ());
      NS_LOG_LOGIC (" nextSegmentSize = " << nextSegmentSize);
      if ( (firstSegment->GetSize () > nextSegmentSize) ||
           // Segment larger than 2047 octets can only be mapped to the end of the Data field
           (firstSegment->GetSize () > 2047)
         )
        {
          // Take the minimum size, due to the 2047-bytes 3GPP exception
          // This exception is due to the length of the LI field (just 11 bits)
          uint32_t currSegmentSize = std::min (firstSegment->GetSize (), nextSegmentSize);

          NS_LOG_LOGIC (" IF ( firstSegment > nextSegmentSize ||");
          NS_LOG_LOGIC (" firstSegment > 2047 )");

          // Segment txBuffer.FirstBuffer and
          // Give back the remaining segment to the transmission buffer
          Ptr<Packet> newSegment = firstSegment->CreateFragment (0, currSegmentSize);
          NS_LOG_LOGIC (" newSegment size = " << newSegment->GetSize ());

          // Status tag of the new and remaining segments
          // Note: This is the only place where a PDU is segmented and
          // therefore its status can change
          LteRlcSduStatusTag oldTag, newTag;
          firstSegment->RemovePacketTag (oldTag);
          newSegment->RemovePacketTag (newTag);
          if (oldTag.GetStatus () == LteRlcSduStatusTag::FULL_SDU)
            {
              newTag.SetStatus (LteRlcSduStatusTag::FIRST_SEGMENT);
              oldTag.SetStatus (LteRlcSduStatusTag::LAST_SEGMENT);
            }
          else if (oldTag.GetStatus () == LteRlcSduStatusTag::LAST_SEGMENT)
            {
              newTag.SetStatus (LteRlcSduStatusTag::MIDDLE_SEGMENT);
              //oldTag.SetStatus (LteRlcSduStatusTag::LAST_SEGMENT);
            }

          // Give back the remaining segment to the transmission buffer
          firstSegment->RemoveAtStart (currSegmentSize);
          NS_LOG_LOGIC (" firstSegment size (after RemoveAtStart) = " << firstSegment->GetSize ());
          if (firstSegment->GetSize () > 0)
            {
              firstSegment->AddPacketTag (oldTag);

              m_txBuffer.insert (m_txBuffer.begin (), firstSegment);
              m_txBufferSize += (*(m_txBuffer.begin()))->GetSize ();

              NS_LOG_LOGIC (" TX buffer: Give back the remaining segment");
              NS_LOG_LOGIC (" TX buffers = " << m_txBuffer.size ());
              NS_LOG_LOGIC (" Front buffer size = " << (*(m_txBuffer.begin()))->GetSize ());
              NS_LOG_LOGIC (" txBufferSize = " << m_txBufferSize );
            }
          else
            {
              // Whole segment was taken, so adjust tag
              if (newTag.GetStatus () == LteRlcSduStatusTag::FIRST_SEGMENT)
                {
                  newTag.SetStatus (LteRlcSduStatusTag::FULL_SDU);
                }
              else if (newTag.GetStatus () == LteRlcSduStatusTag::MIDDLE_SEGMENT)
                {
                  newTag.SetStatus (LteRlcSduStatusTag::LAST_SEGMENT);
                }
            }
          // Segment is completely taken or
          // the remaining segment is given back to the transmission buffer
          firstSegment = 0;

          // Put status tag once it has been adjusted
          newSegment->AddPacketTag (newTag);

          // Add Segment to Data field
          dataFieldAddedSize = newSegment->GetSize ();
          dataFieldTotalSize += dataFieldAddedSize;
          dataField.push_back (newSegment);
          newSegment = 0;

          // ExtensionBit (Next_Segment - 1) = 0
          rlcHeader.PushExtensionBit (LteRlcHeader::DATA_FIELD_FOLLOWS);

          // no LengthIndicator for the last one
          nextSegmentSize -= dataFieldAddedSize;
          nextSegmentId++;

          // nextSegmentSize MUST be zero (only if segment is smaller or equal to 2047)

          // (NO more segments) → exit
          // break;
        }
      else if ( (nextSegmentSize - firstSegment->GetSize () <= 2) || (m_txBuffer.size () == 0) )
        {
          NS_LOG_LOGIC (" IF nextSegmentSize - firstSegment->GetSize () <= 2 || txBuffer.size == 0");
          // Add txBuffer.FirstBuffer to DataField
          dataFieldAddedSize = firstSegment->GetSize ();
          dataFieldTotalSize += dataFieldAddedSize;
          dataField.push_back (firstSegment);
          firstSegment = 0;

          // ExtensionBit (Next_Segment - 1) = 0
          rlcHeader.PushExtensionBit (LteRlcHeader::DATA_FIELD_FOLLOWS);

          // no LengthIndicator for the last one
          nextSegmentSize -= dataFieldAddedSize;
          nextSegmentId++;

          NS_LOG_LOGIC (" SDUs in TxBuffer = " << m_txBuffer.size ());
          if (m_txBuffer.size () > 0)
            {
              NS_LOG_LOGIC (" First SDU buffer = " << *(m_txBuffer.begin()));
              NS_LOG_LOGIC (" First SDU size = " << (*(m_txBuffer.begin()))->GetSize ());
            }
          NS_LOG_LOGIC (" Next segment size = " << nextSegmentSize);

          // nextSegmentSize <= 2 (only if txBuffer is not empty)

          // (NO more segments) → exit
          // break;
        }
      else // (firstSegment->GetSize () < m_nextSegmentSize) && (m_txBuffer.size () > 0)
        {
          NS_LOG_LOGIC (" IF firstSegment < NextSegmentSize && txBuffer.size > 0");
          // Add txBuffer.FirstBuffer to DataField
          dataFieldAddedSize = firstSegment->GetSize ();
          dataFieldTotalSize += dataFieldAddedSize;
          dataField.push_back (firstSegment);

          // ExtensionBit (Next_Segment - 1) = 1
          rlcHeader.PushExtensionBit (LteRlcHeader::E_LI_FIELDS_FOLLOWS);

          // LengthIndicator (Next_Segment) = txBuffer.FirstBuffer.length()
          rlcHeader.PushLengthIndicator (firstSegment->GetSize ());

          // E+LI overhead: presumably a pair of 12-bit E+LI fields packs into
          // 3 bytes, hence 2 bytes charged for odd-numbered LIs, 1 for even.
          nextSegmentSize -= ((nextSegmentId % 2) ? (2) : (1)) + dataFieldAddedSize;
          nextSegmentId++;

          NS_LOG_LOGIC (" SDUs in TxBuffer = " << m_txBuffer.size ());
          if (m_txBuffer.size () > 0)
            {
              NS_LOG_LOGIC (" First SDU buffer = " << *(m_txBuffer.begin()));
              NS_LOG_LOGIC (" First SDU size = " << (*(m_txBuffer.begin()))->GetSize ());
            }
          NS_LOG_LOGIC (" Next segment size = " << nextSegmentSize);
          NS_LOG_LOGIC (" Remove SDU from TxBuffer");

          // (more segments)
          firstSegment = (*(m_txBuffer.begin ()))->Copy ();
          m_txBufferSize -= (*(m_txBuffer.begin()))->GetSize ();
          m_txBuffer.erase (m_txBuffer.begin ());
          NS_LOG_LOGIC (" txBufferSize = " << m_txBufferSize );
        }
    }

  // Build RLC header
  rlcHeader.SetSequenceNumber (m_sequenceNumber++);

  // Build RLC PDU with DataField and Header
  std::vector< Ptr<Packet> >::iterator it;
  it = dataField.begin ();

  uint8_t framingInfo = 0;

  // FIRST SEGMENT
  LteRlcSduStatusTag tag;
  (*it)->RemovePacketTag (tag);
  if ( (tag.GetStatus () == LteRlcSduStatusTag::FULL_SDU) ||
       (tag.GetStatus () == LteRlcSduStatusTag::FIRST_SEGMENT) )
    {
      framingInfo |= LteRlcHeader::FIRST_BYTE;
    }
  else
    {
      framingInfo |= LteRlcHeader::NO_FIRST_BYTE;
    }
  (*it)->AddPacketTag (tag);

  while (it < dataField.end ())
    {
      NS_LOG_LOGIC ("Adding SDU/segment to packet, length = " << (*it)->GetSize ());
      packet->AddAtEnd (*it);
      it++;
    }

  // LAST SEGMENT (Note: There could be only one and be the first one)
  it--;
  (*it)->RemovePacketTag (tag);
  if ( (tag.GetStatus () == LteRlcSduStatusTag::FULL_SDU) ||
       (tag.GetStatus () == LteRlcSduStatusTag::LAST_SEGMENT) )
    {
      framingInfo |= LteRlcHeader::LAST_BYTE;
    }
  else
    {
      framingInfo |= LteRlcHeader::NO_LAST_BYTE;
    }
  (*it)->AddPacketTag (tag);

  rlcHeader.SetFramingInfo (framingInfo);

  NS_LOG_LOGIC ("RLC header: " << rlcHeader);
  packet->AddHeader (rlcHeader);

  // Sender timestamp
  RlcTag rlcTag (Simulator::Now ());
  packet->AddByteTag (rlcTag);
  m_txPdu (m_rnti, m_lcid, packet->GetSize ());

  // Send RLC PDU to MAC layer
  LteMacSapProvider::TransmitPduParameters params;
  params.pdu = packet;
  params.rnti = m_rnti;
  params.lcid = m_lcid;
  params.layer = layer;
  params.harqProcessId = harqId;

  m_macSapProvider->TransmitPdu (params);

  if (! m_txBuffer.empty ())
    {
      m_rbsTimer.Cancel ();
      m_rbsTimer = Simulator::Schedule (MilliSeconds (10), &LteRlcUm::ExpireRbsTimer, this);
    }
}
// HARQ delivery-failure notification from the MAC.  Intentionally a no-op
// beyond logging: this UM entity performs no retransmissions (see
// DoReportBufferStatus, where the retx fields are always reported as 0).
void
LteRlcUm::DoNotifyHarqDeliveryFailure ()
{
  NS_LOG_FUNCTION (this);
}
// Entry point for a UMD PDU delivered by the MAC.  Implements the UM receive
// operations of the spec text quoted inline: duplicate / out-of-window PDUs
// are discarded, everything else is buffered; state variables are then
// advanced, in-sequence PDUs are reassembled and delivered to PDCP, and
// t-Reordering is started/stopped as required.
void
LteRlcUm::DoReceivePdu (Ptr<Packet> p)
{
  NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid << p->GetSize ());

  // Receiver timestamp: delay is computed from the sender byte tag added in
  // DoNotifyTxOpportunity (stays zero if the tag is absent).
  RlcTag rlcTag;
  Time delay;
  if (p->FindFirstMatchingByteTag (rlcTag))
    {
      delay = Simulator::Now() - rlcTag.GetSenderTimestamp ();
    }
  m_rxPdu (m_rnti, m_lcid, p->GetSize (), delay.GetNanoSeconds ());

  // 5.1.2.2 Receive operations

  // Get RLC header parameters
  LteRlcHeader rlcHeader;
  p->PeekHeader (rlcHeader);
  NS_LOG_LOGIC ("RLC header: " << rlcHeader);
  SequenceNumber10 seqNumber = rlcHeader.GetSequenceNumber ();

  // 5.1.2.2.1 General
  // The receiving UM RLC entity shall maintain a reordering window according to state variable VR(UH) as follows:
  // - a SN falls within the reordering window if (VR(UH) - UM_Window_Size) <= SN < VR(UH);
  // - a SN falls outside of the reordering window otherwise.
  // When receiving an UMD PDU from lower layer, the receiving UM RLC entity shall:
  // - either discard the received UMD PDU or place it in the reception buffer (see sub clause 5.1.2.2.2);
  // - if the received UMD PDU was placed in the reception buffer:
  // - update state variables, reassemble and deliver RLC SDUs to upper layer and start/stop t-Reordering as needed (see sub clause 5.1.2.2.3);
  // When t-Reordering expires, the receiving UM RLC entity shall:
  // - update state variables, reassemble and deliver RLC SDUs to upper layer and start t-Reordering as needed (see sub clause 5.1.2.2.4).

  // 5.1.2.2.2 Actions when an UMD PDU is received from lower layer
  // When an UMD PDU with SN = x is received from lower layer, the receiving UM RLC entity shall:
  // - if VR(UR) < x < VR(UH) and the UMD PDU with SN = x has been received before; or
  // - if (VR(UH) - UM_Window_Size) <= x < VR(UR):
  // - discard the received UMD PDU;
  // - else:
  // - place the received UMD PDU in the reception buffer.

  NS_LOG_LOGIC ("VR(UR) = " << m_vrUr);
  NS_LOG_LOGIC ("VR(UX) = " << m_vrUx);
  NS_LOG_LOGIC ("VR(UH) = " << m_vrUh);
  NS_LOG_LOGIC ("SN = " << seqNumber);

  // Re-base modular comparisons on the lower window edge VR(UH) - UM_Window_Size.
  m_vrUr.SetModulusBase (m_vrUh - m_windowSize);
  m_vrUh.SetModulusBase (m_vrUh - m_windowSize);
  seqNumber.SetModulusBase (m_vrUh - m_windowSize);

  if ( ( (m_vrUr < seqNumber) && (seqNumber < m_vrUh) && (m_rxBuffer.count (seqNumber.GetValue ()) > 0) ) ||
       ( ((m_vrUh - m_windowSize) <= seqNumber) && (seqNumber < m_vrUr) )
     )
    {
      // Duplicate inside the window, or SN already delivered/too old.
      NS_LOG_LOGIC ("PDU discarded");
      p = 0;
      return;
    }
  else
    {
      NS_LOG_LOGIC ("Place PDU in the reception buffer");
      m_rxBuffer[seqNumber.GetValue ()] = p;
    }

  // 5.1.2.2.3 Actions when an UMD PDU is placed in the reception buffer
  // When an UMD PDU with SN = x is placed in the reception buffer, the receiving UM RLC entity shall:

  // - if x falls outside of the reordering window:
  //    - update VR(UH) to x + 1;
  //    - reassemble RLC SDUs from any UMD PDUs with SN that falls outside of the reordering window, remove
  //      RLC headers when doing so and deliver the reassembled RLC SDUs to upper layer in ascending order of the
  //      RLC SN if not delivered before;
  //    - if VR(UR) falls outside of the reordering window:
  //        - set VR(UR) to (VR(UH) - UM_Window_Size);
  if ( ! IsInsideReorderingWindow (seqNumber))
    {
      NS_LOG_LOGIC ("SN is outside the reordering window");

      m_vrUh = seqNumber + 1;
      NS_LOG_LOGIC ("New VR(UH) = " << m_vrUh);

      ReassembleOutsideWindow ();

      if ( ! IsInsideReorderingWindow (m_vrUr) )
        {
          m_vrUr = m_vrUh - m_windowSize;
          NS_LOG_LOGIC ("VR(UR) is outside the reordering window");
          NS_LOG_LOGIC ("New VR(UR) = " << m_vrUr);
        }
    }

  // - if the reception buffer contains an UMD PDU with SN = VR(UR):
  //    - update VR(UR) to the SN of the first UMD PDU with SN > current VR(UR) that has not been received;
  //    - reassemble RLC SDUs from any UMD PDUs with SN < updated VR(UR), remove RLC headers when doing
  //      so and deliver the reassembled RLC SDUs to upper layer in ascending order of the RLC SN if not delivered
  //      before;
  if ( m_rxBuffer.count (m_vrUr.GetValue ()) > 0 )
    {
      NS_LOG_LOGIC ("Reception buffer contains SN = " << m_vrUr);

      std::map <uint16_t, Ptr<Packet> >::iterator it;
      uint16_t newVrUr;
      SequenceNumber10 oldVrUr = m_vrUr;

      it = m_rxBuffer.find (m_vrUr.GetValue ());
      newVrUr = (it->first) + 1;
      // Advance past the run of consecutively received SNs.
      while ( m_rxBuffer.count (newVrUr) > 0 )
        {
          newVrUr++;
        }
      m_vrUr = newVrUr;
      NS_LOG_LOGIC ("New VR(UR) = " << m_vrUr);

      ReassembleSnInterval (oldVrUr, m_vrUr);
    }

  // m_vrUh can change previously, set new modulus base
  // for the t-Reordering timer-related comparisons
  m_vrUr.SetModulusBase (m_vrUh - m_windowSize);
  m_vrUx.SetModulusBase (m_vrUh - m_windowSize);
  m_vrUh.SetModulusBase (m_vrUh - m_windowSize);

  // - if t-Reordering is running:
  //    - if VR(UX) <= VR(UR); or
  //    - if VR(UX) falls outside of the reordering window and VR(UX) is not equal to VR(UH)::
  //        - stop and reset t-Reordering;
  if ( m_reorderingTimer.IsRunning () )
    {
      NS_LOG_LOGIC ("Reordering timer is running");

      if ( (m_vrUx <= m_vrUr) ||
           ((! IsInsideReorderingWindow (m_vrUx)) && (m_vrUx != m_vrUh)) )
        {
          NS_LOG_LOGIC ("Stop reordering timer");
          m_reorderingTimer.Cancel ();
        }
    }

  // - if t-Reordering is not running (includes the case when t-Reordering is stopped due to actions above):
  //    - if VR(UH) > VR(UR):
  //        - start t-Reordering;
  //        - set VR(UX) to VR(UH).
  if ( ! m_reorderingTimer.IsRunning () )
    {
      NS_LOG_LOGIC ("Reordering timer is not running");

      if ( m_vrUh > m_vrUr )
        {
          NS_LOG_LOGIC ("VR(UH) > VR(UR)");
          NS_LOG_LOGIC ("Start reordering timer");
          m_reorderingTimer = Simulator::Schedule (Time ("0.1s"),
                                                   &LteRlcUm::ExpireReorderingTimer ,this);
          m_vrUx = m_vrUh;
          NS_LOG_LOGIC ("New VR(UX) = " << m_vrUx);
        }
    }
}
// Returns true iff seqNumber lies inside the reordering window
// [VR(UH) - UM_Window_Size, VR(UH)).
//
// NOTE(review): this predicate is not side-effect free — it re-bases the
// modulus of the member m_vrUh (and of its local seqNumber copy) so the
// modular comparison below is well defined.  Callers apparently rely on
// re-basing happening before every comparison; confirm before "fixing".
bool
LteRlcUm::IsInsideReorderingWindow (SequenceNumber10 seqNumber)
{
  NS_LOG_FUNCTION (this << seqNumber);
  NS_LOG_LOGIC ("Reordering Window: " <<
                m_vrUh << " - " << m_windowSize << " <= " << seqNumber << " < " << m_vrUh);

  m_vrUh.SetModulusBase (m_vrUh - m_windowSize);
  seqNumber.SetModulusBase (m_vrUh - m_windowSize);

  if ( ((m_vrUh - m_windowSize) <= seqNumber) && (seqNumber < m_vrUh))
    {
      NS_LOG_LOGIC (seqNumber << " is INSIDE the reordering window");
      return true;
    }
  else
    {
      NS_LOG_LOGIC (seqNumber << " is OUTSIDE the reordering window");
      return false;
    }
}
// Strip the RLC header from one UMD PDU, split its Data field back into
// SDUs/segments according to the E/LI header fields, then run the two-state
// reassembly machine (WAITING_S0_FULL / WAITING_SI_SF) to deliver complete
// PDCP PDUs to the upper layer.  A gap in the SN sequence (detected against
// m_expectedSeqNumber) selects the "with losses" half of the machine, which
// discards partial SDUs that can no longer be completed.
void
LteRlcUm::ReassembleAndDeliver (Ptr<Packet> packet)
{
  LteRlcHeader rlcHeader;
  packet->RemoveHeader (rlcHeader);
  uint8_t framingInfo = rlcHeader.GetFramingInfo ();
  SequenceNumber10 currSeqNumber = rlcHeader.GetSequenceNumber ();
  bool expectedSnLost;

  // Detect SN gaps relative to the reassembly process' expectation.
  if ( currSeqNumber != m_expectedSeqNumber )
    {
      expectedSnLost = true;
      NS_LOG_LOGIC ("There are losses. Expected SN = " << m_expectedSeqNumber << ". Current SN = " << currSeqNumber);
      m_expectedSeqNumber = currSeqNumber + 1;
    }
  else
    {
      expectedSnLost = false;
      NS_LOG_LOGIC ("No losses. Expected SN = " << m_expectedSeqNumber << ". Current SN = " << currSeqNumber);
      m_expectedSeqNumber++;
    }

  // Build list of SDUs: each E=1 pops an LI and splits off one fragment;
  // the final E=0 element is the remainder of the packet.
  uint8_t extensionBit;
  uint16_t lengthIndicator;
  do
    {
      extensionBit = rlcHeader.PopExtensionBit ();
      NS_LOG_LOGIC ("E = " << (uint16_t)extensionBit);

      if ( extensionBit == 0 )
        {
          m_sdusBuffer.push_back (packet);
        }
      else // extensionBit == 1
        {
          lengthIndicator = rlcHeader.PopLengthIndicator ();
          NS_LOG_LOGIC ("LI = " << lengthIndicator);

          // Check if there is enough data in the packet
          if ( lengthIndicator >= packet->GetSize () )
            {
              NS_LOG_LOGIC ("INTERNAL ERROR: Not enough data in the packet (" << packet->GetSize () << "). Needed LI=" << lengthIndicator);
            }

          // Split packet in two fragments
          Ptr<Packet> data_field = packet->CreateFragment (0, lengthIndicator);
          packet->RemoveAtStart (lengthIndicator);

          m_sdusBuffer.push_back (data_field);
        }
    }
  while ( extensionBit == 1 );

  std::list < Ptr<Packet> >::iterator it;

  // Current reassembling state
  if (m_reassemblingState == WAITING_S0_FULL) NS_LOG_LOGIC ("Reassembling State = 'WAITING_S0_FULL'");
  else if (m_reassemblingState == WAITING_SI_SF) NS_LOG_LOGIC ("Reassembling State = 'WAITING_SI_SF'");
  else NS_LOG_LOGIC ("Reassembling State = Unknown state");

  // Received framing Info
  NS_LOG_LOGIC ("Framing Info = " << (uint16_t)framingInfo);

  // Reassemble the list of SDUs (when there is no losses)
  if (!expectedSnLost)
    {
      switch (m_reassemblingState)
        {
          case WAITING_S0_FULL:
            switch (framingInfo)
              {
                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Deliver one or multiple PDUs
                   */
                  for ( it = m_sdusBuffer.begin () ; it != m_sdusBuffer.end () ; it++ )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (*it);
                    }
                  m_sdusBuffer.clear ();
                  break;

                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  m_reassemblingState = WAITING_SI_SF;

                  /**
                   * Deliver full PDUs
                   */
                  while ( m_sdusBuffer.size () > 1 )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }

                  /**
                   * Keep S0
                   */
                  m_keepS0 = m_sdusBuffer.front ();
                  m_sdusBuffer.pop_front ();
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Discard SI or SN
                   */
                  m_sdusBuffer.pop_front ();

                  /**
                   * Deliver zero, one or multiple PDUs
                   */
                  while ( ! m_sdusBuffer.empty () )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  if ( m_sdusBuffer.size () == 1 )
                    {
                      m_reassemblingState = WAITING_S0_FULL;
                    }
                  else
                    {
                      m_reassemblingState = WAITING_SI_SF;
                    }

                  /**
                   * Discard SI or SN
                   */
                  m_sdusBuffer.pop_front ();

                  if ( m_sdusBuffer.size () > 0 )
                    {
                      /**
                       * Deliver zero, one or multiple PDUs
                       */
                      while ( m_sdusBuffer.size () > 1 )
                        {
                          m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                          m_sdusBuffer.pop_front ();
                        }

                      /**
                       * Keep S0
                       */
                      m_keepS0 = m_sdusBuffer.front ();
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                default:
                  /**
                   * ERROR: Transition not possible
                   */
                  NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
                  break;
              }
            break;

          case WAITING_SI_SF:
            switch (framingInfo)
              {
                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Deliver (Kept)S0 + SN
                   */
                  m_keepS0->AddAtEnd (m_sdusBuffer.front ());
                  m_sdusBuffer.pop_front ();
                  m_rlcSapUser->ReceivePdcpPdu (m_keepS0);

                  /**
                   * Deliver zero, one or multiple PDUs
                   */
                  while ( ! m_sdusBuffer.empty () )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  m_reassemblingState = WAITING_SI_SF;

                  /**
                   * Keep SI
                   */
                  if ( m_sdusBuffer.size () == 1 )
                    {
                      m_keepS0->AddAtEnd (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }
                  else // m_sdusBuffer.size () > 1
                    {
                      /**
                       * Deliver (Kept)S0 + SN
                       */
                      m_keepS0->AddAtEnd (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                      m_rlcSapUser->ReceivePdcpPdu (m_keepS0);

                      /**
                       * Deliver zero, one or multiple PDUs
                       */
                      while ( m_sdusBuffer.size () > 1 )
                        {
                          m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                          m_sdusBuffer.pop_front ();
                        }

                      /**
                       * Keep S0
                       */
                      m_keepS0 = m_sdusBuffer.front ();
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                default:
                  /**
                   * ERROR: Transition not possible
                   */
                  NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
                  break;
              }
            break;

          default:
            NS_LOG_LOGIC ("INTERNAL ERROR: Wrong reassembling state = " << (uint32_t) m_reassemblingState);
            break;
        }
    }
  else // Reassemble the list of SDUs (when there are losses, i.e. the received SN is not the expected one)
    {
      switch (m_reassemblingState)
        {
          case WAITING_S0_FULL:
            switch (framingInfo)
              {
                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Deliver one or multiple PDUs
                   */
                  for ( it = m_sdusBuffer.begin () ; it != m_sdusBuffer.end () ; it++ )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (*it);
                    }
                  m_sdusBuffer.clear ();
                  break;

                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  m_reassemblingState = WAITING_SI_SF;

                  /**
                   * Deliver full PDUs
                   */
                  while ( m_sdusBuffer.size () > 1 )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }

                  /**
                   * Keep S0
                   */
                  m_keepS0 = m_sdusBuffer.front ();
                  m_sdusBuffer.pop_front ();
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Discard SN
                   */
                  m_sdusBuffer.pop_front ();

                  /**
                   * Deliver zero, one or multiple PDUs
                   */
                  while ( ! m_sdusBuffer.empty () )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  if ( m_sdusBuffer.size () == 1 )
                    {
                      m_reassemblingState = WAITING_S0_FULL;
                    }
                  else
                    {
                      m_reassemblingState = WAITING_SI_SF;
                    }

                  /**
                   * Discard SI or SN
                   */
                  m_sdusBuffer.pop_front ();

                  if ( m_sdusBuffer.size () > 0 )
                    {
                      /**
                       * Deliver zero, one or multiple PDUs
                       */
                      while ( m_sdusBuffer.size () > 1 )
                        {
                          m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                          m_sdusBuffer.pop_front ();
                        }

                      /**
                       * Keep S0
                       */
                      m_keepS0 = m_sdusBuffer.front ();
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                default:
                  /**
                   * ERROR: Transition not possible
                   */
                  NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
                  break;
              }
            break;

          case WAITING_SI_SF:
            switch (framingInfo)
              {
                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Discard S0
                   */
                  m_keepS0 = 0;

                  /**
                   * Deliver one or multiple PDUs
                   */
                  while ( ! m_sdusBuffer.empty () )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                case (LteRlcHeader::FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  m_reassemblingState = WAITING_SI_SF;

                  /**
                   * Discard S0
                   */
                  m_keepS0 = 0;

                  /**
                   * Deliver zero, one or multiple PDUs
                   */
                  while ( m_sdusBuffer.size () > 1 )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }

                  /**
                   * Keep S0
                   */
                  m_keepS0 = m_sdusBuffer.front ();
                  m_sdusBuffer.pop_front ();
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::LAST_BYTE):
                  m_reassemblingState = WAITING_S0_FULL;

                  /**
                   * Discard S0
                   */
                  m_keepS0 = 0;

                  /**
                   * Discard SI or SN
                   */
                  m_sdusBuffer.pop_front ();

                  /**
                   * Deliver zero, one or multiple PDUs
                   */
                  while ( ! m_sdusBuffer.empty () )
                    {
                      m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                case (LteRlcHeader::NO_FIRST_BYTE | LteRlcHeader::NO_LAST_BYTE):
                  if ( m_sdusBuffer.size () == 1 )
                    {
                      m_reassemblingState = WAITING_S0_FULL;
                    }
                  else
                    {
                      m_reassemblingState = WAITING_SI_SF;
                    }

                  /**
                   * Discard S0
                   */
                  m_keepS0 = 0;

                  /**
                   * Discard SI or SN
                   */
                  m_sdusBuffer.pop_front ();

                  if ( m_sdusBuffer.size () > 0 )
                    {
                      /**
                       * Deliver zero, one or multiple PDUs
                       */
                      while ( m_sdusBuffer.size () > 1 )
                        {
                          m_rlcSapUser->ReceivePdcpPdu (m_sdusBuffer.front ());
                          m_sdusBuffer.pop_front ();
                        }

                      /**
                       * Keep S0
                       */
                      m_keepS0 = m_sdusBuffer.front ();
                      m_sdusBuffer.pop_front ();
                    }
                  break;

                default:
                  /**
                   * ERROR: Transition not possible
                   */
                  NS_LOG_LOGIC ("INTERNAL ERROR: Transition not possible. FI = " << (uint32_t) framingInfo);
                  break;
              }
            break;

          default:
            NS_LOG_LOGIC ("INTERNAL ERROR: Wrong reassembling state = " << (uint32_t) m_reassemblingState);
            break;
        }
    }
}
// Reassemble and deliver, in ascending SN order (m_rxBuffer is a std::map,
// so iteration is SN-ordered), every buffered PDU whose SN has fallen
// outside the reordering window.  Stops at the first in-window SN.
void
LteRlcUm::ReassembleOutsideWindow (void)
{
  NS_LOG_LOGIC ("Reassemble Outside Window");

  std::map <uint16_t, Ptr<Packet> >::iterator it;
  it = m_rxBuffer.begin ();

  while ( (it != m_rxBuffer.end ()) && ! IsInsideReorderingWindow (SequenceNumber10 (it->first)) )
    {
      NS_LOG_LOGIC ("SN = " << it->first);

      // Reassemble RLC SDUs and deliver the PDCP PDU to upper layer
      ReassembleAndDeliver (it->second);

      // Advance before erasing: map::erase invalidates only the erased iterator.
      std::map <uint16_t, Ptr<Packet> >::iterator it_tmp = it;
      ++it;
      m_rxBuffer.erase (it_tmp);
    }

  if (it != m_rxBuffer.end ())
    {
      NS_LOG_LOGIC ("(SN = " << it->first << ") is inside the reordering window");
    }
}
// Reassemble and deliver every buffered PDU with SN in
// [lowSeqNumber, highSeqNumber), skipping SNs that were never received.
//
// Fix: the original logged it->first / it->second immediately after
// m_rxBuffer.find() and BEFORE checking the iterator against end().
// Dereferencing the end iterator for a missing SN (an expected case — the
// subsequent 'if' explicitly handles it) is undefined behavior.  The
// logging now happens only once the entry is known to exist.
void
LteRlcUm::ReassembleSnInterval (SequenceNumber10 lowSeqNumber, SequenceNumber10 highSeqNumber)
{
  NS_LOG_LOGIC ("Reassemble SN between " << lowSeqNumber << " and " << highSeqNumber);

  std::map <uint16_t, Ptr<Packet> >::iterator it;

  SequenceNumber10 reassembleSn = lowSeqNumber;
  NS_LOG_LOGIC ("reassembleSN = " << reassembleSn);
  NS_LOG_LOGIC ("highSeqNumber = " << highSeqNumber);
  while (reassembleSn < highSeqNumber)
    {
      NS_LOG_LOGIC ("reassembleSn < highSeqNumber");
      it = m_rxBuffer.find (reassembleSn.GetValue ());
      if (it != m_rxBuffer.end () )
        {
          // Safe to dereference: the SN is present in the reception buffer.
          NS_LOG_LOGIC ("it->first = " << it->first);
          NS_LOG_LOGIC ("it->second = " << it->second);
          NS_LOG_LOGIC ("SN = " << it->first);

          // Reassemble RLC SDUs and deliver the PDCP PDU to upper layer
          ReassembleAndDeliver (it->second);
          m_rxBuffer.erase (it);
        }
      reassembleSn++;
    }
}
// Report the Tx-queue state to the MAC so the scheduler can grant
// transmission opportunities.  Reports the queued byte count (plus an
// estimated 2 header bytes per queued SDU) and the head-of-line delay taken
// from the arrival-time tag of the front SDU.  UM performs no
// retransmissions, so the retx/status fields are always 0.
void
LteRlcUm::DoReportBufferStatus (void)
{
  Time holDelay (0);
  uint32_t queueSize = 0;

  if (! m_txBuffer.empty ())
    {
      RlcTag holTimeTag;
      m_txBuffer.front ()->PeekPacketTag (holTimeTag);
      holDelay = Simulator::Now () - holTimeTag.GetSenderTimestamp ();

      queueSize = m_txBufferSize + 2 * m_txBuffer.size (); // Data in tx queue + estimated headers size
    }

  LteMacSapProvider::ReportBufferStatusParameters r;
  r.rnti = m_rnti;
  r.lcid = m_lcid;
  r.txQueueSize = queueSize;
  r.txQueueHolDelay = holDelay.GetMilliSeconds () ;
  r.retxQueueSize = 0;
  r.retxQueueHolDelay = 0;
  r.statusPduSize = 0;

  NS_LOG_LOGIC ("Send ReportBufferStatus = " << r.txQueueSize << ", " << r.txQueueHolDelay );
  m_macSapProvider->ReportBufferStatus (r);
}
// t-Reordering expiry handler.  Per the spec steps quoted below: advance
// VR(UR) past the gap the timer was guarding, deliver everything below the
// new VR(UR), and restart the timer (resetting VR(UX)) if PDUs are still
// missing below VR(UH).
void
LteRlcUm::ExpireReorderingTimer (void)
{
  NS_LOG_FUNCTION (this << m_rnti << (uint32_t) m_lcid);
  NS_LOG_LOGIC ("Reordering timer has expired");

  // 5.1.2.2.4 Actions when t-Reordering expires
  // When t-Reordering expires, the receiving UM RLC entity shall:
  // - update VR(UR) to the SN of the first UMD PDU with SN >= VR(UX) that has not been received;
  // - reassemble RLC SDUs from any UMD PDUs with SN < updated VR(UR), remove RLC headers when doing so
  //   and deliver the reassembled RLC SDUs to upper layer in ascending order of the RLC SN if not delivered before;
  // - if VR(UH) > VR(UR):
  //    - start t-Reordering;
  //    - set VR(UX) to VR(UH).

  std::map <uint16_t, Ptr<Packet> >::iterator it;
  SequenceNumber10 newVrUr = m_vrUx;

  // Skip the run of SNs (starting at VR(UX)) that have been received.
  while ( (it = m_rxBuffer.find (newVrUr.GetValue ())) != m_rxBuffer.end () )
    {
      newVrUr++;
    }
  SequenceNumber10 oldVrUr = m_vrUr;
  m_vrUr = newVrUr;
  NS_LOG_LOGIC ("New VR(UR) = " << m_vrUr);

  ReassembleSnInterval (oldVrUr, m_vrUr);

  if ( m_vrUh > m_vrUr)
    {
      NS_LOG_LOGIC ("Start reordering timer");
      m_reorderingTimer = Simulator::Schedule (Time ("0.1s"),
                                               &LteRlcUm::ExpireReorderingTimer, this);
      m_vrUx = m_vrUh;
      NS_LOG_LOGIC ("New VR(UX) = " << m_vrUx);
    }
}
// Periodic buffer-status-report timer handler.  While SDUs remain queued,
// re-send the buffer status to the MAC and re-arm the timer 10 ms out;
// once the buffer drains, the timer is simply not rescheduled.
void
LteRlcUm::ExpireRbsTimer (void)
{
  NS_LOG_LOGIC ("RBS Timer expires");

  if (m_txBuffer.empty ())
    {
      return; // nothing queued: let the timer lapse
    }

  DoReportBufferStatus ();
  m_rbsTimer = Simulator::Schedule (MilliSeconds (10), &LteRlcUm::ExpireRbsTimer, this);
}
} // namespace ns3
| darolt/ndnSIMQoS | src/lte/model/lte-rlc-um.cc | C++ | gpl-2.0 | 43,355 |
<?php
/**
* File containing the Services controller class
*
* @copyright Copyright (C) eZ Systems AS. All rights reserved.
* @license For full copyright and license information view LICENSE file distributed with this source code.
* @version //autogentag//
*/
namespace eZ\Publish\Core\REST\Server\Controller;
use eZ\Publish\Core\REST\Server\Controller as RestController;
use eZ\Publish\Core\REST\Server\Values;
/**
* Services controller
*/
/**
 * REST controller exposing miscellaneous service data, currently the
 * country list injected via the service container.
 */
class Services extends RestController
{
    /**
     * Country information entries, as provided at construction time.
     *
     * @var array
     */
    protected $countriesInfo;

    /**
     * @param array $countriesInfo Country information entries
     */
    public function __construct( array $countriesInfo )
    {
        $this->countriesInfo = $countriesInfo;
    }

    /**
     * Loads Country List
     *
     * @return \eZ\Publish\Core\REST\Server\Values\CountryList
     */
    public function loadCountryList()
    {
        $countryList = new Values\CountryList( $this->countriesInfo );

        return $countryList;
    }
}
| netgen/ezpublish-kernel | eZ/Publish/Core/REST/Server/Controller/Services.php | PHP | gpl-2.0 | 836 |
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* A JVM with JDP on should send multicast JDP packets regularly.
* Look at JdpOnTestCase.java and JdpOffTestCase.java
*/
import sun.management.jdp.JdpJmxPacket;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.DatagramPacket;
import java.net.MulticastSocket;
import java.net.SocketTimeoutException;
import java.util.Arrays;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Shared driver for the JDP multicast test cases (JdpOnTestCase / JdpOffTestCase).
 * Listens on a multicast socket and dispatches each received UDP packet;
 * subclasses decide, via the abstract hooks, what counts as success.
 */
public abstract class JdpTestCase {
    final Logger log = Logger.getLogger("sun.management.jdp");
    final int MAGIC = 0xC0FFEE42; // Jdp magic number.
    private static final int BUFFER_LENGTH = 64 * 1024; // max UDP size, except for IPv6 jumbograms.
    private final int TIME_OUT_FACTOR = 10; // Socket times out after 10 times the jdp pause.
    // Socket timeout in seconds; computed in run() from connection.pauseInSeconds.
    protected int timeOut;
    // Wall-clock start of run(), used by hasTestLivedLongEnough().
    private long startTime;
    protected ClientConnection connection;
    public JdpTestCase(ClientConnection connection) {
        this.connection = connection;
        JdpTestUtil.enableConsoleLogging(log, Level.ALL);
    }
    /**
     * Main loop: receive datagrams until shouldContinue() says stop.
     * Socket timeouts are routed to onSocketTimeOut(); the overall test
     * duration is bounded by hasTestLivedLongEnough() -> shutdown().
     */
    public void run() throws Exception {
        log.fine("Test started.");
        log.fine("Listening for multicast packets at " + connection.address.getHostAddress()
                + ":" + String.valueOf(connection.port));
        log.fine(initialLogMessage());
        log.fine("Pause in between packets is: " + connection.pauseInSeconds + " seconds.");
        startTime = System.currentTimeMillis();
        timeOut = connection.pauseInSeconds * TIME_OUT_FACTOR;
        log.fine("Timeout set to " + String.valueOf(timeOut) + " seconds.");
        // connectWithTimeout takes milliseconds, hence the * 1000.
        MulticastSocket socket = connection.connectWithTimeout(timeOut * 1000);
        byte[] buffer = new byte[BUFFER_LENGTH];
        DatagramPacket datagram = new DatagramPacket(buffer, buffer.length);
        do {
            try {
                socket.receive(datagram);
                onReceived(extractUDPpayload(datagram));
            } catch (SocketTimeoutException e) {
                onSocketTimeOut(e);
            }
            if (hasTestLivedLongEnough()) {
                shutdown();
            }
        } while (shouldContinue());
        log.fine("Test ended successfully.");
    }
    /**
     * Subclasses: JdpOnTestCase and JdpOffTestCase have different messages.
     */
    protected abstract String initialLogMessage();
    /**
     * Executed when the socket receives a UDP packet.
     * Non-JDP packets (wrong magic number) are logged and ignored.
     */
    private void onReceived(byte[] packet) throws Exception {
        if (isJDP(packet)) {
            Map<String, String> payload = checkStructure(packet);
            jdpPacketReceived(payload);
        } else {
            log.fine("Non JDP packet received, ignoring it.");
        }
    }
    /**
     * Determine whether the test should end.
     *
     * @return
     */
    abstract protected boolean shouldContinue();
    /**
     * This method is executed when the socket has not received any packet for timeOut seconds.
     */
    abstract protected void onSocketTimeOut(SocketTimeoutException e) throws Exception;
    /**
     * This method is executed after a correct Jdp packet has been received.
     * Routes the packet to the "this VM" or "other VM" handler by comparing
     * the INSTANCE_NAME payload entry against connection.instanceName.
     *
     * @param payload A dictionary containing the data if the received Jdp packet.
     */
    private void jdpPacketReceived(Map<String, String> payload) throws Exception {
        final String instanceName = payload.get("INSTANCE_NAME");
        if (instanceName != null && instanceName.equals(connection.instanceName)) {
            packetFromThisVMReceived(payload);
        } else {
            packetFromOtherVMReceived(payload);
        }
    }
    /**
     * This method is executed after a correct Jdp packet, coming from this VM has been received.
     *
     * @param payload A dictionary containing the data if the received Jdp packet.
     */
    protected abstract void packetFromThisVMReceived(Map<String, String> payload) throws Exception;
    /**
     * This method is executed after a correct Jdp packet, coming from another VM has been received.
     * Default behavior: log and ignore (other VMs on the network may also broadcast).
     *
     * @param payload A dictionary containing the data if the received Jdp packet.
     */
    protected void packetFromOtherVMReceived(Map<String, String> payload) {
        final String jdpName = payload.get("INSTANCE_NAME");
        log.fine("Ignoring JDP packet sent by other VM, jdp.name=" + jdpName);
    }
    /**
     * The test should stop if it has been 12 times the jdp.pause.
     * jdp.pause is how many seconds in between packets.
     * <p/>
     * This timeout (12 times)is slightly longer than the socket timeout (10 times) on purpose.
     * In the off test case, the socket should time out first.
     *
     * @return
     */
    protected boolean hasTestLivedLongEnough() {
        long now = System.currentTimeMillis();
        // timeOut is in seconds; 1.2 * 1000 converts the 20%-padded bound to ms.
        boolean haslivedLongEnough = (now - startTime) > (timeOut * 1.2 * 1000);
        return haslivedLongEnough;
    }
    /**
     * This exit condition arises when we receive UDP packets but they are not valid Jdp.
     */
    protected void shutdown() throws Exception {
        log.severe("Shutting down the test.");
        throw new Exception("Not enough JDP packets received before timeout!");
    }
    /**
     * Assert that this Jdp packet contains the required two keys.
     * <p/>
     * We expect zero packet corruption and thus fail on the first corrupted packet.
     * This might need revision.
     * NOTE(review): the message below has a typo ("minimun"); left as-is since it
     * is a runtime string. Also, get(...).length() would NPE if a key is absent.
     */
    protected Map<String, String> checkStructure(byte[] packet) throws UnsupportedEncodingException {
        Map<String, String> payload = JdpTestUtil.readPayload(packet);
        assertTrue(payload.size() >= 2, "JDP should have minimun 2 entries.");
        assertTrue(payload.get(JdpJmxPacket.UUID_KEY).length() > 0);
        assertTrue(payload.get(JdpJmxPacket.JMX_SERVICE_URL_KEY).length() > 0);
        return payload;
    }
    /**
     * Check if packet has correct JDP magic number.
     *
     * @param packet
     * @return
     * @throws IOException
     */
    private boolean isJDP(byte[] packet) throws IOException {
        int magic = JdpTestUtil.decode4ByteInt(packet, 0);
        return (magic == MAGIC);
    }
    // Trim the receive buffer down to the datagram's actual length.
    private byte[] extractUDPpayload(DatagramPacket datagram) {
        byte[] data = Arrays.copyOf(datagram.getData(), datagram.getLength());
        return data;
    }
    /**
     * Hack until I find a way to use TestNG's assertions.
     * Note: the bare "assert" only fires when the JVM runs with -ea.
     */
    private void assertTrue(boolean assertion, String message) {
        if (assertion == false) {
            log.severe(message);
            assert (false);
        }
    }
    private void assertTrue(boolean assertion) {
        assertTrue(assertion, "");
    }
}
| FauxFaux/jdk9-jdk | test/sun/management/jdp/JdpTestCase.java | Java | gpl-2.0 | 7,749 |
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Validate
* @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: LessThan.php 8064 2008-02-16 10:58:39Z thomas $
*/
/**
* @see Zend_Validate_Abstract
*/
require_once 'external/Zend/Validate/Abstract.php';
/**
* @category Zend
* @package Zend_Validate
* @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Validate_LessThan extends Zend_Validate_Abstract {
    const NOT_LESS = 'notLessThan';
    /**
     * Failure message templates, keyed by failure reason code.
     *
     * @var array
     */
    protected $_messageTemplates = array(self::NOT_LESS => "'%value%' is not less than '%max%'");
    /**
     * Template placeholders mapped onto object properties.
     *
     * @var array
     */
    protected $_messageVariables = array('max' => '_max');
    /**
     * The exclusive upper bound values are validated against.
     *
     * @var mixed
     */
    protected $_max;
    /**
     * Create the validator with its upper bound.
     *
     * @param mixed $max
     * @return void
     */
    public function __construct($max) {
        $this->setMax($max);
    }
    /**
     * Accessor for the configured upper bound.
     *
     * @return mixed
     */
    public function getMax() {
        return $this->_max;
    }
    /**
     * Replace the upper bound.
     *
     * @param mixed $max
     * @return Zend_Validate_LessThan Provides a fluent interface
     */
    public function setMax($max) {
        $this->_max = $max;
        return $this;
    }
    /**
     * Defined by Zend_Validate_Interface.
     *
     * Passes only when $value is strictly below the max option;
     * equality counts as a failure.
     *
     * @param mixed $value
     * @return boolean
     */
    public function isValid($value) {
        $this->_setValue($value);
        if ($value >= $this->_max) {
            $this->_error();
            return false;
        }
        return true;
    }
}
| ROLE/widget-store | src/main/drupal/sites/all/libraries/shindig2.5beta/external/Zend/Validate/LessThan.php | PHP | gpl-2.0 | 2,286 |
<?php
// Transliteration lookup table: one row of 16 entries per line, indexed by the
// low byte of the code point. Entry values: '' = strip the character,
// NULL = no mapping available, otherwise the ASCII replacement string.
// NOTE(review): the letter values (A, V, G, D, ..., ps, th) suggest this is the
// U+03xx Greek-and-Coptic block — confirm against the module's data file docs.
// Data rows below are intentionally untouched.
$base = array(
0x00 => '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
0x10 => '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
0x20 => '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
0x30 => '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
0x40 => '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', NULL,
0x50 => NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
0x60 => '', '', '', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
0x70 => NULL, NULL, NULL, NULL, '\'', ',', NULL, NULL, NULL, NULL, '', NULL, NULL, NULL, '?', NULL,
0x80 => NULL, NULL, NULL, NULL, '', '', 'A', ';', 'E', 'I', 'I', NULL, 'O', NULL, 'U', 'O',
0x90 => 'I', 'A', 'V', 'G', 'D', 'E', 'Z', 'I', 'Th', 'I', 'K', 'L', 'M', 'N', 'X', 'O',
0xA0 => 'P', 'R', NULL, 'S', 'T', 'Y', 'F', 'H', 'Ps', 'O', 'I', 'Y', 'a', 'e', 'i', 'i',
0xB0 => 'y', 'a', 'v', 'g', 'd', 'e', 'z', 'i', 'th', 'i', 'k', 'l', 'm', 'n', 'x', 'o',
0xC0 => 'p', 'r', 's', 's', 't', 'y', 'f', 'h', 'ps', 'o', 'i', 'y', 'o', 'y', 'o', NULL,
0xD0 => 'b', 'th', 'U', 'U', 'U', 'ph', 'p', '&', NULL, NULL, 'St', 'st', 'W', 'w', 'Q', 'q',
0xE0 => 'Sp', 'sp', 'Sh', 'sh', 'F', 'f', 'Kh', 'kh', 'H', 'h', 'G', 'g', 'CH', 'ch', 'Ti', 'ti',
0xF0 => 'k', 'r', 'c', 'j', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
);
| idiomaweb/um | sites/all/modules/transliteration/data/x03.php | PHP | gpl-2.0 | 1,483 |
/******************************************************************************
* Product: Adempiere ERP & CRM Smart Business Solution *
* Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved. *
* This program is free software, you can redistribute it and/or modify it *
* under the terms version 2 of the GNU General Public License as published *
* by the Free Software Foundation. This program is distributed in the hope *
* that it will be useful, but WITHOUT ANY WARRANTY, without even the implied *
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* You should have received a copy of the GNU General Public License along *
* with this program, if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. *
* For the text or an alternative of this public license, you may reach us *
* ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA *
* or via info@compiere.org or http://www.compiere.org/license.html *
*****************************************************************************/
package org.compiere.model;
import java.math.BigDecimal;
import java.sql.Timestamp;
import org.compiere.util.KeyNamePair;
/** Generated Interface for C_Phase
* @author Adempiere (generated)
* @version Release 3.8.0
*/
// NOTE(review): generated file (see "Generated Interface" header above) — do not
// hand-edit; regenerate from the model generator so the data dictionary stays
// the single source of truth.
public interface I_C_Phase
{
    /** TableName=C_Phase */
    public static final String Table_Name = "C_Phase";
    /** AD_Table_ID=577 */
    public static final int Table_ID = MTable.getTable_ID(Table_Name);
    KeyNamePair Model = new KeyNamePair(Table_ID, Table_Name);
    /** AccessLevel = 3 - Client - Org
     */
    BigDecimal accessLevel = BigDecimal.valueOf(3);
    /** Load Meta Data */
    /** Column name AD_Client_ID */
    public static final String COLUMNNAME_AD_Client_ID = "AD_Client_ID";
    /** Get Client.
      * Client/Tenant for this installation.
      */
    public int getAD_Client_ID();
    /** Column name AD_Org_ID */
    public static final String COLUMNNAME_AD_Org_ID = "AD_Org_ID";
    /** Set Organization.
      * Organizational entity within client
      */
    public void setAD_Org_ID (int AD_Org_ID);
    /** Get Organization.
      * Organizational entity within client
      */
    public int getAD_Org_ID();
    /** Column name C_Phase_ID */
    public static final String COLUMNNAME_C_Phase_ID = "C_Phase_ID";
    /** Set Standard Phase.
      * Standard Phase of the Project Type
      */
    public void setC_Phase_ID (int C_Phase_ID);
    /** Get Standard Phase.
      * Standard Phase of the Project Type
      */
    public int getC_Phase_ID();
    /** Column name C_ProjectType_ID */
    public static final String COLUMNNAME_C_ProjectType_ID = "C_ProjectType_ID";
    /** Set Project Type.
      * Type of the project
      */
    public void setC_ProjectType_ID (int C_ProjectType_ID);
    /** Get Project Type.
      * Type of the project
      */
    public int getC_ProjectType_ID();
    public org.compiere.model.I_C_ProjectType getC_ProjectType() throws RuntimeException;
    /** Column name Created */
    public static final String COLUMNNAME_Created = "Created";
    /** Get Created.
      * Date this record was created
      */
    public Timestamp getCreated();
    /** Column name CreatedBy */
    public static final String COLUMNNAME_CreatedBy = "CreatedBy";
    /** Get Created By.
      * User who created this records
      */
    public int getCreatedBy();
    /** Column name Description */
    public static final String COLUMNNAME_Description = "Description";
    /** Set Description.
      * Optional short description of the record
      */
    public void setDescription (String Description);
    /** Get Description.
      * Optional short description of the record
      */
    public String getDescription();
    /** Column name Help */
    public static final String COLUMNNAME_Help = "Help";
    /** Set Comment/Help.
      * Comment or Hint
      */
    public void setHelp (String Help);
    /** Get Comment/Help.
      * Comment or Hint
      */
    public String getHelp();
    /** Column name IsActive */
    public static final String COLUMNNAME_IsActive = "IsActive";
    /** Set Active.
      * The record is active in the system
      */
    public void setIsActive (boolean IsActive);
    /** Get Active.
      * The record is active in the system
      */
    public boolean isActive();
    /** Column name M_Product_ID */
    public static final String COLUMNNAME_M_Product_ID = "M_Product_ID";
    /** Set Product.
      * Product, Service, Item
      */
    public void setM_Product_ID (int M_Product_ID);
    /** Get Product.
      * Product, Service, Item
      */
    public int getM_Product_ID();
    public org.compiere.model.I_M_Product getM_Product() throws RuntimeException;
    /** Column name Name */
    public static final String COLUMNNAME_Name = "Name";
    /** Set Name.
      * Alphanumeric identifier of the entity
      */
    public void setName (String Name);
    /** Get Name.
      * Alphanumeric identifier of the entity
      */
    public String getName();
    /** Column name SeqNo */
    public static final String COLUMNNAME_SeqNo = "SeqNo";
    /** Set Sequence.
      * Method of ordering records;
      * lowest number comes first
      */
    public void setSeqNo (int SeqNo);
    /** Get Sequence.
      * Method of ordering records;
      * lowest number comes first
      */
    public int getSeqNo();
    /** Column name StandardQty */
    public static final String COLUMNNAME_StandardQty = "StandardQty";
    /** Set Standard Quantity.
      * Standard Quantity
      */
    public void setStandardQty (BigDecimal StandardQty);
    /** Get Standard Quantity.
      * Standard Quantity
      */
    public BigDecimal getStandardQty();
    /** Column name Updated */
    public static final String COLUMNNAME_Updated = "Updated";
    /** Get Updated.
      * Date this record was updated
      */
    public Timestamp getUpdated();
    /** Column name UpdatedBy */
    public static final String COLUMNNAME_UpdatedBy = "UpdatedBy";
    /** Get Updated By.
      * User who updated this records
      */
    public int getUpdatedBy();
}
| armenrz/adempiere | base/src/org/compiere/model/I_C_Phase.java | Java | gpl-2.0 | 6,102 |
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "kyra/engine/kyra_lok.h"
#include "kyra/sequence/seqplayer_lok.h"
#include "kyra/resource/resource.h"
#include "kyra/engine/sprites.h"
#include "kyra/graphics/wsamovie.h"
#include "kyra/graphics/animator_lok.h"
#include "kyra/engine/timer.h"
#include "kyra/sound/sound.h"
#include "common/system.h"
#include "common/savefile.h"
#include "common/list.h"
namespace Kyra {
// Plays the non-interactive demo reel: START.CPS title card, the Westwood and
// Kyrandia logo sequences, the four demo sequences, then the FINAL.CPS card.
// Each screen is faded in, held (delay in tick units), and faded out.
void KyraEngine_LoK::seq_demo() {
	snd_playTheme(0, 2);
	_screen->loadBitmap("START.CPS", 7, 7, &_screen->getPalette(0));
	_screen->copyRegion(0, 0, 0, 0, 320, 200, 6, 0, Screen::CR_NO_P_CHECK);
	_screen->updateScreen();
	_screen->fadeFromBlack();
	delay(60 * _tickLength);
	_screen->fadeToBlack();
	_screen->clearPage(0);
	// TOP/BOTTOM halves are recomposed onto the visible page before the logos.
	_screen->loadBitmap("TOP.CPS", 7, 7, 0);
	_screen->loadBitmap("BOTTOM.CPS", 5, 5, &_screen->getPalette(0));
	_screen->copyRegion(0, 91, 0, 8, 320, 103, 6, 0);
	_screen->copyRegion(0, 0, 0, 111, 320, 64, 6, 0);
	_screen->updateScreen();
	_screen->fadeFromBlack();
	_seq->playSequence(_seq_WestwoodLogo, true);
	delay(60 * _tickLength);
	_seq->playSequence(_seq_KyrandiaLogo, true);
	_screen->fadeToBlack();
	_screen->clearPage(2);
	_screen->clearPage(0);
	_seq->playSequence(_seq_Demo1, true);
	_screen->clearPage(0);
	_seq->playSequence(_seq_Demo2, true);
	_screen->clearPage(0);
	_seq->playSequence(_seq_Demo3, true);
	_screen->clearPage(0);
	_seq->playSequence(_seq_Demo4, true);
	_screen->clearPage(0);
	_screen->loadBitmap("FINAL.CPS", 7, 7, &_screen->getPalette(0));
	_screen->_curPage = 0;
	_screen->copyRegion(0, 0, 0, 0, 320, 200, 6, 0);
	_screen->updateScreen();
	_screen->fadeFromBlack();
	delay(60 * _tickLength);
	_screen->fadeToBlack();
	_sound->haltTrack();
}
// Runs the full game intro by invoking each sub-sequence from introProcTable in
// order until one is skipped or the table is exhausted. A sub-sequence that
// returns true was aborted by the user, which resets the skip flag and blanks
// the screen before continuing.
void KyraEngine_LoK::seq_intro() {
	if (_flags.isTalkie)
		_res->loadPakFile("INTRO.VRM");
	static const IntroProc introProcTable[] = {
		&KyraEngine_LoK::seq_introPublisherLogos,
		&KyraEngine_LoK::seq_introLogos,
		&KyraEngine_LoK::seq_introStory,
		&KyraEngine_LoK::seq_introMalcolmTree,
		&KyraEngine_LoK::seq_introKallakWriting,
		&KyraEngine_LoK::seq_introKallakMalcolm
	};
	// If savegame slot 0 exists, the player has seen the intro before; make it
	// skippable. Demos are never skippable.
	Common::InSaveFile *in;
	if ((in = _saveFileMan->openForLoading(getSavegameFilename(0)))) {
		delete in;
		_skipIntroFlag = true;
	} else {
		_skipIntroFlag = !_flags.isDemo;
	}
	_seq->setCopyViewOffs(true);
	_screen->setFont(_flags.lang == Common::JA_JPN ? Screen::FID_SJIS_FNT : Screen::FID_8_FNT);
	if (_flags.platform != Common::kPlatformFMTowns && _flags.platform != Common::kPlatformPC98 && _flags.platform != Common::kPlatformAmiga)
		snd_playTheme(0, 2);
	_text->setTalkCoords(144);
	for (int i = 0; i < ARRAYSIZE(introProcTable) && !seq_skipSequence(); ++i) {
		if ((this->*introProcTable[i])() && !shouldQuit()) {
			resetSkipFlag();
			_screen->fadeToBlack();
			_screen->clearPage(0);
		}
	}
	_text->setTalkCoords(136);
	delay(30 * _tickLength);
	_seq->setCopyViewOffs(false);
	_sound->haltTrack();
	_sound->voiceStop();
	if (_flags.isTalkie)
		_res->unloadPakFile("INTRO.VRM");
}
// Shows the platform-specific publisher logo screen (FM-Towns/PC-98 LOGO.CPS,
// or the Macintosh MP_GOLD screen when present); other platforms show nothing.
// Returns true when the user aborted the intro.
bool KyraEngine_LoK::seq_introPublisherLogos() {
	if (_flags.platform == Common::kPlatformFMTowns || _flags.platform == Common::kPlatformPC98) {
		_screen->loadBitmap("LOGO.CPS", 3, 3, &_screen->getPalette(0));
		_screen->copyRegion(0, 0, 0, 0, 320, 200, 2, 0);
		_screen->updateScreen();
		_screen->fadeFromBlack();
		delay(90 * _tickLength);
		if (!_abortIntroFlag) {
			_screen->fadeToBlack();
			// Track id differs per platform (57 on FM-Towns, 2 on PC-98).
			snd_playWanderScoreViaMap(_flags.platform == Common::kPlatformFMTowns ? 57 : 2, 0);
		}
	} else if (_flags.platform == Common::kPlatformMacintosh && _res->exists("MP_GOLD.CPS")) {
		_screen->loadPalette("MP_GOLD.COL", _screen->getPalette(0));
		_screen->loadBitmap("MP_GOLD.CPS", 3, 3, 0);
		_screen->copyRegion(0, 0, 0, 0, 320, 200, 2, 0);
		_screen->updateScreen();
		_screen->fadeFromBlack();
		delay(120 * _tickLength);
		if (!_abortIntroFlag)
			_screen->fadeToBlack();
	}
	return _abortIntroFlag;
}
// Plays the Westwood and Kyrandia logo sequences, then performs the split-wipe
// transition (top half scrolls down while the bottom scrolls up) into the
// forest sequence. Returns true when the user aborted.
bool KyraEngine_LoK::seq_introLogos() {
	_screen->clearPage(0);
	// Amiga uses separate palette tables and page layout from the other ports.
	if (_flags.platform == Common::kPlatformAmiga) {
		_screen->loadPaletteTable("INTRO.PAL", 0);
		_screen->loadBitmap("BOTTOM.CPS", 3, 5, 0);
		_screen->loadBitmap("TOP.CPS", 3, 3, 0);
		_screen->copyRegion(0, 0, 0, 111, 320, 64, 2, 0);
		_screen->copyRegion(0, 91, 0, 8, 320, 109, 2, 0);
		_screen->copyRegion(0, 0, 0, 0, 320, 190, 0, 2);
	} else {
		_screen->loadBitmap("TOP.CPS", 7, 7, 0);
		_screen->loadBitmap("BOTTOM.CPS", 5, 5, &_screen->getPalette(0));
		_screen->copyRegion(0, 91, 0, 8, 320, 103, 6, 0);
		_screen->copyRegion(0, 0, 0, 111, 320, 64, 6, 0);
	}
	_screen->_curPage = 0;
	_screen->updateScreen();
	_screen->fadeFromBlack();
	if (_seq->playSequence(_seq_WestwoodLogo, skipFlag()) || shouldQuit())
		return true;
	delay(60 * _tickLength);
	if (_flags.platform == Common::kPlatformAmiga) {
		_screen->copyPalette(0, 1);
		_screen->setScreenPalette(_screen->getPalette(0));
	}
	Screen::FontId of = _screen->setFont(Screen::FID_8_FNT);
	if (_seq->playSequence(_seq_KyrandiaLogo, skipFlag()) || shouldQuit())
		return true;
	_screen->setFont(of);
	_screen->fillRect(0, 179, 319, 199, 0);
	if (shouldQuit())
		return false;
	if (_flags.platform == Common::kPlatformAmiga) {
		_screen->copyPalette(0, 2);
		_screen->fadeToBlack();
		_screen->copyRegion(0, 0, 0, 0, 320, 200, 4, 0);
		_screen->fadeFromBlack();
	} else {
		_screen->copyRegion(0, 91, 0, 8, 320, 104, 6, 2);
		_screen->copyRegion(0, 0, 0, 112, 320, 64, 6, 2);
		// Time-driven wipe: 'distance' advances one pixel per tick up to 112.
		uint32 start = _system->getMillis();
		bool doneFlag = false;
		int oldDistance = 0;
		do {
			uint32 now = _system->getMillis();
			// The smallest y2 we ever draw the screen for is 65.
			int distance = (now - start) / _tickLength;
			if (distance > 112) {
				distance = 112;
				doneFlag = true;
			}
			// Only redraw when the wipe actually advanced since last frame.
			if (distance > oldDistance) {
				int y1 = 8 + distance;
				int h1 = 168 - distance;
				int y2 = 176 - distance;
				int h2 = distance;
				_screen->copyRegion(0, y1, 0, 8, 320, h1, 2, 0);
				if (h2 > 0)
					_screen->copyRegion(0, 64, 0, y2, 320, h2, 4, 0);
				_screen->updateScreen();
			}
			oldDistance = distance;
			delay(10);
		} while (!doneFlag && !shouldQuit() && !_abortIntroFlag);
	}
	if (_abortIntroFlag || shouldQuit())
		return true;
	return _seq->playSequence(_seq_Forest, true);
}
// Shows the static "story so far" text screen, picking the bitmap by language
// and platform; the Japanese versions additionally print two text lines at the
// bottom. Returns true when the user aborted.
bool KyraEngine_LoK::seq_introStory() {
	_screen->clearPage(3);
	_screen->clearPage(0);
	// HACK: The Italian fan translation uses an special text screen here
	// so we show it even when text is disabled
	if (!textEnabled() && speechEnabled() && _flags.lang != Common::IT_ITA)
		return false;
	if (((_flags.lang == Common::EN_ANY || _flags.lang == Common::RU_RUS) && !_flags.isTalkie && _flags.platform == Common::kPlatformDOS) || _flags.platform == Common::kPlatformAmiga)
		_screen->loadBitmap("TEXT.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::EN_ANY || _flags.lang == Common::JA_JPN)
		_screen->loadBitmap("TEXT_ENG.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::DE_DEU)
		_screen->loadBitmap("TEXT_GER.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::FR_FRA || (_flags.lang == Common::ES_ESP && _flags.isTalkie) /* Spanish fan made over French CD version */ )
		_screen->loadBitmap("TEXT_FRE.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::ES_ESP)
		_screen->loadBitmap("TEXT_SPA.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::IT_ITA && !_flags.isTalkie)
		_screen->loadBitmap("TEXT_ITA.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::IT_ITA && _flags.isTalkie)
		_screen->loadBitmap("TEXT_ENG.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::RU_RUS && _flags.isTalkie)
		_screen->loadBitmap("TEXT_ENG.CPS", 3, 3, &_screen->getPalette(0));
	else if (_flags.lang == Common::HE_ISR)
		_screen->loadBitmap("TEXT_HEB.CPS", 3, 3, &_screen->getPalette(0));
	else
		warning("no story graphics file found");
	if (_flags.platform == Common::kPlatformAmiga)
		_screen->setScreenPalette(_screen->getPalette(4));
	else
		_screen->setScreenPalette(_screen->getPalette(0));
	_screen->copyPage(3, 0);
	if (_flags.lang == Common::JA_JPN) {
		// FM-Towns and the other JP version place/color the two caption
		// lines differently.
		const int y1 = 175;
		int x1, x2, y2, col1;
		const char *s1, *s2;
		if (_flags.platform == Common::kPlatformFMTowns) {
			s1 = _seq_textsTable[18];
			s2 = _seq_textsTable[19];
			x1 = (Screen::SCREEN_W - _screen->getTextWidth(s1)) / 2;
			x2 = (Screen::SCREEN_W - _screen->getTextWidth(s2)) / 2;
			uint8 colorMap[] = { 0, 15, 12, 12 };
			_screen->setTextColor(colorMap, 0, 3);
			y2 = 184;
			col1 = 5;
		} else {
			s1 = _storyStrings[0];
			s2 = _storyStrings[1];
			x1 = x2 = 54;
			y2 = 185;
			col1 = 15;
		}
		_screen->printText(s1, x1, y1, col1, 8);
		_screen->printText(s2, x2, y2, col1, 8);
	}
	_screen->updateScreen();
	delay(360 * _tickLength);
	return _abortIntroFlag;
}
// Plays the "Malcolm at the tree" intro sequence; returns true when skipped.
bool KyraEngine_LoK::seq_introMalcolmTree() {
	_screen->_curPage = 0;
	_screen->clearPage(3);
	return _seq->playSequence(_seq_MalcolmTree, true);
}
// Plays the "Kallak writing" intro sequence. The hand shapes are allocated for
// the duration of the sequence and freed afterwards even when skipped.
bool KyraEngine_LoK::seq_introKallakWriting() {
	_seq->makeHandShapes();
	_screen->setAnimBlockPtr(5060);
	_screen->_charSpacing = -2;
	_screen->clearPage(3);
	const bool skipped = _seq->playSequence(_seq_KallakWriting, true);
	_seq->freeHandShapes();
	return skipped;
}
// Plays the "Kallak and Malcolm" intro sequence; returns true when skipped.
bool KyraEngine_LoK::seq_introKallakMalcolm() {
	_screen->clearPage(3);
	return _seq->playSequence(_seq_KallakMalcolm, true);
}
// Animates a jewel appearing in the amulet and sets its game flag (0x55+jewel).
// 'jewel' selects position/shape, 'page' the draw page; with 'drawOnly' set the
// animation frames are skipped and only the final jewel shape is drawn;
// 'noSound' suppresses the accompanying sound effect.
void KyraEngine_LoK::seq_createAmuletJewel(int jewel, int page, int noSound, int drawOnly) {
	// Shared appearance frames, then one per-jewel frame list below.
	static const uint16 specialJewelTable[] = {
		0x167, 0x162, 0x15D, 0x158, 0x153, 0xFFFF
	};
	static const uint16 specialJewelTable1[] = {
		0x14F, 0x154, 0x159, 0x15E, 0x163, 0xFFFF
	};
	static const uint16 specialJewelTable2[] = {
		0x150, 0x155, 0x15A, 0x15F, 0x164, 0xFFFF
	};
	static const uint16 specialJewelTable3[] = {
		0x151, 0x156, 0x15B, 0x160, 0x165, 0xFFFF
	};
	static const uint16 specialJewelTable4[] = {
		0x152, 0x157, 0x15C, 0x161, 0x166, 0xFFFF
	};
	if (!noSound)
		snd_playSoundEffect(0x5F);
	_screen->hideMouse();
	if (!drawOnly) {
		for (int i = 0; specialJewelTable[i] != 0xFFFF; ++i) {
			_screen->drawShape(page, _shapes[specialJewelTable[i]], _amuletX2[jewel], _amuletY2[jewel], 0, 0);
			_screen->updateScreen();
			delayWithTicks(3);
		}
		const uint16 *opcodes = 0;
		switch (jewel - 1) {
		case 0:
			opcodes = specialJewelTable1;
			break;
		case 1:
			opcodes = specialJewelTable2;
			break;
		case 2:
			opcodes = specialJewelTable3;
			break;
		case 3:
			opcodes = specialJewelTable4;
			break;
		default:
			break;
		}
		if (opcodes) {
			for (int i = 0; opcodes[i] != 0xFFFF; ++i) {
				_screen->drawShape(page, _shapes[opcodes[i]], _amuletX2[jewel], _amuletY2[jewel], 0, 0);
				_screen->updateScreen();
				delayWithTicks(3);
			}
		}
	}
	// Final resting shape for this jewel.
	_screen->drawShape(page, _shapes[323 + jewel], _amuletX2[jewel], _amuletY2[jewel], 0, 0);
	_screen->updateScreen();
	_screen->showMouse();
	setGameFlag(0x55 + jewel);
}
// Plays Brandon's healing animation (frames 123-144 forward, then 125-123
// back). Bails out unless the death handler's bit 8 is set, and also during
// certain bead states in scene 210.
void KyraEngine_LoK::seq_brandonHealing() {
	if (!(_deathHandler & 8))
		return;
	if (_currentCharacter->sceneId == 210) {
		if (_beadStateVar == 4 || _beadStateVar == 6)
			return;
	}
	_screen->hideMouse();
	checkAmuletAnimFlags();
	assert(_healingShapeTable);
	setupShapes123(_healingShapeTable, 22, 0);
	_animator->setBrandonAnimSeqSize(3, 48);
	snd_playSoundEffect(0x53);
	for (int i = 123; i <= 144; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	for (int i = 125; i >= 123; --i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	_animator->resetBrandonAnimSeqSize();
	// Frame 7 restores Brandon's normal standing pose.
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();
	_screen->showMouse();
}
// Plays the second healing animation (poison cure), clears the poison flags,
// and has Brandon speak the two "poison gone" lines afterwards.
void KyraEngine_LoK::seq_brandonHealing2() {
	_screen->hideMouse();
	checkAmuletAnimFlags();
	assert(_healingShape2Table);
	setupShapes123(_healingShape2Table, 30, 0);
	resetBrandonPoisonFlags();
	_animator->setBrandonAnimSeqSize(3, 48);
	snd_playSoundEffect(0x50);
	for (int i = 123; i <= 152; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	_animator->resetBrandonAnimSeqSize();
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();
	_screen->showMouse();
	assert(_poisonGone);
	characterSays(2010, _poisonGone[0], 0, -2);
	characterSays(2011, _poisonGone[1], 0, -2);
}
// Advances the poison-death countdown while Brandon is poisoned (status bit 1).
// Passing a non-zero 'now' forces the counter to its terminal value. On the
// second tick the death lines play, the death animation runs and the death
// handler is set; earlier ticks only play warning dialogue.
void KyraEngine_LoK::seq_poisonDeathNow(int now) {
	if (!(_brandonStatusBit & 1))
		return;
	++_poisonDeathCounter;
	if (now)
		_poisonDeathCounter = 2;
	if (_poisonDeathCounter >= 2) {
		snd_playWanderScoreViaMap(1, 1);
		assert(_thePoison);
		characterSays(7000, _thePoison[0], 0, -2);
		characterSays(7001, _thePoison[1], 0, -2);
		seq_poisonDeathNowAnim();
		_deathHandler = 3;
	} else {
		assert(_thePoison);
		characterSays(7002, _thePoison[2], 0, -2);
		characterSays(7004, _thePoison[3], 0, -2);
	}
}
// Plays Brandon's poison-death animation and then removes him from the screen
// by invalidating his coordinates and re-preserving the backgrounds.
void KyraEngine_LoK::seq_poisonDeathNowAnim() {
	_screen->hideMouse();
	checkAmuletAnimFlags();
	assert(_posionDeathShapeTable);
	setupShapes123(_posionDeathShapeTable, 20, 0);
	_animator->setBrandonAnimSeqSize(8, 48);
	_currentCharacter->currentAnimFrame = 124;
	_animator->animRefreshNPC(0);
	delayWithTicks(30);
	_currentCharacter->currentAnimFrame = 123;
	_animator->animRefreshNPC(0);
	delayWithTicks(30);
	for (int i = 125; i <= 139; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	delayWithTicks(60);
	for (int i = 140; i <= 142; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	delayWithTicks(60);
	_animator->resetBrandonAnimSeqSize();
	freeShapes123();
	_animator->restoreAllObjectBackgrounds();
	// Move Brandon off-screen: -1 coordinates mark him as not drawn.
	_currentCharacter->x1 = _currentCharacter->x2 = -1;
	_currentCharacter->y1 = _currentCharacter->y2 = -1;
	_animator->preserveAllBackgrounds();
	_screen->showMouse();
}
// Plays the flute animation. The tune, tempo and follow-up dialogue depend on
// game flags: flag 0x85 selects the special tune (no dialogue); otherwise the
// first play (flag 0x86 unset) and subsequent plays use different tunes and
// each triggers its own spoken line.
void KyraEngine_LoK::seq_playFluteAnimation() {
	_screen->hideMouse();
	checkAmuletAnimFlags();
	setupShapes123(_fluteAnimShapeTable, 36, 0);
	_animator->setBrandonAnimSeqSize(3, 75);
	for (int i = 123; i <= 130; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(2);
	}
	int delayTime = 0, soundType = 0;
	if (queryGameFlag(0x85)) {
		snd_playSoundEffect(0x63);
		delayTime = 9;
		soundType = 3;
	} else if (!queryGameFlag(0x86)) {
		// First time the flute is played.
		snd_playSoundEffect(0x61);
		delayTime = 2;
		soundType = 1;
		setGameFlag(0x86);
	} else {
		snd_playSoundEffect(0x62);
		delayTime = 2;
		soundType = 2;
	}
	for (int i = 131; i <= 158; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(delayTime);
	}
	for (int i = 126; i >= 123; --i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(delayTime);
	}
	_animator->resetBrandonAnimSeqSize();
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();
	_screen->showMouse();
	if (soundType == 1) {
		assert(_fluteString);
		characterSays(1000, _fluteString[0], 0, -2);
	} else if (soundType == 2) {
		assert(_fluteString);
		characterSays(1001, _fluteString[1], 0, -2);
	}
}
// Plays the first winter-scroll spell sequence. Frame counts differ between
// talkie and floppy shape tables; scene-specific side effects fire mid-way
// (sprite swap in scene 41, palette change in scene 117).
void KyraEngine_LoK::seq_winterScroll1() {
	_screen->hideMouse();
	checkAmuletAnimFlags();
	assert(_winterScrollTable);
	assert(_winterScroll1Table);
	assert(_winterScroll2Table);
	setupShapes123(_winterScrollTable, 7, 0);
	_animator->setBrandonAnimSeqSize(5, 66);
	for (int i = 123; i <= 129; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	freeShapes123();
	snd_playSoundEffect(0x20);
	// Talkie and floppy versions ship different shape tables, so the frame
	// count and the midpoint where the scene effects trigger differ.
	uint8 numFrames, midpoint;
	if (_flags.isTalkie) {
		numFrames = 18;
		midpoint = 136;
	} else {
		numFrames = 35;
		midpoint = 147;
	}
	setupShapes123(_winterScroll1Table, numFrames, 0);
	for (int i = 123; i < midpoint; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	if (_currentCharacter->sceneId == 41 && !queryGameFlag(0xA2)) {
		// Scene 41: switch from sprite anim 0 to anim 1 the first time.
		snd_playSoundEffect(0x20);
		_sprites->_anims[0].play = false;
		_animator->sprites()[0].active = 0;
		_sprites->_anims[1].play = true;
		_animator->sprites()[1].active = 1;
		if (_flags.platform != Common::kPlatformAmiga)
			setGameFlag(0xA2);
	}
	for (int i = midpoint; i < 123 + numFrames; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	if (_currentCharacter->sceneId == 117 && !queryGameFlag(0xB3)) {
		// Scene 117: stop all sprite anims and fade to the winter palette.
		for (int i = 0; i <= 7; ++i) {
			_sprites->_anims[i].play = false;
			_animator->sprites()[i].active = 0;
		}
		if (_flags.platform == Common::kPlatformAmiga) {
			_screen->copyPalette(0, 11);
		} else {
			_screen->getPalette(0).copy(palTable2()[0], 0, 20, 228);
			_screen->fadePalette(_screen->getPalette(0), 72);
			_screen->setScreenPalette(_screen->getPalette(0));
			setGameFlag(0xB3);
		}
	} else {
		delayWithTicks(120);
	}
	freeShapes123();
	setupShapes123(_winterScroll2Table, 4, 0);
	for (int i = 123; i <= 126; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	_animator->resetBrandonAnimSeqSize();
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();
	_screen->showMouse();
}
// Plays the shorter second winter-scroll sequence: frames 123-128 forward,
// a pause, then 127-123 back, restoring the standing pose (frame 7).
void KyraEngine_LoK::seq_winterScroll2() {
	_screen->hideMouse();
	checkAmuletAnimFlags();
	assert(_winterScrollTable);
	setupShapes123(_winterScrollTable, 7, 0);
	_animator->setBrandonAnimSeqSize(5, 66);
	for (int i = 123; i <= 128; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	delayWithTicks(120);
	for (int i = 127; i >= 123; --i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	_animator->resetBrandonAnimSeqSize();
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();
	_screen->showMouse();
}
// Fades Brandon to invisible (invisibility potion effect).
// Does nothing while the death handler is active or, in scene 210,
// while the bead state machine is in states 4 or 6.
void KyraEngine_LoK::seq_makeBrandonInv() {
	if (_deathHandler == 8)
		return;

	if (_currentCharacter->sceneId == 210) {
		if (_beadStateVar == 4 || _beadStateVar == 6)
			return;
	}

	_screen->hideMouse();
	checkAmuletAnimFlags();
	_brandonStatusBit |= 0x20;              // invisibility bit
	_timer->setCountdown(18, 2700);         // timer 18: effect duration (presumably) — confirm
	_brandonStatusBit |= 0x40;              // fade-in-progress bit
	snd_playSoundEffect(0x77);
	// Ramp the translucency level from 0 up to 0x100 in 0x10 steps,
	// refreshing Brandon's sprite at each step.
	_brandonInvFlag = 0;
	while (_brandonInvFlag <= 0x100) {
		_animator->animRefreshNPC(0);
		delayWithTicks(10);
		_brandonInvFlag += 0x10;
	}
	_brandonStatusBit &= 0xFFBF;            // clear the fade bit (0x40)
	_screen->showMouse();
}
// Fades Brandon back from invisible to fully visible by ramping
// _brandonInvFlag from 0x100 down to 0.
void KyraEngine_LoK::seq_makeBrandonNormal() {
	_screen->hideMouse();
	_brandonStatusBit |= 0x40;              // fade-in-progress bit
	snd_playSoundEffect(0x77);
	_brandonInvFlag = 0x100;
	while (_brandonInvFlag >= 0) {
		_animator->animRefreshNPC(0);
		delayWithTicks(10);
		_brandonInvFlag -= 0x10;
	}
	_brandonInvFlag = 0;
	_brandonStatusBit &= 0xFF9F;            // clear invisibility (0x20) and fade (0x40) bits
	_screen->showMouse();
}
// Transforms Brandon from wisp form back to normal: plays the
// wisp-morph shapes in reverse (frames 138 down to 123) and undoes
// the scene-specific palette shift applied by seq_makeBrandonWisp().
void KyraEngine_LoK::seq_makeBrandonNormal2() {
	_screen->hideMouse();
	assert(_brandonToWispTable);
	setupShapes123(_brandonToWispTable, 26, 0);
	_animator->setBrandonAnimSeqSize(5, 48);
	_brandonStatusBit &= 0xFFFD;            // clear the wisp bit (0x02)
	snd_playSoundEffect(0x6C);
	for (int i = 138; i >= 123; --i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	_animator->setBrandonAnimSeqSize(3, 48);
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	// Restore the palette in the scenes where wisp form altered it.
	if (_currentCharacter->sceneId >= 229 && _currentCharacter->sceneId <= 245)
		_screen->fadeSpecialPalette(31, 234, 13, 4);
	else if (_currentCharacter->sceneId >= 118 && _currentCharacter->sceneId <= 186)
		_screen->fadeSpecialPalette(14, 228, 15, 4);
	freeShapes123();
	_screen->showMouse();
}
// Transforms Brandon into a wisp: plays morph frames 123..138, sets
// the wisp status bit, starts the duration countdown (longer in
// scenes 109..198) and applies a scene-dependent palette shift.
// Does nothing while dying or during bead states 4/6 in scene 210.
void KyraEngine_LoK::seq_makeBrandonWisp() {
	if (_deathHandler == 8)
		return;

	if (_currentCharacter->sceneId == 210) {
		if (_beadStateVar == 4 || _beadStateVar == 6)
			return;
	}
	_screen->hideMouse();
	checkAmuletAnimFlags();
	assert(_brandonToWispTable);
	setupShapes123(_brandonToWispTable, 26, 0);
	_animator->setBrandonAnimSeqSize(5, 48);
	snd_playSoundEffect(0x6C);
	for (int i = 123; i <= 138; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}
	_brandonStatusBit |= 2;                 // wisp form bit
	// Timer 14 ends the wisp form; the duration depends on the scene range.
	if (_currentCharacter->sceneId >= 109 && _currentCharacter->sceneId <= 198)
		_timer->setCountdown(14, 18000);
	else
		_timer->setCountdown(14, 7200);
	_animator->_brandonDrawFrame = 113;
	_brandonStatusBit0x02Flag = 1;
	_currentCharacter->currentAnimFrame = 113;
	_animator->animRefreshNPC(0);
	_animator->updateAllObjectShapes();
	// The Amiga port swaps in a prebuilt palette; other ports fade
	// individual palette entries for the affected scene ranges.
	if (_flags.platform == Common::kPlatformAmiga) {
		if ((_currentCharacter->sceneId >= 229 && _currentCharacter->sceneId <= 245) ||
			(_currentCharacter->sceneId >= 118 && _currentCharacter->sceneId <= 186))
			_screen->fadePalette(_screen->getPalette(10), 0x54);
	} else {
		if (_currentCharacter->sceneId >= 229 && _currentCharacter->sceneId <= 245)
			_screen->fadeSpecialPalette(30, 234, 13, 4);
		else if (_currentCharacter->sceneId >= 118 && _currentCharacter->sceneId <= 186)
			_screen->fadeSpecialPalette(14, 228, 15, 4);
	}
	freeShapes123();
	_screen->showMouse();
}
// Plays the "dispel magic" spell animation (frames 123..127 up,
// pause, then back down) and sets game flag 0xEE. Also advances the
// Malcolm/bead end-fight state machine when it is waiting in state 7.
void KyraEngine_LoK::seq_dispelMagicAnimation() {
	if (_deathHandler == 8)
		return;

	if (_currentCharacter->sceneId == 210) {
		if (_beadStateVar == 4 || _beadStateVar == 6)
			return;
	}

	_screen->hideMouse();

	// TODO
#if 0
	// FIXME: This condition is always false. Is this a typo or a bug in the original?
	if (_currentCharacter->sceneId == 210 && _currentCharacter->sceneId < 160) {
		_currentCharacter->facing = 3;
	}
#endif

	// If Malcolm is waiting (state 7) and the beads are in flight toward
	// Brandon (state 3), dispel redirects the fight state machines.
	if (_malcolmFlag == 7 && _beadStateVar == 3) {
		_beadStateVar = 6;
		_unkEndSeqVar5 = 2;
		_malcolmFlag = 10;
	}

	checkAmuletAnimFlags();
	setGameFlag(0xEE);
	assert(_magicAnimationTable);
	setupShapes123(_magicAnimationTable, 5, 0);
	_animator->setBrandonAnimSeqSize(8, 49);
	snd_playSoundEffect(0x15);
	for (int i = 123; i <= 127; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}

	delayWithTicks(120);

	for (int i = 127; i >= 123; --i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(10);
	}
	_animator->resetBrandonAnimSeqSize();
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();
	_screen->showMouse();
}
void KyraEngine_LoK::seq_fillFlaskWithWater(int item, int type) {
int newItem = -1;
static const uint8 flaskTable1[] = { 0x46, 0x48, 0x4A, 0x4C };
static const uint8 flaskTable2[] = { 0x47, 0x49, 0x4B, 0x4D };
if (item >= 60 && item <= 77) {
assert(_flaskFull);
characterSays(8006, _flaskFull[0], 0, -2);
} else if (item == 78) {
assert(type >= 0 && type < ARRAYSIZE(flaskTable1));
newItem = flaskTable1[type];
} else if (item == 79) {
assert(type >= 0 && type < ARRAYSIZE(flaskTable2));
newItem = flaskTable2[type];
}
if (newItem == -1)
return;
setMouseItem(newItem);
_itemInHand = newItem;
assert(_fullFlask);
assert(type < _fullFlask_Size && type >= 0);
static const uint16 voiceEntries[] = {
0x1F40, 0x1F41, 0x1F42, 0x1F45
};
assert(type < ARRAYSIZE(voiceEntries));
characterSays(voiceEntries[type], _fullFlask[type], 0, -2);
}
// Plays the potion-drinking animation. First tints one palette entry
// with the potion's color (Amiga: index 16 with 6-bit components;
// other ports: index 0xFE, scaled from percentage values), then
// animates Brandon raising the flask (frames 123..131), gulping
// (130/131 twice), and lowering it again. If makeFlaskEmpty is set
// the liquid color is reset to gray mid-animation.
void KyraEngine_LoK::seq_playDrinkPotionAnim(int item, int makeFlaskEmpty, int flags) {
	if (_flags.platform == Common::kPlatformAmiga) {
		uint8 r, g, b;

		// Amiga potion colors, given directly as 6-bit RGB (the hex
		// comments are the original 12-bit Amiga color values).
		switch (item) {
		case 60: case 61:
			// 0xC22
			r = 50;
			g = 8;
			b = 8;
			break;

		case 62: case 63: case 76:
		case 77:
			// 0x00E
			r = 0;
			g = 0;
			b = 58;
			break;

		case 64: case 65:
			// 0xFF5
			r = 63;
			g = 63;
			b = 21;
			break;

		case 66:
			// 0x090
			r = 0;
			g = 37;
			b = 0;
			break;

		case 67:
			// 0xC61
			r = 50;
			g = 25;
			b = 4;
			break;

		case 68:
			// 0xE2E
			r = 58;
			g = 8;
			b = 58;
			break;

		case 69:
			// 0xBBB
			r = 46;
			g = 46;
			b = 46;
			break;

		default:
			// 0xFFF
			r = 63;
			g = 63;
			b = 63;
		}

		_screen->setPaletteIndex(16, r, g, b);
	} else {
		uint8 red, green, blue;

		// Potion colors as percentages (0..100), scaled to 6-bit below.
		switch (item) {
		case 60: case 61:
			red = 63;
			green = blue = 6;
			break;

		case 62: case 63:
			red = green = 0;
			blue = 67;
			break;

		case 64: case 65:
			red = 84;
			green = 78;
			blue = 14;
			break;

		case 66:
			red = blue = 0;
			green = 48;
			break;

		case 67:
			red = 100;
			green = 48;
			blue = 23;
			break;

		case 68:
			red = 73;
			green = 0;
			blue = 89;
			break;

		case 69:
			red = green = 73;
			blue = 86;
			break;

		default:
			red = 33;
			green = 66;
			blue = 100;
		}

		// Scale percentage values to the VGA 0..0x3F range.
		red = red * 0x3F / 100;
		green = green * 0x3F / 100;
		blue = blue * 0x3F / 100;

		_screen->setPaletteIndex(0xFE, red, green, blue);
	}
	_screen->hideMouse();

	checkAmuletAnimFlags();
	_currentCharacter->facing = 5;
	_animator->animRefreshNPC(0);
	assert(_drinkAnimationTable);
	setupShapes123(_drinkAnimationTable, 9, flags);
	_animator->setBrandonAnimSeqSize(5, 54);

	// Raise the flask.
	for (int i = 123; i <= 131; ++i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(5);
	}

	snd_playSoundEffect(0x34);

	// Two gulps, alternating frames 130 and 131.
	for (int i = 0; i < 2; ++i) {
		_currentCharacter->currentAnimFrame = 130;
		_animator->animRefreshNPC(0);
		delayWithTicks(7);

		_currentCharacter->currentAnimFrame = 131;
		_animator->animRefreshNPC(0);
		delayWithTicks(7);
	}

	// Drain the liquid color before lowering the flask.
	if (makeFlaskEmpty)
		_screen->setPaletteIndex(0xFE, 30, 30, 30);

	// Lower the flask again.
	for (int i = 131; i >= 123; --i) {
		_currentCharacter->currentAnimFrame = i;
		_animator->animRefreshNPC(0);
		delayWithTicks(5);
	}

	_animator->resetBrandonAnimSeqSize();
	_currentCharacter->currentAnimFrame = 7;
	_animator->animRefreshNPC(0);
	freeShapes123();

	// NOTE(review): the non-Amiga path always resets index 0xFE here
	// even when makeFlaskEmpty already did — harmless but redundant.
	if (_flags.platform != Common::kPlatformAmiga)
		_screen->setPaletteIndex(0xFE, 30, 30, 30);
	_screen->showMouse();
}
// Drives one tick of the end sequence. On first entry it loads the
// finale WSA movies and pan pages; afterwards it pumps the Malcolm
// and bead state machines each call. Returns 1 when the end sequence
// has completed (either Brandon is petrified or the gem-heal/finale
// has been played), 0 while it is still running.
int KyraEngine_LoK::seq_playEnd() {
	if (_endSequenceSkipFlag)
		return 0;

	if (_deathHandler == 8)
		return 0;

	_screen->_curPage = 2;
	if (_endSequenceNeedLoading) {
		// One-time setup: music, pan pages and the three finale movies.
		snd_playWanderScoreViaMap(50, 1);
		setupPanPages();
		if (_flags.platform == Common::kPlatformAmiga) {
			_sound->loadSoundFile(kMusicFinale);

			// The original started song 0 directly here. Since our player
			// uses 0, 1 for stop and fade we start song 0 with 2
			_sound->playTrack(2);
		}
		_finalA = createWSAMovie();
		assert(_finalA);
		_finalA->open("finala.wsa", 1, 0);
		_finalB = createWSAMovie();
		assert(_finalB);
		_finalB->open("finalb.wsa", 1, 0);
		_finalC = createWSAMovie();
		assert(_finalC);
		_endSequenceNeedLoading = 0;
		_finalC->open("finalc.wsa", 1, 0);
		_screen->_curPage = 0;
		_beadStateVar = 0;
		_malcolmFlag = 0;
		// Deadline after which the Malcolm state machine auto-starts.
		_unkEndSeqVar2 = _system->getMillis() + 600 * _tickLength;
		_screen->copyRegion(312, 0, 312, 0, 8, 136, 0, 2);
	}

	// TODO: better handling. This timer shouldn't count when the menu is open or something.
	if (_unkEndSeqVar2 != -1) {
		if (_system->getMillis() > (uint32)_unkEndSeqVar2) {
			_unkEndSeqVar2 = -1;
			if (!_malcolmFlag)
				_malcolmFlag = 1;
		}
	}

	if (handleMalcolmFlag()) {
		// Malcolm sequence finished — wind everything down.
		_beadStateVar = 0;
		_malcolmFlag = 12;
		handleMalcolmFlag();
		handleBeadState();
		closeFinalWsa();
		if (_deathHandler == 8) {
			// Brandon lost: petrification ending.
			_screen->_curPage = 0;
			checkAmuletAnimFlags();
			seq_brandonToStone();
			delay(60 * _tickLength);
			return 1;
		} else {
			// Brandon won: gem-heal cutscene, then the real ending.
			_endSequenceSkipFlag = 1;
			if (_text->printed())
				_text->restoreTalkTextMessageBkgd(2, 0);
			_screen->_curPage = 0;
			_screen->hideMouse();
			if (_flags.platform != Common::kPlatformAmiga)
				_screen->fadeSpecialPalette(32, 228, 20, 60);
			delay(60 * _tickLength);
			_screen->loadBitmap("GEMHEAL.CPS", 3, 3, &_screen->getPalette(0));
			_screen->setScreenPalette(_screen->getPalette(0));
			_screen->shuffleScreen(8, 8, 304, 128, 2, 0, 1, 0);
			uint32 nextTime = _system->getMillis() + 120 * _tickLength;

			_finalA = createWSAMovie();
			assert(_finalA);
			_finalA->open("finald.wsa", 1, 0);

			delayUntil(nextTime);
			snd_playSoundEffect(0x40);
			for (int i = 0; i < 22; ++i) {
				delayUntil(nextTime);
				if (i == 4)
					snd_playSoundEffect(0x3E);
				else if (i == 20)
					snd_playSoundEffect(_flags.platform == Common::kPlatformPC98 ? 0x13 : 0x0E);
				nextTime = _system->getMillis() + 8 * _tickLength;
				_finalA->displayFrame(i, 0, 8, 8, 0, 0, 0);
				_screen->updateScreen();
			}
			nextTime = _system->getMillis() + 300 * _tickLength;
			delete _finalA;
			_finalA = 0;
			delayUntil(nextTime);
			seq_playEnding();
			return 1;
		}
	} else {
		// Sequence still in progress — pump the bead animation and
		// flush the dirty rectangles for this frame.
		handleBeadState();
		_screen->bitBlitRects();
		_screen->updateScreen();
		_screen->_curPage = 0;
	}
	return 0;
}
// Plays the petrification animation: Brandon turns to stone by
// stepping through temporary frames 123..136.
void KyraEngine_LoK::seq_brandonToStone() {
	const int kFirstStoneFrame = 123;
	const int kLastStoneFrame = 136;

	_screen->hideMouse();
	assert(_brandonStoneTable);
	// Load 14 temporary shapes into the special 123.. frame slots.
	setupShapes123(_brandonStoneTable, 14, 0);
	_animator->setBrandonAnimSeqSize(5, 51);

	for (int frame = kFirstStoneFrame; frame <= kLastStoneFrame; ++frame) {
		_currentCharacter->currentAnimFrame = frame;
		_animator->animRefreshNPC(0);
		delayWithTicks(8);
	}

	_animator->resetBrandonAnimSeqSize();
	freeShapes123();
	_screen->showMouse();
}
// Plays the game's ending: loads the reunion background, runs the
// scripted reunion sequence, then hands off to the credits (the
// Amiga version loops its credits until quit).
void KyraEngine_LoK::seq_playEnding() {
	if (shouldQuit())
		return;
	_screen->hideMouse();
	_screen->_curPage = 0;
	_screen->fadeToBlack();
	if (_flags.platform == Common::kPlatformAmiga) {
		// The Amiga port reuses the GEMCUT background and patches in
		// the reunion area instead of shipping REUNION.CPS.
		_screen->loadBitmap("GEMCUT.CPS", 3, 3, &_screen->getPalette(0));
		_screen->copyRegion(232, 136, 176, 56, 56, 56, 2, 2);
		_screen->copyRegion(8, 8, 8, 8, 304, 128, 2, 0);
		_screen->copyRegion(0, 0, 0, 0, 320, 200, 0, 2, Screen::CR_NO_P_CHECK);
	} else {
		_screen->loadBitmap("REUNION.CPS", 3, 3, &_screen->getPalette(0));
		_screen->copyRegion(8, 8, 8, 8, 304, 128, 2, 0);
	}
	_screen->_curPage = 0;
	// XXX
	assert(_homeString);
	drawSentenceCommand(_homeString[0], 179);

	// Start from a black palette; the sequence player fades in itself.
	_screen->getPalette(2).clear();
	_screen->setScreenPalette(_screen->getPalette(2));

	_seqPlayerFlag = true;
	_seq->playSequence(_seq_Reunion, false);
	_screen->fadeToBlack();
	_seqPlayerFlag = false;
	_screen->showMouse();

	// To avoid any remaining input events, we remove the queue
	// over here.
	_eventList.clear();

	if (_flags.platform == Common::kPlatformAmiga) {
		_screen->_charSpacing = -2;
		_screen->setCurPage(2);
		_screen->getPalette(2).clear();
		_screen->setScreenPalette(_screen->getPalette(2));
		// The Amiga credits repeat until the user quits.
		while (!shouldQuit()) {
			seq_playCreditsAmiga();
			delayUntil(_system->getMillis() + 300 * _tickLength);
		}
	} else {
		seq_playCredits();
	}
}
namespace {

// One line of the scrolling credits: its screen position, the font
// it is rendered with, and a pointer into the credits text buffer
// (the buffer outlives all CreditsLine instances).
struct CreditsLine {
	int16 x, y;
	Screen::FontId font;
	uint8 *str;
};

} // end of anonymous namespace
// Plays the scrolling end credits (non-Amiga ports). The credits text
// (CREDITS.TXT, or static resources on FM-TOWNS/PC-98) is parsed into
// CreditsLine records using in-band control bytes:
//   0x05  line continues at the same y (no vertical advance)
//   0x0D  end of line
//   0x03  right-align to x=157, 0x04 start at x=161 (two-column layout)
//   0x01 / 0x02  switch to the small / large credits font
// The lines are then scrolled upward over the CHALET.CPS background
// until all have left the screen or the user presses a key.
void KyraEngine_LoK::seq_playCredits() {
	static const uint8 colorMap[] = { 0, 0, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	static const char stringTerms[] = { 0x5, 0xD, 0x0};

	typedef Common::List<CreditsLine> CreditsLineList;
	CreditsLineList lines;

	_screen->disableDualPaletteMode();
	_screen->hideMouse();
	if (!_flags.isTalkie) {
		_screen->loadFont(Screen::FID_CRED6_FNT, "CREDIT6.FNT");
		_screen->loadFont(Screen::FID_CRED8_FNT, "CREDIT8.FNT");

		_screen->setFont(Screen::FID_CRED8_FNT);
	} else
		_screen->setFont(Screen::FID_8_FNT);

	_screen->loadBitmap("CHALET.CPS", 4, 4, &_screen->getPalette(0));

	_screen->setCurPage(0);
	_screen->clearCurPage();
	_screen->setTextColorMap(colorMap);
	_screen->_charSpacing = -1;

	// we only need this for the FM-TOWNS version
	if (_flags.platform == Common::kPlatformFMTowns && _configMusic == 1)
		snd_playWanderScoreViaMap(53, 1);

	uint8 *buffer = 0;
	uint32 size = 0;

	if (_flags.platform == Common::kPlatformFMTowns || _flags.platform == Common::kPlatformPC98) {
		// Copy the static resource so the parser can write NUL
		// terminators into it below.
		int sizeTmp = 0;
		const uint8 *bufferTmp = _staticres->loadRawData(k1CreditsStrings, sizeTmp);
		buffer = new uint8[sizeTmp];
		assert(buffer);
		memcpy(buffer, bufferTmp, sizeTmp);
		size = sizeTmp;
		_staticres->unloadId(k1CreditsStrings);
	} else {
		buffer = _res->fileData("CREDITS.TXT", &size);
		assert(buffer);
	}

	uint8 *nextString = buffer;
	uint8 *currentString = buffer;
	int currentY = 200;

	do {
		currentString = nextString;
		nextString = (uint8 *)strpbrk((char *)currentString, stringTerms);
		if (!nextString)
			nextString = (uint8 *)strchr((char *)currentString, 0);

		CreditsLine line;

		int lineEndCode = nextString[0];
		*nextString = 0;                // terminate the current line in place
		if (lineEndCode != 0)
			nextString++;

		int alignment = 0;
		if (*currentString == 3 || *currentString == 4) {
			alignment = *currentString;
			currentString++;
		}

		if (*currentString == 1) {
			currentString++;

			if (!_flags.isTalkie)
				_screen->setFont(Screen::FID_CRED6_FNT);
		} else if (*currentString == 2) {
			currentString++;

			if (!_flags.isTalkie)
				_screen->setFont(Screen::FID_CRED8_FNT);
		}

		line.font = _screen->_currentFont;

		if (alignment == 3)
			line.x = 157 - _screen->getTextWidth((const char *)currentString);
		else if (alignment == 4)
			line.x = 161;
		else
			line.x = (320 - _screen->getTextWidth((const char *)currentString)) / 2 + 1;

		line.y = currentY;
		if (lineEndCode != 5)
			currentY += 10;

		line.str = currentString;

		lines.push_back(line);
	} while (*nextString);

	_screen->setCurPage(2);

	// Start from black; the chalet background is faded in below.
	_screen->getPalette(2).clear();
	_screen->setScreenPalette(_screen->getPalette(2));

	_screen->copyRegion(0, 32, 0, 32, 320, 128, 4, 0, Screen::CR_NO_P_CHECK);
	_screen->fadePalette(_screen->getPalette(0), 0x5A);

	bool finished = false;
	int bottom = 201;
	while (!finished && !shouldQuit()) {
		uint32 startLoop = _system->getMillis();

		// NOTE(review): once 'bottom' drops to <= 175 no further
		// redraws happen and 'finished' can only be set by input —
		// matches the code as-is; verify intended exit behavior.
		if (bottom > 175) {
			_screen->copyRegion(0, 32, 0, 32, 320, 128, 4, 2, Screen::CR_NO_P_CHECK);
			bottom = 0;

			for (CreditsLineList::iterator it = lines.begin(); it != lines.end();) {
				if (it->y < 0) {
					it = lines.erase(it);
					continue;
				}

				if (it->y < 200) {
					if (it->font != _screen->_currentFont)
						_screen->setFont(it->font);

					_screen->printText((const char *)it->str, it->x, it->y, 15, 0);
				}

				it->y--;
				if (it->y > bottom)
					bottom = it->y;

				++it;
			}
			_screen->copyRegion(0, 32, 0, 32, 320, 128, 2, 0, Screen::CR_NO_P_CHECK);
			_screen->updateScreen();
		}

		if (checkInput(0, false)) {
			removeInputTop();
			finished = true;
		}

		// Cap the scroll speed to one pixel per 5 ticks.
		uint32 now = _system->getMillis();
		uint32 nextLoop = startLoop + _tickLength * 5;

		if (nextLoop > now)
			_system->delayMillis(nextLoop - now);
	}

	delete[] buffer;

	_screen->fadeToBlack();
	_screen->clearCurPage();
	_screen->showMouse();
}
// Plays the Amiga end credits: animates "THE END" sliding down over
// the chalet background, then scrolls the credits text (from static
// resources) line by line at the bottom of the screen. Control bytes
// in the text: 3 = right-align block at top, 5 = center, 4 = flush
// the pending top line / switch column, 13 = scroll the line in.
void KyraEngine_LoK::seq_playCreditsAmiga() {
	_screen->setFont(Screen::FID_8_FNT);

	_screen->loadBitmap("CHALET.CPS", 4, 2, &_screen->getPalette(0));
	_screen->copyPage(2, 0);

	_screen->getPalette(0).fill(16, 1, 63);
	_screen->fadePalette(_screen->getPalette(0), 0x5A);

	_screen->updateScreen();

	const char *theEnd = "THE END";
	const int width = _screen->getTextWidth(theEnd) + 1;
	int x = (320 - width) / 2 + 1;

	// Stash the background under the title area on page 2, then render
	// "THE END" off-screen so it can be blitted in piecewise.
	_screen->copyRegion(x, 8, x, 8, width, 56, 0, 2, Screen::CR_NO_P_CHECK);
	_screen->copyRegion(x, 8, 0, 8, width, 11, 0, 2, Screen::CR_NO_P_CHECK);
	_screen->printText(theEnd, 0, 10, 31, 0);

	// Reveal "THE END" by growing it downward...
	for (int y = 18, h = 1; y >= 10 && !shouldQuit(); --y, ++h) {
		uint32 endTime = _system->getMillis() + 3 * _tickLength;

		_screen->copyRegion(0, y, x, 8, width, h, 2, 0, Screen::CR_NO_P_CHECK);
		_screen->updateScreen();

		delayUntil(endTime);
	}

	// ...then slide it down to its final position.
	for (int y = 8; y <= 62 && !shouldQuit(); ++y) {
		uint32 endTime = _system->getMillis() + 3 * _tickLength;

		_screen->copyRegion(x, y, 0, 8, width, 11, 2, 2, Screen::CR_NO_P_CHECK);
		_screen->printText(theEnd, 0, 9, 31, 0);
		_screen->copyRegion(0, 8, x, y, width, 11, 2, 0, Screen::CR_NO_P_CHECK);
		_screen->updateScreen();

		delayUntil(endTime);
	}

	int size = 0;
	const char *creditsData = (const char *)_staticres->loadRawData(k1CreditsStrings, size);

	char stringBuffer[81];
	memset(stringBuffer, 0, sizeof(stringBuffer));
	const char *cur = creditsData;
	char *specialString = stringBuffer;
	bool fillRectFlag = false, subWidth = false, centerFlag = false;
	x = 0;
	int specialX = 0;

	const int fontHeight = _screen->getFontHeight();

	do {
		char code = *cur;
		if (code == 3) {
			fillRectFlag = subWidth = true;
		} else if (code == 5) {
			centerFlag = true;
		} else if (code == 4) {
			// Flush the accumulated left-column text at the top, then
			// reset the buffer for the right column.
			if (fillRectFlag) {
				_screen->fillRect(0, 0, 319, 20, 0);

				if (subWidth)
					specialX = 157 - _screen->getTextWidth(stringBuffer);

				_screen->printText(stringBuffer, specialX + 8, 0, 31, 0);
			}

			specialString = stringBuffer;
			*specialString = 0;

			x = 161;
		} else if (code == 13) {
			// End of line: render it off-screen, then scroll it into
			// the bottom text area one pixel row at a time.
			if (!fillRectFlag)
				_screen->fillRect(0, 0, 319, 20, 0);

			uint32 nextTime = _system->getMillis() + 8 * _tickLength;

			if (centerFlag)
				x = (320 - _screen->getTextWidth(stringBuffer)) / 2 - 8;

			_screen->printText(stringBuffer, x + 8, 0, 31, 0);

			for (int i = 0; i < fontHeight && !shouldQuit(); ++i) {
				_screen->copyRegion(0, 141, 0, 140, 320, 59, 0, 0, Screen::CR_NO_P_CHECK);
				_screen->copyRegion(0, i, 0, 198, 320, 3, 2, 0, Screen::CR_NO_P_CHECK);
				_screen->updateScreen();

				delayUntil(nextTime);
				nextTime = _system->getMillis() + 8 * _tickLength;
			}

			specialString = stringBuffer;
			*specialString = 0;

			centerFlag = fillRectFlag = false;
		} else {
			// Regular character: append to the pending line buffer.
			*specialString++ = code;
			*specialString = 0;
		}

		if (checkInput(0, false)) {
			removeInputTop();
			break;
		}
	} while (++cur != (creditsData + size) && !shouldQuit());
}
// Returns true when the currently playing sequence should be
// skipped: either the engine is shutting down or the player
// aborted the intro.
bool KyraEngine_LoK::seq_skipSequence() const {
	if (shouldQuit())
		return true;
	return _abortIntroFlag;
}
// Advances the Malcolm animation state machine of the end-fight by
// one tick, driven by _malcolmFlag. Returns 1 when the sequence has
// reached a terminal state (Brandon's defeat in state 8 or Malcolm's
// in state 9), 0 otherwise. Frames are played from the finala.wsa /
// finalc.wsa movies at 8 (or 4) tick intervals via _malcolmTimer2.
int KyraEngine_LoK::handleMalcolmFlag() {
	switch (_malcolmFlag) {
	case 1:
		// Start: reset frame counter and begin the intro animation.
		_malcolmFrame = 0;
		_malcolmFlag = 2;
		_malcolmTimer2 = 0;

		// fall through
	case 2:
		// Intro animation, frames 0..13, then idle for 180 ticks.
		if (_system->getMillis() >= _malcolmTimer2) {
			_finalA->displayFrame(_malcolmFrame, 0, 8, 46, 0, 0, 0);
			_screen->updateScreen();
			_malcolmTimer2 = _system->getMillis() + 8 * _tickLength;
			++_malcolmFrame;
			if (_malcolmFrame > 13) {
				_malcolmFlag = 3;
				_malcolmTimer1 = _system->getMillis() + 180 * _tickLength;
			}
		}
		break;

	case 3:
		// Idle: show random frames 14..17 until _malcolmTimer1 expires.
		if (_system->getMillis() < _malcolmTimer1) {
			if (_system->getMillis() >= _malcolmTimer2) {
				_malcolmFrame = _rnd.getRandomNumberRng(14, 17);
				_finalA->displayFrame(_malcolmFrame, 0, 8, 46, 0, 0, 0);
				_screen->updateScreen();
				_malcolmTimer2 = _system->getMillis() + 8 * _tickLength;
			}
		} else {
			_malcolmFlag = 4;
			_malcolmFrame = 18;
		}
		break;

	case 4:
		// Attack wind-up, frames 18..25; triggers the bead launch
		// (_beadStateVar = 1) when finished.
		if (_system->getMillis() >= _malcolmTimer2) {
			_finalA->displayFrame(_malcolmFrame, 0, 8, 46, 0, 0, 0);
			_screen->updateScreen();
			_malcolmTimer2 = _system->getMillis() + 8 * _tickLength;
			++_malcolmFrame;
			if (_malcolmFrame > 25) {
				_malcolmFrame = 26;
				_malcolmFlag = 5;
				_beadStateVar = 1;
			}
		}
		break;

	case 5:
		// Attack follow-through, frames 26..31.
		if (_system->getMillis() >= _malcolmTimer2) {
			_finalA->displayFrame(_malcolmFrame, 0, 8, 46, 0, 0, 0);
			_screen->updateScreen();
			_malcolmTimer2 = _system->getMillis() + 8 * _tickLength;
			++_malcolmFrame;
			if (_malcolmFrame > 31) {
				_malcolmFrame = 32;
				_malcolmFlag = 6;
			}
		}
		break;

	case 6:
		// Wait for the beads to hover (_unkEndSeqVar4), then play
		// frames up to 33 before entering the reaction-wait state 7.
		if (_unkEndSeqVar4) {
			if (_malcolmFrame <= 33 && _system->getMillis() >= _malcolmTimer2) {
				_finalA->displayFrame(_malcolmFrame, 0, 8, 46, 0, 0, 0);
				_screen->updateScreen();
				_malcolmTimer2 = _system->getMillis() + 8 * _tickLength;
				++_malcolmFrame;
				if (_malcolmFrame > 33) {
					_malcolmFlag = 7;
					_malcolmFrame = 32;
					_unkEndSeqVar5 = 0;
				}
			}
		}
		break;

	case 7:
		// Branch on the bead outcome: 1 = beads hit Brandon (Malcolm
		// wins, state 8); 2 = beads deflected, back to idle (state 3).
		if (_unkEndSeqVar5 == 1) {
			_malcolmFlag = 8;
			_malcolmFrame = 34;
		} else if (_unkEndSeqVar5 == 2) {
			_malcolmFlag = 3;
			_malcolmTimer1 = _system->getMillis() + 180 * _tickLength;
		}
		break;

	case 8:
		// Victory animation, frames 34..37; then trigger Brandon's death.
		if (_system->getMillis() >= _malcolmTimer2) {
			_finalA->displayFrame(_malcolmFrame, 0, 8, 46, 0, 0, 0);
			_screen->updateScreen();
			_malcolmTimer2 = _system->getMillis() + 8 * _tickLength;
			++_malcolmFrame;
			if (_malcolmFrame > 37) {
				_malcolmFlag = 0;
				_deathHandler = 8;
				return 1;
			}
		}
		break;

	case 9:
		// Malcolm is defeated: play the finalc.wsa animation
		// synchronously, then start the victory music.
		snd_playSoundEffect(12);
		snd_playSoundEffect(12);
		for (int i = 0; i < 18; ++i) {
			_malcolmTimer2 = _system->getMillis() + 4 * _tickLength;
			_finalC->displayFrame(i, 0, 16, 50, 0, 0, 0);
			_screen->updateScreen();
			delayUntil(_malcolmTimer2);
		}
		snd_playWanderScoreViaMap(51, 1);
		delay(60 * _tickLength);
		_malcolmFlag = 0;
		return 1;

	case 10:
		// Beads were dispelled: taunt line, then wait in state 11.
		if (!_beadStateVar) {
			handleBeadState();
			_screen->bitBlitRects();
			assert(_veryClever);
			_text->printTalkTextMessage(_veryClever[0], 60, 31, 5, 0, 2);
			_malcolmTimer2 = _system->getMillis() + 180 * _tickLength;
			_malcolmFlag = 11;
		}
		break;

	case 11:
		// Remove the taunt text and return to idle (state 3).
		if (_system->getMillis() >= _malcolmTimer2) {
			_text->restoreTalkTextMessageBkgd(2, 0);
			_malcolmFlag = 3;
			_malcolmTimer1 = _system->getMillis() + 180 * _tickLength;
		}
		break;

	default:
		break;
	}

	return 0;
}
// Advances the magic-bead state machine of the end-fight by one tick.
// _beadState1 tracks the on-screen bead sprite (position, size and a
// saved background rectangle in _endSequenceBackUpRect); _beadState2
// is the line-stepper used by processBead(). States:
//   0 erase & reset   1 erase, go to 2     2 fly from Malcolm to hover
//   3 hover/orbit     4 fly toward Brandon 5 fly to the shield/gem
//   6 draw final bead frame and reset
// Returns 1 only from state 0 (fully reset), otherwise 0.
int KyraEngine_LoK::handleBeadState() {
	// Hover-orbit offsets, indexed by _beadState1.tableIndex (0..24).
	static const int table1[] = {
		-1, -2, -4, -5, -6, -7, -6, -5,
		-4, -2, -1,  0,  1,  2,  4,  5,
		 6,  7,  6,  5,  4,  2,  1,  0, 0
	};

	static const int table2[] = {
		0, 0, 1, 1, 2, 2, 3, 3,
		4, 4, 5, 5, 5, 5, 4, 4,
		3, 3, 2, 2, 1, 1, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};

	switch (_beadStateVar) {
	case 0:
		// Erase the bead (restore the saved background) and reset.
		if (_beadState1.x != -1 && _endSequenceBackUpRect) {
			_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
			_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
		}

		_beadState1.x = -1;
		_beadState1.tableIndex = 0;
		_beadStateTimer1 = 0;
		_beadStateTimer2 = 0;
		_lastDisplayedPanPage = 0;
		return 1;

	case 1:
		// Erase any leftover bead, then start the flight (state 2).
		if (_beadState1.x != -1) {
			if (_endSequenceBackUpRect) {
				_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
				_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
			}
			_beadState1.x = -1;
			_beadState1.tableIndex = 0;
		}
		_beadStateVar = 2;
		break;

	case 2:
		// Fly from the spawn point (60,40) to the hover point (60,25).
		if (_system->getMillis() >= _beadStateTimer1) {
			int x = 0, y = 0;
			_beadStateTimer1 = _system->getMillis() + 4 * _tickLength;
			if (_beadState1.x == -1) {
				// First tick: measure the bead shape and allocate the
				// background save buffer.
				assert(_panPagesTable);
				_beadState1.width2 = _animator->fetchAnimWidth(_panPagesTable[19], 256);
				_beadState1.width = ((_beadState1.width2 + 7) >> 3) + 1;
				_beadState1.height = _animator->fetchAnimHeight(_panPagesTable[19], 256);
				if (!_endSequenceBackUpRect) {
					_endSequenceBackUpRect = new uint8[(_beadState1.width * _beadState1.height) << 3];
					assert(_endSequenceBackUpRect);
					memset(_endSequenceBackUpRect, 0, ((_beadState1.width * _beadState1.height) << 3) * sizeof(uint8));
				}
				x = _beadState1.x = 60;
				y = _beadState1.y = 40;
				initBeadState(x, y, x, 25, 8, &_beadState2);
			} else {
				if (processBead(_beadState1.x, _beadState1.y, x, y, &_beadState2)) {
					// Arrived at the hover point — switch to orbiting.
					_beadStateVar = 3;
					_beadStateTimer2 = _system->getMillis() + 240 * _tickLength;
					_unkEndSeqVar4 = 0;
					_beadState1.dstX = _beadState1.x;
					_beadState1.dstY = _beadState1.y;
					return 0;
				} else {
					_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
					_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
					_beadState1.x = x;
					_beadState1.y = y;
				}
			}

			_screen->copyRegionToBuffer(_screen->_curPage, x, y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
			_screen->drawShape(2, _panPagesTable[_lastDisplayedPanPage++], x, y, 0, 0);
			if (_lastDisplayedPanPage > 17)
				_lastDisplayedPanPage = 0;
			_screen->addBitBlitRect(x, y, _beadState1.width2, _beadState1.height);
		}
		break;

	case 3:
		// Orbit around the hover point using table1/table2; after the
		// 240-tick deadline, launch toward Brandon (state 4) or toward
		// his invisibility shield (state 5).
		if (_system->getMillis() >= _beadStateTimer1) {
			_beadStateTimer1 = _system->getMillis() + 4 * _tickLength;
			_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
			_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);

			_beadState1.x = _beadState1.dstX + table1[_beadState1.tableIndex];
			_beadState1.y = _beadState1.dstY + table2[_beadState1.tableIndex];
			_screen->copyRegionToBuffer(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);

			_screen->drawShape(2, _panPagesTable[_lastDisplayedPanPage++], _beadState1.x, _beadState1.y, 0, 0);
			// NOTE(review): this wrap uses ">= 17" while every other
			// occurrence uses "> 17" — verify which is intended.
			if (_lastDisplayedPanPage >= 17)
				_lastDisplayedPanPage = 0;

			_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);

			++_beadState1.tableIndex;
			if (_beadState1.tableIndex > 24) {
				_beadState1.tableIndex = 0;
				_unkEndSeqVar4 = 1;
			}
			if (_system->getMillis() > _beadStateTimer2 && _malcolmFlag == 7 && !_unkAmuletVar && !_text->printed()) {
				snd_playSoundEffect(0x0B);
				if (_currentCharacter->x1 > 233 && _currentCharacter->x1 < 305 && _currentCharacter->y1 > 85 && _currentCharacter->y1 < 105 &&
					(_brandonStatusBit & 0x20)) {
					// Brandon stands in the safe zone and is invisible:
					// the beads head for the gem machine instead.
					_beadState1.unk8 = 290;
					_beadState1.unk9 = 40;
					_beadStateVar = 5;
				} else {
					_beadStateVar = 4;
					_beadState1.unk8 = _currentCharacter->x1 - 4;
					_beadState1.unk9 = _currentCharacter->y1 - 30;
				}

				if (_text->printed())
					_text->restoreTalkTextMessageBkgd(2, 0);

				initBeadState(_beadState1.x, _beadState1.y, _beadState1.unk8, _beadState1.unk9, 12, &_beadState2);
				_lastDisplayedPanPage = 18;
			}
		}
		break;

	case 4:
		// Fly toward Brandon; on arrival either he is protected
		// (state 6) or he is hit and the fight is lost.
		if (_system->getMillis() >= _beadStateTimer1) {
			int x = 0, y = 0;
			_beadStateTimer1 = _system->getMillis() + _tickLength;
			if (processBead(_beadState1.x, _beadState1.y, x, y, &_beadState2)) {
				// FIXME(review): "& 20" is decimal (0x14); the
				// invisibility checks elsewhere use 0x20. Possibly a
				// typo inherited from the original — verify against
				// the original interpreter before changing.
				if (_brandonStatusBit & 20) {
					_unkEndSeqVar5 = 2;
					_beadStateVar = 6;
				} else {
					snd_playWanderScoreViaMap(52, 1);
					snd_playSoundEffect(0x0C);
					_unkEndSeqVar5 = 1;
					_beadStateVar = 0;
				}
			} else {
				_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
				_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
				_beadState1.x = x;
				_beadState1.y = y;
				_screen->copyRegionToBuffer(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
				_screen->drawShape(2, _panPagesTable[_lastDisplayedPanPage++], x, y, 0, 0);
				if (_lastDisplayedPanPage > 17) {
					_lastDisplayedPanPage = 0;
				}
				_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
			}
		}
		break;

	case 5:
		// Fly to the gem machine at (290,40), play the finalb.wsa zap,
		// then bounce back to (63,60) — hitting Malcolm (state 9).
		if (_system->getMillis() >= _beadStateTimer1) {
			_beadStateTimer1 = _system->getMillis() + _tickLength;
			int x = 0, y = 0;
			if (processBead(_beadState1.x, _beadState1.y, x, y, &_beadState2)) {
				if (_beadState2.dstX == 290) {
					_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
					uint32 nextRun = 0;
					for (int i = 0; i < 8; ++i) {
						nextRun = _system->getMillis() + _tickLength;
						_finalB->displayFrame(i, 0, 224, 8, 0, 0, 0);
						_screen->updateScreen();
						delayUntil(nextRun);
					}
					snd_playSoundEffect(0x0D);
					for (int i = 7; i >= 0; --i) {
						nextRun = _system->getMillis() + _tickLength;
						_finalB->displayFrame(i, 0, 224, 8, 0, 0, 0);
						_screen->updateScreen();
						delayUntil(nextRun);
					}
					initBeadState(_beadState1.x, _beadState1.y, 63, 60, 12, &_beadState2);
				} else {
					_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
					_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
					_beadState1.x = -1;
					_beadState1.tableIndex = 0;
					_beadStateVar = 0;
					_malcolmFlag = 9;
				}
			} else {
				_screen->copyBlockToPage(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
				_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
				_beadState1.x = x;
				_beadState1.y = y;
				_screen->copyRegionToBuffer(_screen->_curPage, _beadState1.x, _beadState1.y, _beadState1.width << 3, _beadState1.height, _endSequenceBackUpRect);
				_screen->drawShape(2, _panPagesTable[_lastDisplayedPanPage++], x, y, 0, 0);
				if (_lastDisplayedPanPage > 17)
					_lastDisplayedPanPage = 0;
				_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
			}
		}
		break;

	case 6:
		// Draw the final (static) bead frame and reset the machine.
		_screen->drawShape(2, _panPagesTable[19], _beadState1.x, _beadState1.y, 0, 0);
		_screen->addBitBlitRect(_beadState1.x, _beadState1.y, _beadState1.width2, _beadState1.height);
		_beadStateVar = 0;
		break;

	default:
		break;
	}
	return 0;
}
// Initializes a BeadState line-stepper for a flight from (x, y) to
// (x2, y2), moving 'unk' steps per call to processBead(). The struct
// fields are reused by the stepper: width/height hold the absolute
// deltas, width2/unk8 the per-axis step directions, x/y the DDA
// error accumulators, and dstX/dstY the target point.
void KyraEngine_LoK::initBeadState(int x, int y, int x2, int y2, int unk, BeadState *ptr) {
	ptr->unk9 = unk;

	const int deltaX = x2 - x;
	const int deltaY = y2 - y;

	// Per-axis step direction: -1, 0, or +1.
	const int stepX = (deltaX > 0) ? 1 : ((deltaX < 0) ? -1 : 0);
	const int stepY = (deltaY > 0) ? 1 : ((deltaY < 0) ? -1 : 0);

	ptr->y = 0;
	ptr->x = 0;
	ptr->width = ABS(deltaX);
	ptr->height = ABS(deltaY);
	ptr->dstX = x2;
	ptr->dstY = y2;
	ptr->width2 = stepX;
	ptr->unk8 = stepY;
}
// Steps the bead ptr->unk9 pixels along the line set up by
// initBeadState(), starting from (x, y). Writes the new position to
// (x2, y2) and returns 1 once (x, y) equals the destination, else 0.
// Implements a DDA: the dominant axis advances every step, the minor
// axis whenever its error accumulator (ptr->x / ptr->y) overflows.
int KyraEngine_LoK::processBead(int x, int y, int &x2, int &y2, BeadState *ptr) {
	if (x == ptr->dstX && y == ptr->dstY)
		return 1;

	int xPos = x, yPos = y;
	if (ptr->width >= ptr->height) {
		// X is the dominant axis.
		for (int i = 0; i < ptr->unk9; ++i) {
			ptr->y += ptr->height;
			if (ptr->y >= ptr->width) {
				ptr->y -= ptr->width;
				yPos += ptr->unk8;
			}
			xPos += ptr->width2;
		}
	} else {
		// Y is the dominant axis.
		for (int i = 0; i < ptr->unk9; ++i) {
			ptr->x += ptr->width;
			if (ptr->x >= ptr->height) {
				ptr->x -= ptr->height;
				xPos += ptr->width2;
			}
			yPos += ptr->unk8;
		}
	}

	// Snap to the destination when within one step of it, so the
	// termination check above is guaranteed to trigger.
	int temp = ABS(x - ptr->dstX);
	if (ptr->unk9 > temp)
		xPos = ptr->dstX;

	temp = ABS(y - ptr->dstY);
	if (ptr->unk9 > temp)
		yPos = ptr->dstY;

	x2 = xPos;
	y2 = yPos;
	return 0;
}
// Loads the bead animation shapes from BEAD.CPS into _panPagesTable
// (slots 0..18 are the flight animation, slot 19 the static bead).
// Page 2 is saved to disk around the load because the decode uses it
// as scratch space. Macintosh/Amiga encode the shapes directly from
// the bitmap; other ports use the sequence player's pan-page decoder.
void KyraEngine_LoK::setupPanPages() {
	_screen->savePageToDisk("BKGD.PG", 2);
	_screen->loadBitmap("BEAD.CPS", 3, 3, 0);
	if (_flags.platform == Common::kPlatformMacintosh || _flags.platform == Common::kPlatformAmiga) {
		int pageBackUp = _screen->_curPage;
		_screen->_curPage = 2;

		delete[] _panPagesTable[19];
		_panPagesTable[19] = _screen->encodeShape(0, 0, 16, 9, 0);
		assert(_panPagesTable[19]);

		// The 19 flight frames follow side by side, 8 pixels apart.
		int curX = 16;
		for (int i = 0; i < 19; ++i) {
			delete[] _panPagesTable[i];
			_panPagesTable[i] = _screen->encodeShape(curX, 0, 8, 5, 0);
			assert(_panPagesTable[i]);
			curX += 8;
		}
		_screen->_curPage = pageBackUp;
	} else {
		for (int i = 0; i <= 19; ++i) {
			delete[] _panPagesTable[i];
			_panPagesTable[i] = _seq->setPanPages(3, i);
			assert(_panPagesTable[i]);
		}
	}
	_screen->loadPageFromDisk("BKGD.PG", 2);
}
// Releases the bead background-save buffer and all 20 pan-page
// shapes, nulling each pointer so a double free is impossible.
void KyraEngine_LoK::freePanPages() {
	delete[] _endSequenceBackUpRect;
	_endSequenceBackUpRect = 0;

	for (int page = 19; page >= 0; --page) {
		delete[] _panPagesTable[page];
		_panPagesTable[page] = 0;
	}
}
// Releases the three finale WSA movies and the pan pages, and marks
// the end sequence as needing a reload on the next seq_playEnd().
void KyraEngine_LoK::closeFinalWsa() {
	delete _finalA;
	delete _finalB;
	delete _finalC;
	_finalA = 0;
	_finalB = 0;
	_finalC = 0;

	freePanPages();
	_endSequenceNeedLoading = 1;
}
// Animates the pulsing Kyragem glow by rewriting palette entries
// 228..247 (byte offset 684 = 228 * 3) every 4 ticks. Three offsets
// into a 40-entry brightness ramp (one each for R, G and B) are
// cycled through a six-phase pattern; after each completed phase the
// next update is delayed by 120 ticks.
void KyraEngine_LoK::updateKyragemFading() {
	if (_flags.platform == Common::kPlatformAmiga) {
		// The AMIGA version seems to have no fading for the Kyragem. The code does not
		// alter the screen palette.
		//
		// TODO: Check this in the original.
		return;
	}

	static const uint8 kyraGemPalette[0x28] = {
		0x3F, 0x3B, 0x38, 0x34, 0x32, 0x2F, 0x2C, 0x29, 0x25, 0x22,
		0x1F, 0x1C, 0x19, 0x16, 0x12, 0x0F, 0x0C, 0x0A, 0x06, 0x03,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	if (_system->getMillis() < _kyragemFadingState.timerCount)
		return;

	_kyragemFadingState.timerCount = _system->getMillis() + 4 * _tickLength;
	int palPos = 684;
	for (int i = 0; i < 20; ++i) {
		_screen->getPalette(0)[palPos++] = kyraGemPalette[i + _kyragemFadingState.rOffset];
		_screen->getPalette(0)[palPos++] = kyraGemPalette[i + _kyragemFadingState.gOffset];
		_screen->getPalette(0)[palPos++] = kyraGemPalette[i + _kyragemFadingState.bOffset];
	}
	_screen->setScreenPalette(_screen->getPalette(0));

	// Six-phase cycle: fade one channel offset at a time between
	// (roughly) 0 and 19, in the order B down, R up, G down, B up,
	// R down, G up. Returning early keeps the 4-tick cadence; falling
	// through to the bottom switches phase and waits 120 ticks.
	switch (_kyragemFadingState.nextOperation) {
	case 0:
		--_kyragemFadingState.bOffset;
		if (_kyragemFadingState.bOffset >= 1)
			return;
		_kyragemFadingState.nextOperation = 1;
		break;

	case 1:
		++_kyragemFadingState.rOffset;
		if (_kyragemFadingState.rOffset < 19)
			return;
		_kyragemFadingState.nextOperation = 2;
		break;

	case 2:
		--_kyragemFadingState.gOffset;
		if (_kyragemFadingState.gOffset >= 1)
			return;
		_kyragemFadingState.nextOperation = 3;
		break;

	case 3:
		++_kyragemFadingState.bOffset;
		if (_kyragemFadingState.bOffset < 19)
			return;
		_kyragemFadingState.nextOperation = 4;
		break;

	case 4:
		--_kyragemFadingState.rOffset;
		if (_kyragemFadingState.rOffset >= 1)
			return;
		_kyragemFadingState.nextOperation = 5;
		break;

	case 5:
		++_kyragemFadingState.gOffset;
		if (_kyragemFadingState.gOffset < 19)
			return;
		_kyragemFadingState.nextOperation = 0;
		break;

	default:
		break;
	}

	_kyragemFadingState.timerCount = _system->getMillis() + 120 * _tickLength;
}
// Plays the short "jewel pressed" feedback on the amulet: draws the pressed
// shape with a sound effect, waits two ticks, then restores the idle shape.
// jewel       - amulet slot index, also selects the position (_amuletX2/_amuletY2)
// drawSpecial - when non-zero, use the special shapes instead of the per-jewel ones
void KyraEngine_LoK::drawJewelPress(int jewel, int drawSpecial) {
	_screen->hideMouse();
	int shape = 0;

	if (drawSpecial)
		shape = 0x14E;
	else
		shape = jewel + 0x149;

	snd_playSoundEffect(0x45);
	_screen->drawShape(0, _shapes[shape], _amuletX2[jewel], _amuletY2[jewel], 0, 0);
	_screen->updateScreen();
	delayWithTicks(2);

	if (drawSpecial)
		shape = 0x148;
	else
		shape = jewel + 0x143;

	_screen->drawShape(0, _shapes[shape], _amuletX2[jewel], _amuletY2[jewel], 0, 0);
	_screen->updateScreen();
	_screen->showMouse();
}
// First half of the jewel fade-out: steps every jewel currently present
// (game flags 0x56..0x59) through its own five-frame dimming sequence,
// one frame per iteration with a three-tick delay between frames.
void KyraEngine_LoK::drawJewelsFadeOutStart() {
	// Per-jewel shape id sequences, each terminated by 0xFFFF.
	static const uint16 jewelTable1[] = { 0x164, 0x15F, 0x15A, 0x155, 0x150, 0xFFFF };
	static const uint16 jewelTable2[] = { 0x163, 0x15E, 0x159, 0x154, 0x14F, 0xFFFF };
	static const uint16 jewelTable3[] = { 0x166, 0x160, 0x15C, 0x157, 0x152, 0xFFFF };
	static const uint16 jewelTable4[] = { 0x165, 0x161, 0x15B, 0x156, 0x151, 0xFFFF };
	for (int i = 0; jewelTable1[i] != 0xFFFF; ++i) {
		if (queryGameFlag(0x57))
			_screen->drawShape(0, _shapes[jewelTable1[i]], _amuletX2[2], _amuletY2[2], 0, 0);
		if (queryGameFlag(0x59))
			_screen->drawShape(0, _shapes[jewelTable3[i]], _amuletX2[4], _amuletY2[4], 0, 0);
		if (queryGameFlag(0x56))
			_screen->drawShape(0, _shapes[jewelTable2[i]], _amuletX2[1], _amuletY2[1], 0, 0);
		if (queryGameFlag(0x58))
			_screen->drawShape(0, _shapes[jewelTable4[i]], _amuletX2[3], _amuletY2[3], 0, 0);
		_screen->updateScreen();
		delayWithTicks(3);
	}
}
// Second half of the jewel fade-out: plays the shared final fade frames on
// every present jewel (game flags 0x56..0x59) and arms timer 19 with a
// delay after which the jewels become usable again.
// jewel - the jewel that triggered the fade (selects the re-arm delay)
void KyraEngine_LoK::drawJewelsFadeOutEnd(int jewel) {
	// Shared fade frame sequence, terminated by 0xFFFF.
	static const uint16 jewelTable[] = { 0x153, 0x158, 0x15D, 0x162, 0x148, 0xFFFF };

	int newDelay = 0;
	switch (jewel - 1) {
	case 2:
		// Third jewel: longer delay within this scene id range.
		if (_currentCharacter->sceneId >= 109 && _currentCharacter->sceneId <= 198)
			newDelay = 18900;
		else
			newDelay = 8100;
		break;

	default:
		newDelay = 3600;
	}

	setGameFlag(0xF1);
	_timer->setCountdown(19, newDelay);
	_screen->hideMouse();
	for (int i = 0; jewelTable[i] != 0xFFFF; ++i) {
		uint16 shape = jewelTable[i];
		if (queryGameFlag(0x57))
			_screen->drawShape(0, _shapes[shape], _amuletX2[2], _amuletY2[2], 0, 0);
		if (queryGameFlag(0x59))
			_screen->drawShape(0, _shapes[shape], _amuletX2[4], _amuletY2[4], 0, 0);
		if (queryGameFlag(0x56))
			_screen->drawShape(0, _shapes[shape], _amuletX2[1], _amuletY2[1], 0, 0);
		if (queryGameFlag(0x58))
			_screen->drawShape(0, _shapes[shape], _amuletX2[3], _amuletY2[3], 0, 0);
		_screen->updateScreen();
		delayWithTicks(3);
	}
	_screen->showMouse();
}
} // End of namespace Kyra
| somaen/scummvm | engines/kyra/sequence/sequences_lok.cpp | C++ | gpl-2.0 | 56,978 |
// SuperTuxKart - a fun racing game with go-kart
//
// Copyright (C) 2013-2015 Lionel Fuentes
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 3
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include "crash_reporting.hpp"
#include "log.hpp"
#include <string.h>
#if defined(WIN32) && !defined(DEBUG) && !defined(__MINGW32__)
// --------------------- Windows version -----------------
#include <Windows.h>
#include <DbgHelp.h>
#include <stdlib.h>
#include <signal.h>
#include <new.h>
// Function pointer types matching the DbgHelp/ImageHlp entry points that are
// resolved at runtime with GetProcAddress (see getCallStackWithContext).
// Resolving them dynamically avoids a hard link-time dependency on
// imagehlp.dll.
typedef BOOL  (__stdcall *tSymCleanup)(
    _In_ HANDLE hProcess
);

typedef PVOID (__stdcall *tSymFunctionTableAccess64)(
    _In_ HANDLE  hProcess,
    _In_ DWORD64 AddrBase
);

typedef BOOL (__stdcall *tSymGetLineFromAddr64)(
    _In_  HANDLE           hProcess,
    _In_  DWORD64          qwAddr,
    _Out_ PDWORD           pdwDisplacement,
    _Out_ PIMAGEHLP_LINE64 Line64
);

typedef DWORD64 (__stdcall *tSymGetModuleBase64)(
    _In_ HANDLE  hProcess,
    _In_ DWORD64 qwAddr
);

typedef BOOL (__stdcall *tSymGetSymFromAddr64)(
    _In_      HANDLE             hProcess,
    _In_      DWORD64            qwAddr,
    _Out_opt_ PDWORD64           pdwDisplacement,
    _Inout_   PIMAGEHLP_SYMBOL64 Symbol
);

typedef BOOL (__stdcall *tSymInitialize)(
    _In_     HANDLE hProcess,
    _In_opt_ PCSTR  UserSearchPath,
    _In_     BOOL   fInvadeProcess
);

typedef DWORD (__stdcall *tSymSetOptions)(
    _In_ DWORD SymOptions
);

typedef BOOL (__stdcall *tStackWalk64)(
    _In_     DWORD                            MachineType,
    _In_     HANDLE                           hProcess,
    _In_     HANDLE                           hThread,
    _Inout_  LPSTACKFRAME64                   StackFrame,
    _Inout_  PVOID                            ContextRecord,
    _In_opt_ PREAD_PROCESS_MEMORY_ROUTINE64   ReadMemoryRoutine,
    _In_opt_ PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
    _In_opt_ PGET_MODULE_BASE_ROUTINE64       GetModuleBaseRoutine,
    _In_opt_ PTRANSLATE_ADDRESS_ROUTINE64     TranslateAddress
);

typedef DWORD (__stdcall *tUnDecorateSymbolName)(
    _In_  PCSTR name,
    _Out_ PSTR  outputString,
    _In_  DWORD maxStringLength,
    _In_  DWORD flags
);
namespace CrashReporting
{
    // Forward declaration; defined below.
    void getCallStackWithContext(std::string& callstack, PCONTEXT pContext);

    // Common crash handling routine: collects a call stack (either from the
    // given exception CONTEXT, or from the current point of execution when
    // pContext is NULL) and shows it in a message box so the user can copy
    // it with Ctrl+C and report the problem.
    void winCrashHandler(PCONTEXT pContext=NULL)
    {
        std::string callstack;
        if(pContext)
            getCallStackWithContext(callstack, pContext);
        else
            getCallStack(callstack);

        std::string msg =   "SuperTuxKart crashed!\n"
                            "Please hit Ctrl+C to copy to clipboard and signal the problem\n"
                            "to the developers on our forum: http://forum.freegamedev.net/viewforum.php?f=16\n"
                            "\n"
                            "Call stack:\n";
        msg += callstack;
        MessageBoxA(NULL, msg.c_str(), "SuperTuxKart crashed :/", MB_OK);
    }
    // Top-level SEH (structured exception) filter: reports the crash with
    // the faulting thread's CONTEXT, then lets the process terminate.
    LONG WINAPI sehHandler(_In_ struct _EXCEPTION_POINTERS *ExceptionInfo)
    {
        winCrashHandler(ExceptionInfo->ContextRecord);
        return EXCEPTION_EXECUTE_HANDLER;
    }
    // Trampolines installed by installHandlers(): each CRT/signal hook below
    // simply reports the crash from the current context (no exception
    // CONTEXT is available for these cases).

    // Called on pure virtual function calls.
    void pureCallHandler()
    {
        winCrashHandler();
    }

    // Called when operator new (and, via _set_new_mode, malloc) fails.
    int newHandler( size_t )
    {
        winCrashHandler();
        return 0;
    }

    // Called on invalid CRT parameters (e.g. NULL where not allowed).
    void invalidParameterHandler(const wchar_t *, const wchar_t *, const wchar_t *, unsigned int, uintptr_t)
    {
        winCrashHandler();
    }

    // Called for SIGABRT/SIGINT/SIGTERM.
    void signalHandler(int code)
    {
        winCrashHandler();
    }
    // Registers all process-wide crash hooks (SEH filter, CRT handlers and
    // signal handlers) so that any fatal error ends up in winCrashHandler().
    void installHandlers()
    {
        // ----- Per-process handlers -----
        SetUnhandledExceptionFilter(sehHandler);        // Top-level SEH handler
        _set_purecall_handler(pureCallHandler);         // Pure virtual function calls handler

        // Catch new operator memory allocation exceptions
        _set_new_mode(1); // Force malloc() to call new handler too
        _set_new_handler(newHandler);

        _set_invalid_parameter_handler(invalidParameterHandler);     // Catch invalid parameter exceptions.
        //_set_security_error_handler(securityHandler);              // Catch buffer overrun exceptions

        signal(SIGABRT, signalHandler);
        signal(SIGINT,  signalHandler);
        signal(SIGTERM, signalHandler);

        // ----- Per-thread handlers -----
        // TODO
    }
void getCallStackWithContext(std::string& callstack, PCONTEXT pContext)
{
HINSTANCE hImageHlpDll = LoadLibraryA("imagehlp.dll");
if(!hImageHlpDll)
{
Log::warn("CrashReporting", "Failed to load DLL imagehlp.dll");
callstack = "Crash reporting failed to load DLL imagehlp.dll";
return;
}
// Retrieve the DLL functions
#define GET_FUNC_PTR(FuncName) \
t##FuncName _##FuncName = (t##FuncName)GetProcAddress(hImageHlpDll, #FuncName); \
if(!_##FuncName) { \
Log::warn("CrashReporting", "Failed to import symbol " #FuncName " from imagehlp.dll"); \
FreeLibrary(hImageHlpDll); \
return; \
}
GET_FUNC_PTR(SymCleanup )
GET_FUNC_PTR(SymFunctionTableAccess64 )
GET_FUNC_PTR(SymGetLineFromAddr64 )
GET_FUNC_PTR(SymGetModuleBase64 )
GET_FUNC_PTR(SymGetSymFromAddr64 )
GET_FUNC_PTR(SymInitialize )
GET_FUNC_PTR(SymSetOptions )
GET_FUNC_PTR(StackWalk64 )
GET_FUNC_PTR(UnDecorateSymbolName )
#undef GET_FUNC_PTR
const HANDLE hProcess = GetCurrentProcess();
const HANDLE hThread = GetCurrentThread();
// Initialize the symbol hander for the process
{
// Get the file path of the executable
char filepath[512];
GetModuleFileNameA(NULL, filepath, sizeof(filepath));
if(!filepath)
{
Log::warn("CrashReporting", "GetModuleFileNameA failed");
FreeLibrary(hImageHlpDll);
return;
}
// Only keep the directory
char* last_separator = strchr(filepath, '/');
if(!last_separator) last_separator = strchr(filepath, '\\');
if(last_separator)
last_separator[0] = '\0';
// Since the stack trace can also be used for leak checks, don't
// initialise this all the time.
static bool first_time = true;
if (first_time)
{
// Finally initialize the symbol handler.
BOOL bOk = _SymInitialize(hProcess, filepath ? filepath : NULL, TRUE);
if (!bOk)
{
Log::warn("CrashReporting", "SymInitialize() failed");
FreeLibrary(hImageHlpDll);
return;
}
_SymSetOptions(SYMOPT_LOAD_LINES);
first_time = false;
}
}
// Get the stack trace
{
// Initialize the IMAGEHLP_SYMBOL64 structure
const size_t MaxNameLength = 256;
IMAGEHLP_SYMBOL64* sym = (IMAGEHLP_SYMBOL64*)_malloca(sizeof(IMAGEHLP_SYMBOL64) + MaxNameLength);
sym->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
sym->MaxNameLength = MaxNameLength;
// Initialize the STACKFRAME structure so that it
// corresponds to the current function call
STACKFRAME64 stackframe;
memset(&stackframe, 0, sizeof(stackframe));
stackframe.AddrPC.Offset = pContext->Eip;
stackframe.AddrPC.Mode = AddrModeFlat;
stackframe.AddrStack.Offset = pContext->Esp;
stackframe.AddrStack.Mode = AddrModeFlat;
stackframe.AddrFrame.Offset = pContext->Ebp;
stackframe.AddrFrame.Mode = AddrModeFlat;
const DWORD machine_type = IMAGE_FILE_MACHINE_I386;
// Walk the stack
const int max_nb_calls = 32;
for(int i=0 ; i < max_nb_calls ; i++)
{
const BOOL stackframe_ok = _StackWalk64( machine_type,
hProcess,
hThread,
&stackframe,
pContext,
NULL,
_SymFunctionTableAccess64,
_SymGetModuleBase64,
NULL);
if(stackframe_ok)
{
// Decode the symbol and add it to the call stack
DWORD64 sym_displacement;
if(_SymGetSymFromAddr64( hProcess,
stackframe.AddrPC.Offset,
&sym_displacement,
sym))
{
IMAGEHLP_LINE64 line64;
DWORD dwDisplacement = (DWORD)sym_displacement;
if(_SymGetLineFromAddr64(hProcess, stackframe.AddrPC.Offset, &dwDisplacement, &line64))
{
callstack += "\n ";
// Directory + filename -> filename only
const char* filename = line64.FileName;
const char* ptr = line64.FileName;
while(*ptr)
{
if(*ptr == '\\' || *ptr == '/')
filename = ptr+1;
ptr++;
}
callstack += filename;
callstack += ":";
callstack += sym->Name;
char str[128];
_itoa(line64.LineNumber, str, 10);
callstack += ":";
callstack += str;
}
else
{
callstack += "\n ";
callstack += sym->Name;
}
}
else
callstack += "\n <no symbol available>";
}
else
break; // done
}
}
FreeLibrary(hImageHlpDll);
}
    // Captures the CURRENT call stack (as opposed to one taken from an
    // exception record) by filling a CONTEXT with inline assembly and
    // delegating to getCallStackWithContext().
    void getCallStack(std::string& callstack)
    {
        // Get the current CONTEXT
        // NB: this code is ONLY VALID FOR X86 (32 bit)!
        CONTEXT ctx;
        memset(&ctx, '\0', sizeof(ctx));
        ctx.ContextFlags = CONTEXT_FULL;
        // The call/pop pair reads the instruction pointer; ebp/esp are
        // copied directly from the registers.
        __asm    call x
        __asm x: pop eax    // get eip (can't directly use mov)
        __asm    mov ctx.Eip, eax
        __asm    mov ctx.Ebp, ebp
        __asm    mov ctx.Esp, esp

        getCallStackWithContext(callstack, &ctx);
    }
} // end namespace CrashReporting
#else

    // --------------------- Unix version -----------------------
    namespace CrashReporting
    {
        // Stub: no crash hooks are installed on non-Windows builds yet.
        void installHandlers()
        {
            // TODO!
        }

        // Stub: leaves callstack untouched (no backtrace support yet).
        void getCallStack(std::string& callstack)
        {
            // TODO!
        }
    }   // end namespace CrashReporting

#endif
| scopeInfinity/stk-code | src/utils/crash_reporting.cpp | C++ | gpl-3.0 | 13,018 |
package net.minecraft.util;
import net.minecraft.nbt.NBTTagCompound;
public class WeightedSpawnerEntity extends WeightedRandom.Item
{
private final NBTTagCompound nbt;
public WeightedSpawnerEntity()
{
super(1);
this.nbt = new NBTTagCompound();
this.nbt.setString("id", "Pig");
}
public WeightedSpawnerEntity(NBTTagCompound nbtIn)
{
this(nbtIn.hasKey("Weight", 99) ? nbtIn.getInteger("Weight") : 1, nbtIn.getCompoundTag("Entity"));
}
public WeightedSpawnerEntity(int itemWeightIn, NBTTagCompound nbtIn)
{
super(itemWeightIn);
this.nbt = nbtIn;
}
public NBTTagCompound toCompoundTag()
{
NBTTagCompound nbttagcompound = new NBTTagCompound();
nbttagcompound.setTag("Entity", this.nbt);
nbttagcompound.setInteger("Weight", this.itemWeight);
return nbttagcompound;
}
public NBTTagCompound getNbt()
{
return this.nbt;
}
} | danielyc/test-1.9.4 | build/tmp/recompileMc/sources/net/minecraft/util/WeightedSpawnerEntity.java | Java | gpl-3.0 | 973 |
/* Copyright 2008 (C) Nicira, Inc.
*
* This file is part of NOX.
*
* NOX is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* NOX is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with NOX. If not, see <http://www.gnu.org/licenses/>.
*/
/*
*/
#include "user_event_log_proxy.hh"
#include "threads/cooperative.hh"
#include "pyrt/pycontext.hh"
#include "swigpyrun.h"
#include "vlog.hh"
#include "pyrt/pyglue.hh"
using namespace std;
using namespace vigil;
using namespace vigil::applications;
namespace {
Vlog_module lg("user_event_log_proxy");
}
namespace vigil {
namespace applications {
/*
* Get a pointer to the runtime context so we can resolve
* user_event_log at configure time.
*/
user_event_log_proxy::user_event_log_proxy(PyObject* ctxt) : uel(0)
{
    // ctxt must be a SWIG-wrapped PyContext; unwrap it to reach the
    // component container 'c' so 'uel' can be resolved in configure().
    if (!SWIG_Python_GetSwigThis(ctxt) || !SWIG_Python_GetSwigThis(ctxt)->ptr) {
        throw runtime_error("Unable to access Python context.");
    }

    c = ((PyContext*)SWIG_Python_GetSwigThis(ctxt)->ptr)->c;
}
// Thin delegation wrappers around the resolved user_event_log component.

// Logs a simple message for 'app_name' at the given severity level.
void user_event_log_proxy::log_simple(const string &app_name, int level,
    const string &msg){
  uel->log_simple(app_name, (LogEntry::Level) level, msg);
}

// Logs a fully constructed entry.
void user_event_log_proxy::log(const LogEntry &entry){
  uel->log(entry);
}

// Highest log id currently stored.
int user_event_log_proxy::get_max_logid() {
  return uel->get_max_logid();
}

// Lowest log id currently stored.
int user_event_log_proxy::get_min_logid() {
  return uel->get_min_logid();
}

// Caps the number of retained log entries.
void user_event_log_proxy::set_max_num_entries(int num) {
  uel->set_max_num_entries(num);
}
// Invokes the Python callable 'cb' with the argument tuple 'args' inside a
// cooperative critical section. Consumes (decrefs) 'args'; on failure the
// Python traceback is pretty-printed to the log.
void user_event_log_proxy::python_callback(PyObject *args,
        boost::intrusive_ptr<PyObject> cb) {
  Co_critical_section c;
  PyObject* ret = PyObject_CallObject(cb.get(), args);
  if (ret == 0) {
    const string exc = pretty_print_python_exception();
    lg.err("Python callback invocation failed:\n%s", exc.c_str());
  }
  Py_DECREF(args);
  Py_XDECREF(ret);
}
// Asynchronously fetches one log entry. 'cb' must be a Python callable; it
// is later invoked as cb(logid, ts, app, level, msg, src_names, dst_names)
// via get_log_callback(). Returns None, or raises TypeError on a bad 'cb'.
PyObject *user_event_log_proxy::get_log_entry(int logid, PyObject *cb){
  try {
    if (!cb || !PyCallable_Check(cb)) { throw "Invalid callback"; }

    boost::intrusive_ptr<PyObject> cptr(cb, true);
    Log_entry_callback f = boost::bind(
            &user_event_log_proxy::get_log_callback,this,_1,_2,_3,_4,_5,_6,_7,cptr);
    uel->get_log_entry((int64_t)logid, f);
    Py_RETURN_NONE;
  }
  catch (const char* msg) {
    /* Unable to convert the arguments. */
    PyErr_SetString(PyExc_TypeError, msg);
    return 0;
  }
}
// Bridges a fetched C++ log entry to the stored Python callback: builds the
// 7-tuple (logid, ts, app, level, msg, src_names, dst_names) and invokes the
// callback. The critical section keeps tuple construction and the call
// atomic with respect to other cooperative threads.
void user_event_log_proxy::get_log_callback(int64_t logid, int64_t ts,
                const string &app,
                int level, const string &msg, const PrincipalList &src_names,
                const PrincipalList &dst_names, boost::intrusive_ptr<PyObject> cb) {
  // Renamed from 'c' to avoid shadowing the component-context member 'c'.
  Co_critical_section critical;

  PyObject* args = PyTuple_New(7);
  // NOTE(review): logid is int64_t; PyInt_FromLong truncates it where
  // 'long' is 32 bits. Kept as-is so existing Python callers still see a
  // plain int -- flagged for follow-up.
  PyTuple_SetItem(args, 0, PyInt_FromLong(logid));
  // BUGFIX: use the long-long constructor so 64-bit timestamps are not
  // truncated on platforms with a 32-bit 'long'.
  PyTuple_SetItem(args, 1, PyLong_FromLongLong(ts));
  PyTuple_SetItem(args, 2, PyString_FromString(app.c_str()));
  PyTuple_SetItem(args, 3, PyInt_FromLong(level));
  PyTuple_SetItem(args, 4, PyString_FromString(msg.c_str()));
  PyTuple_SetItem(args, 5, to_python_list(src_names));
  PyTuple_SetItem(args, 6, to_python_list(dst_names));
  python_callback(args,cb);
}
// Asynchronously looks up all log ids that reference the principal with the
// given id/type. 'cb' must be a Python callable; it is later invoked with a
// single tuple of log ids via get_logids_callback(). Returns None, or
// raises TypeError on a bad 'cb'.
PyObject * user_event_log_proxy::get_logids_for_name(int64_t id,
                                        int64_t type, PyObject* cb) {
  try {
    if (!cb || !PyCallable_Check(cb)) { throw "Invalid callback"; }

    boost::intrusive_ptr<PyObject> cptr(cb, true);
    Get_logids_callback f = boost::bind(
            &user_event_log_proxy::get_logids_callback,this,_1,cptr);
    uel->get_logids_for_name(id, (PrincipalType) type,f);
    Py_RETURN_NONE;
  }
  catch (const char* msg) {
    /* Unable to convert the arguments. */
    PyErr_SetString(PyExc_TypeError, msg);
    return 0;
  }
}
void user_event_log_proxy::get_logids_callback(const list<int64_t> &logids,
boost::intrusive_ptr<PyObject> cb) {
Co_critical_section c;
PyObject* logid_tuple = PyTuple_New(logids.size());
list<int64_t>::const_iterator it = logids.begin();
int i = 0;
for( ; it != logids.end(); ++it) {
PyTuple_SetItem(logid_tuple, i, PyLong_FromLong(*it));
++i;
}
PyObject* args = PyTuple_New(1);
PyTuple_SetItem(args,0,logid_tuple);
python_callback(args,cb);
}
// Asynchronously clears the entire user event log. 'cb' must be a Python
// callable; it is invoked with no arguments once clearing finished.
// Returns None, or raises TypeError on a bad 'cb'.
PyObject *user_event_log_proxy::clear(PyObject *cb){
  try {
    if (!cb || !PyCallable_Check(cb)) { throw "Invalid callback"; }

    boost::intrusive_ptr<PyObject> cptr(cb, true);
    Clear_log_callback f = boost::bind(
        &user_event_log_proxy::clear_callback,this,_1, cptr);
    uel->clear(f);
    Py_RETURN_NONE;
  }
  catch (const char* msg) {
    /* Unable to convert the arguments. */
    PyErr_SetString(PyExc_TypeError, msg);
    return 0;
  }
}

// Asynchronously removes all entries up to 'max_logid'. 'cb' must be a
// Python callable; it is invoked with no arguments when done.
// Returns None, or raises TypeError on a bad 'cb'.
PyObject *user_event_log_proxy::remove(int max_logid, PyObject *cb){
  try {
    if (!cb || !PyCallable_Check(cb)) { throw "Invalid callback"; }

    boost::intrusive_ptr<PyObject> cptr(cb, true);
    Clear_log_callback f = boost::bind(
        &user_event_log_proxy::clear_callback,this,_1,cptr);
    uel->remove(max_logid, f);
    Py_RETURN_NONE;
  }
  catch (const char* msg) {
    /* Unable to convert the arguments. */
    PyErr_SetString(PyExc_TypeError, msg);
    return 0;
  }
}
// Completion trampoline for clear()/remove(): ignores the storage result
// and invokes the Python callback with an empty argument tuple.
void user_event_log_proxy::clear_callback(const storage::Result &r,
                    boost::intrusive_ptr<PyObject> cb){
  PyObject* args = PyTuple_New(0);
  python_callback(args,cb);
}
void
user_event_log_proxy::configure(PyObject* configuration)
{
    // Resolve the user_event_log component from the container captured in
    // the constructor; 'uel' is unusable before this runs.
    c->resolve(uel);
}

// No install-time work; everything happens in configure().
void
user_event_log_proxy::install(PyObject*)
{
}
} // namespace applications
} // namespace vigil
| zainabg/NOX | src/nox/netapps/user_event_log/user_event_log_proxy.cc | C++ | gpl-3.0 | 6,355 |
var toArray = require('./toArray');
var find = require('../array/find');
/**
 * Returns the first argument that is neither `null` nor `undefined`.
 */
function defaults(var_args){
    var candidates = toArray(arguments);
    return find(candidates, nonVoid);
}
// Predicate: true for any value except null and undefined.
function nonVoid(val){
    return val !== null && val !== undefined;
}
module.exports = defaults;
| sanwalkailash/DotFresh | node_modules/grunt-bower-installer/node_modules/bower/node_modules/mout/lang/defaults.js | JavaScript | gpl-3.0 | 330 |
<?php
/*
* Copyright (c) 2011-2015 Lp digital system
*
* This file is part of BackBee.
*
* BackBee is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BackBee is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with BackBee. If not, see <http://www.gnu.org/licenses/>.
*
* @author Charles Rouillon <charles.rouillon@lp-digital.fr>
*/
namespace BackBee\Util;
use Symfony\Component\HttpFoundation\File\MimeType\MimeTypeGuesser;
/**
* @category BackBee
*
* @copyright Lp digital system
* @author c.rouillon <charles.rouillon@lp-digital.fr>
*/
class MimeType
{
    /**
     * Shared singleton instance.
     *
     * @var MimeType
     */
    private static $instance = null;

    /**
     * Underlying Symfony guesser chain, extended with the BackBee
     * extension-based guesser.
     *
     * @var MimeTypeGuesser
     */
    private static $guesser = null;

    /**
     * Private constructor: registers the extension-based guesser on top of
     * the default Symfony guesser chain.
     */
    private function __construct()
    {
        self::$guesser = MimeTypeGuesser::getInstance();
        self::$guesser->register(new ExtensionMimeTypeGuesser());
    }

    /**
     * Returns the shared MimeType instance, creating it on first use.
     *
     * @return MimeType
     */
    public static function getInstance()
    {
        if (self::$instance === null) {
            self::$instance = new self();
        }

        return self::$instance;
    }

    /**
     * Guesses the mime type of the file located at $path.
     *
     * @param string $path
     *
     * @return string|null the guessed mime type
     */
    public function guess($path)
    {
        return self::$guesser->guess($path);
    }
}
| backbee/BackBee | Util/MimeType.php | PHP | gpl-3.0 | 1,733 |
YUI.add('moodle-core-blocks', function (Y, NAME) {
/**
* Provides drag and drop functionality for blocks.
*
* @module moodle-core-blockdraganddrop
*/
var AJAXURL = '/lib/ajax/blocks.php',
CSS = {
BLOCK : 'block',
BLOCKREGION : 'block-region',
BLOCKADMINBLOCK : 'block_adminblock',
EDITINGMOVE : 'editing_move',
HEADER : 'header',
LIGHTBOX : 'lightbox',
REGIONCONTENT : 'region-content',
SKIPBLOCK : 'skip-block',
SKIPBLOCKTO : 'skip-block-to',
MYINDEX : 'page-my-index',
REGIONMAIN : 'region-main'
};
/**
* Legacy drag and drop manager.
* This drag and drop manager is specifically designed for themes using side-pre and side-post
* that do not make use of the block output methods introduced by MDL-39824.
*
* @namespace M.core.blockdraganddrop
* @class LegacyManager
* @constructor
* @extends M.core.dragdrop
*/
var DRAGBLOCK = function() {
    // Delegate construction to M.core.dragdrop; behavior is defined in the
    // Y.extend() call below.
    DRAGBLOCK.superclass.constructor.apply(this, arguments);
};
Y.extend(DRAGBLOCK, M.core.dragdrop, {
    // Skip-link node found above the dragged block; restored on drop.
    skipnodetop : null,
    // Skip-link node found below the dragged block; restored on drop.
    skipnodebottom : null,
    // Block region the current drag started from; used to decide whether
    // empty regions must be shown/hidden during the drag.
    dragsourceregion : null,

    // Sets up droppable block regions (creating a missing side-pre/side-post
    // region if necessary) and makes each block draggable by its header.
    initializer : function() {
        // Set group for parent class
        this.groups = ['block'];
        this.samenodeclass = CSS.BLOCK;
        this.parentnodeclass = CSS.REGIONCONTENT;

        // Add relevant classes and ID to 'content' block region on My Home page.
        var myhomecontent = Y.Node.all('body#'+CSS.MYINDEX+' #'+CSS.REGIONMAIN+' > .'+CSS.REGIONCONTENT);
        if (myhomecontent.size() > 0) {
            var contentregion = myhomecontent.item(0);
            contentregion.addClass(CSS.BLOCKREGION);
            contentregion.set('id', CSS.REGIONCONTENT);
            contentregion.one('div').addClass(CSS.REGIONCONTENT);
        }

        // Initialise blocks dragging
        // Find all block regions on the page
        var blockregionlist = Y.Node.all('div.'+CSS.BLOCKREGION);

        if (blockregionlist.size() === 0) {
            return false;
        }

        // See if we are missing either of block regions,
        // if yes we need to add an empty one to use as target
        if (blockregionlist.size() !== this.get('regions').length) {
            var blockregion = Y.Node.create('<div></div>')
                .addClass(CSS.BLOCKREGION);
            var regioncontent = Y.Node.create('<div></div>')
                .addClass(CSS.REGIONCONTENT);
            blockregion.appendChild(regioncontent);
            var pre = blockregionlist.filter('#region-pre');
            var post = blockregionlist.filter('#region-post');

            if (pre.size() === 0 && post.size() === 1) {
                // pre block is missing, instert it before post
                blockregion.setAttrs({id : 'region-pre'});
                post.item(0).insert(blockregion, 'before');
                blockregionlist.unshift(blockregion);
            } else if (post.size() === 0 && pre.size() === 1) {
                // post block is missing, instert it after pre
                blockregion.setAttrs({id : 'region-post'});
                pre.item(0).insert(blockregion, 'after');
                blockregionlist.push(blockregion);
            }
        }

        blockregionlist.each(function(blockregionnode) {

            // Setting blockregion as droptarget (the case when it is empty)
            // The region-post (the right one)
            // is very narrow, so add extra padding on the left to drop block on it.
            new Y.DD.Drop({
                node: blockregionnode.one('div.'+CSS.REGIONCONTENT),
                groups: this.groups,
                padding: '40 240 40 240'
            });

            // Make each div element in the list of blocks draggable
            var del = new Y.DD.Delegate({
                container: blockregionnode,
                nodes: '.'+CSS.BLOCK,
                target: true,
                handles: ['.'+CSS.HEADER],
                invalid: '.block-hider-hide, .block-hider-show, .moveto',
                dragConfig: {groups: this.groups}
            });
            del.dd.plug(Y.Plugin.DDProxy, {
                // Don't move the node at the end of the drag
                moveOnEnd: false
            });
            del.dd.plug(Y.Plugin.DDWinScroll);

            var blocklist = blockregionnode.all('.'+CSS.BLOCK);
            blocklist.each(function(blocknode) {
                var move = blocknode.one('a.'+CSS.EDITINGMOVE);
                if (move) {
                    move.remove();
                    blocknode.one('.'+CSS.HEADER).setStyle('cursor', 'move');
                }
            }, this);
        }, this);
    },

    // Extracts the numeric block instance id from a block node's
    // "instNNN" id attribute.
    get_block_id : function(node) {
        return Number(node.get('id').replace(/inst/i, ''));
    },

    // Maps a block node to the region name sent to the server; standard
    // pre/post regions become side-pre/side-post (swapped in RTL mode).
    get_block_region : function(node) {
        var region = node.ancestor('div.'+CSS.BLOCKREGION).get('id').replace(/region-/i, '');
        if (Y.Array.indexOf(this.get('regions'), region) === -1) {
            // Must be standard side-X
            if (right_to_left()) {
                if (region === 'post') {
                    region = 'pre';
                } else if (region === 'pre') {
                    region = 'post';
                }
            }
            return 'side-' + region;
        }
        // Perhaps custom region
        return region;
    },

    // Strips the "region-" prefix from a region node's id attribute.
    get_region_id : function(node) {
        return node.get('id').replace(/region-/i, '');
    },

    // Remembers the source region and any adjacent skip links so they can
    // be restored when the block is dropped.
    drag_start : function(e) {
        // Get our drag object
        var drag = e.target;

        // Store the parent node of original drag node (block)
        // we will need it later for show/hide empty regions

        this.dragsourceregion = drag.get('node').ancestor('div.'+CSS.BLOCKREGION);

        // Determine skipnodes and store them
        if (drag.get('node').previous() && drag.get('node').previous().hasClass(CSS.SKIPBLOCK)) {
            this.skipnodetop = drag.get('node').previous();
        }
        if (drag.get('node').next() && drag.get('node').next().hasClass(CSS.SKIPBLOCKTO)) {
            this.skipnodebottom = drag.get('node').next();
        }
    },

    // Shows/hides the empty side regions while the block hovers over a
    // region other than the one it came from.
    drop_over : function(e) {
        // Get a reference to our drag and drop nodes
        var drag = e.drag.get('node');
        var drop = e.drop.get('node');

        // We need to fix the case when parent drop over event has determined
        // 'goingup' and appended the drag node after admin-block.
        if (drop.hasClass(this.parentnodeclass) && drop.one('.'+CSS.BLOCKADMINBLOCK) && drop.one('.'+CSS.BLOCKADMINBLOCK).next('.'+CSS.BLOCK)) {
            drop.prepend(drag);
        }

        // Block is moved within the same region
        // stop here, no need to modify anything.
        if (this.dragsourceregion.contains(drop)) {
            return false;
        }

        // TODO: Hiding-displaying block region only works for base theme blocks
        // (region-pre, region-post) at the moment. It should be improved
        // to work with custom block regions as well.

        // TODO: Fix this for the case when user drag block towards empty section,
        // then the section appears, then user chnages his mind and moving back to
        // original section. The opposite section remains opened and empty.

        var documentbody = Y.one('body');
        // Moving block towards hidden region-content, display it
        var regionname = this.get_region_id(this.dragsourceregion);
        if (documentbody.hasClass('side-'+regionname+'-only')) {
            documentbody.removeClass('side-'+regionname+'-only');
        }

        // Moving from empty region-content towards the opposite one,
        // hide empty one (only for region-pre, region-post areas at the moment).
        regionname = this.get_region_id(drop.ancestor('div.'+CSS.BLOCKREGION));
        if (this.dragsourceregion.all('.'+CSS.BLOCK).size() === 0 && this.dragsourceregion.get('id').match(/(region-pre|region-post)/i)) {
            if (!documentbody.hasClass('side-'+regionname+'-only')) {
                documentbody.addClass('side-'+regionname+'-only');
            }
        }
    },

    // Clears per-drag state once the drag operation finished.
    drop_end : function() {
        // clear variables
        this.skipnodetop = null;
        this.skipnodebottom = null;
        this.dragsourceregion = null;
    },

    // A miss is treated as a drop on the last ghost position.
    drag_dropmiss : function(e) {
        // Missed the target, but we assume the user intended to drop it
        // on the last last ghost node location, e.drag and e.drop should be
        // prepared by global_drag_dropmiss parent so simulate drop_hit(e).
        this.drop_hit(e);
    },

    // Finalizes the move: restores skip links around the block and posts
    // the new position to lib/ajax/blocks.php.
    drop_hit : function(e) {
        var drag = e.drag;
        // Get a reference to our drag node
        var dragnode = drag.get('node');
        var dropnode = e.drop.get('node');

        // Amend existing skipnodes
        if (dragnode.previous() && dragnode.previous().hasClass(CSS.SKIPBLOCK)) {
            // the one that belongs to block below move below
            dragnode.insert(dragnode.previous(), 'after');
        }
        // Move original skipnodes
        if (this.skipnodetop) {
            dragnode.insert(this.skipnodetop, 'before');
        }
        if (this.skipnodebottom) {
            dragnode.insert(this.skipnodebottom, 'after');
        }

        // Add lightbox if it not there
        var lightbox = M.util.add_lightbox(Y, dragnode);

        // Prepare request parameters
        var params = {
            sesskey : M.cfg.sesskey,
            courseid : this.get('courseid'),
            pagelayout : this.get('pagelayout'),
            pagetype : this.get('pagetype'),
            subpage : this.get('subpage'),
            contextid : this.get('contextid'),
            action : 'move',
            bui_moveid : this.get_block_id(dragnode),
            bui_newregion : this.get_block_region(dropnode)
        };

        if (this.get('cmid')) {
            params.cmid = this.get('cmid');
        }

        if (dragnode.next('.'+this.samenodeclass) && !dragnode.next('.'+this.samenodeclass).hasClass(CSS.BLOCKADMINBLOCK)) {
            params.bui_beforeid = this.get_block_id(dragnode.next('.'+this.samenodeclass));
        }

        // Do AJAX request
        Y.io(M.cfg.wwwroot+AJAXURL, {
            method: 'POST',
            data: params,
            on: {
                start : function() {
                    lightbox.show();
                },
                success: function(tid, response) {
                    window.setTimeout(function() {
                        lightbox.hide();
                    }, 250);
                    try {
                        var responsetext = Y.JSON.parse(response.responseText);
                        if (responsetext.error) {
                            new M.core.ajaxException(responsetext);
                        }
                    } catch (e) {}
                },
                failure: function(tid, response) {
                    this.ajax_failure(response);
                    lightbox.hide();
                }
            },
            context:this
        });
    }
}, {
    NAME : 'core-blocks-dragdrop',
    ATTRS : {
        courseid : {
            value : null
        },
        cmid : {
            value : null
        },
        contextid : {
            value : null
        },
        pagelayout : {
            value : null
        },
        pagetype : {
            value : null
        },
        subpage : {
            value : null
        },
        regions : {
            value : null
        }
    }
});
/**
* Core namespace.
* @static
* @class core
*/
M.core = M.core || {};

/**
 * Block drag and drop static class.
 * @namespace M.core
 * @class blockdraganddrop
 * @static
 */
M.core.blockdraganddrop = M.core.blockdraganddrop || {};

/**
 * True if the page is using the new blocks methods.
 * @private
 * @static
 * @property _isusingnewblocksmethod
 * @type Boolean
 * @default null
 */
M.core.blockdraganddrop._isusingnewblocksmethod = null;

/**
 * Returns true if the page is using the new blocks methods.
 * @static
 * @method is_using_blocks_render_method
 * @return Boolean
 */
M.core.blockdraganddrop.is_using_blocks_render_method = function() {
    if (this._isusingnewblocksmethod === null) {
        // New rendering (MDL-39824) tags every region with data-blockregion;
        // the page qualifies only if ALL regions carry the attribute.
        var goodregions = Y.all('.block-region[data-blockregion]').size();
        var allregions = Y.all('.block-region').size();
        this._isusingnewblocksmethod = (allregions === goodregions);
    }
    return this._isusingnewblocksmethod;
};

/**
 * Initialises a drag and drop manager.
 * This should only ever be called once for a page.
 * @static
 * @method init
 * @param {Object} params
 * @return Manager
 */
M.core.blockdraganddrop.init = function(params) {
    // Picks the modern MANAGER or the legacy DRAGBLOCK implementation
    // depending on how the theme renders its block regions.
    if (this.is_using_blocks_render_method()) {
        new MANAGER(params);
    } else {
        new DRAGBLOCK(params);
    }
};

/**
 * Legacy code to keep things working.
 */
M.core_blocks = M.core_blocks || {};
M.core_blocks.init_dragdrop = function(params) {
    M.core.blockdraganddrop.init(params);
};
/**
* This file contains the drag and drop manager class.
*
* Provides drag and drop functionality for blocks.
*
* @module moodle-core-blockdraganddrop
*/
/**
* Constructs a new Block drag and drop manager.
*
* @namespace M.core.blockdraganddrop
* @class Manager
* @constructor
* @extends M.core.dragdrop
*/
var MANAGER = function() {
    // Delegate construction to M.core.dragdrop; setup happens in initializer().
    MANAGER.superclass.constructor.apply(this, arguments);
};
MANAGER.prototype = {
/**
* The skip block link from above the block being dragged while a drag is in progress.
* Required by the M.core.dragdrop from whom this class extends.
* @private
* @property skipnodetop
* @type Node
* @default null
*/
skipnodetop : null,
/**
* The skip block link from below the block being dragged while a drag is in progress.
* Required by the M.core.dragdrop from whom this class extends.
* @private
* @property skipnodebottom
* @type Node
* @default null
*/
skipnodebottom : null,
/**
* An associative object of regions and the
* @property regionobjects
* @type {Object} Primitive object mocking an associative array.
* @type {BLOCKREGION} [regionname]* Each item uses the region name as the key with the value being
* an instance of the BLOCKREGION class.
*/
regionobjects : {},
/**
* Called during the initialisation process of the object.
* @method initializer
*/
    /**
     * Called during the initialisation process of the object.
     * Builds a BLOCKREGION wrapper for every configured region, registers
     * each as a drop target, and sets up drag delegation for its blocks.
     * @method initializer
     */
    initializer : function() {
        Y.log('Initialising drag and drop for blocks.', 'info');
        var regionnames = this.get('regions'),
            i = 0,
            region,
            regionname,
            droptarget,
            dragdelegation;

        // Evil required by M.core.dragdrop.
        this.groups = ['block'];
        this.samenodeclass = CSS.BLOCK;
        this.parentnodeclass = CSS.BLOCKREGION;

        // Add relevant classes and ID to 'content' block region on My Home page.
        var myhomecontent = Y.Node.all('body#'+CSS.MYINDEX+' #'+CSS.REGIONMAIN+' > .'+CSS.REGIONCONTENT);
        if (myhomecontent.size() > 0) {
            var contentregion = myhomecontent.item(0);
            contentregion.addClass(CSS.BLOCKREGION);
            contentregion.set('id', CSS.REGIONCONTENT);
            contentregion.one('div').addClass(CSS.REGIONCONTENT);
        }

        for (i in regionnames) {
            regionname = regionnames[i];
            region = new BLOCKREGION({
                manager : this,
                region : regionname,
                node : Y.one('#block-region-'+regionname)
            });
            this.regionobjects[regionname] = region;

            // Setting blockregion as droptarget (the case when it is empty)
            // The region-post (the right one)
            // is very narrow, so add extra padding on the left to drop block on it.
            droptarget = new Y.DD.Drop({
                node: region.get_droptarget(),
                groups: this.groups,
                padding: '40 240 40 240'
            });

            // Make each div element in the list of blocks draggable
            dragdelegation = new Y.DD.Delegate({
                container: region.get_droptarget(),
                nodes: '.'+CSS.BLOCK,
                target: true,
                handles: ['.'+CSS.HEADER],
                invalid: '.block-hider-hide, .block-hider-show, .moveto',
                dragConfig: {groups: this.groups}
            });
            dragdelegation.dd.plug(Y.Plugin.DDProxy, {
                // Don't move the node at the end of the drag
                moveOnEnd: false
            });
            dragdelegation.dd.plug(Y.Plugin.DDWinScroll);
            // On the mouse down event we will enable all block regions so that they can be dragged to.
            // This is VERY important as without it dnd won't work for empty block regions.
            dragdelegation.on('drag:mouseDown', this.enable_all_regions, this);

            region.remove_block_move_icons();
        }
        Y.log('Initialisation of drag and drop for blocks complete.', 'info');
    },
/**
* Returns the ID of the block the given node represents.
* @method get_block_id
* @param {Node} node
* @returns {int} The blocks ID in the database.
*/
get_block_id : function(node) {
return Number(node.get('id').replace(/inst/i, ''));
},
/**
* Returns the block region that the node is part of or belonging to.
* @method get_block_region
* @param {Y.Node} node
* @returns {string} The region name.
*/
get_block_region : function(node) {
if (!node.test('[data-blockregion]')) {
node = node.ancestor('[data-blockregion]');
}
return node.getData('blockregion');
},
/**
* Returns the BLOCKREGION instance that represents the block region the given node is part of.
* @method get_region_object
* @param {Y.Node} node
* @returns {BLOCKREGION}
*/
get_region_object : function(node) {
return this.regionobjects[this.get_block_region(node)];
},
/**
* Enables all fo the regions so that they are all visible while dragging is occuring.
* @method enable_all_regions
* @returns {undefined}
*/
enable_all_regions : function() {
var i = 0;
for (i in this.regionobjects) {
this.regionobjects[i].enable();
}
},
/**
* Disables enabled regions if they contain no blocks.
* @method disable_regions_if_required
* @returns {undefined}
*/
disable_regions_if_required : function() {
var i = 0;
for (i in this.regionobjects) {
this.regionobjects[i].disable_if_required();
}
},
/**
* Called by M.core.dragdrop.global_drag_start when dragging starts.
* @method drag_start
* @param {Event} e
* @returns {undefined}
*/
drag_start : function(e) {
// Get our drag object
var drag = e.target;
// Store the parent node of original drag node (block)
// we will need it later for show/hide empty regions
// Determine skipnodes and store them
if (drag.get('node').previous() && drag.get('node').previous().hasClass(CSS.SKIPBLOCK)) {
this.skipnodetop = drag.get('node').previous();
}
if (drag.get('node').next() && drag.get('node').next().hasClass(CSS.SKIPBLOCKTO)) {
this.skipnodebottom = drag.get('node').next();
}
},
/**
* Called by M.core.dragdrop.global_drop_over when something is dragged over a drop target.
* @method drop_over
* @param {Event} e
* @returns {undefined}
*/
drop_over : function(e) {
// Get a reference to our drag and drop nodes
var drag = e.drag.get('node');
var drop = e.drop.get('node');
// We need to fix the case when parent drop over event has determined
// 'goingup' and appended the drag node after admin-block.
if (drop.hasClass(CSS.REGIONCONTENT) && drop.one('.'+CSS.BLOCKADMINBLOCK) && drop.one('.'+CSS.BLOCKADMINBLOCK).next('.'+CSS.BLOCK)) {
drop.prepend(drag);
}
},
/**
* Called by M.core.dragdrop.global_drop_end when a drop has been completed.
* @method drop_end
* @returns {undefined}
*/
drop_end : function() {
// Clear variables.
this.skipnodetop = null;
this.skipnodebottom = null;
this.disable_regions_if_required();
},
    /**
     * Called by M.core.dragdrop.global_drag_dropmiss when something has been dropped on a node that isn't contained by a drop target.
     * @method drag_dropmiss
     * @param {Event} e
     * @returns {undefined}
     */
    drag_dropmiss : function(e) {
        // Missed the target, but we assume the user intended to drop it
        // on the last ghost node location, e.drag and e.drop should be
        // prepared by global_drag_dropmiss parent so simulate drop_hit(e)
        // by calling it directly with the same event.
        this.drop_hit(e);
    },
    /**
     * Called by M.core.dragdrop.global_drag_hit when something has been dropped on a drop target.
     * Re-attaches the accessibility skip links around the dropped block, then
     * persists the move to the server via an AJAX request.
     * @method drop_hit
     * @param {Event} e
     * @returns {undefined}
     */
    drop_hit : function(e) {
        // Get a reference to our drag node
        var dragnode = e.drag.get('node');
        var dropnode = e.drop.get('node');
        // Amend existing skipnodes
        if (dragnode.previous() && dragnode.previous().hasClass(CSS.SKIPBLOCK)) {
            // the one that belongs to block below move below
            dragnode.insert(dragnode.previous(), 'after');
        }
        // Move original skipnodes
        if (this.skipnodetop) {
            dragnode.insert(this.skipnodetop, 'before');
        }
        if (this.skipnodebottom) {
            dragnode.insert(this.skipnodebottom, 'after');
        }
        // Add lightbox if it not there
        var lightbox = M.util.add_lightbox(Y, dragnode);
        // Prepare request parameters
        var params = {
            sesskey : M.cfg.sesskey,
            courseid : this.get('courseid'),
            pagelayout : this.get('pagelayout'),
            pagetype : this.get('pagetype'),
            subpage : this.get('subpage'),
            contextid : this.get('contextid'),
            action : 'move',
            bui_moveid : this.get_block_id(dragnode),
            bui_newregion : this.get_block_region(dropnode)
        };
        if (this.get('cmid')) {
            params.cmid = this.get('cmid');
        }
        // If the block now has a following block, send its id so the server
        // can position the moved block before it (admin block excluded).
        if (dragnode.next('.'+CSS.BLOCK) && !dragnode.next('.'+CSS.BLOCK).hasClass(CSS.BLOCKADMINBLOCK)) {
            params.bui_beforeid = this.get_block_id(dragnode.next('.'+CSS.BLOCK));
        }
        // Do AJAX request
        Y.io(M.cfg.wwwroot+AJAXURL, {
            method: 'POST',
            data: params,
            on: {
                start : function() {
                    lightbox.show();
                },
                success: function(tid, response) {
                    window.setTimeout(function() {
                        lightbox.hide();
                    }, 250);
                    try {
                        var responsetext = Y.JSON.parse(response.responseText);
                        if (responsetext.error) {
                            // NOTE(review): the constructed exception object is not
                            // retained — presumably its constructor displays the error
                            // dialog as a side effect. TODO confirm.
                            new M.core.ajaxException(responsetext);
                        }
                    } catch (e) {} // Non-JSON responses are deliberately ignored.
                },
                failure: function(tid, response) {
                    this.ajax_failure(response);
                    lightbox.hide();
                },
                complete : function() {
                    this.disable_regions_if_required();
                }
            },
            context:this
        });
    }
};
// Mix the manager into M.core.dragdrop and declare the page-context
// attributes that are posted back to the server when a block is moved.
Y.extend(MANAGER, M.core.dragdrop, MANAGER.prototype, {
    NAME : 'core-blocks-dragdrop-manager',
    ATTRS : {
        /**
         * The Course ID if there is one.
         * @attribute courseid
         * @type int|null
         * @default null
         */
        courseid : {
            value : null
        },
        /**
         * The Course Module ID if there is one.
         * @attribute cmid
         * @type int|null
         * @default null
         */
        cmid : {
            value : null
        },
        /**
         * The Context ID.
         * @attribute contextid
         * @type int|null
         * @default null
         */
        contextid : {
            value : null
        },
        /**
         * The current page layout.
         * @attribute pagelayout
         * @type string|null
         * @default null
         */
        pagelayout : {
            value : null
        },
        /**
         * The page type string, should be used as the id for the body tag in the theme.
         * @attribute pagetype
         * @type string|null
         * @default null
         */
        pagetype : {
            value : null
        },
        /**
         * The subpage identifier, if any.
         * @attribute subpage
         * @type string|null
         * @default null
         */
        subpage : {
            value : null
        },
        /**
         * An array of block regions that are present on the page.
         * @attribute regions
         * @type array|null
         * @default []
         */
        regions : {
            value : []
        }
    }
});
/**
 * This file contains the Block Region class used by the drag and drop manager.
 *
 * Provides drag and drop functionality for blocks.
 *
 * @module moodle-core-blockdraganddrop
 */
/**
 * Constructs a new block region object.
 *
 * Construction is delegated to Y.Base; per-instance setup happens in the
 * initializer method on the prototype.
 *
 * @namespace M.core.blockdraganddrop
 * @class BlockRegion
 * @constructor
 * @extends Y.Base
 */
var BLOCKREGION = function() {
    BLOCKREGION.superclass.constructor.apply(this, arguments);
};
BLOCKREGION.prototype = {
    /**
     * Called during the initialisation process of the object.
     * Creates the region node if it is missing from the page, then records
     * whether the region currently has blocks and sets the matching
     * has-/used-/empty-region classes on the body element.
     * @method initializer
     */
    initializer : function() {
        var node = this.get('node');
        Y.log('Block region `'+this.get('region')+'` initialising', 'info');
        if (!node) {
            Y.log('block region known about but no HTML structure found for it. Guessing structure.', 'warn');
            node = this.create_and_add_node();
        }
        var body = Y.one('body'),
            hasblocks = node.all('.'+CSS.BLOCK).size() > 0,
            hasregionclass = this.get_has_region_class();
        this.set('hasblocks', hasblocks);
        if (!body.hasClass(hasregionclass)) {
            body.addClass(hasregionclass);
        }
        // Exactly one of used-region-*/empty-region-* is kept on the body.
        body.addClass((hasblocks) ? this.get_used_region_class() : this.get_empty_region_class());
        body.removeClass((hasblocks) ? this.get_empty_region_class() : this.get_used_region_class());
    },
    /**
     * Creates a generic block region node and adds it to the DOM at the best guess location.
     * Any calling of this method is an unfortunate circumstance.
     * The new node is placed relative to the pre/post (or left/right) regions
     * when both exist; otherwise it is appended to the body.
     * @method create_and_add_node
     * @return Node The newly created Node
     */
    create_and_add_node : function() {
        var c = Y.Node.create,
            region = this.get('region'),
            node = c('<div id="block-region-'+region+'" data-droptarget="1"></div>')
                .addClass(CSS.BLOCKREGION)
                .setData('blockregion', region),
            regions = this.get('manager').get('regions'),
            i,
            haspre = false,
            haspost = false,
            added = false,
            pre,
            post;
        // Identify the pre/left and post/right region names by substring match.
        for (i in regions) {
            if (regions[i].match(/(pre|left)/)) {
                haspre = regions[i];
            } else if (regions[i].match(/(post|right)/)) {
                haspost = regions[i];
            }
        }
        if (haspre !== false && haspost !== false) {
            if (region === haspre) {
                // Pre region: insert immediately before the post region.
                post = Y.one('#block-region-'+haspost);
                if (post) {
                    post.insert(node, 'before');
                    added = true;
                }
            } else {
                // Post region: insert immediately after the pre region.
                pre = Y.one('#block-region-'+haspre);
                if (pre) {
                    pre.insert(node, 'after');
                    added = true;
                }
            }
        }
        if (added === false) {
            // Fallback: no sibling region found to anchor against.
            Y.one('body').append(node);
        }
        this.set('node', node);
        return node;
    },
/**
* Removes the move icons and changes the cursor to a move icon when over the header.
* @method remove_block_move_icons
*/
remove_block_move_icons : function() {
this.get('node').all('.'+CSS.BLOCK+' a.'+CSS.EDITINGMOVE).each(function(moveicon){
moveicon.ancestor('.'+CSS.BLOCK).one('.'+CSS.HEADER).setStyle('cursor', 'move');
moveicon.remove();
});
},
/**
* Returns the class name on the body that signifies the document knows about this region.
* @method get_has_region_class
* @return String
*/
get_has_region_class : function() {
return 'has-region-'+this.get('region');
},
/**
* Returns the class name to use on the body if the region contains no blocks.
* @method get_empty_region_class
* @return String
*/
get_empty_region_class : function() {
return 'empty-region-'+this.get('region');
},
/**
* Returns the class name to use on the body if the region contains blocks.
* @method get_used_region_class
* @return String
*/
get_used_region_class : function() {
return 'used-region-'+this.get('region');
},
/**
* Returns the node to use as the drop target for this region.
* @method get_droptarget
* @return Node
*/
get_droptarget : function() {
var node = this.get('node');
if (node.test('[data-droptarget="1"]')) {
return node;
}
return node.one('[data-droptarget="1"]');
},
/**
* Enables the block region so that we can be sure the user can see it.
* This is done even if it is empty.
* @method enable
*/
enable : function() {
Y.one('body').addClass(this.get_used_region_class()).removeClass(this.get_empty_region_class());
},
/**
* Disables the region if it contains no blocks, essentially hiding it from the user.
* @method disable_if_required
*/
disable_if_required : function() {
if (this.get('node').all('.'+CSS.BLOCK).size() === 0) {
Y.one('body').addClass(this.get_empty_region_class()).removeClass(this.get_used_region_class());
}
}
};
// Mix the block region behaviour into Y.Base and declare its attributes.
Y.extend(BLOCKREGION, Y.Base, BLOCKREGION.prototype, {
    NAME : 'core-blocks-dragdrop-blockregion',
    ATTRS : {
        /**
         * The drag and drop manager that created this block region instance.
         * @attribute manager
         * @type M.core.blockdraganddrop.Manager
         * @writeOnce
         */
        manager : {
            // Can only be set during initialisation and must be set then.
            writeOnce : 'initOnly',
            validator : function (value) {
                return Y.Lang.isObject(value) && value instanceof MANAGER;
            }
        },
        /**
         * The name of the block region this object represents.
         * @attribute region
         * @type String
         * @writeOnce
         */
        region : {
            // Can only be set during initialisation and must be set then.
            writeOnce : 'initOnly',
            validator : function (value) {
                return Y.Lang.isString(value);
            }
        },
        /**
         * The node the block region HTML starts at.
         * @attribute node
         * @type Y.Node
         */
        node : {
            validator : function (value) {
                return Y.Lang.isObject(value) || Y.Lang.isNull(value);
            }
        },
        /**
         * True if the block region currently contains blocks.
         * @attribute hasblocks
         * @type Boolean
         * @default false
         */
        hasblocks : {
            value : false,
            validator : function (value) {
                return Y.Lang.isBoolean(value);
            }
        }
    }
});
}, '@VERSION@', {
"requires": [
"base",
"node",
"io",
"dom",
"dd",
"dd-scroll",
"moodle-core-dragdrop",
"moodle-core-notification"
]
});
| hdomos/moodlehd | lib/yui/build/moodle-core-blocks/moodle-core-blocks-debug.js | JavaScript | gpl-3.0 | 32,521 |
#!/usr/bin/env python
"""
The LibVMI Library is an introspection library that simplifies access to
memory in a target virtual machine or in a file containing a dump of
a system's physical memory. LibVMI is based on the XenAccess Library.
Copyright 2011 Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
retains certain rights in this software.
Author: Bryan D. Payne (bdpayne@acm.org)
This file is part of LibVMI.
LibVMI is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
LibVMI is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with LibVMI. If not, see <http://www.gnu.org/licenses/>.
"""
import pyvmi
import sys
def get_processes(vmi):
    """Yield (pid, process name) tuples for each process in the target VM.

    Walks the kernel's process task list starting from the
    PsInitialSystemProcess symbol, using the win_tasks/win_pname/win_pid
    offsets provided by the LibVMI configuration.
    Note: Python 2 code (matches the print statement used in main()).
    """
    tasks_offset = vmi.get_offset("win_tasks")
    # win_pname/win_pid are offsets from the start of the process structure;
    # rebase them so they can be applied directly to a task-list pointer.
    name_offset = vmi.get_offset("win_pname") - tasks_offset
    pid_offset = vmi.get_offset("win_pid") - tasks_offset
    list_head = vmi.read_addr_ksym("PsInitialSystemProcess")
    next_process = vmi.read_addr_va(list_head + tasks_offset, 0)
    # Remember the first list entry so we can detect when the circular
    # list has been fully traversed.
    list_head = next_process
    while True:
        procname = vmi.read_str_va(next_process + name_offset, 0)
        pid = vmi.read_32_va(next_process + pid_offset, 0)
        next_process = vmi.read_addr_va(next_process, 0)
        # Filter out entries whose pid does not fit in 16 bits — presumably
        # non-process list entries; TODO confirm against LibVMI examples.
        if (pid < 1<<16):
            yield pid, procname
        if (list_head == next_process):
            break
def main(argv):
    # argv[1] is the name/identifier of the VM (or memory image) to inspect.
    vmi = pyvmi.init(argv[1], "complete")
    # Print one "[  pid] name" line per process (Python 2 print statement).
    for pid, procname in get_processes(vmi):
        print "[%5d] %s" % (pid, procname)
if __name__ == "__main__":
    main(sys.argv)
| jie-lin/libvmi | tools/pyvmi/examples/process-list.py | Python | gpl-3.0 | 1,982 |
// ReSharper disable All
using System;
using System.Collections.Generic;
using System.Dynamic;
using PetaPoco;
using MixERP.Net.Entities.Core;
namespace MixERP.Net.Schemas.Core.Data
{
    public interface IGetStateIdByStateNameRepository
    {
        /// <summary>
        /// The first argument (the state name) passed to the underlying
        /// database routine — verify against the generated implementation.
        /// </summary>
        string PgArg0 { get; set; }
        /// <summary>
        /// Prepares and executes IGetStateIdByStateNameRepository.
        /// </summary>
        /// <returns>The state id resolved from the supplied state name.</returns>
        int Execute();
    }
} | mixerp/mixerp | src/Libraries/DAL/Core/IGetStateIdByStateNameRepository.cs | C# | gpl-3.0 | 425 |
// ReSharper disable All
using System.Collections.Generic;
using System.Net;
using System.Net.Http;
using System.Web.Http;
using MixERP.Net.Api.Framework;
using MixERP.Net.ApplicationState.Cache;
using MixERP.Net.Common.Extensions;
using MixERP.Net.EntityParser;
using MixERP.Net.Framework;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using PetaPoco;
using MixERP.Net.Schemas.Core.Data;
namespace MixERP.Net.Api.Core
{
/// <summary>
/// Provides a direct HTTP access to perform various tasks such as adding, editing, and removing Item Selling Price Scrud Views.
/// </summary>
[RoutePrefix("api/v1.5/core/item-selling-price-scrud-view")]
public class ItemSellingPriceScrudViewController : ApiController
{
/// <summary>
/// The ItemSellingPriceScrudView repository.
/// </summary>
private readonly IItemSellingPriceScrudViewRepository ItemSellingPriceScrudViewRepository;
public ItemSellingPriceScrudViewController()
{
this._LoginId = AppUsers.GetCurrent().View.LoginId.ToLong();
this._UserId = AppUsers.GetCurrent().View.UserId.ToInt();
this._OfficeId = AppUsers.GetCurrent().View.OfficeId.ToInt();
this._Catalog = AppUsers.GetCurrentUserDB();
this.ItemSellingPriceScrudViewRepository = new MixERP.Net.Schemas.Core.Data.ItemSellingPriceScrudView
{
_Catalog = this._Catalog,
_LoginId = this._LoginId,
_UserId = this._UserId
};
}
public ItemSellingPriceScrudViewController(IItemSellingPriceScrudViewRepository repository, string catalog, LoginView view)
{
this._LoginId = view.LoginId.ToLong();
this._UserId = view.UserId.ToInt();
this._OfficeId = view.OfficeId.ToInt();
this._Catalog = catalog;
this.ItemSellingPriceScrudViewRepository = repository;
}
public long _LoginId { get; }
public int _UserId { get; private set; }
public int _OfficeId { get; private set; }
public string _Catalog { get; }
/// <summary>
/// Counts the number of item selling price scrud views.
/// </summary>
/// <returns>Returns the count of the item selling price scrud views.</returns>
[AcceptVerbs("GET", "HEAD")]
[Route("count")]
[Route("~/api/core/item-selling-price-scrud-view/count")]
public long Count()
{
try
{
return this.ItemSellingPriceScrudViewRepository.Count();
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Returns collection of item selling price scrud view for export.
/// </summary>
/// <returns></returns>
[AcceptVerbs("GET", "HEAD")]
[Route("export")]
[Route("all")]
[Route("~/api/core/item-selling-price-scrud-view/export")]
[Route("~/api/core/item-selling-price-scrud-view/all")]
public IEnumerable<MixERP.Net.Entities.Core.ItemSellingPriceScrudView> Get()
{
try
{
return this.ItemSellingPriceScrudViewRepository.Get();
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Creates a paginated collection containing 10 item selling price scrud views on each page, sorted by the property .
/// </summary>
/// <returns>Returns the first page from the collection.</returns>
[AcceptVerbs("GET", "HEAD")]
[Route("")]
[Route("~/api/core/item-selling-price-scrud-view")]
public IEnumerable<MixERP.Net.Entities.Core.ItemSellingPriceScrudView> GetPaginatedResult()
{
try
{
return this.ItemSellingPriceScrudViewRepository.GetPaginatedResult();
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Creates a paginated collection containing 10 item selling price scrud views on each page, sorted by the property .
/// </summary>
/// <param name="pageNumber">Enter the page number to produce the resultset.</param>
/// <returns>Returns the requested page from the collection.</returns>
[AcceptVerbs("GET", "HEAD")]
[Route("page/{pageNumber}")]
[Route("~/api/core/item-selling-price-scrud-view/page/{pageNumber}")]
public IEnumerable<MixERP.Net.Entities.Core.ItemSellingPriceScrudView> GetPaginatedResult(long pageNumber)
{
try
{
return this.ItemSellingPriceScrudViewRepository.GetPaginatedResult(pageNumber);
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Displayfield is a lightweight key/value collection of item selling price scrud views.
/// </summary>
/// <returns>Returns an enumerable key/value collection of item selling price scrud views.</returns>
[AcceptVerbs("GET", "HEAD")]
[Route("display-fields")]
[Route("~/api/core/item-selling-price-scrud-view/display-fields")]
public IEnumerable<DisplayField> GetDisplayFields()
{
try
{
return this.ItemSellingPriceScrudViewRepository.GetDisplayFields();
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Counts the number of item selling price scrud views using the supplied filter(s).
/// </summary>
/// <param name="filters">The list of filter conditions.</param>
/// <returns>Returns the count of filtered item selling price scrud views.</returns>
[AcceptVerbs("POST")]
[Route("count-where")]
[Route("~/api/core/item-selling-price-scrud-view/count-where")]
public long CountWhere([FromBody]JArray filters)
{
try
{
List<EntityParser.Filter> f = filters.ToObject<List<EntityParser.Filter>>(JsonHelper.GetJsonSerializer());
return this.ItemSellingPriceScrudViewRepository.CountWhere(f);
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Creates a filtered and paginated collection containing 10 item selling price scrud views on each page, sorted by the property .
/// </summary>
/// <param name="pageNumber">Enter the page number to produce the resultset.</param>
/// <param name="filters">The list of filter conditions.</param>
/// <returns>Returns the requested page from the collection using the supplied filters.</returns>
[AcceptVerbs("POST")]
[Route("get-where/{pageNumber}")]
[Route("~/api/core/item-selling-price-scrud-view/get-where/{pageNumber}")]
public IEnumerable<MixERP.Net.Entities.Core.ItemSellingPriceScrudView> GetWhere(long pageNumber, [FromBody]JArray filters)
{
try
{
List<EntityParser.Filter> f = filters.ToObject<List<EntityParser.Filter>>(JsonHelper.GetJsonSerializer());
return this.ItemSellingPriceScrudViewRepository.GetWhere(pageNumber, f);
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Counts the number of item selling price scrud views using the supplied filter name.
/// </summary>
/// <param name="filterName">The named filter.</param>
/// <returns>Returns the count of filtered item selling price scrud views.</returns>
[AcceptVerbs("GET", "HEAD")]
[Route("count-filtered/{filterName}")]
[Route("~/api/core/item-selling-price-scrud-view/count-filtered/{filterName}")]
public long CountFiltered(string filterName)
{
try
{
return this.ItemSellingPriceScrudViewRepository.CountFiltered(filterName);
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
/// <summary>
/// Creates a filtered and paginated collection containing 10 item selling price scrud views on each page, sorted by the property .
/// </summary>
/// <param name="pageNumber">Enter the page number to produce the resultset.</param>
/// <param name="filterName">The named filter.</param>
/// <returns>Returns the requested page from the collection using the supplied filters.</returns>
[AcceptVerbs("GET", "HEAD")]
[Route("get-filtered/{pageNumber}/{filterName}")]
[Route("~/api/core/item-selling-price-scrud-view/get-filtered/{pageNumber}/{filterName}")]
public IEnumerable<MixERP.Net.Entities.Core.ItemSellingPriceScrudView> GetFiltered(long pageNumber, string filterName)
{
try
{
return this.ItemSellingPriceScrudViewRepository.GetFiltered(pageNumber, filterName);
}
catch (UnauthorizedException)
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.Forbidden));
}
catch (MixERPException ex)
{
throw new HttpResponseException(new HttpResponseMessage
{
Content = new StringContent(ex.Message),
StatusCode = HttpStatusCode.InternalServerError
});
}
catch
{
throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.InternalServerError));
}
}
}
} | mixerp/mixerp | src/Libraries/Web API/Core/ItemSellingPriceScrudViewController.cs | C# | gpl-3.0 | 14,350 |
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import urlresolvers
from django.views.decorators import csrf
from django.conf.urls import patterns
def _patch_pattern(regex_pattern):
    """
    Patch pattern callback using csrf_exempt. Enforce
    RegexURLPattern callback to get resolved if required.
    """
    exempt_callback = csrf.csrf_exempt(regex_pattern.callback)
    regex_pattern._callback = exempt_callback
def _patch_resolver(r):
    """
    Patch all patterns found in resolver with _patch_pattern.

    Resolvers are walked recursively; anything that is not a resolver is
    assumed to be a pattern and patched directly.
    """
    if hasattr(r, 'url_patterns'):
        entries = r.url_patterns
    else:
        # A first-level view appearing directly in the patterns list.
        entries = [r]
    for entry in entries:
        if isinstance(entry, urlresolvers.RegexURLResolver):
            _patch_resolver(entry)
            continue
        # Not a resolver: treat it as a pattern and let it break otherwise.
        _patch_pattern(entry)
def api_patterns(*args, **kwargs):
    """
    Protect all url patterns from csrf attacks.
    """
    protected = patterns(*args, **kwargs)
    for entry in protected:
        _patch_resolver(entry)
    return protected
| allmende/synnefo | snf-django-lib/snf_django/lib/api/urls.py | Python | gpl-3.0 | 1,762 |
#region License
// Copyright (c) 2013, ClearCanvas Inc.
// All rights reserved.
// http://www.clearcanvas.ca
//
// This file is part of the ClearCanvas RIS/PACS open source project.
//
// The ClearCanvas RIS/PACS open source project is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// The ClearCanvas RIS/PACS open source project is distributed in the hope that it
// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
// Public License for more details.
//
// You should have received a copy of the GNU General Public License along with
// the ClearCanvas RIS/PACS open source project. If not, see
// <http://www.gnu.org/licenses/>.
#endregion
using System;
using System.Windows.Forms;
using ClearCanvas.Desktop.View.WinForms;
using ClearCanvas.Ris.Client.View.WinForms;
namespace ClearCanvas.Ris.Client.Workflow.View.WinForms
{
/// <summary>
/// Provides a Windows Forms user-interface for <see cref="CheckInOrderComponent"/>
/// </summary>
public partial class MppsDocumentationComponentControl : ApplicationComponentUserControl
{
private readonly MppsDocumentationComponent _component;
private CannedTextSupport _cannedTextSupport;
/// <summary>
/// Constructor
/// </summary>
public MppsDocumentationComponentControl(MppsDocumentationComponent component)
: base(component)
{
InitializeComponent();
_component = component;
_label.DataBindings.Add("Text", _component, "CommentsLabel", true, DataSourceUpdateMode.OnPropertyChanged);
_comments.DataBindings.Add("Text", _component, "Comments", true, DataSourceUpdateMode.OnPropertyChanged);
_comments.DataBindings.Add("Enabled", _component, "CommentsEnabled", true, DataSourceUpdateMode.OnPropertyChanged);
_cannedTextSupport = new CannedTextSupport(_comments, _component.CannedTextLookupHandler);
}
}
}
| chinapacs/ImageViewer | Ris/Client/Workflow/View/WinForms/MppsDocumentationComponentControl.cs | C# | gpl-3.0 | 2,158 |
package org.jboss.as.test.integration.jaxrs.decorator;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
/**
 * JAX-RS resource interface for the "decorator" endpoint; responses are
 * produced as text/plain.
 *
 * @author Stuart Douglas
 */
@Path("decorator")
@Produces({"text/plain"})
public interface ResourceInterface {

    /**
     * Returns the plain-text message served at GET /decorator.
     *
     * @return the message body
     */
    @GET
    String getMessage();
}
| xasx/wildfly | testsuite/integration/basic/src/test/java/org/jboss/as/test/integration/jaxrs/decorator/ResourceInterface.java | Java | lgpl-2.1 | 289 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
namespace Microsoft.Azure.KeyVault.Models
{
using Azure;
using KeyVault;
using Newtonsoft.Json;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
/// <summary>
/// A Storage account bundle consists of key vault storage account details
/// plus its attributes.
/// </summary>
public partial class StorageBundle
{
        /// <summary>
        /// Initializes a new instance of the StorageBundle class.
        /// All properties remain at their defaults; they have protected
        /// setters and are normally populated by JSON deserialization.
        /// </summary>
        public StorageBundle() { }
        /// <summary>
        /// Initializes a new instance of the StorageBundle class.
        /// </summary>
        /// <param name="id">The storage account id.</param>
        /// <param name="resourceId">The storage account resource id.</param>
        /// <param name="activeKeyName">The current active storage account key
        /// name.</param>
        /// <param name="autoRegenerateKey">whether keyvault should manage the
        /// storage account for the user.</param>
        /// <param name="regenerationPeriod">The key regeneration time duration
        /// specified in ISO-8601 format.</param>
        /// <param name="attributes">The storage account attributes.</param>
        /// <param name="tags">Application specific metadata in the form of
        /// key-value pairs</param>
        public StorageBundle(string id = default(string), string resourceId = default(string), string activeKeyName = default(string), bool? autoRegenerateKey = default(bool?), string regenerationPeriod = default(string), StorageAccountAttributes attributes = default(StorageAccountAttributes), IDictionary<string, string> tags = default(IDictionary<string, string>))
        {
            Id = id;
            ResourceId = resourceId;
            ActiveKeyName = activeKeyName;
            AutoRegenerateKey = autoRegenerateKey;
            RegenerationPeriod = regenerationPeriod;
            Attributes = attributes;
            Tags = tags;
        }
/// <summary>
/// Gets the storage account id.
/// </summary>
[JsonProperty(PropertyName = "id")]
public string Id { get; protected set; }
/// <summary>
/// Gets the storage account resource id.
/// </summary>
[JsonProperty(PropertyName = "resourceId")]
public string ResourceId { get; protected set; }
/// <summary>
/// Gets the current active storage account key name.
/// </summary>
[JsonProperty(PropertyName = "activeKeyName")]
public string ActiveKeyName { get; protected set; }
/// <summary>
/// Gets whether keyvault should manage the storage account for the
/// user.
/// </summary>
[JsonProperty(PropertyName = "autoRegenerateKey")]
public bool? AutoRegenerateKey { get; protected set; }
/// <summary>
/// Gets the key regeneration time duration specified in ISO-8601
/// format.
/// </summary>
[JsonProperty(PropertyName = "regenerationPeriod")]
public string RegenerationPeriod { get; protected set; }
/// <summary>
/// Gets the storage account attributes.
/// </summary>
[JsonProperty(PropertyName = "attributes")]
public StorageAccountAttributes Attributes { get; protected set; }
/// <summary>
/// Gets application specific metadata in the form of key-value pairs
/// </summary>
[JsonProperty(PropertyName = "tags")]
public IDictionary<string, string> Tags { get; protected set; }
}
}
| SiddharthChatrolaMs/azure-sdk-for-net | src/SDKs/KeyVault/dataPlane/Microsoft.Azure.KeyVault/Generated/Models/StorageBundle.cs | C# | apache-2.0 | 3,925 |
/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the ec2-2014-10-01.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.EC2.Model
{
    /// <summary>
    /// Describes a static route for a VPN connection.
    /// </summary>
    /// <remarks>
    /// SDK model class. The internal IsSet* helpers report whether a field was
    /// assigned — presumably consumed by the SDK's request/response marshallers
    /// (TODO confirm against the generated marshaller code).
    /// </remarks>
    public partial class VpnStaticRoute
    {
        private string _destinationCidrBlock;
        private VpnStaticRouteSource _source;
        private VpnState _state;
        /// <summary>
        /// Gets and sets the property DestinationCidrBlock.
        /// <para>
        /// The CIDR block associated with the local subnet of the customer data center.
        /// </para>
        /// </summary>
        public string DestinationCidrBlock
        {
            get { return this._destinationCidrBlock; }
            set { this._destinationCidrBlock = value; }
        }
        // Check to see if DestinationCidrBlock property is set
        internal bool IsSetDestinationCidrBlock()
        {
            return this._destinationCidrBlock != null;
        }
        /// <summary>
        /// Gets and sets the property Source.
        /// <para>
        /// Indicates how the routes were provided.
        /// </para>
        /// </summary>
        public VpnStaticRouteSource Source
        {
            get { return this._source; }
            set { this._source = value; }
        }
        // Check to see if Source property is set
        internal bool IsSetSource()
        {
            return this._source != null;
        }
        /// <summary>
        /// Gets and sets the property State.
        /// <para>
        /// The current state of the static route.
        /// </para>
        /// </summary>
        public VpnState State
        {
            get { return this._state; }
            set { this._state = value; }
        }
        // Check to see if State property is set
        internal bool IsSetState()
        {
            return this._state != null;
        }
    }
} | ykbarros/aws-sdk-xamarin | AWS.XamarinSDK/AWSSDK_iOS/Amazon.EC2/Model/VpnStaticRoute.cs | C# | apache-2.0 | 2,707 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package gls.ch06.s05.testClasses;
import groovy.lang.GroovyObjectSupport;
/**
 * Fixture for the GLS ch06/s05 name-resolution tests: the name "x" exists
 * simultaneously as a public field ("field"), as a JavaBean property backed
 * by {@link #p1} ("property"), and as a method x() ("method"), so tests can
 * observe which of the three Groovy resolves in a given context.
 */
public class Tt1go extends GroovyObjectSupport {
    /** Plain public field named x. */
    public String x = "field";
    /** Property getter: deliberately reads the backing field p1, not x. */
    public String getX() {
        return this.p1;
    }
    /** Property setter: deliberately writes the backing field p1, not x. */
    public void setX(final String x) {
        this.p1 = x;
    }
    /** Method with the same name as the field/property. */
    public String x() {
        return "method";
    }
    /** Backing storage for the x property accessors. */
    public String p1 = "property";
}
| paulk-asert/groovy | src/test/gls/ch06/s05/testClasses/Tt1go.java | Java | apache-2.0 | 1,197 |
package v1_test
import (
"reflect"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
current "github.com/openshift/origin/pkg/deploy/api/v1"
)
// roundTrip encodes obj with the API codec, decodes the bytes back into an
// object, and converts that object into a freshly allocated value of obj's
// concrete type. Any failure is reported on t and nil is returned.
func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
	encoded, err := kapi.Codec.Encode(obj)
	if err != nil {
		t.Errorf("%v\n %#v", err, obj)
		return nil
	}
	decoded, err := kapi.Codec.Decode(encoded)
	if err != nil {
		t.Errorf("%v\nData: %s\nSource: %#v", err, string(encoded), obj)
		return nil
	}
	restored := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
	if err = kapi.Scheme.Convert(decoded, restored); err != nil {
		t.Errorf("%v\nSource: %#v", err, decoded)
		return nil
	}
	return restored
}
func TestDefaults_rollingParams(t *testing.T) {
c := ¤t.DeploymentConfig{}
o := roundTrip(t, runtime.Object(c))
config := o.(*current.DeploymentConfig)
strat := config.Spec.Strategy
if e, a := current.DeploymentStrategyTypeRolling, strat.Type; e != a {
t.Errorf("expected strategy type %s, got %s", e, a)
}
if e, a := deployapi.DefaultRollingUpdatePeriodSeconds, *strat.RollingParams.UpdatePeriodSeconds; e != a {
t.Errorf("expected UpdatePeriodSeconds %d, got %d", e, a)
}
if e, a := deployapi.DefaultRollingIntervalSeconds, *strat.RollingParams.IntervalSeconds; e != a {
t.Errorf("expected IntervalSeconds %d, got %d", e, a)
}
if e, a := deployapi.DefaultRollingTimeoutSeconds, *strat.RollingParams.TimeoutSeconds; e != a {
t.Errorf("expected UpdatePeriodSeconds %d, got %d", e, a)
}
}
| dkorn/origin | pkg/deploy/api/v1/defaults_test.go | GO | apache-2.0 | 1,606 |
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.message;
import com.google.mr4c.util.MR4CLogging;
import java.io.IOException;
import java.net.URI;
import org.slf4j.Logger;
public class DefaultMessageHandler implements MessageHandler {

	protected static final Logger s_log = MR4CLogging.getLogger(DefaultMessageHandler.class);

	// No-op: the default handler is not bound to any destination URI.
	public void setURI(URI uri) {}

	/**
	 * Fallback handling: logs the message's topic and content at INFO level;
	 * the message is otherwise dropped.
	 */
	public void handleMessage(Message msg) throws IOException {
		s_log.info("Message sent to default handler for topic [{}] : [{}]", msg.getTopic(), msg.getContent());
	}
}
| rickyHong/RSparkMR4C | java/src/java/com/google/mr4c/message/DefaultMessageHandler.java | Java | apache-2.0 | 1,149 |
/*
Copyright 2015 Akexorcist
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.akexorcist.roundcornerprogressbar;
import android.annotation.SuppressLint;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Color;
import android.graphics.drawable.GradientDrawable;
import android.os.Build;
import android.os.Parcel;
import android.os.Parcelable;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.util.TypedValue;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.akexorcist.roundcornerprogressbar.common.BaseRoundCornerProgressBar;
import java.text.NumberFormat;
/**
 * Rounded-corner progress bar with a text label rendered beside the bar.
 * The label can be set explicitly or, when auto-text-change is enabled,
 * updated automatically from the current progress value. View state
 * (text, colors, sizes) survives configuration changes via SavedState.
 */
public class TextRoundCornerProgressBar extends BaseRoundCornerProgressBar {
    protected final static int DEFAULT_PROGRESS_BAR_HEIGHT = 30;
    protected final static int DEFAULT_TEXT_PADDING = 10;
    protected final static int DEFAULT_TEXT_SIZE = 18;
    protected final static int DEFAULT_TEXT_WIDTH = 100;
    protected final static int DEFAULT_TEXT_COLOR = Color.parseColor("#ff333333");
    // Label view inflated from the custom layout.
    protected TextView textViewValue;
    // Current label text and the unit suffix appended in auto mode.
    protected String text;
    protected String textUnit;
    // When true, the label is rewritten from `progress` on every update.
    protected boolean autoTextChange;
    protected int textSize;
    protected int textPadding;
    // Horizontal space reserved for the label; subtracted from the bar width.
    protected int textWidth;
    protected int textColor;
    @SuppressLint("NewApi")
    public TextRoundCornerProgressBar(Context context, AttributeSet attrs) {
        super(context, attrs);
    }
    @Override
    protected int initProgressBarLayout() {
        return R.layout.round_corner_with_text_layout;
    }
    // Reads the rc* styleable attributes and configures the label view.
    @Override
    protected void setup(TypedArray typedArray, DisplayMetrics metrics) {
        autoTextChange = typedArray.getBoolean(R.styleable.RoundCornerProgress_rcAutoTextChange, false);
        textSize = (int) typedArray.getDimension(R.styleable.RoundCornerProgress_rcTextProgressSize, DEFAULT_TEXT_SIZE);
        textPadding = (int) typedArray.getDimension(R.styleable.RoundCornerProgress_rcTextProgressPadding, DEFAULT_TEXT_PADDING);
        text = typedArray.getString(R.styleable.RoundCornerProgress_rcTextProgress);
        text = (text == null) ? "" : text;
        textUnit = typedArray.getString(R.styleable.RoundCornerProgress_rcTextProgressUnit);
        textUnit = (textUnit == null) ? "" : textUnit;
        textWidth = (int) typedArray.getDimension(R.styleable.RoundCornerProgress_rcTextProgressWidth, DEFAULT_TEXT_WIDTH);
        textViewValue = (TextView) findViewById(R.id.round_corner_progress_text);
        textViewValue.setTextSize(TypedValue.COMPLEX_UNIT_PX, textSize);
        // NOTE(review): the attribute color is applied to the view but never
        // stored in the `textColor` field, so getTextColor() returns 0 until
        // setTextColor() is called — confirm whether this is intentional.
        textViewValue.setTextColor(typedArray.getColor(R.styleable.RoundCornerProgress_rcTextProgressColor, DEFAULT_TEXT_COLOR));
        textViewValue.setText(text);
        textViewValue.setPadding(textPadding, 0, textPadding, 0);
    }
    // Bar area is the view minus the reserved label width; falls back to a
    // default height when the view has not been measured yet.
    @Override
    public void setBackgroundLayoutSize(LinearLayout layoutBackground) {
        int height, width;
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
            width = getMeasuredWidth() - textWidth;
            height = getMeasuredHeight();
        } else {
            width = getWidth() - textWidth;
            height = getHeight();
        }
        if(height == 0) {
            height = (int) dp2px(DEFAULT_PROGRESS_BAR_HEIGHT);
        }
        setBackgroundWidth(width);
        setBackgroundHeight(height);
    }
    @Override
    protected void setGradientRadius(GradientDrawable gradient) {
        int radius = getRadius() - (getPadding() / 2);
        gradient.setCornerRadii(new float[]{radius, radius, radius, radius, radius, radius, radius, radius});
    }
    @Override
    protected void onLayoutMeasured() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
            setBackgroundWidth(getMeasuredWidth() - textWidth);
        } else {
            setBackgroundWidth(getWidth() - textWidth);
        }
    }
    // Returns the pixel width for the given progress ratio; in auto mode
    // also refreshes the label from the current progress.
    @Override
    protected float setLayoutProgressWidth(float ratio) {
        if (isAutoTextChange()) {
            // Whole numbers are shown without a decimal part.
            String strProgress = NumberFormat.getInstance().format((progress % 1 == 0) ? (int) progress : progress);
            // NOTE(review): auto mode joins value and unit with a space, while
            // onRestoreInstanceState uses `text + textUnit` without one —
            // confirm which format is intended.
            textViewValue.setText(strProgress + " " + textUnit);
        }
        return (ratio > 0) ? (getBackgroundWidth() - (getPadding() * 2)) / ratio : 0;
    }
    @Override
    protected float setSecondaryLayoutProgressWidth(float ratio) {
        return (ratio > 0) ? (getBackgroundWidth() - (getPadding() * 2)) / ratio : 0;
    }
    /** Sets the unit suffix and re-applies the current progress. */
    public void setTextUnit(String unit) {
        textUnit = unit;
        setProgress();
    }
    public String getTextUnit() {
        return textUnit;
    }
    /** Sets the label text directly (independent of auto mode). */
    public void setTextProgress(CharSequence text) {
        textViewValue.setText(text);
    }
    public CharSequence getTextProgress() {
        return textViewValue.getText();
    }
    public void setTextColor(int color) {
        textColor = color;
        textViewValue.setTextColor(color);
    }
    public int getTextColor() {
        return textColor;
    }
    public void setAutoTextChange(boolean isAuto) {
        autoTextChange = isAuto;
    }
    public boolean isAutoTextChange() {
        return autoTextChange;
    }
    // Persists label configuration across configuration changes.
    @Override
    protected Parcelable onSaveInstanceState() {
        Parcelable superState = super.onSaveInstanceState();
        SavedState ss = new SavedState(superState);
        ss.autoTextChange = this.autoTextChange;
        ss.textSize = this.textSize;
        ss.textPadding = this.textPadding;
        ss.textWidth = this.textWidth;
        ss.textColor = this.textColor;
        ss.text = this.text;
        ss.textUnit = this.textUnit;
        return ss;
    }
    @Override
    protected void onRestoreInstanceState(Parcelable state) {
        if(!(state instanceof SavedState)) {
            super.onRestoreInstanceState(state);
            return;
        }
        SavedState ss = (SavedState)state;
        super.onRestoreInstanceState(ss.getSuperState());
        this.autoTextChange = ss.autoTextChange;
        this.textSize = ss.textSize;
        this.textPadding = ss.textPadding;
        this.textWidth = ss.textWidth;
        this.textColor = ss.textColor;
        this.text = ss.text;
        this.textUnit = ss.textUnit;
        setTextProgress(text + textUnit);
        setTextColor(textColor);
    }
    // Parcelable holder for the label-related fields; read/write order of the
    // fields must stay in sync between the constructor and writeToParcel.
    private static class SavedState extends BaseSavedState {
        String text;
        String textUnit;
        int textSize;
        int textPadding;
        int textWidth;
        int textColor;
        boolean autoTextChange;
        SavedState(Parcelable superState) {
            super(superState);
        }
        private SavedState(Parcel in) {
            super(in);
            this.textSize = in.readInt();
            this.textPadding = in.readInt();
            this.textWidth = in.readInt();
            this.textColor = in.readInt();
            this.autoTextChange = in.readByte() != 0;
            this.text = in.readString();
            this.textUnit = in.readString();
        }
        @Override
        public void writeToParcel(Parcel out, int flags) {
            super.writeToParcel(out, flags);
            out.writeInt(this.textSize);
            out.writeInt(this.textPadding);
            out.writeInt(this.textWidth);
            out.writeInt(this.textColor);
            out.writeByte((byte) (this.autoTextChange ? 1 : 0));
            out.writeString(this.text);
            out.writeString(this.textUnit);
        }
        public static final Parcelable.Creator<SavedState> CREATOR = new Parcelable.Creator<SavedState>() {
            public SavedState createFromParcel(Parcel in) {
                return new SavedState(in);
            }
            public SavedState[] newArray(int size) {
                return new SavedState[size];
            }
        };
    }
}
| first087/Android-RoundCornerProgressBar | library/src/main/java/com/akexorcist/roundcornerprogressbar/TextRoundCornerProgressBar.java | Java | apache-2.0 | 8,335 |
using System.Windows;
using System.Windows.Documents;
using Microsoft.VisualStudio.PlatformUI;
using NuGet.VisualStudio;
namespace NuGet.Dialog.PackageManagerUI
{
    /// <summary>
    /// Interaction logic for LicenseAcceptanceWindow.xaml
    /// </summary>
    /// <remarks>
    /// Code-behind for the license acceptance dialog: the Accept/Decline
    /// handlers set DialogResult (true/false) so the caller of ShowDialog()
    /// can read the user's choice; license links open in an external browser.
    /// </remarks>
    public partial class LicenseAcceptanceWindow : DialogWindow
    {
        public LicenseAcceptanceWindow()
        {
            InitializeComponent();
        }

        // Handler for the Decline button (hooked up in the companion XAML).
        private void OnDeclineButtonClick(object sender, RoutedEventArgs e)
        {
            this.DialogResult = false;
        }

        // Handler for the Accept button (hooked up in the companion XAML).
        private void OnAcceptButtonClick(object sender, RoutedEventArgs e)
        {
            this.DialogResult = true;
        }

        // Opens the license-terms hyperlink target in the external browser.
        private void OnViewLicenseTermsRequestNavigate(object sender, RoutedEventArgs e)
        {
            Hyperlink hyperlink = (Hyperlink)sender;
            var licenseUrl = hyperlink.NavigateUri;
            UriHelper.OpenExternalLink(licenseUrl);
        }
    }
} | anurse/NuGet | src/DialogServices/PackageManagerUI/LicenseAcceptanceWindow.xaml.cs | C# | apache-2.0 | 1,006 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * <!-- Package description. -->
 * Demonstrates usage of the data streamer using Java 8 lambda syntax.
 */
package org.apache.ignite.examples.java8.streaming;
| kidaa/incubator-ignite | examples/src/main/java8/org/apache/ignite/examples/java8/streaming/package-info.java | Java | apache-2.0 | 937 |
//=======================================================================
// Copyright 2001 Jeremy G. Siek, Andrew Lumsdaine, Lie-Quan Lee,
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//=======================================================================
#include <boost/config.hpp>
#include <iostream>
#include <fstream>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/depth_first_search.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/graph/copy.hpp>
// Reads a GraphViz digraph from argv[1], runs a DFS from the (hard-coded)
// loop-head vertex 1, marks every vertex reachable from it as gray/filled,
// and writes the annotated graph to argv[2].
int
main(int argc, char *argv[])
{
  if (argc < 3) {
    std::cerr << "usage: reachable-loop-head.exe <in-file> <out-file>"
      << std::endl;
    return -1;
  }
  using namespace boost;
  GraphvizDigraph g;
  read_graphviz(argv[1], g);
  // NOTE(review): the loop head is assumed to be vertex 1 of the input graph;
  // confirm this matches the graph produced by the companion example.
  graph_traits < GraphvizDigraph >::vertex_descriptor loop_head = 1;
  typedef color_traits < default_color_type > Color;
  // Color map: white = not reached, any other color = reachable from loop_head.
  std::vector < default_color_type >
    reachable_from_head(num_vertices(g), Color::white());
  default_color_type c;
  depth_first_visit(g, loop_head, default_dfs_visitor(),
                    make_iterator_property_map(reachable_from_head.begin(),
                                               get(vertex_index, g), c));
  property_map<GraphvizDigraph, vertex_attribute_t>::type
    vattr_map = get(vertex_attribute, g);
  // Record reachability directly in the GraphViz vertex attributes.
  graph_traits < GraphvizDigraph >::vertex_iterator i, i_end;
  for (boost::tie(i, i_end) = vertices(g); i != i_end; ++i)
    if (reachable_from_head[*i] != Color::white()) {
      vattr_map[*i]["color"] = "gray";
      vattr_map[*i]["style"] = "filled";
    }
  std::ofstream loops_out(argv[2]);
#if defined(BOOST_MSVC) && BOOST_MSVC <= 1300
  // VC++ has trouble with the get_property() functions
  // (manual dot-format emission for VC++ 7.0 and older).
  loops_out << "digraph G {\n"
            << "size=\"3,3\"\n"
            << "ratio=\"fill\"\n"
            << "shape=\"box\"\n";
  graph_traits<GraphvizDigraph>::vertex_iterator vi, vi_end;
  for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi) {
    loops_out << *vi << "[";
    for (std::map<std::string,std::string>::iterator ai = vattr_map[*vi].begin();
         ai != vattr_map[*vi].end(); ++ai) {
      loops_out << ai->first << "=" << ai->second;
      if (next(ai) != vattr_map[*vi].end())
        loops_out << ", ";
    }
    loops_out<< "]";
  }
  property_map<GraphvizDigraph, edge_attribute_t>::type
    eattr_map = get(edge_attribute, g);
  graph_traits<GraphvizDigraph>::edge_iterator ei, ei_end;
  for (boost::tie(ei, ei_end) = edges(g); ei != ei_end; ++ei) {
    loops_out << source(*ei, g) << " -> " << target(*ei, g) << "[";
    std::map<std::string,std::string>& attr_map = eattr_map[*ei];
    for (std::map<std::string,std::string>::iterator eai = attr_map.begin();
         eai != attr_map.end(); ++eai) {
      loops_out << eai->first << "=" << eai->second;
      if (next(eai) != attr_map.end())
        loops_out << ", ";
    }
    loops_out<< "]";
  }
  loops_out << "}\n";
#else
  // Normal path: set graph-wide attributes, then let write_graphviz emit
  // everything through the attribute writers.
  get_property(g, graph_graph_attribute)["size"] = "3,3";
  get_property(g, graph_graph_attribute)["ratio"] = "fill";
  get_property(g, graph_vertex_attribute)["shape"] = "box";
  write_graphviz(loops_out, g,
                 make_vertex_attributes_writer(g),
                 make_edge_attributes_writer(g),
                 make_graph_attributes_writer(g));
#endif
  return EXIT_SUCCESS;
}
| flingone/frameworks_base_cmds_remoted | libs/boost/libs/graph/example/reachable-loop-head.cpp | C++ | apache-2.0 | 3,493 |
/**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
IframeTransport,
getIframeTransportScriptUrl,
} from '../iframe-transport';
import {addParamsToUrl} from '../../../../src/url';
import {expectPostMessage} from '../../../../testing/iframe.js';
import {urls} from '../../../../src/config';
import {user} from '../../../../src/log';
// Unit tests for IframeTransport (amp-analytics): per-vendor frame caching,
// message queueing, sentinel uniqueness, usage counting, PerformanceObserver
// creation, and client-library URL resolution.
describes.realWin('amp-analytics.iframe-transport', {amp: true}, env => {
  let sandbox;
  let iframeTransport;
  const frameUrl = 'http://example.com';

  beforeEach(() => {
    sandbox = env.sandbox;
    // Fresh transport per test; the trailing "-1" is the creative id.
    iframeTransport = new IframeTransport(
      env.ampdoc.win,
      'some_vendor_type',
      {iframe: frameUrl},
      frameUrl + '-1'
    );
  });

  afterEach(() => {
    // Frames are cached statically per vendor type; reset between tests.
    IframeTransport.resetCrossDomainIframes();
  });

  // Asserts that every entry in numArray is distinct (no-op on falsy input).
  function expectAllUnique(numArray) {
    if (!numArray) {
      return;
    }
    expect(numArray).to.have.lengthOf(new Set(numArray).size);
  }

  it('creates one frame per vendor type', () => {
    const createCrossDomainIframeSpy = sandbox.spy(
      iframeTransport,
      'createCrossDomainIframe'
    );
    expect(createCrossDomainIframeSpy).to.not.be.called;
    expect(IframeTransport.hasCrossDomainIframe(iframeTransport.getType())).to
      .be.true;
    // Re-processing the same vendor type must reuse the cached frame.
    iframeTransport.processCrossDomainIframe();
    expect(createCrossDomainIframeSpy).to.not.be.called;
  });

  it('enqueues event messages correctly', () => {
    const url = 'https://example.com/test';
    const config = {iframe: url};
    iframeTransport.sendRequest('hello, world!', config);
    const {queue} = IframeTransport.getFrameData(iframeTransport.getType());
    expect(queue.queueSize()).to.equal(1);
    iframeTransport.sendRequest('hello again, world!', config);
    expect(queue.queueSize()).to.equal(2);
  });

  it('does not cause sentinel collisions', () => {
    const iframeTransport2 = new IframeTransport(
      env.ampdoc.win,
      'some_other_vendor_type',
      {iframe: 'https://example.com/test2'},
      'https://example.com/test2-2'
    );
    const frame1 = IframeTransport.getFrameData(iframeTransport.getType());
    const frame2 = IframeTransport.getFrameData(iframeTransport2.getType());
    // Creative ids and frame sentinels must all be pairwise distinct.
    expectAllUnique([
      iframeTransport.getCreativeId(),
      iframeTransport2.getCreativeId(),
      frame1.frame.sentinel,
      frame2.frame.sentinel,
    ]);
  });

  it('correctly tracks usageCount and destroys iframes', () => {
    const frameUrl2 = 'https://example.com/test2';
    const iframeTransport2 = new IframeTransport(
      env.ampdoc.win,
      'some_other_vendor_type',
      {iframe: frameUrl2},
      frameUrl2 + '-3'
    );
    const frame1 = IframeTransport.getFrameData(iframeTransport.getType());
    const frame2 = IframeTransport.getFrameData(iframeTransport2.getType());
    expect(frame1.usageCount).to.equal(1);
    expect(frame2.usageCount).to.equal(1);
    expect(env.win.document.getElementsByTagName('IFRAME')).to.have.lengthOf(2);
    // Mark the iframes as used multiple times each.
    iframeTransport.processCrossDomainIframe();
    iframeTransport.processCrossDomainIframe();
    iframeTransport2.processCrossDomainIframe();
    iframeTransport2.processCrossDomainIframe();
    iframeTransport2.processCrossDomainIframe();
    expect(frame1.usageCount).to.equal(3);
    expect(frame2.usageCount).to.equal(4);
    // Stop using the iframes, make sure usage counts go to zero and they are
    // removed from the DOM.
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport.getType()
    );
    expect(frame1.usageCount).to.equal(2);
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport.getType()
    );
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport.getType()
    );
    expect(frame1.usageCount).to.equal(0);
    expect(frame2.usageCount).to.equal(4); // (Still)
    expect(env.win.document.getElementsByTagName('IFRAME')).to.have.lengthOf(1);
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport2.getType()
    );
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport2.getType()
    );
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport2.getType()
    );
    IframeTransport.markCrossDomainIframeAsDone(
      env.win.document,
      iframeTransport2.getType()
    );
    expect(frame2.usageCount).to.equal(0);
    expect(env.win.document.getElementsByTagName('IFRAME')).to.have.lengthOf(0);
  });

  it('creates one PerformanceObserver per vendor type', () => {
    const createPerformanceObserverSpy = sandbox.spy(
      IframeTransport.prototype,
      'createPerformanceObserver_'
    );
    expect(createPerformanceObserverSpy).to.not.be.called;
    iframeTransport.processCrossDomainIframe(); // Create 2nd frame for 1st vendor
    expect(createPerformanceObserverSpy).to.not.be.called;
    // Create frame for a new vendor
    const frameUrl2 = 'https://example.com/test2';
    new IframeTransport(
      env.ampdoc.win,
      'some_other_vendor_type',
      {iframe: frameUrl2},
      frameUrl2 + '-3'
    );
    expect(createPerformanceObserverSpy).to.be.called;
  });

  it('gets correct client lib URL in local/test mode', () => {
    const url = getIframeTransportScriptUrl(env.ampdoc.win);
    expect(url).to.contain(env.win.location.host);
    expect(url).to.contain('/dist/iframe-transport-client-lib.js');
  });

  it('gets correct client lib URL in prod mode', () => {
    const url = getIframeTransportScriptUrl(env.ampdoc.win, true);
    expect(url).to.contain(urls.thirdParty);
    expect(url).to.contain('/iframe-transport-client-v0.js');
    expect(url).to.equal(
      'https://3p.ampproject.net/$internalRuntimeVersion$/' +
        'iframe-transport-client-v0.js'
    );
  });
});
// Integration-style test: loads a deliberately slow page into the vendor
// frame and verifies that IframeTransport's long-task monitoring logs an
// error that names the vendor type.
describes.realWin(
  'amp-analytics.iframe-transport',
  {amp: true, allowExternalResources: true},
  env => {
    it('logs poor performance of vendor iframe', () => {
      // Page that busy-waits 5 x 200ms on load, then postMessages
      // "doneSleeping" to its parent so the test knows the work finished.
      const body =
        '<html><head><script>' +
        'function busyWait(count, duration, cb) {\n' +
        '  if (count) {\n' +
        '    var d = new Date();\n' +
        '    var d2 = null;\n' +
        '    do {\n' +
        '      d2 = new Date();\n' +
        '    } while (d2-d < duration);\n' + // Note the semicolon!
        '    setTimeout(function() { ' +
        '      busyWait(count-1, duration, cb);' +
        '    },0);\n' +
        '  } else {\n' +
        '    cb();\n' +
        '  }\n' +
        '}\n' +
        'function begin() {\n' +
        '  busyWait(5, 200, function() {\n' +
        '    window.parent.postMessage("doneSleeping", "*");\n' +
        '  });\n' +
        '}' +
        '</script></head>' +
        '<body onload="javascript:begin()">' +
        'Non-Performant Fake Iframe' +
        '</body>' +
        '</html>';
      const frameUrl2 = addParamsToUrl(
        'http://ads.localhost:' +
          document.location.port +
          '/amp4test/compose-doc',
        {body}
      );
      // NOTE(review): `sandbox` is not declared in this describe block —
      // presumably the global test sandbox; confirm.
      // Stub appendChild so the constructor does not attach the frame yet.
      sandbox.stub(env.ampdoc.win.document.body, 'appendChild');
      new IframeTransport(
        env.ampdoc.win,
        'some_other_vendor_type',
        {iframe: frameUrl2},
        frameUrl2 + '-3'
      );
      sandbox.restore();
      const errorSpy = sandbox.spy(user(), 'error');
      const {frame} = IframeTransport.getFrameData('some_other_vendor_type');
      frame.setAttribute('style', '');
      // Attach the (visible) frame now so the slow page actually runs.
      env.ampdoc.win.document.body.appendChild(frame);
      return new Promise((resolve, unused) => {
        expectPostMessage(
          frame.contentWindow,
          env.ampdoc.win,
          'doneSleeping'
        ).then(() => {
          expect(errorSpy).to.be.called;
          expect(errorSpy.args[0][1]).to.match(
            /Long Task: Vendor: "some_other_vendor_type"/
          );
          resolve();
        });
      });
    }).timeout(10000);
  }
);
| dotandads/amphtml | extensions/amp-analytics/0.1/test/test-iframe-transport.js | JavaScript | apache-2.0 | 8,537 |
package org.zstack.header.host;
import org.zstack.header.message.NeedReplyMessage;
import org.zstack.header.vm.VmNicInventory;
import java.util.List;
/**
 * Message asking a host's hypervisor agent to attach the given nics to a VM.
 * A reply is expected (extends {@link NeedReplyMessage}); the target host is
 * exposed through the {@link HostMessage} contract for routing.
 */
public class AttachNicToVmOnHypervisorMsg extends NeedReplyMessage implements HostMessage {
    private String vmInstanceUuid;
    private String hostUuid;
    private List<VmNicInventory> nics;

    /** Uuid of the VM that receives the nics. */
    public String getVmUuid() {
        return vmInstanceUuid;
    }

    public void setVmUuid(String vmInstanceUuid) {
        this.vmInstanceUuid = vmInstanceUuid;
    }

    /** Uuid of the host whose hypervisor performs the attach. */
    @Override
    public String getHostUuid() {
        return hostUuid;
    }

    public void setHostUuid(String hostUuid) {
        this.hostUuid = hostUuid;
    }

    /** Inventories describing the nics to attach. */
    public List<VmNicInventory> getNics() {
        return nics;
    }

    public void setNics(List<VmNicInventory> nics) {
        this.nics = nics;
    }
}
| SoftwareKing/zstack | header/src/main/java/org/zstack/header/host/AttachNicToVmOnHypervisorMsg.java | Java | apache-2.0 | 806 |
/*
Copyright (c) 2014 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ls
import (
"flag"
"fmt"
"io"
"github.com/vmware/govmomi/govc/cli"
"github.com/vmware/govmomi/govc/flags"
"github.com/vmware/govmomi/list"
"github.com/vmware/govmomi/vim25/mo"
"golang.org/x/net/context"
)
// ls implements the "govc ls" CLI command; DatacenterFlag supplies
// inventory-path resolution and output helpers.
type ls struct {
	*flags.DatacenterFlag
	Long bool // -l: long listing format
}
// init registers this command under the name "ls" with the CLI dispatcher.
func init() {
	cli.Register("ls", &ls{})
}
// Register wires up the command-specific flags (only -l).
func (cmd *ls) Register(f *flag.FlagSet) {
	f.BoolVar(&cmd.Long, "l", false, "Long listing format")
}
// Process validates flag state after parsing; ls has nothing to validate.
func (cmd *ls) Process() error { return nil }
// Usage returns the argument synopsis shown in the command's help output.
func (cmd *ls) Usage() string {
	return "[PATH]..."
}
// Run lists the inventory children of each PATH argument (defaulting to the
// current directory ".") and writes the combined result.
func (cmd *ls) Run(f *flag.FlagSet) error {
	finder, err := cmd.Finder()
	if err != nil {
		return err
	}

	paths := f.Args()
	if len(paths) == 0 {
		paths = []string{"."}
	}

	result := listResult{Long: cmd.Long}
	for _, p := range paths {
		children, err := finder.ManagedObjectListChildren(context.TODO(), p)
		if err != nil {
			return err
		}
		result.Elements = append(result.Elements, children...)
	}

	return cmd.WriteResult(result)
}
// listResult carries the elements to print; Long selects the detailed
// output format and is excluded from JSON output.
type listResult struct {
	Elements []list.Element `json:"elements"`
	Long bool `json:"-"`
}
// Write renders the listing to w: one path per line in short mode; in long
// mode folders get a trailing "/" and other objects are suffixed with their
// managed-object type in parentheses.
func (l listResult) Write(w io.Writer) error {
	for _, e := range l.Elements {
		if !l.Long {
			fmt.Fprintf(w, "%s\n", e.Path)
			continue
		}

		switch e.Object.(type) {
		case mo.Folder:
			if _, err := fmt.Fprintf(w, "%s/\n", e.Path); err != nil {
				return err
			}
		default:
			if _, err := fmt.Fprintf(w, "%s (%s)\n", e.Path, e.Object.Reference().Type); err != nil {
				return err
			}
		}
	}

	return nil
}
| soleo/machine | vendor/github.com/vmware/govmomi/govc/ls/command.go | GO | apache-2.0 | 2,084 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.concurrent;
/**
 * Flink's completable future abstraction. A completable future can be completed with a regular
 * value or an exception. Completion happens at most once: whichever of {@link #complete} or
 * {@link #completeExceptionally} is invoked first on a not-yet-completed future wins, and all
 * later completion attempts report failure via their boolean return value.
 *
 * @param <T> type of the future's value
 */
public interface CompletableFuture<T> extends Future<T> {

	/**
	 * Completes the future with the given value. The complete operation only succeeds if the future
	 * has not been completed before. Whether it is successful or not is returned by the method.
	 *
	 * @param value to complete the future with
	 * @return true if the completion was successful; otherwise false
	 */
	boolean complete(T value);

	/**
	 * Completes the future with the given exception. The complete operation only succeeds if the
	 * future has not been completed before. Whether it is successful or not is returned by the
	 * method.
	 *
	 * @param t the exception to complete the future with
	 * @return true if the completion was successful; otherwise false
	 */
	boolean completeExceptionally(Throwable t);
}
| WangTaoTheTonic/flink | flink-runtime/src/main/java/org/apache/flink/runtime/concurrent/CompletableFuture.java | Java | apache-2.0 | 1,816 |
<?php
/**
* Class used internally by Horde_Text_Diff to actually compute the diffs.
*
* This class is implemented using native PHP code.
*
* The algorithm used here is mostly lifted from the perl module
* Algorithm::Diff (version 1.06) by Ned Konz, which is available at:
* http://www.perl.com/CPAN/authors/id/N/NE/NEDKONZ/Algorithm-Diff-1.06.zip
*
* More ideas are taken from: http://www.ics.uci.edu/~eppstein/161/960229.html
*
* Some ideas (and a bit of code) are taken from analyze.c, of GNU
* diffutils-2.7, which can be found at:
* ftp://gnudist.gnu.org/pub/gnu/diffutils/diffutils-2.7.tar.gz
*
* Some ideas (subdivision by NCHUNKS > 2, and some optimizations) are from
* Geoffrey T. Dairiki <dairiki@dairiki.org>. The original PHP version of this
* code was written by him, and is used/adapted with his permission.
*
* Copyright 2004-2011 Horde LLC (http://www.horde.org/)
*
* See the enclosed file COPYING for license information (LGPL). If you did
* not receive this file, see http://www.horde.org/licenses/lgpl21.
*
* @author Geoffrey T. Dairiki <dairiki@dairiki.org>
* @package Text_Diff
*/
// Disallow direct access to this file for security reasons
// (IN_MYBB is defined by the MyBB bootstrap; hitting this file over HTTP
// directly would otherwise execute it outside the application context).
if(!defined("IN_MYBB"))
{
	die("Direct initialization of this file is not allowed.<br /><br />Please make sure IN_MYBB is defined.");
}
class Horde_Text_Diff_Engine_Native
{
    // Declared explicitly: dynamic properties are deprecated as of PHP 8.2.

    /** Per-line "changed" flags for the from/to sequences. */
    protected $xchanged = array();
    protected $ychanged = array();

    /** Lines of each sequence that also occur in the other sequence. */
    protected $xv = array();
    protected $yv = array();

    /** Mapping from positions in xv/yv back to original line numbers. */
    protected $xind = array();
    protected $yind = array();

    /** Working state for the LCS computation (_diag/_lcsPos). */
    protected $seq = array();
    protected $in_seq = array();
    protected $lcs = 0;

    /**
     * Computes the diff between two sequences of lines.
     *
     * @param array $from_lines  Lines of the "from" (original) text.
     * @param array $to_lines    Lines of the "to" (new) text.
     *
     * @return array  Ordered list of Horde_Text_Diff_Op_* edit operations
     *                (Copy, Add, Delete, Change).
     */
    public function diff($from_lines, $to_lines)
    {
        array_walk($from_lines, array('Horde_Text_Diff', 'trimNewlines'));
        array_walk($to_lines, array('Horde_Text_Diff', 'trimNewlines'));

        $n_from = count($from_lines);
        $n_to = count($to_lines);

        $this->xchanged = $this->ychanged = array();
        $this->xv = $this->yv = array();
        $this->xind = $this->yind = array();
        /* Reset LCS state. The original code unset() these properties, but
         * unsetting a declared property and later re-creating it would make
         * it dynamic again (deprecated in PHP 8.2); re-initializing is
         * equivalent because _diag() rebuilds them before use. */
        $this->seq = array();
        $this->in_seq = array();
        $this->lcs = 0;

        // Skip leading common lines.
        for ($skip = 0; $skip < $n_from && $skip < $n_to; $skip++) {
            if ($from_lines[$skip] !== $to_lines[$skip]) {
                break;
            }
            $this->xchanged[$skip] = $this->ychanged[$skip] = false;
        }

        // Skip trailing common lines.
        $xi = $n_from;
        $yi = $n_to;
        for ($endskip = 0; --$xi > $skip && --$yi > $skip; $endskip++) {
            if ($from_lines[$xi] !== $to_lines[$yi]) {
                break;
            }
            $this->xchanged[$xi] = $this->ychanged[$yi] = false;
        }

        // Ignore lines which do not exist in both files.
        $xhash = array();
        $yhash = array();
        for ($xi = $skip; $xi < $n_from - $endskip; $xi++) {
            $xhash[$from_lines[$xi]] = 1;
        }

        for ($yi = $skip; $yi < $n_to - $endskip; $yi++) {
            $line = $to_lines[$yi];
            if (($this->ychanged[$yi] = empty($xhash[$line]))) {
                continue;
            }
            $yhash[$line] = 1;
            $this->yv[] = $line;
            $this->yind[] = $yi;
        }
        for ($xi = $skip; $xi < $n_from - $endskip; $xi++) {
            $line = $from_lines[$xi];
            if (($this->xchanged[$xi] = empty($yhash[$line]))) {
                continue;
            }
            $this->xv[] = $line;
            $this->xind[] = $xi;
        }

        // Find the LCS.
        $this->_compareseq(0, count($this->xv), 0, count($this->yv));

        // Merge edits when possible.
        $this->_shiftBoundaries($from_lines, $this->xchanged, $this->ychanged);
        $this->_shiftBoundaries($to_lines, $this->ychanged, $this->xchanged);

        // Compute the edit operations.
        $edits = array();
        $xi = $yi = 0;
        while ($xi < $n_from || $yi < $n_to) {
            assert($yi < $n_to || $this->xchanged[$xi]);
            assert($xi < $n_from || $this->ychanged[$yi]);

            // Skip matching "snake".
            $copy = array();
            while ($xi < $n_from && $yi < $n_to
                   && !$this->xchanged[$xi] && !$this->ychanged[$yi]) {
                $copy[] = $from_lines[$xi++];
                ++$yi;
            }
            if ($copy) {
                $edits[] = new Horde_Text_Diff_Op_Copy($copy);
            }

            // Find deletes & adds.
            $delete = array();
            while ($xi < $n_from && $this->xchanged[$xi]) {
                $delete[] = $from_lines[$xi++];
            }

            $add = array();
            while ($yi < $n_to && $this->ychanged[$yi]) {
                $add[] = $to_lines[$yi++];
            }

            if ($delete && $add) {
                $edits[] = new Horde_Text_Diff_Op_Change($delete, $add);
            } elseif ($delete) {
                $edits[] = new Horde_Text_Diff_Op_Delete($delete);
            } elseif ($add) {
                $edits[] = new Horde_Text_Diff_Op_Add($add);
            }
        }

        return $edits;
    }

    /**
     * Divides the Largest Common Subsequence (LCS) of the sequences (XOFF,
     * XLIM) and (YOFF, YLIM) into NCHUNKS approximately equally sized
     * segments.
     *
     * Returns (LCS, PTS). LCS is the length of the LCS. PTS is an array of
     * NCHUNKS+1 (X, Y) indexes giving the diving points between sub
     * sequences. The first sub-sequence is contained in (X0, X1), (Y0, Y1),
     * the second in (X1, X2), (Y1, Y2) and so on. Note that (X0, Y0) ==
     * (XOFF, YOFF) and (X[NCHUNKS], Y[NCHUNKS]) == (XLIM, YLIM).
     *
     * This public function assumes that the first lines of the specified portions of
     * the two files do not match, and likewise that the last lines do not
     * match. The caller must trim matching lines from the beginning and end
     * of the portions it is going to specify.
     */
    protected function _diag ($xoff, $xlim, $yoff, $ylim, $nchunks)
    {
        $flip = false;

        if ($xlim - $xoff > $ylim - $yoff) {
            /* Things seems faster (I'm not sure I understand why) when the
             * shortest sequence is in X. */
            $flip = true;
            list ($xoff, $xlim, $yoff, $ylim)
                = array($yoff, $ylim, $xoff, $xlim);
        }

        $ymatches = array();
        if ($flip) {
            for ($i = $ylim - 1; $i >= $yoff; $i--) {
                $ymatches[$this->xv[$i]][] = $i;
            }
        } else {
            for ($i = $ylim - 1; $i >= $yoff; $i--) {
                $ymatches[$this->yv[$i]][] = $i;
            }
        }

        $this->lcs = 0;
        $this->seq[0] = $yoff - 1;
        $this->in_seq = array();
        $ymids = array();
        $ymids[0] = array();

        $numer = $xlim - $xoff + $nchunks - 1;
        $x = $xoff;
        for ($chunk = 0; $chunk < $nchunks; $chunk++) {
            if ($chunk > 0) {
                for ($i = 0; $i <= $this->lcs; $i++) {
                    $ymids[$i][$chunk - 1] = $this->seq[$i];
                }
            }

            $x1 = $xoff + (int)(($numer + ($xlim - $xoff) * $chunk) / $nchunks);
            for (; $x < $x1; $x++) {
                $line = $flip ? $this->yv[$x] : $this->xv[$x];
                if (empty($ymatches[$line])) {
                    continue;
                }
                $matches = $ymatches[$line];

                /* FIX: the original walked $matches with two consecutive
                 * each() loops sharing the array cursor; each() was removed
                 * in PHP 8.0. A single foreach with a phase flag is
                 * equivalent: phase one consumes elements up to and
                 * including the first $y not yet in the sequence, phase two
                 * processes the remaining elements. */
                $found = false;
                foreach ($matches as $y) {
                    if (!$found) {
                        if (empty($this->in_seq[$y])) {
                            $k = $this->_lcsPos($y);
                            assert($k > 0);
                            $ymids[$k] = $ymids[$k - 1];
                            $found = true;
                        }
                        continue;
                    }
                    if ($y > $this->seq[$k - 1]) {
                        assert($y <= $this->seq[$k]);
                        /* Optimization: this is a common case: next match is
                         * just replacing previous match. */
                        $this->in_seq[$this->seq[$k]] = false;
                        $this->seq[$k] = $y;
                        $this->in_seq[$y] = 1;
                    } elseif (empty($this->in_seq[$y])) {
                        $k = $this->_lcsPos($y);
                        assert($k > 0);
                        $ymids[$k] = $ymids[$k - 1];
                    }
                }
            }
        }

        $seps = array();
        $seps[] = $flip ? array($yoff, $xoff) : array($xoff, $yoff);
        $ymid = $ymids[$this->lcs];
        for ($n = 0; $n < $nchunks - 1; $n++) {
            $x1 = $xoff + (int)(($numer + ($xlim - $xoff) * $n) / $nchunks);
            $y1 = $ymid[$n] + 1;
            $seps[] = $flip ? array($y1, $x1) : array($x1, $y1);
        }
        $seps[] = $flip ? array($ylim, $xlim) : array($xlim, $ylim);

        return array($this->lcs, $seps);
    }

    /**
     * Finds the insertion position for $ypos in the current LCS candidate
     * sequence (binary search) and records it, extending the sequence when
     * $ypos is beyond the current end.
     *
     * @param integer $ypos  Candidate y index.
     *
     * @return integer  The sequence slot that $ypos was stored in.
     */
    protected function _lcsPos($ypos)
    {
        $end = $this->lcs;
        if ($end == 0 || $ypos > $this->seq[$end]) {
            $this->seq[++$this->lcs] = $ypos;
            $this->in_seq[$ypos] = 1;
            return $this->lcs;
        }

        $beg = 1;
        while ($beg < $end) {
            $mid = (int)(($beg + $end) / 2);
            if ($ypos > $this->seq[$mid]) {
                $beg = $mid + 1;
            } else {
                $end = $mid;
            }
        }

        assert($ypos != $this->seq[$end]);

        $this->in_seq[$this->seq[$end]] = false;
        $this->seq[$end] = $ypos;
        $this->in_seq[$ypos] = 1;
        return $end;
    }

    /**
     * Finds LCS of two sequences.
     *
     * The results are recorded in the vectors $this->{x,y}changed[], by
     * storing a 1 in the element for each line that is an insertion or
     * deletion (ie. is not in the LCS).
     *
     * The subsequence of file 0 is (XOFF, XLIM) and likewise for file 1.
     *
     * Note that XLIM, YLIM are exclusive bounds. All line numbers are
     * origin-0 and discarded lines are not counted.
     */
    protected function _compareseq ($xoff, $xlim, $yoff, $ylim)
    {
        /* Slide down the bottom initial diagonal. */
        while ($xoff < $xlim && $yoff < $ylim
               && $this->xv[$xoff] == $this->yv[$yoff]) {
            ++$xoff;
            ++$yoff;
        }

        /* Slide up the top initial diagonal. */
        while ($xlim > $xoff && $ylim > $yoff
               && $this->xv[$xlim - 1] == $this->yv[$ylim - 1]) {
            --$xlim;
            --$ylim;
        }

        if ($xoff == $xlim || $yoff == $ylim) {
            $lcs = 0;
        } else {
            /* This is ad hoc but seems to work well. $nchunks =
             * sqrt(min($xlim - $xoff, $ylim - $yoff) / 2.5); $nchunks =
             * max(2,min(8,(int)$nchunks)); */
            $nchunks = min(7, $xlim - $xoff, $ylim - $yoff) + 1;
            list($lcs, $seps)
                = $this->_diag($xoff, $xlim, $yoff, $ylim, $nchunks);
        }

        if ($lcs == 0) {
            /* X and Y sequences have no common subsequence: mark all
             * changed. */
            while ($yoff < $ylim) {
                $this->ychanged[$this->yind[$yoff++]] = 1;
            }
            while ($xoff < $xlim) {
                $this->xchanged[$this->xind[$xoff++]] = 1;
            }
        } else {
            /* Use the partitions to split this problem into subproblems. */
            reset($seps);
            $pt1 = $seps[0];
            while ($pt2 = next($seps)) {
                $this->_compareseq($pt1[0], $pt2[0], $pt1[1], $pt2[1]);
                $pt1 = $pt2;
            }
        }
    }

    /**
     * Adjusts inserts/deletes of identical lines to join changes as much as
     * possible.
     *
     * We do something when a run of changed lines include a line at one end
     * and has an excluded, identical line at the other. We are free to
     * choose which identical line is included. `compareseq' usually chooses
     * the one at the beginning, but usually it is cleaner to consider the
     * following identical line to be the "change".
     *
     * This is extracted verbatim from analyze.c (GNU diffutils-2.7), except
     * that the string-form assert() calls (removed in PHP 8.0) have been
     * converted to ordinary boolean assertions.
     */
    protected function _shiftBoundaries($lines, &$changed, $other_changed)
    {
        $i = 0;
        $j = 0;

        assert(count($lines) == count($changed));
        $len = count($lines);
        $other_len = count($other_changed);

        while (1) {
            /* Scan forward to find the beginning of another run of
             * changes. Also keep track of the corresponding point in the
             * other file.
             *
             * Throughout this code, $i and $j are adjusted together so that
             * the first $i elements of $changed and the first $j elements of
             * $other_changed both contain the same number of zeros (unchanged
             * lines).
             *
             * Furthermore, $j is always kept so that $j == $other_len or
             * $other_changed[$j] == false. */
            while ($j < $other_len && $other_changed[$j]) {
                $j++;
            }

            while ($i < $len && ! $changed[$i]) {
                assert($j < $other_len && ! $other_changed[$j]);
                $i++; $j++;
                while ($j < $other_len && $other_changed[$j]) {
                    $j++;
                }
            }

            if ($i == $len) {
                break;
            }

            $start = $i;

            /* Find the end of this run of changes. */
            while (++$i < $len && $changed[$i]) {
                continue;
            }

            do {
                /* Record the length of this run of changes, so that we can
                 * later determine whether the run has grown. */
                $runlength = $i - $start;

                /* Move the changed region back, so long as the previous
                 * unchanged line matches the last changed one. This merges
                 * with previous changed regions. */
                while ($start > 0 && $lines[$start - 1] == $lines[$i - 1]) {
                    $changed[--$start] = 1;
                    $changed[--$i] = false;
                    while ($start > 0 && $changed[$start - 1]) {
                        $start--;
                    }
                    assert($j > 0);
                    while ($other_changed[--$j]) {
                        continue;
                    }
                    assert($j >= 0 && !$other_changed[$j]);
                }

                /* Set CORRESPONDING to the end of the changed run, at the
                 * last point where it corresponds to a changed run in the
                 * other file. CORRESPONDING == LEN means no such point has
                 * been found. */
                $corresponding = $j < $other_len ? $i : $len;

                /* Move the changed region forward, so long as the first
                 * changed line matches the following unchanged one. This
                 * merges with following changed regions. Do this second, so
                 * that if there are no merges, the changed region is moved
                 * forward as far as possible. */
                while ($i < $len && $lines[$start] == $lines[$i]) {
                    $changed[$start++] = false;
                    $changed[$i++] = 1;
                    while ($i < $len && $changed[$i]) {
                        $i++;
                    }

                    assert($j < $other_len && ! $other_changed[$j]);
                    $j++;
                    if ($j < $other_len && $other_changed[$j]) {
                        $corresponding = $i;
                        while ($j < $other_len && $other_changed[$j]) {
                            $j++;
                        }
                    }
                }
            } while ($runlength != $i - $start);

            /* If possible, move the fully-merged run of changes back to a
             * corresponding run in the other file. */
            while ($corresponding < $i) {
                $changed[--$start] = 1;
                $changed[--$i] = 0;
                assert($j > 0);
                while ($other_changed[--$j]) {
                    continue;
                }
                assert($j >= 0 && !$other_changed[$j]);
            }
        }
    }
}
| hemangsk/ipuhub | qa/inc/3rdparty/diff/Diff/Engine/Native.php | PHP | apache-2.0 | 16,119 |
/*
* Copyright 2010 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.javascript.jscomp.graph.LatticeElement;
import java.util.List;
/**
 * Defines a way to join a list of LatticeElements.
 */
interface JoinOp<L extends LatticeElement> extends Function<List<L>, L> {

  /**
   * An implementation of {@code JoinOp} that reduces a list of lattice
   * elements by joining two of them at a time, divide-and-conquer style.
   */
  static abstract class BinaryJoinOp<L extends LatticeElement>
      implements JoinOp<L> {

    @Override
    public final L apply(List<L> values) {
      Preconditions.checkArgument(!values.isEmpty());
      int count = values.size();
      if (count == 1) {
        return values.get(0);
      }
      if (count == 2) {
        return apply(values.get(0), values.get(1));
      }
      // Join each half recursively, then join the two partial results.
      int split = computeMidPoint(count);
      L left = apply(values.subList(0, split));
      L right = apply(values.subList(split, count));
      return apply(left, right);
    }

    /**
     * Creates a new lattice that will be the join of two input lattices.
     *
     * @return The join of {@code latticeA} and {@code latticeB}.
     */
    abstract L apply(L latticeA, L latticeB);

    /**
     * Finds the midpoint of a list. The function will favor two lists of
     * even length instead of two lists of the same odd length. The list
     * must be at least length two.
     *
     * @param size Size of the list.
     */
    static int computeMidPoint(int size) {
      int midpoint = size >>> 1;
      if (size > 4) {
        // Clear the low bit so the split point is even: any list longer
        // than 4 prefers an even split over the true midpoint, so that
        // [0,6] splits at 2, not 3.
        midpoint &= ~1;
      }
      return midpoint;
    }
  }
}
| weitzj/closure-compiler | src/com/google/javascript/jscomp/JoinOp.java | Java | apache-2.0 | 2,431 |