text
stringlengths 2
99.9k
| meta
dict |
|---|---|
#ifndef _WEB_STRING_H_
#define _WEB_STRING_H_
#include "web_string.h"
#include <string>
#include <vector>
#include <string.h>
#if _WIN32
#define snprintf _snprintf
#define vsnprintf _vsnprintf
#define strcasecmp _stricmp
#define strncasecmp _strnicmp
#else
#include <strings.h>
#endif
#if __linux__
#include <cwctype>
#endif
using namespace std;
namespace canvas
{
	// Returns the position of the bracket matching `open_b` found at/after `off`,
	// or string::npos-style failure per the implementation (defined elsewhere).
	string::size_type find_close_bracket(const string &s, string::size_type off, char open_b, char close_b);
	// Splits `str` into `tokens` on any character in `delims`; characters in
	// `delims_preserve` split AND are kept as tokens. Text inside `quote`
	// characters is not split.
	// FIX: the quote parameter was garbled ("e) by an HTML-entity round trip
	// (&quote -> "e); restored to a valid reference parameter.
	void split_string(const string &str, vector<string> &tokens, const string &delims, const string &delims_preserve = string(""), const string &quote = string("\""));
	// Returns the zero-based index of `val` inside the `delim`-separated list
	// `strings`, or `defValue` when absent (defined elsewhere).
	int value_index(const string &val, const string &strings, int defValue = -1, char delim = ';');
	// True when `val` occurs in the `delim`-separated list `strings`.
	bool value_in_list(const string &val, const string &strings, char delim = ';');
}
#endif
|
{
"pile_set_name": "Github"
}
|
<?php

// Translation strings for social-network labels (values appear to be
// Turkish, e.g. "RSS Beslemesi" = "RSS Feed").
// NOTE(review): 'Google+' maps to 'Google +' (with a space) — presumably
// intentional, but confirm it is not a typo.
return [
    'Facebook' => 'Facebook',
    'Google+' => 'Google +',
    'Instagram' => 'Instagram',
    'RSS' => 'RSS Beslemesi',
    'Twitter' => 'Twitter',
    'Youtube' => 'Youtube',
];
|
{
"pile_set_name": "Github"
}
|
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.idm.engine.impl.persistence.entity;
import java.util.List;
import java.util.Map;
import org.flowable.common.engine.impl.persistence.entity.EntityManager;
/**
 * Entity manager contract for {@link IdentityInfoEntity} persistence
 * operations (lookup, update and deletion of per-user info records).
 *
 * @author Joram Barrez
 */
public interface IdentityInfoEntityManager extends EntityManager<IdentityInfoEntity> {

    /** Finds the single user-info entry identified by user id and key. */
    IdentityInfoEntity findUserInfoByUserIdAndKey(String userId, String key);

    /** Lists the keys of all user-info entries of the given type for a user. */
    List<String> findUserInfoKeysByUserIdAndType(String userId, String type);

    /** Lists all user-info entries belonging to the given user. */
    List<IdentityInfoEntity> findIdentityInfoByUserId(String userId);

    /**
     * Creates or updates a user-info entry; {@code accountDetails} carries
     * additional key/value data stored alongside the entry.
     */
    void updateUserInfo(String userId, String userPassword, String type, String key, String value, String accountPassword, Map<String, String> accountDetails);

    /** Removes the user-info entry identified by user id and key. */
    void deleteUserInfoByUserIdAndKey(String userId, String key);
}
|
{
"pile_set_name": "Github"
}
|
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.envers.test.entities.manytomany.biowned;
import java.util.ArrayList;
import java.util.List;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.JoinTable;
import javax.persistence.ManyToMany;
import org.hibernate.envers.Audited;
/**
* Entity owning a many-to-many relation, where the other entity also owns the relation.
*
* @author Adam Warski (adam at warski dot org)
*/
@Entity
@Audited
public class ListBiowning1Entity {
    /** Surrogate primary key, generated by the provider. */
    @Id
    @GeneratedValue
    private Integer id;

    /** Simple payload column exercised by the audit tests. */
    private String data;

    /**
     * Many-to-many link where the other entity also owns the relation; the
     * inverse join column is mapped read-only on this side.
     */
    @ManyToMany
    @JoinTable(
            name = "biowning",
            joinColumns = @JoinColumn(name = "biowning1_id"),
            inverseJoinColumns = @JoinColumn(name = "biowning2_id", insertable = false, updatable = false)
    )
    private List<ListBiowning2Entity> references = new ArrayList<ListBiowning2Entity>();

    public ListBiowning1Entity() {
    }

    public ListBiowning1Entity(Integer id, String data) {
        this.id = id;
        this.data = data;
    }

    public ListBiowning1Entity(String data) {
        this.data = data;
    }

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getData() {
        return data;
    }

    public void setData(String data) {
        this.data = data;
    }

    public List<ListBiowning2Entity> getReferences() {
        return references;
    }

    public void setReferences(List<ListBiowning2Entity> references) {
        this.references = references;
    }

    @Override
    public boolean equals(Object o) {
        if ( o == this ) {
            return true;
        }
        if ( !(o instanceof ListBiowning1Entity) ) {
            return false;
        }
        ListBiowning1Entity other = (ListBiowning1Entity) o;
        boolean sameData = data == null ? other.data == null : data.equals( other.data );
        boolean sameId = id == null ? other.id == null : id.equals( other.id );
        return sameData && sameId;
    }

    @Override
    public int hashCode() {
        // Same formula as before: 31 * idHash + dataHash (nulls hash to 0).
        int idHash = id == null ? 0 : id.hashCode();
        int dataHash = data == null ? 0 : data.hashCode();
        return 31 * idHash + dataHash;
    }

    @Override
    public String toString() {
        return "ListBiowning1Entity(id = " + id + ", data = " + data + ")";
    }
}
|
{
"pile_set_name": "Github"
}
|
#compdef crontab
# Notes:
# - We assume a cronie-, dcron-, or Vixie-esque crontab
# - BusyBox crontab is forked from dcron
# - Generally only the super-user can use -c/-u; we aren't that restrictive
# - @todo As usual, BusyBox multi-call isn't handled

local variant sluser
local -a args etargs ccargs clargs rcargs aopts

# Identify the installed implementation by matching distinctive strings in
# `crontab --help` output; the result (plus OS type) drives option selection.
_pick_variant -r variant \
  dcron='-c*(#i)dir' \
  cronie-selinux='(#i)selinux' \
  cronie='(#i)cluster' \
  unix --help
variant+=-$OSTYPE

# On Solaris, instead of using -u, the user can be specified as an optional
# first operand with -e/-l/-r. We'll treat it as an optional *argument* to one
# of those options, though, since the logic is a bit simpler
if [[ $variant == *-solaris* ]]; then
  sluser='::user whose crontab to work with:_users'
else
  etargs+=( '(cl)-u+[specify user whose crontab to work with]: :_users' )
fi

# Variant-specific options; the `;&` terminators deliberately fall through so
# cronie-selinux also gets the cronie and generic-linux specs.
case $variant in
  dcron-*)
    etargs+=( '-c+[specify crontab directory]:crontab directory:_directories' )
    ;;
  cronie-selinux-*)
    ccargs+=( '(-l cl nc rc)-s[append SELinux context (with -e)]' )
    ;& # FALL THROUGH
  cronie-*)
    etargs+=( '(: * -)-V[display version information]' )
    clargs+=(
      '(: * -)-c[display cluster host]'
      '(: * -)-n+[specify cluster host]: :_hosts'
    )
    ;& # FALL THROUGH
  *-linux*)
    rcargs+=( '(cc cl nc)-i[prompt for confirmation (with -r)]' )
    ;;
  *-freebsd*)
    rcargs+=( '(cc cl nc)-f[bypass confirmation prompt (with -r)]' )
    ;;
esac

# Assemble _arguments sets; the `+ name` groups let exclusion lists reference
# whole groups (et/cl/nc/cc/rc) rather than individual options.
(( $#etargs )) && args+=( + et $etargs ) # Misc.
(( $#clargs )) && args+=( + cl $clargs ) # Work with cluster
args+=(
  + nc # Install new crontab
  '(cc cl rc sl):crontab to install:_files'
  + cc # Edit/display current crontab
  "(-l cl nc rc)-e[edit current crontab]$sluser"
  "(-e -s cl nc rc)-l[display current crontab]$sluser"
  $ccargs
  + rc # Remove current crontab
  "(cc cl nc)-r[remove current crontab]$sluser"
  $rcargs
)

# Implementations that use GNU's getopt(3) probably support permutation; this
# should be accurate enough
[[ $OSTYPE == linux* ]] || aopts=( -A '-*' )

_arguments -s -S $aopts : $args
|
{
"pile_set_name": "Github"
}
|
require_relative '../../spec_helper'
require 'bigdecimal'
# Specs for BigDecimal#precs, which returns a two-element Array of Integers:
# [current number of significant digits, maximum number of significant digits].
describe "BigDecimal#precs" do
  before :each do
    # Special values plus very large / very small magnitudes, to exercise
    # precision reporting across the representable range.
    @infinity = BigDecimal("Infinity")
    @infinity_neg = BigDecimal("-Infinity")
    @nan = BigDecimal("NaN")
    @zero = BigDecimal("0")
    @zero_neg = BigDecimal("-0")
    @arr = [BigDecimal("2E40001"), BigDecimal("3E-20001"),\
            @infinity, @infinity_neg, @nan, @zero, @zero_neg]
    # Digits per internal BigDecimal "word": BASE is a power of ten, so its
    # decimal length minus one gives the per-word precision.
    @precision = BigDecimal::BASE.to_s.length - 1
  end

  it "returns array of two values" do
    @arr.each do |x|
      x.precs.kind_of?(Array).should == true
      x.precs.size.should == 2
    end
  end

  it "returns Integers as array values" do
    @arr.each do |x|
      x.precs[0].kind_of?(Integer).should == true
      x.precs[1].kind_of?(Integer).should == true
    end
  end

  it "returns the current value of significant digits as the first value" do
    BigDecimal("3.14159").precs[0].should >= 6
    # Trailing zeros do not add significant digits.
    BigDecimal('1').precs[0].should == BigDecimal('1' + '0' * 100).precs[0]
    [@infinity, @infinity_neg, @nan, @zero, @zero_neg].each do |value|
      value.precs[0].should <= @precision
    end
  end

  it "returns the maximum number of significant digits as the second value" do
    BigDecimal("3.14159").precs[1].should >= 6
    BigDecimal('1').precs[1].should >= 1
    BigDecimal('1' + '0' * 100).precs[1].should >= 101
    [@infinity, @infinity_neg, @nan, @zero, @zero_neg].each do |value|
      value.precs[1].should >= 1
    end
  end
end
|
{
"pile_set_name": "Github"
}
|
<!-- mdoc-generated API documentation for the MonoMac.AppKit.NSImageRect delegate.
     NOTE(review): the ILAsm TypeSignature says the type extends
     System.MulticastDelegate while <Base> lists System.Delegate; this looks like
     standard mdoc tool output - confirm against the assembly before changing. -->
<Type Name="NSImageRect" FullName="MonoMac.AppKit.NSImageRect">
  <TypeSignature Language="C#" Value="public delegate MonoMac.AppKit.NSImage NSImageRect(NSObject sender, RectangleF aRect);" />
  <TypeSignature Language="ILAsm" Value=".class public auto ansi sealed NSImageRect extends System.MulticastDelegate" />
  <AssemblyInfo>
    <AssemblyName>MonoMac</AssemblyName>
    <AssemblyVersion>0.0.0.0</AssemblyVersion>
  </AssemblyInfo>
  <Base>
    <BaseTypeName>System.Delegate</BaseTypeName>
  </Base>
  <Parameters>
    <Parameter Name="sender" Type="MonoMac.Foundation.NSObject" />
    <Parameter Name="aRect" Type="System.Drawing.RectangleF" />
  </Parameters>
  <ReturnValue>
    <ReturnType>MonoMac.AppKit.NSImage</ReturnType>
  </ReturnValue>
  <Docs>
    <param name="sender">To be added.</param>
    <param name="aRect">To be added.</param>
    <summary>To be added.</summary>
    <returns>To be added.</returns>
    <remarks>To be added.</remarks>
  </Docs>
</Type>
|
{
"pile_set_name": "Github"
}
|
/**
 * @jest-environment ./__tests__/html/__jest__/WebChatEnvironment.js
 */

// HTML-driven regression test: loads avatar.undefinedInitials.html inside the
// custom WebChat jest environment. `runHTMLTest` is a global supplied by that
// environment (see the @jest-environment pragma above), not imported here.
describe('Avatar', () => {
  test('with undefined initials should not leave gutter space', () => runHTMLTest('avatar.undefinedInitials.html'));
});
|
{
"pile_set_name": "Github"
}
|
#pragma once
#include <mutex>
#include <vector>
#include "core/lsn.h"
#include "estl/h_vector.h"
#include "estl/shared_mutex.h"
#include "estl/string_view.h"
#include "replicator/walrecord.h"
#include "tools/errors.h"
#include "tools/stringstools.h"
#include "vendor/hopscotch/hopscotch_map.h"
namespace reindexer {
class ItemImpl;
struct IndexDef;
/// Object of this class contains filters set. Filters are separated by namespace and concatenated with disjunction
/// Object of this class contains filters set. Filters are separated by namespace and concatenated with disjunction
class UpdatesFilters {
public:
	/// Single filter condition; currently always passes (placeholder for
	/// future per-namespace conditions).
	class Filter {
	public:
		// TODO: Any additional condition check should be added here
		bool Check() const { return true; }
		void FromJSON(const gason::JsonNode &) {}
		void GetJSON(JsonBuilder &) const {}
		bool operator==(const Filter &) const { return true; }
	};

	/// Merge two filters sets
	/// If one of the filters set is empty, result filters set will also be empty
	/// If one of the filters set contains some conditions for specific namespace,
	/// then result filters set will also contain this conditions
	/// @param rhs - Another filters set
	void Merge(const UpdatesFilters &rhs);
	/// Add new filter for specified namespace. Doesn't merge filters, just concatenates it into disjunction sequence
	/// @param ns - Namespace
	/// @param filter - Filter to add
	void AddFilter(string_view ns, Filter filter);
	/// Check if filters set allows this namespace
	/// @param ns - Namespace
	/// @return 'true' if filter's conditions are satisfied
	bool Check(string_view ns) const;

	Error FromJSON(span<char> json);
	void FromJSON(const gason::JsonNode &root);
	void GetJSON(WrSerializer &ser) const;

	bool operator==(const UpdatesFilters &rhs) const;

private:
	using FiltersList = h_vector<Filter, 4>;
	// Namespace -> disjunction of filters; keys compared case-insensitively
	// (nocase hash/equality functors).
	tsl::hopscotch_map<std::string, FiltersList, nocase_hash_str, nocase_equal_str> filters_;
};
/// Callback interface for receiving WAL updates and connection-state changes.
class IUpdatesObserver {
public:
	virtual ~IUpdatesObserver() = default;
	/// Called for every WAL record matching the observer's filters.
	virtual void OnWALUpdate(LSNPair LSNs, string_view nsName, const WALRecord &rec) = 0;
	/// Called when the connection state changes; `err` carries the reason.
	virtual void OnConnectionState(const Error &err) = 0;
};
/// Registry of update observers and their filters; reads are guarded by a
/// shared (reader/writer) mutex.
class UpdatesObservers {
public:
	struct ObserverInfo {
		IUpdatesObserver *ptr;	// non-owning; lifetime managed by the caller
		UpdatesFilters filters;
	};

	/// Register an observer with the given filters set.
	Error Add(IUpdatesObserver *observer, const UpdatesFilters &filter, SubscriptionOpts opts);
	/// Unregister a previously added observer.
	Error Delete(IUpdatesObserver *observer);
	std::vector<ObserverInfo> Get() const;

	void OnModifyItem(LSNPair LSNs, string_view nsName, ItemImpl *item, int modifyMode, bool inTransaction);
	void OnWALUpdate(LSNPair LSNs, string_view nsName, const WALRecord &rec);
	void OnConnectionState(const Error &err);
	/// @return true when no observers are registered.
	/// FIX: marked const — it only reads observers_ under a shared lock, and
	/// mtx_ is already mutable; backward-compatible for all callers.
	bool empty() const {
		shared_lock<shared_timed_mutex> lck(mtx_);
		return observers_.empty();
	}
	UpdatesFilters GetMergedFilter() const;

protected:
	std::vector<ObserverInfo> observers_;
	mutable shared_timed_mutex mtx_;
};
/// Stream-insertion for a filters set (declaration only; definition elsewhere,
/// presumably printing the JSON form — confirm in the .cc file).
std::ostream &operator<<(std::ostream &o, const reindexer::UpdatesFilters &sv);
} // namespace reindexer
|
{
"pile_set_name": "Github"
}
|
// lodash/fp wrapper for `mean`: runs the base implementation through the FP
// converter with `_falseOptions` (cap/curry/fixed/immutable/rearg disabled).
var convert = require('./convert');
var baseMean = require('../mean');
var falseOptions = require('./_falseOptions');

var func = convert('mean', baseMean, falseOptions);

// Expose the FP placeholder so partial application works with this function.
func.placeholder = require('./placeholder');

module.exports = func;
|
{
"pile_set_name": "Github"
}
|
// ****************************************************************
// Copyright 2007, Charlie Poole
// This is free software licensed under the NUnit license. You may
// obtain a copy of the license at http://nunit.org.
// ****************************************************************
using System;
using System.Collections;
#if NET_2_0
using System.Collections.Generic;
#endif
using NUnit.Framework.Tests;
namespace NUnit.Framework.Constraints
{
#region AllItems
// Tests for AllItemsConstraint: asserts that every element of a collection
// satisfies the wrapped base constraint (non-null, range, instance-of).
// MessageChecker (base class) verifies failure text against expectedMessage.
[TestFixture]
public class AllItemsTests : MessageChecker
{
    [Test]
    public void AllItemsAreNotNull()
    {
        object[] c = new object[] { 1, "hello", 3, Environment.OSVersion };
        Assert.That(c, new AllItemsConstraint( Is.Not.Null ));
    }

    [Test, ExpectedException(typeof(AssertionException))]
    public void AllItemsAreNotNullFails()
    {
        object[] c = new object[] { 1, "hello", null, 3 };
        // Expected failure message, line by line (prefix + description + actual).
        expectedMessage =
            TextMessageWriter.Pfx_Expected + "all items not null" + Environment.NewLine +
            TextMessageWriter.Pfx_Actual + "< 1, \"hello\", null, 3 >" + Environment.NewLine;
        Assert.That(c, new AllItemsConstraint(new NotConstraint(new EqualConstraint(null))));
    }

    [Test]
    public void AllItemsAreInRange()
    {
        int[] c = new int[] { 12, 27, 19, 32, 45, 99, 26 };
        Assert.That(c, new AllItemsConstraint(new RangeConstraint(10, 100)));
    }

    [Test]
    public void AllItemsAreInRange_UsingIComparer()
    {
        int[] c = new int[] { 12, 27, 19, 32, 45, 99, 26 };
        Assert.That(c, new AllItemsConstraint(new RangeConstraint(10, 100).Using(Comparer.Default)));
    }

#if NET_2_0
    [Test]
    public void AllItemsAreInRange_UsingIComparerOfT()
    {
        int[] c = new int[] { 12, 27, 19, 32, 45, 99, 26 };
        Assert.That(c, new AllItemsConstraint(new RangeConstraint(10, 100).Using(Comparer.Default)));
    }

    [Test]
    public void AllItemsAreInRange_UsingComparisonOfT()
    {
        int[] c = new int[] { 12, 27, 19, 32, 45, 99, 26 };
        Assert.That(c, new AllItemsConstraint(new RangeConstraint(10, 100).Using(Comparer.Default)));
    }
#endif

    [Test, ExpectedException(typeof(AssertionException))]
    public void AllItemsAreInRangeFailureMessage()
    {
        int[] c = new int[] { 12, 27, 19, 32, 107, 99, 26 };
        expectedMessage =
            TextMessageWriter.Pfx_Expected + "all items in range (10,100)" + Environment.NewLine +
            TextMessageWriter.Pfx_Actual + "< 12, 27, 19, 32, 107, 99, 26 >" + Environment.NewLine;
        Assert.That(c, new AllItemsConstraint(new RangeConstraint(10, 100)));
    }

    [Test]
    public void AllItemsAreInstancesOfType()
    {
        object[] c = new object[] { 'a', 'b', 'c' };
        Assert.That(c, new AllItemsConstraint(new InstanceOfTypeConstraint(typeof(char))));
    }

    [Test, ExpectedException(typeof(AssertionException))]
    public void AllItemsAreInstancesOfTypeFailureMessage()
    {
        object[] c = new object[] { 'a', "b", 'c' };
        expectedMessage =
            TextMessageWriter.Pfx_Expected + "all items instance of <System.Char>" + Environment.NewLine +
            TextMessageWriter.Pfx_Actual + "< 'a', \"b\", 'c' >" + Environment.NewLine;
        Assert.That(c, new AllItemsConstraint(new InstanceOfTypeConstraint(typeof(char))));
    }
}
#endregion
#region CollectionContains
// Tests for CollectionContainsConstraint: membership checks across different
// collection types, plus the IgnoreCase and Using(...) comparer modifiers.
[TestFixture]
public class CollectionContainsTests
{
    [Test]
    public void CanTestContentsOfArray()
    {
        object item = "xyz";
        object[] c = new object[] { 123, item, "abc" };
        Assert.That(c, new CollectionContainsConstraint(item));
    }

    [Test]
    public void CanTestContentsOfArrayList()
    {
        object item = "xyz";
        ArrayList list = new ArrayList( new object[] { 123, item, "abc" } );
        Assert.That(list, new CollectionContainsConstraint(item));
    }

    [Test]
    public void CanTestContentsOfSortedList()
    {
        object item = "xyz";
        SortedList list = new SortedList();
        list.Add("a", 123);
        list.Add("b", item);
        list.Add("c", "abc");
        // Both the Values and Keys views of a SortedList are searchable.
        Assert.That(list.Values, new CollectionContainsConstraint(item));
        Assert.That(list.Keys, new CollectionContainsConstraint("b"));
    }

    [Test]
    public void CanTestContentsOfCollectionNotImplementingIList()
    {
        // ICollectionAdapter wraps an array as a bare ICollection (no IList).
        ICollectionAdapter ints = new ICollectionAdapter(new int[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
        Assert.That(ints, new CollectionContainsConstraint( 9 ));
    }

    [Test]
    public void IgnoreCaseIsHonored()
    {
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("WORLD").IgnoreCase);
    }

    [Test]
    public void UsesProvidedIComparer()
    {
        MyComparer comparer = new MyComparer();
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("World").Using(comparer));
        // The Called flag verifies the custom comparer was actually invoked.
        Assert.That(comparer.Called, "Comparer was not called");
    }

    // Spy comparer: records invocation, delegates to the default comparer.
    class MyComparer : IComparer
    {
        public bool Called;

        public int Compare(object x, object y)
        {
            Called = true;
            return Comparer.Default.Compare(x, y);
        }
    }

#if NET_2_0
    [Test]
    public void UsesProvidedEqualityComparer()
    {
        MyEqualityComparer comparer = new MyEqualityComparer();
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("World").Using(comparer));
        Assert.That(comparer.Called, "Comparer was not called");
    }

    class MyEqualityComparer : IEqualityComparer
    {
        public bool Called;

        bool IEqualityComparer.Equals(object x, object y)
        {
            Called = true;
            return Comparer.Default.Compare(x, y) == 0;
        }

        int IEqualityComparer.GetHashCode(object x)
        {
            return x.GetHashCode();
        }
    }

    [Test]
    public void UsesProvidedEqualityComparerOfT()
    {
        MyEqualityComparerOfT<string> comparer = new MyEqualityComparerOfT<string>();
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("World").Using(comparer));
        Assert.That(comparer.Called, "Comparer was not called");
    }

    class MyEqualityComparerOfT<T> : IEqualityComparer<T>
    {
        public bool Called;

        bool IEqualityComparer<T>.Equals(T x, T y)
        {
            Called = true;
            return Comparer<T>.Default.Compare(x, y) == 0;
        }

        int IEqualityComparer<T>.GetHashCode(T x)
        {
            return x.GetHashCode();
        }
    }

    [Test]
    public void UsesProvidedComparerOfT()
    {
        MyComparer<string> comparer = new MyComparer<string>();
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("World").Using(comparer));
        Assert.That(comparer.Called, "Comparer was not called");
    }

    class MyComparer<T> : IComparer<T>
    {
        public bool Called;

        public int Compare(T x, T y)
        {
            Called = true;
            return Comparer<T>.Default.Compare(x, y);
        }
    }

    [Test]
    public void UsesProvidedComparisonOfT()
    {
        MyComparison<string> comparer = new MyComparison<string>();
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("World").Using(new Comparison<string>(comparer.Compare)));
        Assert.That(comparer.Called, "Comparer was not called");
    }

    class MyComparison<T>
    {
        public bool Called;

        public int Compare(T x, T y)
        {
            Called = true;
            return Comparer<T>.Default.Compare(x, y);
        }
    }

#if CS_3_0
    [Test]
    public void UsesProvidedLambdaExpression()
    {
        // Case-insensitive comparison supplied inline as a lambda (C# 3.0+).
        Assert.That(new string[] { "Hello", "World" },
            new CollectionContainsConstraint("WORLD").Using<string>( (x,y)=>String.Compare(x, y, true) ));
    }
#endif
#endif
}
#endregion
#region CollectionEquivalent
// Tests for CollectionEquivalentConstraint: same elements regardless of order,
// with duplicates, nulls, case-insensitivity and custom comparisons covered.
// FIX: added the [TestFixture] attribute for consistency with every other
// fixture in this file (AllItemsTests, CollectionContainsTests,
// CollectionOrderedTests) — without it, older NUnit runners may skip the class.
[TestFixture]
public class CollectionEquivalentTests
{
    [Test]
    public void EqualCollectionsAreEquivalent()
    {
        ICollection set1 = new ICollectionAdapter("x", "y", "z");
        ICollection set2 = new ICollectionAdapter("x", "y", "z");
        Assert.That(new CollectionEquivalentConstraint(set1).Matches(set2));
    }

    [Test]
    public void WorksWithCollectionsOfArrays()
    {
        // Element-wise array equality: array3/array4 duplicate array1/array2.
        byte[] array1 = new byte[] { 0x20, 0x44, 0x56, 0x76, 0x1e, 0xff };
        byte[] array2 = new byte[] { 0x42, 0x52, 0x72, 0xef };
        byte[] array3 = new byte[] { 0x20, 0x44, 0x56, 0x76, 0x1e, 0xff };
        byte[] array4 = new byte[] { 0x42, 0x52, 0x72, 0xef };
        ICollection set1 = new ICollectionAdapter(array1, array2);
        ICollection set2 = new ICollectionAdapter(array3, array4);
        Constraint constraint = new CollectionEquivalentConstraint(set1);
        Assert.That(constraint.Matches(set2));
        // Order must not matter, even for arrays-as-elements.
        set2 = new ICollectionAdapter(array4, array3);
        Assert.That(constraint.Matches(set2));
    }

    [Test]
    public void EquivalentIgnoresOrder()
    {
        ICollection set1 = new ICollectionAdapter("x", "y", "z");
        ICollection set2 = new ICollectionAdapter("z", "y", "x");
        Assert.That(new CollectionEquivalentConstraint(set1).Matches(set2));
    }

    [Test]
    public void EquivalentFailsWithDuplicateElementInActual()
    {
        ICollection set1 = new ICollectionAdapter("x", "y", "z");
        ICollection set2 = new ICollectionAdapter("x", "y", "x");
        Assert.False(new CollectionEquivalentConstraint(set1).Matches(set2));
    }

    [Test]
    public void EquivalentFailsWithDuplicateElementInExpected()
    {
        ICollection set1 = new ICollectionAdapter("x", "y", "x");
        ICollection set2 = new ICollectionAdapter("x", "y", "z");
        Assert.False(new CollectionEquivalentConstraint(set1).Matches(set2));
    }

    [Test]
    public void EquivalentHandlesNull()
    {
        ICollection set1 = new ICollectionAdapter(null, "x", null, "z");
        ICollection set2 = new ICollectionAdapter("z", null, "x", null);
        Assert.That(new CollectionEquivalentConstraint(set1).Matches(set2));
    }

    [Test]
    public void EquivalentHonorsIgnoreCase()
    {
        ICollection set1 = new ICollectionAdapter("x", "y", "z");
        ICollection set2 = new ICollectionAdapter("z", "Y", "X");
        Assert.That(new CollectionEquivalentConstraint(set1).IgnoreCase.Matches(set2));
    }

#if CS_3_0
    [Test]
    public void EquivalentHonorsUsing()
    {
        ICollection set1 = new ICollectionAdapter("x", "y", "z");
        ICollection set2 = new ICollectionAdapter("z", "Y", "X");
        Assert.That(new CollectionEquivalentConstraint(set1)
            .Using<string>( (x,y)=>String.Compare(x,y,true) )
            .Matches(set2));
    }
#endif
}
#endregion
#region CollectionOrdered
// Tests for the ordering constraint (Is.Ordered): ascending/descending order,
// custom comparers/comparisons, and ordering by a named property (By).
[TestFixture]
public class CollectionOrderedTests : MessageChecker
{
    [Test]
    public void IsOrdered()
    {
        ArrayList al = new ArrayList();
        al.Add("x");
        al.Add("y");
        al.Add("z");
        Assert.That(al, Is.Ordered);
    }

    [Test]
    public void IsOrdered_2()
    {
        ArrayList al = new ArrayList();
        al.Add(1);
        al.Add(2);
        al.Add(3);
        Assert.That(al, Is.Ordered);
    }

    [Test]
    public void IsOrderedDescending()
    {
        ArrayList al = new ArrayList();
        al.Add("z");
        al.Add("y");
        al.Add("x");
        Assert.That(al, Is.Ordered.Descending);
    }

    [Test]
    public void IsOrderedDescending_2()
    {
        ArrayList al = new ArrayList();
        al.Add(3);
        al.Add(2);
        al.Add(1);
        Assert.That(al, Is.Ordered.Descending);
    }

    [Test, ExpectedException(typeof(AssertionException))]
    public void IsOrdered_Fails()
    {
        ArrayList al = new ArrayList();
        al.Add("x");
        al.Add("z");
        al.Add("y");
        // MessageChecker verifies this exact failure text.
        expectedMessage =
            "  Expected: collection ordered" + Environment.NewLine +
            "  But was:  < \"x\", \"z\", \"y\" >" + Environment.NewLine;
        Assert.That(al, Is.Ordered);
    }

    [Test]
    public void IsOrdered_Allows_adjacent_equal_values()
    {
        ArrayList al = new ArrayList();
        al.Add("x");
        al.Add("x");
        al.Add("z");
        Assert.That(al, Is.Ordered);
    }

    // A null element is rejected with an ArgumentNullException naming its index.
    [Test, ExpectedException(typeof(ArgumentNullException),
        ExpectedMessage="index 1", MatchType=MessageMatch.Contains)]
    public void IsOrdered_Handles_null()
    {
        ArrayList al = new ArrayList();
        al.Add("x");
        al.Add(null);
        al.Add("z");
        Assert.That(al, Is.Ordered);
    }

    [Test, ExpectedException(typeof(ArgumentException))]
    public void IsOrdered_TypesMustBeComparable()
    {
        ArrayList al = new ArrayList();
        al.Add(1);
        al.Add("x");
        Assert.That(al, Is.Ordered);
    }

    [Test, ExpectedException(typeof(ArgumentException))]
    public void IsOrdered_AtLeastOneArgMustImplementIComparable()
    {
        ArrayList al = new ArrayList();
        al.Add(new object());
        al.Add(new object());
        Assert.That(al, Is.Ordered);
    }

    [Test]
    public void IsOrdered_Handles_custom_comparison()
    {
        ArrayList al = new ArrayList();
        al.Add(new object());
        al.Add(new object());
        // AlwaysEqualComparer (declared elsewhere in the test assembly) lets
        // non-comparable objects pass, and records that it was invoked.
        AlwaysEqualComparer comparer = new AlwaysEqualComparer();
        Assert.That(al, Is.Ordered.Using(comparer));
        Assert.That(comparer.Called, "TestComparer was not called");
    }

    [Test]
    public void IsOrdered_Handles_custom_comparison2()
    {
        ArrayList al = new ArrayList();
        al.Add(2);
        al.Add(1);
        TestComparer comparer = new TestComparer();
        Assert.That(al, Is.Ordered.Using(comparer));
        Assert.That(comparer.Called, "TestComparer was not called");
    }

#if NET_2_0
    [Test]
    public void UsesProvidedComparerOfT()
    {
        ArrayList al = new ArrayList();
        al.Add(1);
        al.Add(2);
        MyComparer<int> comparer = new MyComparer<int>();
        Assert.That(al, Is.Ordered.Using(comparer));
        Assert.That(comparer.Called, "Comparer was not called");
    }

    // Spy generic comparer: records invocation, delegates to the default.
    class MyComparer<T> : IComparer<T>
    {
        public bool Called;

        public int Compare(T x, T y)
        {
            Called = true;
            return Comparer<T>.Default.Compare(x, y);
        }
    }

    [Test]
    public void UsesProvidedComparisonOfT()
    {
        ArrayList al = new ArrayList();
        al.Add(1);
        al.Add(2);
        MyComparison<int> comparer = new MyComparison<int>();
        Assert.That(al, Is.Ordered.Using(new Comparison<int>(comparer.Compare)));
        Assert.That(comparer.Called, "Comparer was not called");
    }

    class MyComparison<T>
    {
        public bool Called;

        public int Compare(T x, T y)
        {
            Called = true;
            return Comparer<T>.Default.Compare(x, y);
        }
    }

#if CS_3_0
    [Test]
    public void UsesProvidedLambda()
    {
        ArrayList al = new ArrayList();
        al.Add(1);
        al.Add(2);
        Comparison<int> comparer = (x, y) => x.CompareTo(y);
        Assert.That(al, Is.Ordered.Using(comparer));
    }
#endif
#endif

    [Test]
    public void IsOrderedBy()
    {
        ArrayList al = new ArrayList();
        al.Add(new OrderedByTestClass(1));
        al.Add(new OrderedByTestClass(2));
        Assert.That(al, Is.Ordered.By("Value"));
    }

    [Test]
    public void IsOrderedBy_Comparer()
    {
        ArrayList al = new ArrayList();
        al.Add(new OrderedByTestClass(1));
        al.Add(new OrderedByTestClass(2));
        Assert.That(al, Is.Ordered.By("Value").Using(Comparer.Default));
    }

    [Test]
    public void IsOrderedBy_Handles_heterogeneous_classes_as_long_as_the_property_is_of_same_type()
    {
        ArrayList al = new ArrayList();
        al.Add(new OrderedByTestClass(1));
        al.Add(new OrderedByTestClass2(2));
        Assert.That(al, Is.Ordered.By("Value"));
    }

    // Minimal holder with an int "Value" property for By("Value") tests.
    class OrderedByTestClass
    {
        private int myValue;

        public int Value
        {
            get { return myValue; }
            set { myValue = value; }
        }

        public OrderedByTestClass(int value)
        {
            Value = value;
        }
    }

    // Second, unrelated holder type with the same property name/type.
    class OrderedByTestClass2
    {
        private int myValue;

        public int Value
        {
            get { return myValue; }
            set { myValue = value; }
        }

        public OrderedByTestClass2(int value)
        {
            Value = value;
        }
    }
}
#endregion
}
|
{
"pile_set_name": "Github"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections.abc import Iterable
import numpy as np
from ... import opcodes as OperandDef
from ...serialize import ValueType, KeyField, TupleField
from ...utils import check_chunks_unknown_shape
from ...tiles import TilesError
from ..utils import unify_chunks
from ..array_utils import as_same_device, device, is_sparse_module
from ..operands import TensorOperand, TensorOperandMixin
from ..arithmetic.utils import tree_add
from ..datasource import tensor as astensor
from ..core import TensorOrder
class TensorTensorDot(TensorOperand, TensorOperandMixin):
    """Operand implementing tensordot: contraction of tensors ``a`` and ``b``
    over the axis lists ``a_axes`` / ``b_axes``.

    The output shape is the non-contracted axes of ``a`` followed by the
    non-contracted axes of ``b`` (see ``__call__``).
    """

    _op_type_ = OperandDef.TENSORDOT

    _a = KeyField('a')
    _b = KeyField('b')
    # Axes of each operand to contract over (parallel sequences).
    _a_axes = TupleField('a_axes', ValueType.int32)
    _b_axes = TupleField('b_axes', ValueType.int32)

    def __init__(self, a_axes=None, b_axes=None, dtype=None, sparse=False, **kw):
        super().__init__(_a_axes=a_axes, _b_axes=b_axes, _dtype=dtype, _sparse=sparse, **kw)

    @property
    def a(self):
        return self._a

    @property
    def b(self):
        return self._b

    @property
    def a_axes(self):
        return self._a_axes

    @property
    def b_axes(self):
        return self._b_axes

    def _set_inputs(self, inputs):
        # Rebind the key fields after (de)serialization replaces inputs.
        super()._set_inputs(inputs)
        self._a = self._inputs[0]
        self._b = self._inputs[1]

    def __call__(self, a, b):
        # Result shape: surviving (non-contracted) dims of `a`, then of `b`.
        shape = tuple(s for i, s in enumerate(a.shape) if i not in set(self._a_axes)) + \
            tuple(s for i, s in enumerate(b.shape) if i not in set(self._b_axes))
        return self.new_tensor([a, b], shape, order=TensorOrder.C_ORDER)

    @classmethod
    def estimate_size(cls, ctx, op):
        """Estimate (result bytes, calc bytes) for scheduling purposes."""
        chunk = op.outputs[0]
        if chunk.is_sparse():
            return super().estimate_size(ctx, op)

        # empirical value in real environments
        calc_usage = chunk.nbytes

        # add input sizes when sparse-to-dense is needed
        for inp in chunk.inputs:
            if inp.is_sparse():
                calc_usage += inp.nbytes

        ctx[chunk.key] = (chunk.nbytes, calc_usage)

    @classmethod
    def tile(cls, op):
        """Split the tensordot into per-chunk tensordots plus tree-adds.

        Contracted axes of `a` and `b` are given matching labels so
        unify_chunks aligns their chunk splits; each output chunk is then the
        sum (tree_add) over all chunk combinations along the contracted axes.
        """
        a, b, a_axes, b_axes = op.a, op.b, op.a_axes, op.b_axes

        # Label axes: contracted axes share ids, free axes get fresh ids from
        # the counter, so unify_chunks only aligns the contracted dimensions.
        c = itertools.count(max(a.ndim, b.ndim))
        a_ax = tuple(a_axes.index(i) if i in a_axes else next(c) for i in range(a.ndim))
        b_ax = tuple(b_axes.index(i) if i in b_axes else next(c) for i in range(b.ndim))
        check_chunks_unknown_shape(op.inputs, TilesError)
        a, b = unify_chunks((a, a_ax), (b, b_ax))

        out = op.outputs[0]

        # Chunk-index ranges for the surviving axes; output_axes remembers
        # which operand (0 = a, 1 = b) and which axis each output dim came from.
        a_output_indexes = [range(len(a.nsplits[i])) for i in range(a.ndim) if i not in a_axes]
        b_output_indexes = [range(len(b.nsplits[i])) for i in range(b.ndim) if i not in b_axes]
        output_axes = [(0, i) for i in range(a.ndim) if i not in a_axes] + \
            [(1, i) for i in range(b.ndim) if i not in b_axes]

        out_chunks = []
        for out_idx in itertools.product(*itertools.chain(a_output_indexes, b_output_indexes)):
            # Fix the chunk indices of the free axes for this output chunk.
            a_indexes = [None] * a.ndim
            b_indexes = [None] * b.ndim
            tensor_shape = []
            for i, idx in enumerate(out_idx):
                t_idx, axis = output_axes[i]
                t = (a, b)[t_idx]
                (a_indexes if t_idx == 0 else b_indexes)[axis] = idx
                tensor_shape.append(t.nsplits[axis][idx])
            tensor_shape = tuple(tensor_shape)

            # One partial tensordot per chunk combination along the contracted
            # axes; their sum is the output chunk.
            tensordot_chunks = []
            for contract_indexes in itertools.product(*[range(len(a.nsplits[ax])) for ax in a_axes]):
                a_indices, b_indices = list(a_indexes), list(b_indexes)
                for a_axis, contract_index in zip(a_axes, contract_indexes):
                    a_indices[a_axis] = contract_index
                for b_axis, contract_index in zip(b_axes, contract_indexes):
                    b_indices[b_axis] = contract_index
                tensordot_chunk_op = op.copy().reset_key()
                tensordot_chunk = tensordot_chunk_op.new_chunk(
                    [a.cix[tuple(a_indices)], b.cix[tuple(b_indices)]],
                    shape=tensor_shape, order=out.order)
                tensordot_chunks.append(tensordot_chunk)

            if len(tensordot_chunks) == 1:
                # Single partial result: re-key it into place, no add needed.
                c = tensordot_chunks[0]
                chunk_op = c.op.copy()
                chunk = chunk_op.new_chunk(c.inputs, shape=c.shape, index=out_idx, order=out.order)
            else:
                chunk = tree_add(op.dtype, tensordot_chunks, out_idx, tensor_shape, sparse=op.sparse)
            out_chunks.append(chunk)

        get_nsplits = lambda t_idx, i: (a, b)[t_idx].nsplits[i]
        nsplits = [get_nsplits(*it) for it in output_axes]
        new_op = op.copy()
        return new_op.new_tensors([a, b], out.shape,
                                  chunks=out_chunks, nsplits=nsplits)

    @classmethod
    def execute(cls, ctx, op):
        """Run the chunk-level tensordot on the chosen device/backend."""
        (a, b), device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)

        axes = op.a_axes, op.b_axes
        with device(device_id):
            if not op.sparse and is_sparse_module(xp):
                # tell sparse to do calculation on numpy or cupy dot
                ctx[op.outputs[0].key] = xp.tensordot(a, b, axes, sparse=False)
            else:
                ret = xp.tensordot(a, b, axes)
                out = op.outputs[0]
                # Normalize memory order to the output's declared order.
                ctx[out.key] = ret.astype(ret.dtype, order=out.order.value, copy=False)
def tensordot(a, b, axes=2, sparse=None):
    """
    Compute tensor dot product along specified axes for tensors >= 1-D.

    Given two tensors (arrays of dimension greater than or equal to one),
    `a` and `b`, and an array_like object containing two array_like
    objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
    elements (components) over the axes specified by ``a_axes`` and
    ``b_axes``. The third argument can be a single non-negative
    integer_like scalar, ``N``; if it is such, then the last ``N``
    dimensions of `a` and the first ``N`` dimensions of `b` are summed
    over.

    Parameters
    ----------
    a, b : array_like, len(shape) >= 1
        Tensors to "dot".
    axes : int or (2,) array_like
        * integer_like
          If an int N, sum over the last N axes of `a` and the first N axes
          of `b` in order. The sizes of the corresponding axes must match.
        * (2,) array_like
          Or, a list of axes to be summed over, first sequence applying to `a`,
          second to `b`. Both elements array_like must be of the same length.
    sparse : bool, optional
        Whether the resulting tensor is marked sparse. When not given it
        defaults to True only if both `a` and `b` are sparse.

    See Also
    --------
    dot, einsum

    Notes
    -----
    Three common use cases are:

    * ``axes = 0`` : tensor product :math:`a\\otimes b`
    * ``axes = 1`` : tensor dot product :math:`a\\cdot b`
    * ``axes = 2`` : (default) tensor double contraction :math:`a:b`

    When `axes` is integer_like, the sequence for evaluation will be: first
    the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
    Nth axis in `b` last.

    When there is more than one axis to sum over - and they are not the last
    (first) axes of `a` (`b`) - the argument `axes` should consist of
    two sequences of the same length, with the first axis to sum over given
    first in both sequences, the second axis second, and so forth.

    Examples
    --------
    >>> import mars.tensor as mt

    A "traditional" example:

    >>> a = mt.arange(60.).reshape(3,4,5)
    >>> b = mt.arange(24.).reshape(4,3,2)
    >>> c = mt.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    >>> r = c.execute()
    >>> r
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])
    >>> # A slower but equivalent way of computing the same...
    >>> ra = np.arange(60.).reshape(3,4,5)
    >>> rb = np.arange(24.).reshape(4,3,2)
    >>> d = np.zeros((5,2))
    >>> for i in range(5):
    ...   for j in range(2):
    ...     for k in range(3):
    ...       for n in range(4):
    ...         d[i,j] += ra[k,n,i] * rb[n,k,j]
    >>> r == d
    array([[ True,  True],
           [ True,  True],
           [ True,  True],
           [ True,  True],
           [ True,  True]], dtype=bool)

    An extended example taking advantage of the overloading of + and \\*:

    >>> a = mt.array(range(1, 9))
    >>> a.shape = (2, 2, 2)
    >>> A = mt.array(('a', 'b', 'c', 'd'), dtype=object)
    >>> A.shape = (2, 2)
    >>> a.execute(); A.execute()
    array([[[1, 2],
            [3, 4]],
           [[5, 6],
            [7, 8]]])
    array([[a, b],
           [c, d]], dtype=object)
    >>> mt.tensordot(a, A).execute() # third argument default is 2 for double-contraction
    array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
    >>> mt.tensordot(a, A, 1).execute()
    array([[[acc, bdd],
            [aaacccc, bbbdddd]],
           [[aaaaacccccc, bbbbbdddddd],
            [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
    >>> mt.tensordot(a, A, 0).execute() # tensor product (result too long to incl.)
    array([[[[[a, b],
              [c, d]],
              ...
    >>> mt.tensordot(a, A, (0, 1)).execute()
    array([[[abbbbb, cddddd],
            [aabbbbbb, ccdddddd]],
           [[aaabbbbbbb, cccddddddd],
            [aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
    >>> mt.tensordot(a, A, (2, 1)).execute()
    array([[[abb, cdd],
            [aaabbbb, cccdddd]],
           [[aaaaabbbbbb, cccccdddddd],
            [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
    >>> mt.tensordot(a, A, ((0, 1), (0, 1))).execute()
    array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
    >>> mt.tensordot(a, A, ((2, 1), (1, 0))).execute()
    array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
    """
    a = astensor(a)
    b = astensor(b)

    # Resolve ``axes`` into explicit per-tensor axis tuples.
    if isinstance(axes, Iterable):
        a_axes, b_axes = axes
    else:
        # Integer N: contract the last N axes of `a` (in reverse order)
        # against the first N axes of `b`.
        a_axes = tuple(range(a.ndim - 1, a.ndim - axes - 1, -1))
        b_axes = tuple(range(0, axes))

    if isinstance(a_axes, Iterable):
        a_axes = tuple(a_axes)
    else:
        a_axes = (a_axes,)
    # Normalize negative axis indices to absolute positions.
    a_axes = tuple(axis if axis >= 0 else a.ndim + axis for axis in a_axes)

    if isinstance(b_axes, Iterable):
        b_axes = tuple(b_axes)
    else:
        b_axes = (b_axes,)
    b_axes = tuple(axis if axis >= 0 else b.ndim + axis for axis in b_axes)

    # The contracted dimensions must agree pairwise.
    if a.shape and b.shape and \
            not np.array_equal(np.array(a.shape)[list(a_axes)], np.array(b.shape)[list(b_axes)]):
        raise ValueError('shape-mismatch for sum')

    # Default to a sparse result only when both operands are sparse.
    sparse = sparse if sparse is not None else a.issparse() and b.issparse()
    op = TensorTensorDot(a_axes=a_axes, b_axes=b_axes, dtype=np.promote_types(a.dtype, b.dtype),
                         sparse=sparse)
    return op(a, b)
|
{
"pile_set_name": "Github"
}
|
# Jasmine Core 2.4.0 Release Notes
## Summary
This release contains a number of fixes and pull requests.
The most notable is probably that Jasmine now supports randomization of spec order
## Changes
* Run jasmine's specs in random order
* Add support for returning run details for reporting randomness
* Use className instead of class when creating DOM elements
## Pull Requests & Issues
* Syntax highlighting in README.md
- Merges [#973](https://github.com/jasmine/jasmine/issues/973) from @brunoqc
* Added a throw error block in describe in case a function with arguments is passed in describe
- Fixes [#896](https://github.com/jasmine/jasmine/issues/896)
- Merges [#955](https://github.com/jasmine/jasmine/issues/955) from @himajasuman
* Remove unused `queueableFn` arg from `onException`
- Fixes [#958](https://github.com/jasmine/jasmine/issues/958)
* Remove unused parameter from toThrowError
- Merges [#957](https://github.com/jasmine/jasmine/issues/957) from @FuzzySockets
* Abort spying when the target cannot be spied upon
- Fixes [#948](https://github.com/jasmine/jasmine/issues/948)
- Merges [#949](https://github.com/jasmine/jasmine/issues/949) from @StephanBijzitter
* Removed GOALS_2.0.md, doesn't seem to be needed anymore
- Merges [#954](https://github.com/jasmine/jasmine/issues/954) from @matthewhuff89
* Change #xit so that it will output a more BDD-style pending message
- Merges [#942](https://github.com/jasmine/jasmine/issues/942) from @lalunamel
- Fixes [#930](https://github.com/jasmine/jasmine/issues/930)
- Fixes [#912](https://github.com/jasmine/jasmine/issues/912)
* Allow tests to run in random order
- Merges [#927](https://github.com/jasmine/jasmine/issues/927) from @marcioj
* Use toString for objects if it has been overridden
- Merges [#929](https://github.com/jasmine/jasmine/issues/929) from @myitcv
- Fixes [#928](https://github.com/jasmine/jasmine/issues/928)
* Fix circles/x from getting cut off on Mac/chrome
- Merges [#932](https://github.com/jasmine/jasmine/issues/932) from @James-Dunn
* Postpone find() until it is needed
- Merges [#924](https://github.com/jasmine/jasmine/issues/924) from @danielalexiuc
- Fixes [#917](https://github.com/jasmine/jasmine/issues/917)
* check for global before assigning
* Reverse suite afterEach behavior to match semantics?
- Merges [#908](https://github.com/jasmine/jasmine/issues/908) from @mcamac
* Use badges from shields.io
- Merges [#902](https://github.com/jasmine/jasmine/issues/902) from @SimenB
* xdescribe marks pending, plus associated tests.
- Merges [#869](https://github.com/jasmine/jasmine/issues/869) from @ljwall
- Fixes [#855](https://github.com/jasmine/jasmine/issues/855)
* Update glob to latest
- Merge [#892](https://github.com/jasmine/jasmine/issues/892) from @obastemur
- Fixes [#891](https://github.com/jasmine/jasmine/issues/891)
* Remove moot `version` property from bower.json
- Merges [#874](https://github.com/jasmine/jasmine/issues/874) from @kkirsche
* add toHaveBeenCalledTimes matcher
- Merges [#871](https://github.com/jasmine/jasmine/issues/871) from @logankd
- Fixes [#853](https://github.com/jasmine/jasmine/issues/853)
* Update CONTRIBUTING.md
- Merges [#856](https://github.com/jasmine/jasmine/issues/856) from @lpww
* Make the HtmlReport CSS classes "unique enough"
- Merges [#851](https://github.com/jasmine/jasmine/issues/851) from @prather-mcs
- Fixes [#844](https://github.com/jasmine/jasmine/issues/844)
* Raise an error when jasmine.any() isn't passed a constructor
- Merges [#854](https://github.com/jasmine/jasmine/issues/854) from @danfinnie
- Fixes [#852](https://github.com/jasmine/jasmine/issues/852)
------
_Release Notes generated with [Anchorman](http://github.com/infews/anchorman)_
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8" ?>
<dt-example order="0">
<title lib="Buttons">API</title>
<info><![CDATA[
The Buttons API is tightly integrated with DataTables own API, building on the same style and interaction. With the API it is possible to manipulate individual buttons and groups of buttons, altering their characteristics and behaviour.
The examples in this section show how the Buttons API can be used.
]]></info>
</dt-example>
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2014-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.db
import com.netflix.atlas.core.model._
import com.netflix.atlas.core.stacklang.Interpreter
import com.netflix.spectator.api.DefaultRegistry
import com.netflix.spectator.api.ManualClock
import com.typesafe.config.ConfigFactory
import org.scalatest.funsuite.AnyFunSuite
/**
  * Tests for MemoryDatabase query evaluation: exercises the data-expression
  * operators against a small in-memory dataset, both at the base step size
  * and at a 3x consolidated step with the various consolidation functions.
  */
class MemoryDatabaseSuite extends AnyFunSuite {

  private val interpreter = new Interpreter(DataVocabulary.allWords)
  private val step = DefaultSettings.stepSize
  private val clock = new ManualClock()
  private val registry = new DefaultRegistry(clock)

  // Small two-block database in test mode so that data written below is
  // queryable immediately after an index rebuild.
  private val db = new MemoryDatabase(
    registry,
    ConfigFactory.parseString("""
      |block-size = 60
      |num-blocks = 2
      |rebuild-frequency = 10s
      |test-mode = true
      |intern-while-building = true
    """.stripMargin)
  )

  // Seed series: "a" and "b" via normal updates; "c" via three rollup
  // inserts. The expected values below (e.g. 15 = 4 + 5 + 6 at t0) indicate
  // the rollup aggregates the three inputs — presumably a sum; confirm
  // against MemoryDatabase.rollup.
  addData("a", 1.0, 2.0, 3.0)
  addData("b", 3.0, 2.0, 1.0)
  addRollupData("c", 4.0, 5.0, 6.0)
  addRollupData("c", 5.0, 6.0, 7.0)
  addRollupData("c", 6.0, 7.0, 8.0)

  // Evaluation window covering the three datapoints written above.
  private val context = EvalContext(0, 3 * step, step)

  /** Write one datapoint per step for `name` and rebuild the index. */
  private def addData(name: String, values: Double*): Unit = {
    val tags = Map("name" -> name)
    val data = values.toList.zipWithIndex.map {
      case (v, i) =>
        clock.setWallTime(i * step)
        Datapoint(tags, i * step, v)
    }
    db.update(data)
    db.index.rebuildIndex()
  }

  /** Like addData, but feed each datapoint through the rollup path. */
  private def addRollupData(name: String, values: Double*): Unit = {
    val tags = Map("name" -> name)
    val data = values.toList.zipWithIndex.map {
      case (v, i) =>
        clock.setWallTime(i * step)
        Datapoint(tags, i * step, v)
    }
    data.foreach(db.rollup)
    db.index.rebuildIndex()
  }

  /** Parse a stack-language string into a DataExpr. */
  private def expr(str: String): DataExpr = {
    interpreter.execute(str).stack match {
      case ModelExtractors.DataExprType(v) :: Nil => v
      case _                                      => throw new IllegalArgumentException(s"invalid data expr: $str")
    }
  }

  /** Evaluate `str` at step `s` and return results sorted by label. */
  private def exec(str: String, s: Long = step): List[TimeSeries] = {
    val ctxt = context.copy(step = s)
    db.execute(ctxt, expr(str)).sortWith(_.label < _.label).map { t =>
      t.mapTimeSeq(s => s.bounded(context.start, context.end))
    }
  }

  /** Expected series with no tags; `mul` scales the step of the sequence. */
  private def ts(label: String, mul: Int, values: Double*): TimeSeries = {
    TimeSeries(Map.empty, label, new ArrayTimeSeq(DsType.Gauge, 0L, mul * step, values.toArray))
  }

  /** Expected series with name/foo tags; `mul` scales the step. */
  private def ts(name: String, label: String, mul: Int, values: Double*): TimeSeries = {
    val seq = new ArrayTimeSeq(DsType.Gauge, 0L, mul * step, values.toArray)
    TimeSeries(Map("name" -> name, "foo" -> "bar"), label, seq)
  }

  /** Expected series as returned by the db: the "foo" tag is dropped. */
  private def expTS(name: String, label: String, mul: Int, values: Double*): TimeSeries = {
    val tmp = ts(name, label, mul, values: _*)
    tmp.withTags(tmp.tags - "foo")
  }

  test(":eq query") {
    val result = exec("name,a,:eq")
    assert(result.map(_.tags) === List(Map("name" -> "a")))
    assert(result === List(expTS("a", "sum(name=a)", 1, 1.0, 2.0, 3.0)))
  }

  test(":in query") {
    assert(exec("name,(,a,b,),:in") === List(ts("sum(name in (a,b))", 1, 4.0, 4.0, 4.0)))
  }

  test(":re query") {
    assert(exec("name,[ab]$,:re") === List(ts("sum(name~/^[ab]$/)", 1, 4.0, 4.0, 4.0)))
  }

  test(":has query") {
    assert(exec("name,:has") === List(ts("sum(has(name))", 1, 19.0, 22.0, 25.0)))
  }

  test(":offset expr") {
    assert(
      exec(":true,:sum,1m,:offset") === List(
        ts("sum(true) (offset=1m)", 1, Double.NaN, 19.0, 22.0)
      )
    )
  }

  test(":sum expr") {
    assert(exec(":true,:sum") === List(ts("sum(true)", 1, 19.0, 22.0, 25.0)))
  }

  test(":count expr") {
    assert(exec(":true,:count") === List(ts("count(true)", 1, 5.0, 5.0, 5.0)))
  }

  test(":min expr") {
    assert(exec(":true,:min") === List(ts("min(true)", 1, 1.0, 2.0, 1.0)))
  }

  test(":max expr") {
    assert(exec(":true,:max") === List(ts("max(true)", 1, 6.0, 7.0, 8.0)))
  }

  test(":by expr") {
    val expected = List(
      expTS("a", "(name=a)", 1, 1.0, 2.0, 3.0),
      expTS("b", "(name=b)", 1, 3.0, 2.0, 1.0),
      expTS("c", "(name=c)", 1, 15.0, 18.0, 21.0)
    )
    assert(exec(":true,(,name,),:by") === expected)
  }

  test(":all expr") {
    val expected = List(
      expTS("a", "name=a", 1, 1.0, 2.0, 3.0),
      expTS("b", "name=b", 1, 3.0, 2.0, 1.0),
      expTS("c", "name=c", 1, 15.0, 18.0, 21.0)
    )
    assert(exec(":true,:all") === expected)
  }

  // The c=3 variants consolidate the three base datapoints into a single
  // output datapoint; the default consolidation appears to average
  // (e.g. sum(true): (19 + 22 + 25) / 3 = 22).

  test(":sum expr, c=3") {
    assert(exec(":true,:sum", 3 * step) === List(ts("sum(true)", 3, 22.0)))
  }

  test(":sum expr, c=3, cf=sum") {
    assert(exec(":true,:sum,:cf-sum", 3 * step) === List(ts("sum(true)", 3, 66.0)))
  }

  test(":sum expr, c=3, cf=max") {
    assert(exec(":true,:sum,:cf-max", 3 * step) === List(ts("sum(true)", 3, 27.0)))
  }

  test(":count expr, c=3") {
    assert(exec(":true,:count", 3 * step) === List(ts("count(true)", 3, 5.0)))
  }

  test(":count expr, c=3, cf=sum") {
    assert(exec(":true,:count,:cf-sum", 3 * step) === List(ts("count(true)", 3, 15.0)))
  }

  test(":count expr, c=3, cf=max") {
    assert(exec(":true,:count,:cf-max", 3 * step) === List(ts("count(true)", 3, 5.0)))
  }

  test(":min expr, c=3") {
    assert(exec(":true,:min", 3 * step) === List(ts("min(true)", 3, 1.0)))
  }

  test(":max expr, c=3") {
    assert(exec(":true,:max", 3 * step) === List(ts("max(true)", 3, 8.0)))
  }

  test(":by expr, c=3") {
    val expected = List(
      expTS("a", "(name=a)", 3, 2.0),
      expTS("b", "(name=b)", 3, 2.0),
      expTS("c", "(name=c)", 3, 18.0)
    )
    assert(exec(":true,(,name,),:by", 3 * step) === expected)
  }

  test(":all expr, c=3") {
    val expected = List(
      expTS("a", "name=a", 3, 6.0),
      expTS("b", "name=b", 3, 6.0),
      expTS("c", "name=c", 3, 54.0)
    )
    assert(exec(":true,:all", 3 * step) === expected)
  }
}
|
{
"pile_set_name": "Github"
}
|
-- Revert delivery:add_saml_to_user_type from pg
BEGIN;

-- Remove all SAML users first: PostgreSQL cannot drop a single label from
-- an enum, so the 'saml' value itself is not revertible in place — the
-- whole type is rebuilt without it below.
DELETE FROM users
WHERE user_type = 'saml';

-- Rebuild the enum without the 'saml' label under a temporary name.
CREATE TYPE user_type_old as ENUM('internal', 'external');

-- Drop all things that depend on user_type
-- Drop trigger that prevents type switches
DROP TRIGGER prevent_type_switch on users;
-- Drop internal_users view
DROP VIEW internal_users;

-- Swap the column over to the rebuilt enum (via text), then rename the
-- new type back to the original name.
ALTER TABLE users
ALTER COLUMN user_type TYPE user_type_old USING (user_type::text::user_type_old);
DROP TYPE user_type;
ALTER TYPE user_type_old RENAME TO user_type;

-- Re-create trigger that prevents type switches
CREATE TRIGGER prevent_type_switch
BEFORE UPDATE
ON users
FOR EACH ROW
WHEN (NEW.user_type != OLD.user_type)
EXECUTE PROCEDURE raise_user_type_change_exception();

-- Re-create view
CREATE OR REPLACE VIEW internal_users AS
SELECT u.id,
u.enterprise_id,
u.name,
u.ssh_pub_key,
u.first_name,
u.last_name,
u.email,
p.hashed_pass,
p.hash_type
FROM users AS u
NATURAL LEFT OUTER JOIN user_passwords AS p
WHERE u.user_type = 'internal';

-- Re-attach the INSTEAD OF triggers that make internal_users writable.
CREATE TRIGGER insert_internal_user
INSTEAD OF INSERT ON internal_users
FOR EACH ROW
EXECUTE PROCEDURE insert_internal_user();

CREATE TRIGGER update_internal_user
INSTEAD OF UPDATE ON internal_users
FOR EACH ROW
EXECUTE PROCEDURE update_internal_user();

CREATE TRIGGER delete_internal_user
INSTEAD OF DELETE ON internal_users
FOR EACH ROW
EXECUTE PROCEDURE delete_internal_user();

-- Re-create the read-only view joining users to their OAuth aliases.
CREATE OR REPLACE VIEW user_aliases AS
SELECT u.id,
u.enterprise_id,
u.name,
u.ssh_pub_key,
u.first_name,
u.last_name,
u.email,
u.user_type,
o.oauth_app_id,
o.alias
FROM users AS u
JOIN oauth_user_aliases AS o
ON o.user_id = u.id;

COMMENT ON VIEW user_aliases IS
'A view of all users and their various OAuth aliases.';

COMMIT;
|
{
"pile_set_name": "Github"
}
|
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("ScaleToSize.UWP")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("ScaleToSize.UWP")]
[assembly: AssemblyCopyright("Copyright © 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
[assembly: ComVisible(false)]
|
{
"pile_set_name": "Github"
}
|
/*
Copyright (c) 2014, Philipp Krähenbühl
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Stanford University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY Philipp Krähenbühl ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Philipp Krähenbühl BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "eigen.h"
// Solves a strictly convex QP of the form
//   minimize_x   x' Q x + c' x
//   subject to   A x <= b
// Note Q needs to be positive definite!
VectorXf qp( const RMatrixXf & Q, const VectorXf & c, const RMatrixXf & A, const VectorXf & b );
VectorXd qp( const RMatrixXd & Q, const VectorXd & c, const RMatrixXd & A, const VectorXd & b );
// Sparse-constraint variants. Here Q is passed as a vector — presumably the
// diagonal of a diagonal Q; confirm against the implementation.
VectorXf sparseQp( const VectorXf & Q, const VectorXf & c, const SRMatrixXf & A, const VectorXf & b );
// NOTE(review): this double-precision overload takes Q as VectorXf while
// every other argument is double — it looks like a copy/paste slip (expected
// VectorXd). Verify against the out-of-line definition before changing the
// signature, since fixing only the declaration would break the link.
VectorXd sparseQp( const VectorXf & Q, const VectorXd & c, const SRMatrixXd & A, const VectorXd & b );
|
{
"pile_set_name": "Github"
}
|
.TH PAHO_C_SUB 1L "31 July 2018 (v1.3.0)" http://eclipse.org/paho
.SH NAME
paho_c_sub \- receive (subscribe to) data from an MQTT server
.SH SYNOPSIS
.B paho_c_sub
[\fItopic\fR]
[\fB\-t\fR|\fB\-\-topic\fR \fItopic\fR]
[\fB\-c\fR|\fB\-\-connection\fR \fIconnection\fR]
[\fB\-h\fR|\fB\-\-host\fR \fIhostname\fR]
[\fB\-p\fR|\fB\-\-port\fR \fIportnumber\fR]
[\fB\-i\fR|\fB\-\-clientid\fR \fIclientid\fR]
[\fB\-u\fR|\fB\-\-username\fR \fIusername\fR]
[\fB\-P\fR|\fB\-\-password\fR \fIpassword\fR]
[\fB\-k\fR|\fB\-\-keepalive\fR \fIkeepalive-timeout\fR]
[\fB\-V\fR|\fB\-\-MQTT-version\fR \fB31\fR|\fB311\fR|\fB5\fR]
.br
[\fB\-q\fR|\fB\-\-qos\fR \fB0\fR|\fB1\fR|\fB2\fR]
[\fB\-R\fR|\fB\-\-no-retained\fR]
[\fB\-\-delimiter\fR \fIdelimiter\fR]
[\fB\-\-no-delimiter\fR]
.br
[\fB\-\-quiet\fR]
[\fB\-\-verbose\fR]
[\fB\-\-trace\fR \fBmin\fR|\fBmax\fR|\fBerror\fR|\fBprotocol\fR]
.br
[\fB\-\-will-topic\fR \fIwill-topic\fR]
[\fB\-\-will-payload\fR \fIwill-payload\fR]
[\fB\-\-will-retain\fR]
[\fB\-\-will-qos\fR \fB0\fR|\fB1\fR|\fB2\fR]
.br
[\fB\-\-cafile\fR \fIcafile\fR]
[\fB\-\-capath\fR \fIcapath\fR]
[\fB\-\-cert\fR \fIcertfile\fR]
[\fB\-\-key\fR \fIkeyfile\fR]
[\fB\-\-keypass\fR \fIpassword\fR]
[\fB\-\-ciphers\fR \fIcipher-string\fR]
[\fB\-\-insecure\fR]
.SH DESCRIPTION
.B paho_c_sub
receives data from an MQTT server using the Eclipse Paho C client asynchronous library (MQTTAsync).
MQTT is a protocol, operating over TCP/IP, which allows programs to easily communicate
with each other through a server. Messages are published to topics and delivered to any subscribers to those topics.
The corresponding publisher program \fBpaho_c_pub\fR allows MQTT messages to be sent.
.PP
The default mode of operation is to output each message to stdout terminated by the delimiter.
.SH "OPTIONS"
.TP
.PD 0
.BI \-t
.TP
.PD
.B \-\-topic
The MQTT topic to publish the data to.
.TP
.PD 0
.BI \-c
.TP
.PD
.B \-\-connection
The MQTT URI to connect to, a combination of transport prefix, host, port and for websockets, topic.
To connect using TCP use the tcp prefix, for example: \fBtcp://\fR\fIlocalhost\fR\fB:\fR\fI1883\fR.
An example using SSL/TLS: \fBssl://\fR\fIlocalhost\fR\fB:\fR\fI1883\fR.
An example for websockets, insecure: \fBws://\fR\fIlocalhost\fR\fB:\fR\fI1883\fR\fB/\fR\fItopic\fR, and
secure: \fBwss://\fR\fIlocalhost\fR\fB:\fR\fI80\fR\fB/\fR\fItopic\fR.
.TP
.PD 0
.BI \-h
.TP
.PD
.B \-\-host
The TCP/IP host name of the MQTT server to connect to. Along with the \fB--port\fR option, an older alternative to using \fB--connection\fR.
.TP
.PD 0
.BI \-p
.TP
.PD
.B \-\-port
The TCP/IP port number of the MQTT server to connect to. Along with the \fB--host\fR option, an older alternative to using \fB--connection\fR.
.TP
.PD 0
.BI \-q
.TP
.PD
.B \-\-qos
The MQTT QoS on which to publish the message. The alternatives are \fB0\fR, \fB1\fR or \fB2\fR.
.TP
.PD 0
.BI \-V
.TP
.PD
.B \-\-MQTTversion
The version of the MQTT protocol to use. Valid options are \fB31\fR (or \fBmqttv31\fR), \fB311\fR (\fBmqttv311\fR) and \fB5\fR (or \fBmqttv5\fR).
.TP
.PD
.B \-\-quiet
Do not print error messages.
.TP
.PD
.B \-\-trace
Print library internal trace. Valid levels are \fBmin\fR, \fBmax\fR, \fBerror\fR and \fBprotocol\fR.
.TP
.PD 0
.BI \-R
.TP
.PD
.B \-\-no-retained
Do not print messages which have the MQTT retained flag set.
.TP
.PD
.B \-\-delimiter
The delimiter string to append to each message when printing. Defaults to newline.
.TP
.PD
.B \-\-no-delimiter
Do not add a delimiter to each message when printing.
.TP
.PD
.B \-\-will-topic
Sets the MQTT will message topic to publish to. If the application ends without sending an MQTT disconnect, the
will message will be published to this topic.
.TP
.PD
.B \-\-will-payload
Only used if \fBwill-topic\fR is set. Sets the MQTT will message to be published.
.TP
.PD
.B \-\-will-qos
Only used if \fBwill-topic\fR is set. Sets the MQTT QoS at which the will message is published. The alternatives are \fB0\fR, \fB1\fR or \fB2\fR.
.TP
.PD
.B \-\-will-retain
Only used if \fBwill-topic\fR is set. Sets the MQTT retained flag on the will message.
.TP
.PD
.B \-\-cafile
Only used with a TLS connection. The name of a file for the OpenSSL trust store.
.TP
.PD
.B \-\-capath
Only used with a TLS connection. The name of a directory holding OpenSSL trusted certificates.
.TP
.PD
.B \-\-cert
Only used with a TLS connection. The name of a file for the TLS keystore containing a client certificate to be presented.
.TP
.PD
.B \-\-key
Only used with a TLS connection. The name of a file containing the client private key.
.TP
.PD
.B \-\-keypass
Only used with a TLS connection. The password for the client private key file, if needed.
.TP
.PD
.B \-\-ciphers
Only used with a TLS connection. A list of cipher suites that the client will present to the server during the TLS handshake.
.TP
.PD
.B \-\-insecure
Only used with a TLS connection. Don't check that the server certificate common name matches the hostname.
|
{
"pile_set_name": "Github"
}
|
.so man3/deprecated_items.3
|
{
"pile_set_name": "Github"
}
|
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010-2012, Willow Garage, Inc.
* Copyright (c) 2012-, Open Perception, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#pragma once
#include <pcl/common/intensity.h>
#include <pcl/point_types.h>
namespace pcl
{
namespace common
{
    /** \brief Intensity accessor for pcl::PointNormal: the curvature field
      * stands in for intensity. */
    template<>
    struct IntensityFieldAccessor<pcl::PointNormal>
    {
      /// Return the point's curvature as its intensity.
      inline float
      operator () (const pcl::PointNormal &p) const
      {
        return (p.curvature);
      }
      /// Store the point's curvature into \a intensity.
      inline void
      get (const pcl::PointNormal &p, float &intensity) const
      {
        intensity = p.curvature;
      }
      /// Overwrite the point's curvature with \a intensity.
      inline void
      set (pcl::PointNormal &p, float intensity) const
      {
        p.curvature = intensity;
      }
      /// Subtract \a value from the point's curvature.
      inline void
      demean (pcl::PointNormal& p, float value) const
      {
        p.curvature -= value;
      }
      /// Add \a value to the point's curvature.
      inline void
      add (pcl::PointNormal& p, float value) const
      {
        p.curvature += value;
      }
    };
    /** \brief Intensity accessor for pcl::PointXYZ: the z coordinate stands
      * in for intensity (no dedicated intensity field is available). */
    template<>
    struct IntensityFieldAccessor<pcl::PointXYZ>
    {
      /// Return the point's z coordinate as its intensity.
      inline float
      operator () (const pcl::PointXYZ &p) const
      {
        return (p.z);
      }
      /// Store the point's z coordinate into \a intensity.
      inline void
      get (const pcl::PointXYZ &p, float &intensity) const
      {
        intensity = p.z;
      }
      /// Overwrite the point's z coordinate with \a intensity.
      inline void
      set (pcl::PointXYZ &p, float intensity) const
      {
        p.z = intensity;
      }
      /// Subtract \a value from the point's z coordinate.
      inline void
      demean (pcl::PointXYZ& p, float value) const
      {
        p.z -= value;
      }
      /// Add \a value to the point's z coordinate.
      inline void
      add (pcl::PointXYZ& p, float value) const
      {
        p.z += value;
      }
    };
template<>
struct IntensityFieldAccessor<pcl::PointXYZRGB>
{
inline float
operator () (const pcl::PointXYZRGB &p) const
{
return (static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f);
}
inline void
get (const pcl::PointXYZRGB &p, float& intensity) const
{
intensity = static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f;
}
inline void
set (pcl::PointXYZRGB &p, float intensity) const
{
p.r = static_cast<std::uint8_t> (intensity * 3.34448160535f); // 1000 / 299
p.g = static_cast<std::uint8_t> (intensity * 1.70357751278f); // 1000 / 587
p.b = static_cast<std::uint8_t> (intensity * 8.77192982456f); // 1000 / 114
}
inline void
demean (pcl::PointXYZRGB& p, float value) const
{
float intensity = this->operator () (p);
intensity -= value;
set (p, intensity);
}
inline void
add (pcl::PointXYZRGB& p, float value) const
{
float intensity = this->operator () (p);
intensity += value;
set (p, intensity);
}
};
template<>
struct IntensityFieldAccessor<pcl::PointXYZRGBA>
{
inline float
operator () (const pcl::PointXYZRGBA &p) const
{
return (static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f);
}
inline void
get (const pcl::PointXYZRGBA &p, float& intensity) const
{
intensity = static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f;
}
inline void
set (pcl::PointXYZRGBA &p, float intensity) const
{
p.r = static_cast<std::uint8_t> (intensity * 3.34448160535f); // 1000 / 299
p.g = static_cast<std::uint8_t> (intensity * 1.70357751278f); // 1000 / 587
p.b = static_cast<std::uint8_t> (intensity * 8.77192982456f); // 1000 / 114
}
inline void
demean (pcl::PointXYZRGBA& p, float value) const
{
float intensity = this->operator () (p);
intensity -= value;
set (p, intensity);
}
inline void
add (pcl::PointXYZRGBA& p, float value) const
{
float intensity = this->operator () (p);
intensity += value;
set (p, intensity);
}
};
template<>
struct IntensityFieldAccessor<pcl::PointXYZRGBNormal>
{
inline float
operator () (const pcl::PointXYZRGBNormal &p) const
{
return (static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f);
}
inline void
get (const pcl::PointXYZRGBNormal &p, float& intensity) const
{
intensity = static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f;
}
inline void
set (pcl::PointXYZRGBNormal &p, float intensity) const
{
p.r = static_cast<std::uint8_t> (intensity * 3.34448160535f); // 1000 / 299
p.g = static_cast<std::uint8_t> (intensity * 1.70357751278f); // 1000 / 587
p.b = static_cast<std::uint8_t> (intensity * 8.77192982456f); // 1000 / 114
}
inline void
demean (pcl::PointXYZRGBNormal &p, float value) const
{
float intensity = this->operator () (p);
intensity -= value;
set (p, intensity);
}
inline void
add (pcl::PointXYZRGBNormal &p, float value) const
{
float intensity = this->operator () (p);
intensity += value;
set (p, intensity);
}
};
template<>
struct IntensityFieldAccessor<pcl::PointXYZRGBL>
{
inline float
operator () (const pcl::PointXYZRGBL &p) const
{
return (static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f);
}
inline void
get (const pcl::PointXYZRGBL &p, float& intensity) const
{
intensity = static_cast<float> (299*p.r + 587*p.g + 114*p.b) * 0.001f;
}
inline void
set (pcl::PointXYZRGBL &p, float intensity) const
{
p.r = static_cast<std::uint8_t> (intensity * 3.34448160535f); // 1000 / 299
p.g = static_cast<std::uint8_t> (intensity * 1.70357751278f); // 1000 / 587
p.b = static_cast<std::uint8_t> (intensity * 8.77192982456f); // 1000 / 114
}
inline void
demean (pcl::PointXYZRGBL& p, float value) const
{
float intensity = this->operator () (p);
intensity -= value;
set (p, intensity);
}
inline void
add (pcl::PointXYZRGBL& p, float value) const
{
float intensity = this->operator () (p);
intensity += value;
set (p, intensity);
}
};
    /** \brief Intensity accessor for pcl::PointXYZHSV: the V (value/
      * brightness) channel stands in for intensity. */
    template<>
    struct IntensityFieldAccessor<pcl::PointXYZHSV>
    {
      /// Return the point's V channel as its intensity.
      inline float
      operator () (const pcl::PointXYZHSV &p) const
      {
        return (p.v);
      }
      /// Store the point's V channel into \a intensity.
      inline void
      get (const pcl::PointXYZHSV &p, float &intensity) const
      {
        intensity = p.v;
      }
      /// Overwrite the V channel with \a intensity. Note that saturation is
      /// reset to 0, turning the color into a pure gray of that brightness.
      inline void
      set (pcl::PointXYZHSV &p, float intensity) const
      {
        p.v = intensity;
        p.s = 0.0f;
      }
      /// Subtract \a value from the point's V channel (saturation untouched).
      inline void
      demean (pcl::PointXYZHSV& p, float value) const
      {
        p.v -= value;
      }
      /// Add \a value to the point's V channel (saturation untouched).
      inline void
      add (pcl::PointXYZHSV& p, float value) const
      {
        p.v += value;
      }
    };
    /** \brief Intensity accessor for pcl::PointXYZL: the (integer) label
      * stands in for intensity. */
    template<>
    struct IntensityFieldAccessor<pcl::PointXYZL>
    {
      /// Return the point's label converted to float.
      inline float
      operator () (const pcl::PointXYZL &p) const
      {
        return (static_cast<float>(p.label));
      }
      /// Store the point's label (as float) into \a intensity.
      inline void
      get (const pcl::PointXYZL &p, float &intensity) const
      {
        intensity = static_cast<float>(p.label);
      }
      /// Overwrite the label with \a intensity truncated to uint32.
      inline void
      set (pcl::PointXYZL &p, float intensity) const
      {
        p.label = static_cast<std::uint32_t>(intensity);
      }
      /// Subtract \a value (truncated to uint32) from the label.
      /// NOTE(review): a negative \a value makes the float->uint32 cast
      /// undefined behavior, and a subtrahend larger than the label wraps —
      /// callers presumably only pass small non-negative means; confirm.
      inline void
      demean (pcl::PointXYZL& p, float value) const
      {
        p.label -= static_cast<std::uint32_t>(value);
      }
      /// Add \a value (truncated to uint32) to the label.
      inline void
      add (pcl::PointXYZL& p, float value) const
      {
        p.label += static_cast<std::uint32_t>(value);
      }
    };
template<>
struct IntensityFieldAccessor<pcl::PointXYZLNormal>
{
  /** \brief Reinterprets the unsigned 32-bit point label as intensity. */
  inline float
  operator () (const pcl::PointXYZLNormal &p) const
  {
    return (static_cast<float>(p.label));
  }
  inline void
  get (const pcl::PointXYZLNormal &p, float &intensity) const
  {
    intensity = static_cast<float>(p.label);
  }
  inline void
  set (pcl::PointXYZLNormal &p, float intensity) const
  {
    p.label = static_cast<std::uint32_t>(intensity);
  }
  // NOTE(review): unsigned arithmetic -- wraps on underflow; see PointXYZL.
  inline void
  demean (pcl::PointXYZLNormal& p, float value) const
  {
    p.label -= static_cast<std::uint32_t>(value);
  }
  inline void
  add (pcl::PointXYZLNormal& p, float value) const
  {
    p.label += static_cast<std::uint32_t>(value);
  }
};
template<>
struct IntensityFieldAccessor<pcl::InterestPoint>
{
  /** \brief Intensity maps to the interest-point strength field. */
  inline float
  operator () (const pcl::InterestPoint &p) const
  {
    return (p.strength);
  }
  inline void
  get (const pcl::InterestPoint &p, float &intensity) const
  {
    intensity = p.strength;
  }
  inline void
  set (pcl::InterestPoint &p, float intensity) const
  {
    p.strength = intensity;
  }
  inline void
  demean (pcl::InterestPoint& p, float value) const
  {
    p.strength -= value;
  }
  inline void
  add (pcl::InterestPoint& p, float value) const
  {
    p.strength += value;
  }
};
template<>
struct IntensityFieldAccessor<pcl::PointWithRange>
{
  /** \brief Intensity maps to the range (distance) field. */
  inline float
  operator () (const pcl::PointWithRange &p) const
  {
    return (p.range);
  }
  inline void
  get (const pcl::PointWithRange &p, float &intensity) const
  {
    intensity = p.range;
  }
  inline void
  set (pcl::PointWithRange &p, float intensity) const
  {
    p.range = intensity;
  }
  inline void
  demean (pcl::PointWithRange& p, float value) const
  {
    p.range -= value;
  }
  inline void
  add (pcl::PointWithRange& p, float value) const
  {
    p.range += value;
  }
};
template<>
struct IntensityFieldAccessor<pcl::PointWithScale>
{
  /** \brief Intensity maps to the scale field. */
  inline float
  operator () (const pcl::PointWithScale &p) const
  {
    return (p.scale);
  }
  inline void
  get (const pcl::PointWithScale &p, float &intensity) const
  {
    intensity = p.scale;
  }
  inline void
  set (pcl::PointWithScale &p, float intensity) const
  {
    p.scale = intensity;
  }
  inline void
  demean (pcl::PointWithScale& p, float value) const
  {
    p.scale -= value;
  }
  inline void
  add (pcl::PointWithScale& p, float value) const
  {
    p.scale += value;
  }
};
template<>
struct IntensityFieldAccessor<pcl::PointWithViewpoint>
{
  /** \brief The z coordinate is used as the intensity stand-in for this type
    * (no dedicated intensity-like field exists). */
  inline float
  operator () (const pcl::PointWithViewpoint &p) const
  {
    return (p.z);
  }
  inline void
  get (const pcl::PointWithViewpoint &p, float &intensity) const
  {
    intensity = p.z;
  }
  // NOTE(review): set/demean/add mutate the point's geometry (z), not a
  // separate channel -- confirm callers expect this.
  inline void
  set (pcl::PointWithViewpoint &p, float intensity) const
  {
    p.z = intensity;
  }
  inline void
  demean (pcl::PointWithViewpoint& p, float value) const
  {
    p.z -= value;
  }
  inline void
  add (pcl::PointWithViewpoint& p, float value) const
  {
    p.z += value;
  }
};
template<>
struct IntensityFieldAccessor<pcl::PointSurfel>
{
  /** \brief Intensity maps to the surfel curvature field. */
  inline float
  operator () (const pcl::PointSurfel &p) const
  {
    return (p.curvature);
  }
  inline void
  get (const pcl::PointSurfel &p, float &intensity) const
  {
    intensity = p.curvature;
  }
  inline void
  set (pcl::PointSurfel &p, float intensity) const
  {
    p.curvature = intensity;
  }
  inline void
  demean (pcl::PointSurfel& p, float value) const
  {
    p.curvature -= value;
  }
  inline void
  add (pcl::PointSurfel& p, float value) const
  {
    p.curvature += value;
  }
};
}
}
|
{
"pile_set_name": "Github"
}
|
# Installs /usr/local/bin/dumps_exception_checker.py and schedules it via
# cron every 8 hours, with its output mailed to ops-dumps@wikimedia.org.
#
# @param dumpsbasedir  Base directory of dump output the script scans.
# @param user          Account the cron job runs as.
class dumps::generation::server::exceptionchecker(
    $dumpsbasedir = undef,
    $user = undef,
) {
    file { '/usr/local/bin/dumps_exception_checker.py':
        ensure => 'present',
        mode   => '0755',
        owner  => 'root',
        group  => 'root',
        source => 'puppet:///modules/dumps/generation/dumps_exception_checker.py',
    }

    # Script args "480 latest": presumably a 480-minute window over the
    # latest dump run -- TODO confirm against the script's CLI help.
    cron { 'dumps-exception-checker':
        ensure      => 'present',
        environment => 'MAILTO=ops-dumps@wikimedia.org',
        command     => "/usr/bin/python3 /usr/local/bin/dumps_exception_checker.py ${dumpsbasedir} 480 latest",
        user        => $user,
        minute      => '40',
        hour        => '*/8',
        require     => File['/usr/local/bin/dumps_exception_checker.py'],
    }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
// Links this scope into the isolate's stack-guard scope chain unless the
// scope is a kNoop placeholder (inert scopes are never pushed, and therefore
// never popped either).
InterruptsScope::InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
                                 Mode mode)
    : stack_guard_(isolate->stack_guard()),
      intercept_mask_(intercept_mask),
      intercepted_flags_(0),
      mode_(mode) {
  if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
}
// Decides whether |flag| should be intercepted (postponed) by this scope
// chain. Walks outward from the innermost scope: an inner kRunInterrupts
// scope overrides any outer postpone scope; otherwise the OUTERMOST matching
// kPostponeInterrupts scope records the flag so it can be re-raised when
// that scope is popped. Returns true if the interrupt was intercepted.
bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
  InterruptsScope* last_postpone_scope = nullptr;
  for (InterruptsScope* current = this; current; current = current->prev_) {
    // We only consider scopes related to passed flag.
    if (!(current->intercept_mask_ & flag)) continue;
    if (current->mode_ == kRunInterrupts) {
      // If innermost scope is kRunInterrupts scope, prevent interrupt from
      // being intercepted.
      break;
    } else {
      DCHECK_EQ(current->mode_, kPostponeInterrupts);
      last_postpone_scope = current;
    }
  }
  // If there is no postpone scope for passed flag then we should not intercept.
  if (!last_postpone_scope) return false;
  // Record the flag on the outermost postpone scope found above.
  last_postpone_scope->intercepted_flags_ |= flag;
  return true;
}
} // namespace internal
} // namespace v8
|
{
"pile_set_name": "Github"
}
|
# From http://ftp.gnome.org/pub/gnome/sources/metacity/2.25/metacity-2.25.1.sha256sum
sha256 fb2ede4ac02d7da08d3c3323fb76afaf945c8cccc07cb2d3a4b7f44fb49f1c47 metacity-2.25.1.tar.bz2
# Locally computed
sha256 32b1062f7da84967e7019d01ab805935caa7ab7321a7ced0e30ebe75e5df1670 COPYING
|
{
"pile_set_name": "Github"
}
|
open! Base
(* Local shorthand for unwrapping [Staged.t] values. *)
let unstage = Staged.unstage
(* Regression tests to ensure that [Validate.field], [Validate.field_folder], and
[Validate.field_direct_folder] continue to work with private record types. *)
(* [Validate.field_folder] must type-check against a private record. *)
module Fold_with_private (M : sig
    type t = private { a : int } [@@deriving fields]
  end) : sig
  val validate : M.t -> Validate.t
end = struct
  open M

  let validate t =
    let w f = Validate.field_folder f t in
    Fields.fold ~init:[] ~a:(w (fun _ -> Validate.pass)) |> Validate.of_list
  ;;
end

[%%expect {| |}]
(* Control case: [Validate.field_folder] with an ordinary (non-private) record. *)
module Fold_regular (M : sig
    type t = { a : int } [@@deriving fields]
  end) : sig
  val validate : M.t -> Validate.t
end = struct
  open M

  let validate t =
    let w f = Validate.field_folder f t in
    Fields.fold ~init:[] ~a:(w (fun _ -> Validate.pass)) |> Validate.of_list
  ;;
end

[%%expect {| |}]
(* [Validate.field_direct_folder] must type-check against a private record. *)
module Fold_direct_private (M : sig
    type t = private { a : int } [@@deriving fields]
  end) : sig
  val validate : M.t -> Validate.t
end = struct
  open M

  let validate t =
    let w f = unstage (Validate.field_direct_folder f) in
    Fields.Direct.fold t ~init:[] ~a:(w (fun _ -> Validate.pass)) |> Validate.of_list
  ;;
end

[%%expect {| |}]
(* Control case: [Validate.field_direct_folder] with an ordinary record. *)
module Fold_direct_regular (M : sig
    type t = { a : int } [@@deriving fields]
  end) : sig
  val validate : M.t -> Validate.t
end = struct
  open M

  let validate t =
    let w f = unstage (Validate.field_direct_folder f) in
    Fields.Direct.fold t ~init:[] ~a:(w (fun _ -> Validate.pass)) |> Validate.of_list
  ;;
end

[%%expect {| |}]
(* [Validate.field] must type-check against a private record. *)
module Validate_field_private (M : sig
    type t = private { a : int } [@@deriving fields]
  end) : sig
  val validate : M.t -> Validate.t
end = struct
  open M

  let validate t =
    let w check acc field = Validate.field check t field :: acc in
    Fields.fold ~init:[] ~a:(w (fun _ -> Validate.pass)) |> Validate.of_list
  ;;
end

[%%expect {| |}]
(* Control case: [Validate.field] with an ordinary record. *)
module Validate_field (M : sig
    type t = { a : int } [@@deriving fields]
  end) : sig
  val validate : M.t -> Validate.t
end = struct
  open M

  let validate t =
    let w check acc field = Validate.field check t field :: acc in
    Fields.fold ~init:[] ~a:(w (fun _ -> Validate.pass)) |> Validate.of_list
  ;;
end

[%%expect {| |}]
|
{
"pile_set_name": "Github"
}
|
import * as checkpoint from 'checkpoint-client'
describe('checkpointClient', () => {
  test('check async signature', async () => {
    // getSignature is used in SendPanic
    const sig = await checkpoint.getSignature()

    // The signature must be either a v4 UUID or the all-zero placeholder.
    const uuidV4OrZero =
      /(?:^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[a-f0-9]{4}-[a-f0-9]{12}$)|(?:^0{8}-0{4}-0{4}-0{4}-0{12}$)/u
    expect(sig).toMatch(uuidV4OrZero)
  })
})
|
{
"pile_set_name": "Github"
}
|
from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    """Search *path* (an os.pathsep-separated directory list) for *name*.

    Returns the absolute path of the first match, or None if the file
    is not found in any of the listed directories.
    """
    for directory in path.split(os.pathsep):
        candidate = pjoin(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA toolkit on this system.

    Honors the CUDAHOME environment variable if set; otherwise searches
    $PATH (plus /usr/local/cuda/bin) for the nvcc binary.

    Returns:
        dict with keys 'home', 'nvcc', 'include' and 'lib64'.

    Raises:
        EnvironmentError: if nvcc or any expected toolkit path is missing.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # Validate every component of the expected toolkit layout.
    # (Removed a stale Python-2 'iteritems' comment left over from porting.)
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig
# Resolve the CUDA toolkit once at import time; build fails early if absent.
CUDA = locate_cuda()

# numpy include dir; get_numpy_include() is the pre-1.0 numpy spelling.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """Monkey-patch a distutils compiler object so .cu sources go to nvcc.

    After patching, each Extension's ``extra_compile_args`` must be a dict
    with 'gcc' and 'nvcc' keys instead of a plain list; the appropriate
    sub-list is selected per source file. All other sources keep the
    default compiler. (A stray debug ``print(extra_postargs)`` was removed.)
    """
    # Let the compiler accept CUDA sources.
    self.src_extensions.append('.cu')

    # Keep references to the originals so the wrapper can delegate/restore.
    default_compiler_so = self.compiler_so
    super = self._compile

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda compiler for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the instance
    self._compile = _compile
# Apply the nvcc customization right before extensions are compiled.
class custom_build_ext(build_ext):
    # build_ext subclass that patches the compiler so .cu files are routed
    # to nvcc (see customize_compiler_for_nvcc) before building.
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Extension definitions. Note: extra_compile_args is a dict (not a list) --
# customize_compiler_for_nvcc selects the 'gcc' or 'nvcc' entry per source.
ext_modules = [
    Extension(
        "bbox",
        ["bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension(
        "cython_nms",
        ["cython_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    # GPU NMS: mixes a CUDA kernel (.cu, built by nvcc) with a Cython wrapper.
    Extension('gpu_nms',
        ['nms_kernel.cu', 'gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        runtime_library_dirs=[CUDA['lib64']],
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_35',
                                     '--ptxas-options=-v',
                                     '-c',
                                     '--compiler-options',
                                     "'-fPIC'"]},
        include_dirs = [numpy_include, CUDA['include']]
    ),
]

setup(
    ext_modules=ext_modules,
    # custom_build_ext injects nvcc support into the compile step.
    cmdclass={'build_ext': custom_build_ext},
)
|
{
"pile_set_name": "Github"
}
|
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* (C) 2007 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
/* This file creates strings for the most important configuration options.
These are then used in the file src/mpi/init/initthread.c to initialize
global variables that will then be included in both the library and
executables, providing a way to determine what version and features of
MPICH were used with a particular library or executable.
*/
#ifndef MPICHINFO_H_INCLUDED
#define MPICHINFO_H_INCLUDED

/* The @...@ tokens below are substituted by configure when this header
   template is instantiated. */
/* Configure arguments (sanitized) used to build this MPICH. */
#define MPICH_CONFIGURE_ARGS_CLEAN "@CONFIGURE_ARGS_CLEAN@"
/* Release date of this MPICH version. */
#define MPICH_VERSION_DATE "@MPICH_RELEASE_DATE@"
/* Selected communication device. */
#define MPICH_DEVICE "@DEVICE@"
/* Compiler commands and flags used for each supported language. */
#define MPICH_COMPILER_CC "@CC@ @CFLAGS@"
#define MPICH_COMPILER_CXX "@CXX@ @CXXFLAGS@"
#define MPICH_COMPILER_F77 "@F77@ @FFLAGS@"
#define MPICH_COMPILER_FC "@FC@ @FCFLAGS@"

#endif
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<!--
    Copyright 2014 Luke Klinker

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<!-- Transparent rectangle with a faint (9% white) 1dip outline. -->
<shape xmlns:android="http://schemas.android.com/apk/res/android" android:shape="rectangle">
    <solid android:color="#00000000"/>
    <stroke android:width="1dip" android:color="#18FFFFFF"/>
    <!-- Removed an invalid <item android:state_pressed> child: <shape>
         drawables do not support state items (Android logs a warning and
         ignores unknown children). A pressed state requires wrapping this
         shape in a <selector> drawable instead. -->
</shape>
|
{
"pile_set_name": "Github"
}
|
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIHBHKrZmDpzTANBgkqhkiG9w0BAQsFADCBnTELMAkGA1UE
BhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCUxvcyBHYXRvczEU
MBIGA1UECgwLTmV0ZmxpeCBJbmMxLTArBgNVBAsMJFBsYXRmb3JtIFNlY3VyaXR5
ICgxMjUxOTgwMTc0NjM4MDIxKTEgMB4GA1UEAwwXSW50ZXJtZWRpYXRlIENBIGZv
ciA3MzIwHhcNMjAwNzE0MDMxNTE5WhcNMjEwNzE0MDMwOTQ0WjCBlTELMAkGA1UE
BhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCUxvcyBHYXRvczEU
MBIGA1UECgwLTmV0ZmxpeCBJbmMxLTArBgNVBAsMJFBsYXRmb3JtIFNlY3VyaXR5
ICgxMjUxOTgwMzg5Mzk4Njg1KTEYMBYGA1UEAwwPbG9jYWxob3N0LmxvY2FsMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAooAA/V91B8rJxwn514ujh9T7
Qm26cORtJfyD+/x0Yiqq9zZyhVX54fm0grdUuUuSPdVjhL99FoY/ficwEKsAKVxY
Tuppgyfmd608ZTj3Nkjcx8rWAPDIpmF6xMfzw6k/il+/gj85e1M4Lhew1yuwAnf1
HnZ1Ys9aSlywFVPZ9WycpNpcfybVce7vUlNIW73jIKhpFcTWhExZU4ePRRZaPEO8
aIFwRMQCT/HKL8Qmvp1xQGShr1sLu3j52EoU3SLI/D4ifzrsABn6IQuD8PtLTpYo
1KtFIDVIZDlFXRDK+NY6h83YYOAwAN9EFsYkc9xeJOaVUymuiJCsBTs7/5ub3wID
AQABoxAwDjAMBgNVHRMBAf8EAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAwnyI5MaG2
Z25MO9CzbijM6GEx4hEPqR4hDqQSUWDC5roZE+iN9FP4wRQHrannYbZFbfBdp9DA
1C9iZ41zumwAufGebyjBCGQMaru1SZuPIW6PaaaaYW3+OgRH0tWr8E7OR1cIjzbl
6lW4ERoOLspgRzjTRvEyhiPGtCnV1Gv/QQHa9QL88gw7ZUdOV/JXUM/9xi67HcAK
2cCLA+7DZNNCtSunyuWvXgF1Wk5aOPpg9mKCL4edYJKoByo7w6XfDMzEnxmKv1x+
bHjGCxINfi/Z7Uo5C+FGKh6LroVPPAtqczCYGCR9utlYxJKLiaA4i0pUJ/c39zu9
bXVEF0iT6wwu
-----END CERTIFICATE-----
|
{
"pile_set_name": "Github"
}
|
; Joomla! Project
; Copyright (C) 2005 - 2017 Open Source Matters. All rights reserved.
; License GNU General Public License version 2 or later; see LICENSE.txt, see LICENSE.php
; Note : All ini files need to be saved as UTF-8
PLG_TWOFACTORAUTH_TOTP="两步验证 - Google 身份验证器插件"
PLG_TWOFACTORAUTH_TOTP_XML_DESCRIPTION="此插件用于会员使用 <a href="_QQ_"https://en.wikipedia.org/wiki/Google_Authenticator"_QQ_" target="_QQ_"_blank"_QQ_">Google 身份验证器</a> 或其它基于时间的密码生成器(例如:<a href="_QQ_"https://freeotp.github.io/"_QQ_" target="_QQ_"_blank"_QQ_">FreeOTP</a>)在网站上启用双重认证功能. 要使用双重认证,请在会员个人资料中启用双重认证."
|
{
"pile_set_name": "Github"
}
|
'use strict';

// Sentinel string constant; its exported name matches its value.
exports.DYNAMIC_USED_BY_A = 'DYNAMIC_USED_BY_A';
|
{
"pile_set_name": "Github"
}
|
/**********************************************************************
* params.c *
* Copyright (c) 2005-2006 Cryptocom LTD *
* This file is distributed under the same license as OpenSSL *
* *
* Definitions of GOST R 34.10 parameter sets, defined in RFC 4357 *
* OpenSSL 0.9.9 libraries required to compile and use *
* this code *
**********************************************************************/
#include "gost_params.h"
#include <openssl/objects.h>
/* Parameters of GOST 34.10 */
/* RFC 4357 GOST R 34.10-94 parameter sets. Each entry: paramset NID plus
 * three large decimal constants -- presumably p, q and a of the signature
 * scheme; TODO confirm field order against the R3410_params declaration.
 * The list is terminated by a NID_undef sentinel entry. */
R3410_params R3410_paramset[] = {
    /* Paramset A */
    {NID_id_GostR3410_94_CryptoPro_A_ParamSet,
     "100997906755055304772081815535925224869"
     "8410825720534578748235158755771479905292727772441528526992987964833"
     "5669968284202797289605274717317548059048560713474685214192868091256"
     "1502802222185647539190902656116367847270145019066794290930185446216"
     "3997308722217328898303231940973554032134009725883228768509467406639"
     "62",
     "127021248288932417465907042777176443525"
     "7876535089165358128175072657050312609850984974231883334834011809259"
     "9999512098893413065920561499672425412104927434935707492031276956145"
     "1689224110579311248812610229678534638401693520013288995000362260684"
     "2227508135323070045173416336850045410625869714168836867788425378203"
     "83",
     "683631961449557007844441656118272528951"
     "02170888761442055095051287550314083023"}
    ,
    {NID_id_GostR3410_94_CryptoPro_B_ParamSet,
     "429418261486158041438734477379555023926"
     "7234596860714306679811299408947123142002706038521669956384871995765"
     "7284814898909770759462613437669456364882730370838934791080835932647"
     "9767786019153434744009610342313166725786869204821949328786333602033"
     "8479709268434224762105576023501613261478065276102850944540333865234"
     "1",
     "139454871199115825601409655107690713107"
     "0417070599280317977580014543757653577229840941243685222882398330391"
     "1468164807668823692122073732267216074074777170091113455043205380464"
     "7694904686120113087816240740184800477047157336662926249423571248823"
     "9685422217536601433914856808405203368594584948031873412885804895251"
     "63",
     "79885141663410976897627118935756323747307951916507639758300472692338873533959"}
    ,
    {NID_id_GostR3410_94_CryptoPro_C_ParamSet,
     "816552717970881016017893191415300348226"
     "2544051353358162468249467681876621283478212884286545844013955142622"
     "2087723485023722868022275009502224827866201744494021697716482008353"
     "6398202298024892620480898699335508064332313529725332208819456895108"
     "5155178100221003459370588291073071186553005962149936840737128710832"
     "3",
     "110624679233511963040518952417017040248"
     "5862954819831383774196396298584395948970608956170224210628525560327"
     "8638246716655439297654402921844747893079518669992827880792192992701"
     "1428546551433875806377110443534293554066712653034996277099320715774"
     "3542287621283671843703709141350171945045805050291770503634517804938"
     "01",
     "113468861199819350564868233378875198043"
     "267947776488510997961231672532899549103"}
    ,
    {NID_id_GostR3410_94_CryptoPro_D_ParamSet,
     "756976611021707301782128757801610628085"
     "5283803109571158829574281419208532589041660017017859858216341400371"
     "4687551412794400562878935266630754392677014598582103365983119173924"
     "4732511225464712252386803315902707727668715343476086350472025298282"
     "7271461690125050616858238384366331089777463541013033926723743254833"
     "7",
     "905457649621929965904290958774625315611"
     "3056083907389766971404812524422262512556054474620855996091570786713"
     "5849550236741915584185990627801066465809510095784713989819413820871"
     "5964648914493053407920737078890520482730623038837767710173664838239"
     "8574828787891286471201460474326612697849693665518073864436497893214"
     "9",
     "108988435796353506912374591498972192620"
     "190487557619582334771735390599299211593"}
    ,
    {NID_id_GostR3410_94_CryptoPro_XchA_ParamSet,
     "1335318132727206734338595199483190012179423759678474868994823595993"
     "6964252873471246159040332773182141032801252925387191478859899310331"
     "0567744136196364803064721377826656898686468463277710150809401182608"
     "7702016153249904683329312949209127762411378780302243557466062839716"
     "59376426832674269780880061631528163475887",
     "14201174159756348119636828602231808974327613839524373876287257344192"
     "74593935127189736311660784676003608489466235676257952827747192122419"
     "29071046134208380636394084512691828894000571524625445295769349356752"
     "72895683154177544176313938445719175509684710784659566254794231229333"
     "8483924514339614727760681880609734239",
     "91771529896554605945588149018382750217296858393520724172743325725474"
     "374979801"}
    ,
    {NID_id_GostR3410_94_CryptoPro_XchB_ParamSet,
     "8890864727828423151699995801875757891031463338652579140051973659"
     "3048131440685857067369829407947744496306656291505503608252399443"
     "7900272386749145996230867832228661977543992816745254823298629859"
     "8753575466286051738837854736167685769017780335804511440773337196"
     "2538423532919394477873664752824509986617878992443177",
     "1028946126624994859676552074360530315217970499989304888248413244"
     "8474923022758470167998871003604670704877377286176171227694098633"
     "1539089568784129110109512690503345393869871295783467257264868341"
     "7200196629860561193666752429682367397084815179752036423595736533"
     "68957392061769855284593965042530895046088067160269433",
     "9109671391802626916582318050603555673628769498182593088388796888"
     "5281641595199"}
    ,
    {NID_id_GostR3410_94_CryptoPro_XchC_ParamSet,
     "4430618464297584182473135030809859326863990650118941756995270074"
     "8609973181426950235239623239110557450826919295792878938752101867"
     "7047181623251027516953100431855964837602657827828194249605561893"
     "6965865325513137194483136247773653468410118796740709840825496997"
     "9375560722345106704721086025979309968763193072908334",
     "1246996366993477513607147265794064436203408861395055989217248455"
     "7299870737698999651480662364723992859320868822848751165438350943"
     "3276647222625940615560580450040947211826027729977563540237169063"
     "0448079715771649447778447000597419032457722226253269698374446528"
     "35352729304393746106576383349151001715930924115499549",
     "6787876137336591234380295020065682527118129468050147943114675429"
     "4748422492761"}
    ,
    /* End-of-table sentinel. */
    {NID_undef, NULL, NULL, NULL}
};
/* GOST R 34.10-2001 elliptic-curve parameter sets (RFC 4357). Each entry:
 * NID followed by hex constants A, B (curve coefficients), P (field prime),
 * Q (subgroup order) and X, Y (base point), as labelled on the first entry.
 * Terminated by a zero-NID sentinel. */
R3410_2001_params R3410_2001_paramset[] = {
    /* default_cc_sign01_param 1.2.643.2.9.1.8.1 */
    {NID_id_GostR3410_2001_ParamSet_cc,
     /* A */
     "C0000000000000000000000000000000000000000000000000000000000003c4",
     /* B */
     "2d06B4265ebc749ff7d0f1f1f88232e81632e9088fd44b7787d5e407e955080c",
     /* P */
     "C0000000000000000000000000000000000000000000000000000000000003C7",
     /* Q */
     "5fffffffffffffffffffffffffffffff606117a2f4bde428b7458a54b6e87b85",
     /* X */
     "2",
     /* Y */
     "a20e034bf8813ef5c18d01105e726a17eb248b264ae9706f440bedc8ccb6b22c"}
    ,
    /* 1.2.643.2.2.35.0 */
    {NID_id_GostR3410_2001_TestParamSet,
     "7",
     "5FBFF498AA938CE739B8E022FBAFEF40563F6E6A3472FC2A514C0CE9DAE23B7E",
     "8000000000000000000000000000000000000000000000000000000000000431",
     "8000000000000000000000000000000150FE8A1892976154C59CFC193ACCF5B3",
     "2",
     "08E2A8A0E65147D4BD6316030E16D19C85C97F0A9CA267122B96ABBCEA7E8FC8"}
    ,
    /*
     * 1.2.643.2.2.35.1
     */
    {NID_id_GostR3410_2001_CryptoPro_A_ParamSet,
     "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD94",
     "a6",
     "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD97",
     "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF6C611070995AD10045841B09B761B893",
     "1",
     "8D91E471E0989CDA27DF505A453F2B7635294F2DDF23E3B122ACC99C9E9F1E14"}
    ,
    /*
     * 1.2.643.2.2.35.2
     */
    {NID_id_GostR3410_2001_CryptoPro_B_ParamSet,
     "8000000000000000000000000000000000000000000000000000000000000C96",
     "3E1AF419A269A5F866A7D3C25C3DF80AE979259373FF2B182F49D4CE7E1BBC8B",
     "8000000000000000000000000000000000000000000000000000000000000C99",
     "800000000000000000000000000000015F700CFFF1A624E5E497161BCC8A198F",
     "1",
     "3FA8124359F96680B83D1C3EB2C070E5C545C9858D03ECFB744BF8D717717EFC"}
    ,
    /*
     * 1.2.643.2.2.35.3
     */
    {NID_id_GostR3410_2001_CryptoPro_C_ParamSet,
     "9B9F605F5A858107AB1EC85E6B41C8AACF846E86789051D37998F7B9022D7598",
     "805a",
     "9B9F605F5A858107AB1EC85E6B41C8AACF846E86789051D37998F7B9022D759B",
     "9B9F605F5A858107AB1EC85E6B41C8AA582CA3511EDDFB74F02F3A6598980BB9",
     "0",
     "41ECE55743711A8C3CBF3783CD08C0EE4D4DC440D4641A8F366E550DFDB3BB67"}
    ,
    /*
     * 1.2.643.2.2.36.0
     */
    {NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet,
     "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD94",
     "a6",
     "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD97",
     "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF6C611070995AD10045841B09B761B893",
     "1",
     "8D91E471E0989CDA27DF505A453F2B7635294F2DDF23E3B122ACC99C9E9F1E14"}
    ,
    /*
     * 1.2.643.2.2.36.1
     */
    {NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet,
     "9B9F605F5A858107AB1EC85E6B41C8AACF846E86789051D37998F7B9022D7598",
     "805a",
     "9B9F605F5A858107AB1EC85E6B41C8AACF846E86789051D37998F7B9022D759B",
     "9B9F605F5A858107AB1EC85E6B41C8AA582CA3511EDDFB74F02F3A6598980BB9",
     "0",
     "41ECE55743711A8C3CBF3783CD08C0EE4D4DC440D4641A8F366E550DFDB3BB67"}
    ,
    /* End-of-table sentinel. */
    {0, NULL, NULL, NULL, NULL, NULL, NULL}
};
|
{
"pile_set_name": "Github"
}
|
-- @description Move selected items to markers with the same name as the active take
-- @author Mordi
-- @version 1.0
-- @screenshot https://i.imgur.com/fkc8Ley.gif
-- @about Moves each selected item to each marker it finds, in the order of the marker index. If there are more items than markers, the surplus items simply stay put.
SCRIPT_NAME = "Move selected items to markers with the same name as the active take"
reaper.ClearConsole()
-- Print any value (with trailing newline) to the REAPER console.
function Msg(variable)
  reaper.ShowConsoleMsg(tostring(variable).."\n")
end
-- Collect {item, name} for every selected media item, where 'name' is the
-- name of the item's active take. Returns a 1-based array.
function get_selected_items_data()
  local t={}
  for i=1, reaper.CountSelectedMediaItems(0) do
    t[i] = {}
    local item = reaper.GetSelectedMediaItem(0, i-1)
    if item ~= nil then
      t[i].item = item
      -- Get active take name
      -- NOTE(review): activeTakeIndex/activeTake leak into globals; confirm
      -- this is intentional (declaring them local would be safer).
      activeTakeIndex = reaper.GetMediaItemInfo_Value(item, "I_CURTAKE")
      activeTake = reaper.GetTake(item, activeTakeIndex)
      t[i].name = reaper.GetTakeName(activeTake)
    end
  end
  return t
end
-- ######################################################################

reaper.Undo_BeginBlock()

-- Count markers and regions in the project.
retval, marker_count, rgn_count = reaper.CountProjectMarkers(0)

-- Check if any markers exist
if marker_count + rgn_count < 1 then
  reaper.ShowMessageBox("There are no regions or markers in the project", SCRIPT_NAME, 0)
  return
end

-- Store marker (not region) info in a contiguous 1-based array.
-- BUGFIX: the original indexed this table by the 0-based enumeration index
-- and skipped regions, leaving nil holes; '#markers' is undefined on such a
-- table and 'markers[n].hasBeenUsed' could error whenever regions were
-- interleaved with markers.
markers = {}
for i = 0, marker_count + rgn_count - 1 do
  retval, isrgn, pos, rgnend, name, markrgnindexnumber = reaper.EnumProjectMarkers(i)
  if not isrgn then
    markers[#markers + 1] = {name = name, pos = pos, hasBeenUsed = false}
  end
end

-- Count selected items
selectedItemNum = reaper.CountSelectedMediaItems(0)
if selectedItemNum == 0 then
  reaper.ShowMessageBox("There are no items selected", SCRIPT_NAME, 0)
  return
end

-- Move each selected item to the first not-yet-used marker whose name
-- matches the item's active take name.
data = get_selected_items_data()
for i = 1, #data do
  for n = 1, #markers do
    if markers[n].hasBeenUsed == false and markers[n].name == data[i].name then
      -- Move item to marker
      reaper.SetMediaItemInfo_Value(data[i].item, "D_POSITION", markers[n].pos)
      markers[n].hasBeenUsed = true
      break
    end
  end
end

reaper.Undo_EndBlock(SCRIPT_NAME, 0)
|
{
"pile_set_name": "Github"
}
|
"""Test ghost object support in VTK-Python
When PyVTKObject is destroyed, the vtkObjectBase that it
contained often continues to exist because references to
it still exist within VTK. When that vtkObjectBase is
returned to python, a new PyVTKObject is created.
If the PyVTKObject has a custom class or a custom dict,
then we make a "ghost" of the PyVTKObject when it is
destroyed, so that if its vtkObjectBase returns to python,
the PyVTKObject can be restored with the proper class and
dict. Each ghost has a weak pointer to its vtkObjectBase
so that it can be erased if the vtkObjectBase is destroyed.
To be tested:
- make sure custom dicts are restored
- make sure custom classes are restored
Created on Aug 19, 2010 by David Gobbi
"""
import sys
import vtk
from vtk.test import Testing
class vtkCustomObject(vtk.vtkObject):
    """Custom Python subclass of vtkObject used to test class resurrection."""
    pass
class TestGhost(Testing.vtkTest):
    def testGhostForDict(self):
        """Ghost an object to save the dict"""
        o = vtk.vtkObject()
        o.customattr = 'hello'
        # Storing o in a VTK container keeps the C++ vtkObjectBase alive
        # after the Python wrapper is deleted below.
        a = vtk.vtkVariantArray()
        a.InsertNextValue(o)
        i = id(o)
        del o
        o = vtk.vtkObject()
        o = a.GetValue(0).ToVTKObject()
        # make sure the id has changed, but dict the same
        self.assertEqual(o.customattr, 'hello')
        self.assertNotEqual(i, id(o))

    def testGhostForClass(self):
        """Ghost an object to save the class"""
        o = vtkCustomObject()
        a = vtk.vtkVariantArray()
        a.InsertNextValue(o)
        i = id(o)
        del o
        # Resurrect the wrapper from the C++ side; the custom Python class
        # must be restored from the ghost.
        o = vtk.vtkObject()
        o = a.GetValue(0).ToVTKObject()
        # make sure the id has changed, but class the same
        self.assertEqual(o.__class__, vtkCustomObject)
        self.assertNotEqual(i, id(o))
if __name__ == "__main__":
Testing.main([(TestGhost, 'test')])
|
{
"pile_set_name": "Github"
}
|
#pragma once

// Sentinel macro: presumably signals that the real uWebSockets headers are
// unavailable so dependent code can adapt -- TODO confirm consumer behavior.
#define NONEXISTENT_UWEBSOCKETS
|
{
"pile_set_name": "Github"
}
|
# -*- coding: utf-8 -*-
# Copyright 2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract images from https://photobucket.com/"""
from .common import Extractor, Message
from .. import text, exception
import base64
import json
class PhotobucketAlbumExtractor(Extractor):
    """Extractor for albums on photobucket.com"""
    category = "photobucket"
    subcategory = "album"
    directory_fmt = ("{category}", "{username}", "{location}")
    filename_fmt = "{offset:>03}{pictureId:?_//}_{titleOrFilename}.{extension}"
    archive_fmt = "{id}"
    pattern = (r"(?:https?://)?((?:[^.]+\.)?photobucket\.com)"
               r"/user/[^/?&#]+/library(?:/[^?&#]*)?")
    test = (
        ("https://s369.photobucket.com/user/CrpyLrkr/library", {
            "pattern": r"https?://[oi]+\d+.photobucket.com/albums/oo139/",
            "count": ">= 50"
        }),
        # subalbums of main "directory"
        ("https://s271.photobucket.com/user/lakerfanryan/library/", {
            "options": (("image-filter", "False"),),
            "pattern": pattern,
            "count": 1,
        }),
        # subalbums of subalbum without images
        ("https://s271.photobucket.com/user/lakerfanryan/library/Basketball", {
            "pattern": pattern,
            "count": ">= 9",
        }),
        # private (missing JSON data)
        ("https://s1277.photobucket.com/user/sinisterkat44/library/", {
            "count": 0,
        }),
        ("https://s1110.photobucket.com/user/chndrmhn100/library/"
         "Chandu%20is%20the%20King?sort=3&page=1"),
    )

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.album_path = ""
        self.root = "https://" + match.group(1)
        # Some endpoints require a same-site Referer header.
        self.session.headers["Referer"] = self.url

    def items(self):
        yield Message.Version, 1
        for image in self.images():
            image["titleOrFilename"] = text.unescape(image["titleOrFilename"])
            image["title"] = text.unescape(image["title"])
            image["extension"] = image["ext"]
            yield Message.Directory, image
            yield Message.Url, image["fullsizeUrl"], image

        # Optionally enqueue subalbums for recursive extraction.
        if self.config("subalbums", True):
            for album in self.subalbums():
                album["_extractor"] = PhotobucketAlbumExtractor
                yield Message.Queue, album["url"], album

    def images(self):
        """Yield all images of the current album"""
        url = self.url
        params = {"sort": "3", "page": 1}

        while True:
            page = self.request(url, params=params).text
            # Album metadata is embedded as inline JSON; absent for
            # private albums.
            json_data = text.extract(page, "collectionData:", ",\n")[0]
            if not json_data:
                msg = text.extract(page, 'libraryPrivacyBlock">', "</div>")[0]
                msg = ' ("{}")'.format(text.remove_html(msg)) if msg else ""
                self.log.error("Unable to get JSON data%s", msg)
                return
            data = json.loads(json_data)

            yield from data["items"]["objects"]

            # Stop when the current page reaches the album's total count.
            if data["total"] <= data["offset"] + data["pageSize"]:
                self.album_path = data["currentAlbumPath"]
                return
            params["page"] += 1

    def subalbums(self):
        """Return all subalbum objects"""
        url = self.root + "/component/Albums-SubalbumList"
        params = {
            "albumPath": self.album_path,
            "fetchSubAlbumsOnly": "true",
            "deferCollapsed": "true",
            "json": "1",
        }
        data = self.request(url, params=params).json()
        return data["body"].get("subAlbums", ())
class PhotobucketImageExtractor(Extractor):
    """Extractor for individual images from photobucket.com"""
    category = "photobucket"
    subcategory = "image"
    directory_fmt = ("{category}", "{username}")
    filename_fmt = "{pictureId:?/_/}{titleOrFilename}.{extension}"
    archive_fmt = "{username}_{id}"
    pattern = (r"(?:https?://)?(?:[^.]+\.)?photobucket\.com"
               r"(?:/gallery/user/([^/?&#]+)/media/([^/?&#]+)"
               r"|/user/([^/?&#]+)/media/[^?&#]+\.html)")
    test = (
        (("https://s271.photobucket.com/user/lakerfanryan"
          "/media/Untitled-3-1.jpg.html"), {
            "url": "3b647deeaffc184cc48c89945f67574559c9051f",
            "keyword": "69732741b2b351db7ecaa77ace2fdb39f08ca5a3",
        }),
        (("https://s271.photobucket.com/user/lakerfanryan"
          "/media/IsotopeswBros.jpg.html?sort=3&o=2"), {
            "url": "12c1890c09c9cdb8a88fba7eec13f324796a8d7b",
            "keyword": "61200a223df6c06f45ac3d30c88b3f5b048ce9a8",
        }),
    )

    def __init__(self, match):
        Extractor.__init__(self, match)
        # Groups 1/2 come from gallery URLs, group 3 from /user/ URLs.
        self.user = match.group(1) or match.group(3)
        self.media_id = match.group(2)
        self.session.headers["Referer"] = self.url

    def items(self):
        url = "https://photobucket.com/galleryd/search.php"
        params = {"userName": self.user, "searchTerm": "", "ref": ""}

        if self.media_id:
            params["mediaId"] = self.media_id
        else:
            params["url"] = self.url

        # retry API call up to 5 times, since it can randomly fail
        tries = 0
        while tries < 5:
            data = self.request(url, method="POST", params=params).json()
            image = data["mediaDocuments"]
            if "message" not in image:
                break  # success
            tries += 1
            self.log.debug(image["message"])
        else:
            raise exception.StopExtraction(image["message"])

        # adjust metadata entries to be at least somewhat similar
        # to what the 'album' extractor provides
        if "media" in image:
            image = image["media"][image["mediaIndex"]]
            image["albumView"] = data["mediaDocuments"]["albumView"]
            image["username"] = image["ownerId"]
        else:
            image["fileUrl"] = image.pop("imageUrl")

        image.setdefault("title", "")
        image.setdefault("description", "")
        name, _, ext = image["fileUrl"].rpartition("/")[2].rpartition(".")
        image["ext"] = image["extension"] = ext
        image["titleOrFilename"] = image["title"] or name
        image["tags"] = image.pop("clarifaiTagList", [])

        # The opaque id encodes its own type, e.g. b"mediaId:<id>".
        mtype, _, mid = base64.b64decode(image["id"]).partition(b":")
        image["pictureId"] = mid.decode() if mtype == b"mediaId" else ""

        yield Message.Version, 1
        yield Message.Directory, image
        yield Message.Url, image["fileUrl"], image
|
{
"pile_set_name": "Github"
}
|
$NetBSD: distinfo,v 1.15 2017/06/18 20:39:29 youri Exp $
SHA1 (exo-0.11.3.tar.bz2) = 758ced83d97650e0428563b42877aecfc9fc3c81
RMD160 (exo-0.11.3.tar.bz2) = e6fcd2521ecd364ff7d2898970b15ee9c2dddb95
SHA512 (exo-0.11.3.tar.bz2) = 667f7db5b122e9dde07b71583bf1eb412828698d581c17957fbe551c70b76e80c317d7a6781ae89279c60ddc004ce2d0484435a29276e0a949e9e152d8b86574
Size (exo-0.11.3.tar.bz2) = 1294802 bytes
SHA1 (patch-af) = d983808d490ef8f1d47c8a8469b8f3ec4f2d7cf4
SHA1 (patch-exo-helper_helpers_Thunar.desktop.in.in) = efe98db2f48309043c80276c4d39934cb949efa7
|
{
"pile_set_name": "Github"
}
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package firehose
// Error code string constants for the Firehose service, matched against
// the Code() of returned awserr.Error values.
// NOTE: this file is generated (see header) — regenerate instead of
// hand-editing when the service model changes.
const (

	// ErrCodeConcurrentModificationException for service response error code
	// "ConcurrentModificationException".
	//
	// Another modification has already happened. Fetch VersionId again and use
	// it to update the destination.
	ErrCodeConcurrentModificationException = "ConcurrentModificationException"

	// ErrCodeInvalidArgumentException for service response error code
	// "InvalidArgumentException".
	//
	// The specified input parameter has a value that is not valid.
	ErrCodeInvalidArgumentException = "InvalidArgumentException"

	// ErrCodeLimitExceededException for service response error code
	// "LimitExceededException".
	//
	// You have already reached the limit for a requested resource.
	ErrCodeLimitExceededException = "LimitExceededException"

	// ErrCodeResourceInUseException for service response error code
	// "ResourceInUseException".
	//
	// The resource is already in use and not available for this operation.
	ErrCodeResourceInUseException = "ResourceInUseException"

	// ErrCodeResourceNotFoundException for service response error code
	// "ResourceNotFoundException".
	//
	// The specified resource could not be found.
	ErrCodeResourceNotFoundException = "ResourceNotFoundException"

	// ErrCodeServiceUnavailableException for service response error code
	// "ServiceUnavailableException".
	//
	// The service is unavailable. Back off and retry the operation. If you continue
	// to see the exception, throughput limits for the delivery stream may have
	// been exceeded. For more information about limits and how to request an increase,
	// see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
	ErrCodeServiceUnavailableException = "ServiceUnavailableException"
)
|
{
"pile_set_name": "Github"
}
|
#
# This is the "master security properties file".
#
# In this file, various security properties are set for use by
# java.security classes. This is where users can statically register
# Cryptography Package Providers ("providers" for short). The term
# "provider" refers to a package or set of packages that supply a
# concrete implementation of a subset of the cryptography aspects of
# the Java Security API. A provider may, for example, implement one or
# more digital signature algorithms or message digest algorithms.
#
# Each provider must implement a subclass of the Provider class.
# To register a provider in this master security properties file,
# specify the Provider subclass name and priority in the format
#
# security.provider.<n>=<className>
#
# This declares a provider, and specifies its preference
# order n. The preference order is the order in which providers are
# searched for requested algorithms (when no specific provider is
# requested). The order is 1-based; 1 is the most preferred, followed
# by 2, and so on.
#
# <className> must specify the subclass of the Provider class whose
# constructor sets the values of various properties that are required
# for the Java Security API to look up the algorithms or other
# facilities implemented by the provider.
#
# There must be at least one provider specification in java.security.
# There is a default provider that comes standard with the JDK. It
# is called the "SUN" provider, and its Provider subclass
# named Sun appears in the sun.security.provider package. Thus, the
# "SUN" provider is registered via the following:
#
# security.provider.1=sun.security.provider.Sun
#
# (The number 1 is used for the default provider.)
#
# Note: Providers can be dynamically registered instead by calls to
# either the addProvider or insertProviderAt method in the Security
# class.
#
# List of providers and their preference orders (see above):
#
security.provider.1=sun.security.provider.Sun
security.provider.2=sun.security.rsa.SunRsaSign
security.provider.3=com.sun.net.ssl.internal.ssl.Provider
security.provider.4=com.sun.crypto.provider.SunJCE
security.provider.5=sun.security.jgss.SunProvider
security.provider.6=com.sun.security.sasl.Provider
security.provider.7=org.jcp.xml.dsig.internal.dom.XMLDSigRI
security.provider.8=sun.security.smartcardio.SunPCSC
#
# Select the source of seed data for SecureRandom. By default an
# attempt is made to use the entropy gathering device specified by
# the securerandom.source property. If an exception occurs when
# accessing the URL then the traditional system/thread activity
# algorithm is used.
#
# On Solaris and Linux systems, if file:/dev/urandom is specified and it
# exists, a special SecureRandom implementation is activated by default.
# This "NativePRNG" reads random bytes directly from /dev/urandom.
#
# On Windows systems, the URLs file:/dev/random and file:/dev/urandom
# enables use of the Microsoft CryptoAPI seed functionality.
#
securerandom.source=file:/dev/urandom
#
# The entropy gathering device is described as a URL and can also
# be specified with the system property "java.security.egd". For example,
# -Djava.security.egd=file:/dev/urandom
# Specifying this system property will override the securerandom.source
# setting.
#
# Class to instantiate as the javax.security.auth.login.Configuration
# provider.
#
login.configuration.provider=com.sun.security.auth.login.ConfigFile
#
# Default login configuration file
#
#login.config.url.1=file:${user.home}/.java.login.config
#
# Class to instantiate as the system Policy. This is the name of the class
# that will be used as the Policy object.
#
policy.provider=sun.security.provider.PolicyFile
# The default is to have a single system-wide policy file,
# and a policy file in the user's home directory.
policy.url.1=file:${java.home}/lib/security/java.policy
policy.url.2=file:${user.home}/.java.policy
# whether or not we expand properties in the policy file
# if this is set to false, properties (${...}) will not be expanded in policy
# files.
policy.expandProperties=true
# whether or not we allow an extra policy to be passed on the command line
# with -Djava.security.policy=somefile. Comment out this line to disable
# this feature.
policy.allowSystemProperty=true
# whether or not we look into the IdentityScope for trusted Identities
# when encountering a 1.1 signed JAR file. If the identity is found
# and is trusted, we grant it AllPermission.
policy.ignoreIdentityScope=false
#
# Default keystore type.
#
keystore.type=jks
#
# Class to instantiate as the system scope:
#
system.scope=sun.security.provider.IdentityDatabase
#
# List of comma-separated packages that start with or equal this string
# will cause a security exception to be thrown when
# passed to checkPackageAccess unless the
# corresponding RuntimePermission ("accessClassInPackage."+package) has
# been granted.
package.access=sun.,com.sun.xml.internal.ws.,com.sun.xml.internal.bind.
#
# List of comma-separated packages that start with or equal this string
# will cause a security exception to be thrown when
# passed to checkPackageDefinition unless the
# corresponding RuntimePermission ("defineClassInPackage."+package) has
# been granted.
#
# by default, no packages are restricted for definition, and none of
# the class loaders supplied with the JDK call checkPackageDefinition.
#
#package.definition=
#
# Determines whether this properties file can be appended to
# or overridden on the command line via -Djava.security.properties
#
security.overridePropertiesFile=true
#
# Determines the default key and trust manager factory algorithms for
# the javax.net.ssl package.
#
ssl.KeyManagerFactory.algorithm=SunX509
ssl.TrustManagerFactory.algorithm=PKIX
#
# The Java-level namelookup cache policy for successful lookups:
#
# any negative value: caching forever
# any positive value: the number of seconds to cache an address for
# zero: do not cache
#
# default value is forever (FOREVER). For security reasons, this
# caching is made forever when a security manager is set. When a security
# manager is not set, the default behavior in this implementation
# is to cache for 30 seconds.
#
# NOTE: setting this to anything other than the default value can have
# serious security implications. Do not set it unless
# you are sure you are not exposed to DNS spoofing attack.
#
#networkaddress.cache.ttl=-1
# The Java-level namelookup cache policy for failed lookups:
#
# any negative value: cache forever
# any positive value: the number of seconds to cache negative lookup results
# zero: do not cache
#
# In some Microsoft Windows networking environments that employ
# the WINS name service in addition to DNS, name service lookups
# that fail may take a noticeably long time to return (approx. 5 seconds).
# For this reason the default caching policy is to maintain these
# results for 10 seconds.
#
#
networkaddress.cache.negative.ttl=10
#
# Properties to configure OCSP for certificate revocation checking
#
# Enable OCSP
#
# By default, OCSP is not used for certificate revocation checking.
# This property enables the use of OCSP when set to the value "true".
#
# NOTE: SocketPermission is required to connect to an OCSP responder.
#
# Example,
# ocsp.enable=true
#
# Location of the OCSP responder
#
# By default, the location of the OCSP responder is determined implicitly
# from the certificate being validated. This property explicitly specifies
# the location of the OCSP responder. The property is used when the
# Authority Information Access extension (defined in RFC 3280) is absent
# from the certificate or when it requires overriding.
#
# Example,
# ocsp.responderURL=http://ocsp.example.net:80
#
# Subject name of the OCSP responder's certificate
#
# By default, the certificate of the OCSP responder is that of the issuer
# of the certificate being validated. This property identifies the certificate
# of the OCSP responder when the default does not apply. Its value is a string
# distinguished name (defined in RFC 2253) which identifies a certificate in
# the set of certificates supplied during cert path validation. In cases where
# the subject name alone is not sufficient to uniquely identify the certificate
# then both the "ocsp.responderCertIssuerName" and
# "ocsp.responderCertSerialNumber" properties must be used instead. When this
# property is set then those two properties are ignored.
#
# Example,
# ocsp.responderCertSubjectName="CN=OCSP Responder, O=XYZ Corp"
#
# Issuer name of the OCSP responder's certificate
#
# By default, the certificate of the OCSP responder is that of the issuer
# of the certificate being validated. This property identifies the certificate
# of the OCSP responder when the default does not apply. Its value is a string
# distinguished name (defined in RFC 2253) which identifies a certificate in
# the set of certificates supplied during cert path validation. When this
# property is set then the "ocsp.responderCertSerialNumber" property must also
# be set. When the "ocsp.responderCertSubjectName" property is set then this
# property is ignored.
#
# Example,
# ocsp.responderCertIssuerName="CN=Enterprise CA, O=XYZ Corp"
#
# Serial number of the OCSP responder's certificate
#
# By default, the certificate of the OCSP responder is that of the issuer
# of the certificate being validated. This property identifies the certificate
# of the OCSP responder when the default does not apply. Its value is a string
# of hexadecimal digits (colon or space separators may be present) which
# identifies a certificate in the set of certificates supplied during cert path
# validation. When this property is set then the "ocsp.responderCertIssuerName"
# property must also be set. When the "ocsp.responderCertSubjectName" property
# is set then this property is ignored.
#
# Example,
# ocsp.responderCertSerialNumber=2A:FF:00
|
{
"pile_set_name": "Github"
}
|
# The Potato Processor - A simple RISC-V based processor for FPGAs
# (c) Kristian Klomsten Skordal 2014 - 2015 <kristian.skordal@wafflemail.net>
# Report bugs and issues on <https://github.com/skordal/potato/issues>
# Targets that do not produce a file with their own name.  potato.prj is
# deliberately phony so the project file is regenerated on every run.
# Declaring all pseudo-targets prevents a stray file named e.g.
# 'run-tests' from silently disabling that target.
.PHONY: all clean distclean potato.prj copy-riscv-tests compile-tests \
	run-tests run-soc-tests remove-xilinx-garbage
# VHDL sources of the Potato processor core, compiled into the 'work'
# library in this order.
SOURCE_FILES := \
	src/pp_alu.vhd \
	src/pp_alu_mux.vhd \
	src/pp_alu_control_unit.vhd \
	src/pp_icache.vhd \
	src/pp_comparator.vhd \
	src/pp_constants.vhd \
	src/pp_control_unit.vhd \
	src/pp_core.vhd \
	src/pp_counter.vhd \
	src/pp_csr.vhd \
	src/pp_csr_unit.vhd \
	src/pp_csr_alu.vhd \
	src/pp_decode.vhd \
	src/pp_execute.vhd \
	src/pp_fetch.vhd \
	src/pp_imm_decoder.vhd \
	src/pp_memory.vhd \
	src/pp_potato.vhd \
	src/pp_register_file.vhd \
	src/pp_types.vhd \
	src/pp_utilities.vhd \
	src/pp_wb_arbiter.vhd \
	src/pp_wb_adapter.vhd \
	src/pp_writeback.vhd

# Simulation-only testbenches and their support modules.
TESTBENCHES := \
	testbenches/tb_processor.vhd \
	testbenches/tb_soc.vhd \
	soc/pp_soc_memory.vhd

# Cross-toolchain prefix used to assemble/link the RISC-V test programs
# (overridable from the environment or the command line).
TOOLCHAIN_PREFIX ?= riscv32-unknown-elf
# ISA tests to use from the riscv-tests repository:
RISCV_TESTS += \
	simple \
	add \
	addi \
	and \
	andi \
	auipc \
	beq \
	bge \
	bgeu \
	blt \
	bltu \
	bne \
	jal \
	jalr \
	lb \
	lbu \
	lh \
	lhu \
	lui \
	lw \
	or \
	ori \
	sb \
	sh \
	sll \
	slt \
	slti \
	sltiu \
	sltu \
	sra \
	srai \
	srl \
	sub \
	sw \
	xor \
	xori

# Local tests to run (sources live directly in tests/):
LOCAL_TESTS += \
	csr_hazard

# Compiler flags to use when building tests:
TARGET_CFLAGS += -march=rv32i -Wall -O0
# Extra linker flags for the test programs (empty by default).
TARGET_LDFLAGS +=
# Default goal: build the project file and run both test suites.
all: potato.prj run-tests run-soc-tests

# Regenerate the xelab/xsim project file listing every VHDL source.
potato.prj:
	-$(RM) potato.prj
	for file in $(SOURCE_FILES) $(TESTBENCHES); do \
		echo "vhdl work $$file" >> potato.prj; \
	done

# Copy the selected ISA test sources out of the riscv-tests checkout.
copy-riscv-tests:
	test -d tests || mkdir tests
	for test in $(RISCV_TESTS); do \
		cp riscv-tests/$$test.S tests; \
	done

# Assemble and link each test, then extract imem/dmem hex images.
compile-tests: copy-riscv-tests
	test -d tests-build || mkdir tests-build
	for test in $(RISCV_TESTS) $(LOCAL_TESTS); do \
		echo "Compiling test $$test..."; \
		$(TOOLCHAIN_PREFIX)-gcc -c $(TARGET_CFLAGS) -DPOTATO_TEST_ASSEMBLY -Iriscv-tests -o tests-build/$$test.o tests/$$test.S; \
		$(TOOLCHAIN_PREFIX)-ld $(TARGET_LDFLAGS) -T tests.ld tests-build/$$test.o -o tests-build/$$test.elf; \
		scripts/extract_hex.sh tests-build/$$test.elf tests-build/$$test-imem.hex tests-build/$$test-dmem.hex; \
	done

# Simulate every test against the bare-processor testbench and report
# the Success/Failure note emitted by the simulation.
run-tests: potato.prj compile-tests
	for test in $(RISCV_TESTS) $(LOCAL_TESTS); do \
		echo -ne "Running test $$test:\t"; \
		DMEM_FILENAME="empty_dmem.hex"; \
		test -f tests-build/$$test-dmem.hex && DMEM_FILENAME="tests-build/$$test-dmem.hex"; \
		xelab tb_processor -generic_top "IMEM_FILENAME=tests-build/$$test-imem.hex" -generic_top "DMEM_FILENAME=$$DMEM_FILENAME" -prj potato.prj > /dev/null; \
		xsim tb_processor -R --onfinish quit > tests-build/$$test.results; \
		cat tests-build/$$test.results | awk '/Note:/ {print}' | sed 's/Note://' | awk '/Success|Failure/ {print}'; \
	done

# Same as run-tests, but against the full SoC testbench.
run-soc-tests: potato.prj compile-tests
	for test in $(RISCV_TESTS) $(LOCAL_TESTS); do \
		echo -ne "Running SOC test $$test:\t"; \
		DMEM_FILENAME="empty_dmem.hex"; \
		test -f tests-build/$$test-dmem.hex && DMEM_FILENAME="tests-build/$$test-dmem.hex"; \
		xelab tb_soc -generic_top "IMEM_FILENAME=tests-build/$$test-imem.hex" -generic_top "DMEM_FILENAME=$$DMEM_FILENAME" -prj potato.prj > /dev/null; \
		xsim tb_soc -R --onfinish quit > tests-build/$$test.results-soc; \
		cat tests-build/$$test.results-soc | awk '/Note:/ {print}' | sed 's/Note://' | awk '/Success|Failure/ {print}'; \
	done

# Delete the scratch files the Xilinx tools drop in the working directory.
remove-xilinx-garbage:
	-$(RM) -r xsim.dir
	-$(RM) xelab.* webtalk* xsim*

# Remove all generated files (copied tests, build output, project file).
clean: remove-xilinx-garbage
	for test in $(RISCV_TESTS); do $(RM) tests/$$test.S; done
	-$(RM) -r tests-build
	-$(RM) potato.prj

# Full cleanup; currently identical to 'clean'.
distclean: clean
|
{
"pile_set_name": "Github"
}
|
# Install Chef via the official omnibus installer script.
# -f  : fail on HTTP errors instead of piping an error page into bash
# -sS : silent progress, but still print errors
# -L  : follow redirects
curl -fsSL https://www.opscode.com/chef/install.sh | sudo bash
|
{
"pile_set_name": "Github"
}
|
# coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for safe_rl_recs.agents.model."""
from absl.testing import absltest
from agents.recommenders import batched_movielens_rnn_agent
from agents.recommenders import model
class ModelTest(absltest.TestCase):
  """Tests for the recommender RNN model built by `model.create_model`."""

  def test_sequence_model_outputs_can_vary_in_size(self):
    """Prediction output grows along the time axis with the input sequence."""
    vocab_size = 64
    num_users = 16
    # max_episode_length=None and batch_size=None build a model that
    # accepts variable-length and variable-batch input.
    my_model = model.create_model(
        max_episode_length=None,
        action_space_size=vocab_size,
        embedding_size=16,
        hidden_size=8,
        batch_size=None,
        user_id_input=True,
        num_users=num_users,
        user_embedding_size=4,
        repeat_recs_in_episode=False,
        genre_vector_input=False,
        genre_vec_size=3)
    start_token = vocab_size
    model_input = batched_movielens_rnn_agent.Sequence(
        vocab_size=vocab_size, mask_previous_recs=True, start_token=start_token)
    observation = {'user': {'user_id': 4}, 'response': [{'violence_score': 0}]}
    # `vocab_size` is used as a start token as it's out of the vocabulary.
    start_token = vocab_size
    # Feed three steps and expect a time dimension of 3 ...
    model_input.update(
        last_recommendation=start_token, reward=3, observation=observation)
    model_input.update(last_recommendation=2, reward=0, observation=observation)
    model_input.update(last_recommendation=3, reward=1, observation=observation)
    input_ = model_input.build_prediction_input(
        ['recommendations', 'rewards', 'users', 'final_mask'])
    output = my_model.predict(input_)
    self.assertEqual(output.shape, (1, 3, 64))
    # ... then a fourth step and expect the time dimension to grow to 4.
    model_input.update(last_recommendation=4, reward=1, observation=observation)
    input_ = model_input.build_prediction_input(
        ['recommendations', 'rewards', 'users', 'final_mask'])
    output = my_model.predict(input_)
    self.assertEqual(output.shape, (1, 4, 64))

  def test_batch_model_input(self):
    """Two episodes in different batch positions yield batched output."""
    vocab_size = 64
    num_users = 16
    start_token = vocab_size
    my_model = model.create_model(
        max_episode_length=None,
        action_space_size=vocab_size,
        embedding_size=16,
        hidden_size=8,
        batch_size=None,
        user_id_input=True,
        num_users=num_users,
        user_embedding_size=4,
        repeat_recs_in_episode=False,
        genre_vector_input=False,
        genre_vec_size=3)
    model_input = batched_movielens_rnn_agent.Sequence(
        vocab_size=vocab_size, mask_previous_recs=True, start_token=start_token)
    # Three steps for user 4 in batch position 0.
    observation = {'user': {'user_id': 4}, 'response': [{'violence_score': 0}]}
    model_input.update(
        last_recommendation=1,
        reward=3,
        observation=observation,
        batch_position=0)
    model_input.update(
        last_recommendation=2,
        reward=0,
        observation=observation,
        batch_position=0)
    model_input.update(
        last_recommendation=3,
        reward=1,
        observation=observation,
        batch_position=0)
    # Three steps for user 2 in batch position 1.
    observation = {'user': {'user_id': 2}, 'response': [{'violence_score': 0}]}
    model_input.update(
        last_recommendation=3,
        reward=3,
        observation=observation,
        batch_position=1)
    model_input.update(
        last_recommendation=2,
        reward=0,
        observation=observation,
        batch_position=1)
    model_input.update(
        last_recommendation=1,
        reward=1,
        observation=observation,
        batch_position=1)
    input_ = model_input.build_prediction_input(
        ['recommendations', 'rewards', 'users', 'final_mask'])
    output = my_model.predict(input_)
    # Batch dimension of 2, three time steps, vocab-sized logits.
    self.assertEqual(output.shape, (2, 3, 64))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
|
{
"pile_set_name": "Github"
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipv4_test
import (
"testing"
"golang.org/x/net/ipv4"
)
// TestControlMessageParseWithFuzz feeds fuzzer-discovered inputs to
// ControlMessage.Parse; the test passes as long as Parse does not panic.
func TestControlMessageParseWithFuzz(t *testing.T) {
	payloads := []string{
		"\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00",
		"\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00",
	}
	var msg ipv4.ControlMessage
	for _, payload := range payloads {
		msg.Parse([]byte(payload))
	}
}
|
{
"pile_set_name": "Github"
}
|
// DejaLu
// Copyright (c) 2015 Hoa V. DINH. All rights reserved.
#ifndef __dejalu__HMMailDBStoreKeyValueOperation__
#define __dejalu__HMMailDBStoreKeyValueOperation__
#include <MailCore/MailCore.h>
#include "HMMailDBOperation.h"
#ifdef __cplusplus
namespace hermes {

    // Mail-database operation that stores a single key/value pair
    // (string key -> binary value).  Configure via setKey()/setValue()
    // before the operation is run.
    class MailDBStoreKeyValueOperation : public MailDBOperation {
    public:
        MailDBStoreKeyValueOperation();
        virtual ~MailDBStoreKeyValueOperation();

        // Key under which the value will be stored.
        virtual mailcore::String * key();
        virtual void setKey(mailcore::String * key);

        // Raw data to store for the key.
        virtual mailcore::Data * value();
        virtual void setValue(mailcore::Data * value);

        // Implements Operation.
        virtual void main();

    private:
        mailcore::String * mKey;
        mailcore::Data * mValue;
    };

}
#endif
#endif /* defined(__dejalu__HMMailDBStoreKeyValueOperation__) */
|
{
"pile_set_name": "Github"
}
|
<?php
/*
* This file is part of Twig.
*
* (c) 2010 Fabien Potencier
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
 * Represents a "is <test>" expression node.
 *
 * At compile time the node looks up the test implementation in the
 * environment, records it as attributes, and delegates code generation
 * to Twig_Node_Expression_Call::compileCallable().
 */
class Twig_Node_Expression_Test extends Twig_Node_Expression_Call
{
    public function __construct(Twig_NodeInterface $node, $name, Twig_NodeInterface $arguments = null, $lineno)
    {
        parent::__construct(array('node' => $node, 'arguments' => $arguments), array('name' => $name), $lineno);
    }

    public function compile(Twig_Compiler $compiler)
    {
        $testName = $this->getAttribute('name');
        $testImpl = $compiler->getEnvironment()->getTest($testName);

        $this->setAttribute('name', $testName);
        $this->setAttribute('type', 'test');
        $this->setAttribute('thing', $testImpl);

        $providesCallable = $testImpl instanceof Twig_TestCallableInterface
            || $testImpl instanceof Twig_SimpleTest;
        if ($providesCallable) {
            $this->setAttribute('callable', $testImpl->getCallable());
        }
        if ($testImpl instanceof Twig_SimpleTest) {
            $this->setAttribute('is_variadic', $testImpl->isVariadic());
        }

        $this->compileCallable($compiler);
    }
}
|
{
"pile_set_name": "Github"
}
|
// Show the shared modal dialog (#iframeModal) with its embedded iframe
// pointed at the given URL.
function showIframeDialog(url) {
    var modal = $('#iframeModal');
    modal.find('#iframe').attr('src', url);
    modal.modal({ 'show': true });
}
|
{
"pile_set_name": "Github"
}
|
/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2011 Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Jaume Nin <jnin@cttc.es>
* Modified by: Danilo Abrignani <danilo.abrignani@unibo.it> (Carrier Aggregation - GSoC 2015)
* Biljana Bojovic <biljana.bojovic@cttc.es> (Carrier Aggregation)
*/
#include "mac-stats-calculator.h"
#include "ns3/string.h"
#include <ns3/simulator.h>
#include <ns3/log.h>
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("MacStatsCalculator");
NS_OBJECT_ENSURE_REGISTERED (MacStatsCalculator);
// Constructor: both "first write" flags start true so the output files
// are truncated and given a header row on the first scheduling event.
MacStatsCalculator::MacStatsCalculator ()
  : m_dlFirstWrite (true),
    m_ulFirstWrite (true)
{
  NS_LOG_FUNCTION (this);
}

// Destructor: nothing to release; files are closed after each write.
MacStatsCalculator::~MacStatsCalculator ()
{
  NS_LOG_FUNCTION (this);
}
// Register the ns-3 TypeId with the two output-filename attributes
// (defaults: DlMacStats.txt / UlMacStats.txt).
TypeId
MacStatsCalculator::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::MacStatsCalculator")
    .SetParent<LteStatsCalculator> ()
    .SetGroupName("Lte")
    .AddConstructor<MacStatsCalculator> ()
    .AddAttribute ("DlOutputFilename",
                   "Name of the file where the downlink results will be saved.",
                   StringValue ("DlMacStats.txt"),
                   MakeStringAccessor (&MacStatsCalculator::SetDlOutputFilename),
                   MakeStringChecker ())
    .AddAttribute ("UlOutputFilename",
                   "Name of the file where the uplink results will be saved.",
                   StringValue ("UlMacStats.txt"),
                   MakeStringAccessor (&MacStatsCalculator::SetUlOutputFilename),
                   MakeStringChecker ())
  ;
  return tid;
}
// The four accessors below simply delegate filename storage to the
// LteStatsCalculator base class.

void
MacStatsCalculator::SetUlOutputFilename (std::string outputFilename)
{
  LteStatsCalculator::SetUlOutputFilename (outputFilename);
}

std::string
MacStatsCalculator::GetUlOutputFilename (void)
{
  return LteStatsCalculator::GetUlOutputFilename ();
}

void
MacStatsCalculator::SetDlOutputFilename (std::string outputFilename)
{
  LteStatsCalculator::SetDlOutputFilename (outputFilename);
}

std::string
MacStatsCalculator::GetDlOutputFilename (void)
{
  return LteStatsCalculator::GetDlOutputFilename ();
}
// Append one tab-separated line per downlink scheduling event to the DL
// output file.  On the first write of a run the file is truncated and a
// header row is written; later writes append.
void
MacStatsCalculator::DlScheduling (uint16_t cellId, uint64_t imsi, DlSchedulingCallbackInfo dlSchedulingCallbackInfo)
{
  NS_LOG_FUNCTION (this << cellId << imsi << dlSchedulingCallbackInfo.frameNo << dlSchedulingCallbackInfo.subframeNo <<
                   dlSchedulingCallbackInfo.rnti << (uint32_t) dlSchedulingCallbackInfo.mcsTb1 << dlSchedulingCallbackInfo.sizeTb1 << (uint32_t) dlSchedulingCallbackInfo.mcsTb2 << dlSchedulingCallbackInfo.sizeTb2);
  NS_LOG_INFO ("Write DL Mac Stats in " << GetDlOutputFilename ().c_str ());

  std::ofstream outFile;
  if ( m_dlFirstWrite == true )
    {
      // First event: create/truncate the file and emit the column header.
      outFile.open (GetDlOutputFilename ().c_str ());
      if (!outFile.is_open ())
        {
          NS_LOG_ERROR ("Can't open file " << GetDlOutputFilename ().c_str ());
          return;
        }
      m_dlFirstWrite = false;
      outFile << "% time\tcellId\tIMSI\tframe\tsframe\tRNTI\tmcsTb1\tsizeTb1\tmcsTb2\tsizeTb2\tccId";
      outFile << std::endl;
    }
  else
    {
      // Subsequent events: append to the existing file.
      outFile.open (GetDlOutputFilename ().c_str (), std::ios_base::app);
      if (!outFile.is_open ())
        {
          NS_LOG_ERROR ("Can't open file " << GetDlOutputFilename ().c_str ());
          return;
        }
    }

  // Time is written in seconds with nanosecond resolution.
  outFile << Simulator::Now ().GetNanoSeconds () / (double) 1e9 << "\t";
  outFile << (uint32_t) cellId << "\t";
  outFile << imsi << "\t";
  outFile << dlSchedulingCallbackInfo.frameNo << "\t";
  outFile << dlSchedulingCallbackInfo.subframeNo << "\t";
  outFile << dlSchedulingCallbackInfo.rnti << "\t";
  outFile << (uint32_t) dlSchedulingCallbackInfo.mcsTb1 << "\t";
  outFile << dlSchedulingCallbackInfo.sizeTb1 << "\t";
  outFile << (uint32_t) dlSchedulingCallbackInfo.mcsTb2 << "\t";
  outFile << dlSchedulingCallbackInfo.sizeTb2 << "\t";
  outFile << (uint32_t) dlSchedulingCallbackInfo.componentCarrierId << std::endl;
  outFile.close ();
}
// Append one tab-separated line per uplink scheduling event to the UL
// output file.  Mirrors DlScheduling(): header on first write of a run,
// append afterwards.
void
MacStatsCalculator::UlScheduling (uint16_t cellId, uint64_t imsi, uint32_t frameNo,
                                  uint32_t subframeNo, uint16_t rnti,uint8_t mcsTb, uint16_t size, uint8_t componentCarrierId)
{
  NS_LOG_FUNCTION (this << cellId << imsi << frameNo << subframeNo << rnti << (uint32_t) mcsTb << size);
  NS_LOG_INFO ("Write UL Mac Stats in " << GetUlOutputFilename ().c_str ());

  std::ofstream outFile;
  if ( m_ulFirstWrite == true )
    {
      // First event: create/truncate the file and emit the column header.
      outFile.open (GetUlOutputFilename ().c_str ());
      if (!outFile.is_open ())
        {
          NS_LOG_ERROR ("Can't open file " << GetUlOutputFilename ().c_str ());
          return;
        }
      m_ulFirstWrite = false;
      outFile << "% time\tcellId\tIMSI\tframe\tsframe\tRNTI\tmcs\tsize\tccId";
      outFile << std::endl;
    }
  else
    {
      // Subsequent events: append to the existing file.
      outFile.open (GetUlOutputFilename ().c_str (), std::ios_base::app);
      if (!outFile.is_open ())
        {
          NS_LOG_ERROR ("Can't open file " << GetUlOutputFilename ().c_str ());
          return;
        }
    }

  // Time is written in seconds with nanosecond resolution.
  outFile << Simulator::Now ().GetNanoSeconds () / (double) 1e9 << "\t";
  outFile << (uint32_t) cellId << "\t";
  outFile << imsi << "\t";
  outFile << frameNo << "\t";
  outFile << subframeNo << "\t";
  outFile << rnti << "\t";
  outFile << (uint32_t) mcsTb << "\t";
  outFile << size << "\t";
  outFile << (uint32_t) componentCarrierId << std::endl;
  outFile.close ();
}
// Static trace sink for the DL scheduling trace source.  Resolves the
// RNTI in the trace path to IMSI and cell id (caching the result per
// path+RNTI inside the calculator), then records the event.
void
MacStatsCalculator::DlSchedulingCallback (Ptr<MacStatsCalculator> macStats, std::string path, DlSchedulingCallbackInfo dlSchedulingCallbackInfo)
{
  NS_LOG_FUNCTION (macStats << path);
  uint64_t imsi = 0;
  std::ostringstream pathAndRnti;
  // Strip the ComponentCarrierMap suffix to get the eNB base path, then
  // build the RRC UeMap path for this RNTI.
  std::string pathEnb = path.substr (0, path.find ("/ComponentCarrierMap"));
  pathAndRnti << pathEnb << "/LteEnbRrc/UeMap/" << dlSchedulingCallbackInfo.rnti;
  if (macStats->ExistsImsiPath (pathAndRnti.str ()) == true)
    {
      imsi = macStats->GetImsiPath (pathAndRnti.str ());
    }
  else
    {
      imsi = FindImsiFromEnbRlcPath (pathAndRnti.str ());
      macStats->SetImsiPath (pathAndRnti.str (), imsi);
    }
  uint16_t cellId = 0;
  if (macStats->ExistsCellIdPath (pathAndRnti.str ()) == true)
    {
      cellId = macStats->GetCellIdPath (pathAndRnti.str ());
    }
  else
    {
      cellId = FindCellIdFromEnbRlcPath (pathAndRnti.str ());
      macStats->SetCellIdPath (pathAndRnti.str (), cellId);
    }

  macStats->DlScheduling (cellId, imsi, dlSchedulingCallbackInfo);
}
// Static trace sink for the UL scheduling trace source.  Same
// path->IMSI/cellId resolution and caching as DlSchedulingCallback().
void
MacStatsCalculator::UlSchedulingCallback (Ptr<MacStatsCalculator> macStats, std::string path,
                                          uint32_t frameNo, uint32_t subframeNo, uint16_t rnti,
                                          uint8_t mcs, uint16_t size, uint8_t componentCarrierId)
{
  NS_LOG_FUNCTION (macStats << path);

  uint64_t imsi = 0;
  std::ostringstream pathAndRnti;
  // Strip the ComponentCarrierMap suffix to get the eNB base path, then
  // build the RRC UeMap path for this RNTI.
  std::string pathEnb = path.substr (0, path.find ("/ComponentCarrierMap"));
  pathAndRnti << pathEnb << "/LteEnbRrc/UeMap/" << rnti;
  if (macStats->ExistsImsiPath (pathAndRnti.str ()) == true)
    {
      imsi = macStats->GetImsiPath (pathAndRnti.str ());
    }
  else
    {
      imsi = FindImsiFromEnbRlcPath (pathAndRnti.str ());
      macStats->SetImsiPath (pathAndRnti.str (), imsi);
    }
  uint16_t cellId = 0;
  if (macStats->ExistsCellIdPath (pathAndRnti.str ()) == true)
    {
      cellId = macStats->GetCellIdPath (pathAndRnti.str ());
    }
  else
    {
      cellId = FindCellIdFromEnbRlcPath (pathAndRnti.str ());
      macStats->SetCellIdPath (pathAndRnti.str (), cellId);
    }

  macStats->UlScheduling (cellId, imsi, frameNo, subframeNo, rnti, mcs, size, componentCarrierId);
}
} // namespace ns3
|
{
"pile_set_name": "Github"
}
|
module Dpl
  # Gem version string (pre-release 2.0.2.beta.1).
  VERSION = '2.0.2.beta.1'
end
|
{
"pile_set_name": "Github"
}
|
/***************************************************
* 版权声明
*
* 本操作系统名为:MINE
* 该操作系统未经授权不得以盈利或非盈利为目的进行开发,
* 只允许个人学习以及公开交流使用
*
* 代码最终所有权及解释权归田宇所有;
*
* 本模块作者: 田宇
* EMail: 345538255@qq.com
*
*
***************************************************/
#include "interrupt.h"
#include "linkage.h"
#include "lib.h"
#include "printk.h"
#include "memory.h"
#include "gate.h"
#include "ptrace.h"
#include "cpu.h"
#include "APIC.h"
/*
*/
void IOAPIC_enable(unsigned long irq)
{
unsigned long value = 0;
value = ioapic_rte_read((irq - 32) * 2 + 0x10);
value = value & (~0x10000UL);
ioapic_rte_write((irq - 32) * 2 + 0x10,value);
}
void IOAPIC_disable(unsigned long irq)
{
unsigned long value = 0;
value = ioapic_rte_read((irq - 32) * 2 + 0x10);
value = value | 0x10000UL;
ioapic_rte_write((irq - 32) * 2 + 0x10,value);
}
/*
	IOAPIC_install - program the full 64-bit redirection-table entry
	for vector @irq from the caller-supplied IO_APIC_RET_entry (@arg).
	Always reports success (returns 1).
*/
unsigned long IOAPIC_install(unsigned long irq,void * arg)
{
	unsigned char index = (irq - 32) * 2 + 0x10;

	/* the entry struct is exactly 64 bits; write it in one shot */
	ioapic_rte_write(index,*(unsigned long *)arg);
	return 1;
}
/*
	IOAPIC_uninstall - retire the route for vector @irq.
	Writing 0x10000 leaves only the mask bit (bit 16) set and clears
	the rest of the entry, so the pin can no longer raise the vector.
*/
void IOAPIC_uninstall(unsigned long irq)
{
	ioapic_rte_write((irq - 32) * 2 + 0x10,0x10000UL);
}
/*
	IOAPIC_level_ack - end-of-interrupt for a level-triggered vector.
	Writes 0 to MSR 0x80B (the x2APIC EOI register) to acknowledge the
	local APIC, then writes the I/O APIC EOI register so the entry's
	level-triggered state is cleared as well.  @irq itself is unused.
*/
void IOAPIC_level_ack(unsigned long irq)
{
	__asm__ __volatile__( "movq $0x00, %%rdx \n\t"
	                      "movq $0x00, %%rax \n\t"
	                      "movq $0x80b, %%rcx \n\t"
	                      "wrmsr \n\t"
	                      :::"memory");
	*ioapic_map.virtual_EOI_address = 0;
}
/*
	IOAPIC_edge_ack - end-of-interrupt for an edge-triggered vector.
	Only the local APIC needs acknowledging: write 0 to MSR 0x80B (the
	x2APIC EOI register).  @irq itself is unused.
*/
void IOAPIC_edge_ack(unsigned long irq)
{
	__asm__ __volatile__( "movq $0x00, %%rdx \n\t"
	                      "movq $0x00, %%rax \n\t"
	                      "movq $0x80b, %%rcx \n\t"
	                      "wrmsr \n\t"
	                      :::"memory");
}
/*
	ioapic_rte_read - read one 64-bit redirection-table entry.
	The I/O APIC is reached through an index/data register window, so
	the entry is fetched as two 32-bit halves: high dword first (at
	@index + 1), then the low dword (at @index).  io_mfence() between
	the MMIO accesses keeps the index/data pairs strictly ordered.
*/
unsigned long ioapic_rte_read(unsigned char index)
{
	unsigned long ret;
	*ioapic_map.virtual_index_address = index + 1;
	io_mfence();
	ret = *ioapic_map.virtual_data_address;
	ret <<= 32;
	io_mfence();
	*ioapic_map.virtual_index_address = index;
	io_mfence();
	ret |= *ioapic_map.virtual_data_address;
	io_mfence();
	return ret;
}
/*
	ioapic_rte_write - write one 64-bit redirection-table entry.
	Mirror of ioapic_rte_read(): the low dword is stored at @index and
	the high dword at @index + 1, with io_mfence() keeping each
	index-select / data-write pair ordered on the MMIO window.
*/
void ioapic_rte_write(unsigned char index,unsigned long value)
{
	*ioapic_map.virtual_index_address = index;
	io_mfence();
	*ioapic_map.virtual_data_address = value & 0xffffffff;
	value >>= 32;
	io_mfence();
	*ioapic_map.virtual_index_address = index + 1;
	io_mfence();
	*ioapic_map.virtual_data_address = value & 0xffffffff;
	io_mfence();
}
/*
	IOAPIC_pagetable_remap - map the I/O APIC MMIO window.
	Maps physical 0xfec00000 into the kernel page tables as an
	uncached (PWT|PCD) page and records the virtual addresses of the
	index (+0x00), data (+0x10) and EOI (+0x40) registers in
	ioapic_map.  Missing intermediate table levels are allocated on
	demand with kmalloc().
*/
void IOAPIC_pagetable_remap()
{
	unsigned long * tmp;
	unsigned char * IOAPIC_addr = (unsigned char *)Phy_To_Virt(0xfec00000);
	ioapic_map.physical_address = 0xfec00000;
	ioapic_map.virtual_index_address = IOAPIC_addr;
	ioapic_map.virtual_data_address = (unsigned int *)(IOAPIC_addr + 0x10);
	ioapic_map.virtual_EOI_address = (unsigned int *)(IOAPIC_addr + 0x40);
	// NOTE(review): despite its name, Get_gdt() is used here as the source of
	// the top-level page-table base stored in Global_CR3 -- confirm in gate.h.
	Global_CR3 = Get_gdt();
	// top-level entry covering IOAPIC_addr; allocate the next level if absent
	tmp = Phy_To_Virt(Global_CR3 + (((unsigned long)IOAPIC_addr >> PAGE_GDT_SHIFT) & 0x1ff));
	if (*tmp == 0)
	{
		unsigned long * virtual = kmalloc(PAGE_4K_SIZE,0);
		set_mpl4t(tmp,mk_mpl4t(Virt_To_Phy(virtual),PAGE_KERNEL_GDT));
	}
	color_printk(YELLOW,BLACK,"1:%#018lx\t%#018lx\n",(unsigned long)tmp,(unsigned long)*tmp);
	// second-level (PDPT) entry; allocate the page directory if absent
	tmp = Phy_To_Virt((unsigned long *)(*tmp & (~ 0xfffUL)) + (((unsigned long)IOAPIC_addr >> PAGE_1G_SHIFT) & 0x1ff));
	if(*tmp == 0)
	{
		unsigned long * virtual = kmalloc(PAGE_4K_SIZE,0);
		set_pdpt(tmp,mk_pdpt(Virt_To_Phy(virtual),PAGE_KERNEL_Dir));
	}
	color_printk(YELLOW,BLACK,"2:%#018lx\t%#018lx\n",(unsigned long)tmp,(unsigned long)*tmp);
	// final entry maps the MMIO page uncached (PWT|PCD) at IOAPIC_addr
	tmp = Phy_To_Virt((unsigned long *)(*tmp & (~ 0xfffUL)) + (((unsigned long)IOAPIC_addr >> PAGE_2M_SHIFT) & 0x1ff));
	set_pdt(tmp,mk_pdt(ioapic_map.physical_address,PAGE_KERNEL_Page | PAGE_PWT | PAGE_PCD));
	color_printk(BLUE,BLACK,"3:%#018lx\t%#018lx\n",(unsigned long)tmp,(unsigned long)*tmp);
	color_printk(BLUE,BLACK,"ioapic_map.physical_address:%#010x\t\t\n",ioapic_map.physical_address);
	color_printk(BLUE,BLACK,"ioapic_map.virtual_address:%#018lx\t\t\n",(unsigned long)ioapic_map.virtual_index_address);
	// make the new mapping visible before the registers are touched
	flush_tlb();
}
/*
	Local_APIC_init - enable and report the local APIC of this CPU.
	Probes CPUID leaf 01h for APIC/x2APIC support, switches into
	x2APIC mode via IA32_APIC_BASE (MSR 0x1B, bits 10 and 11),
	software-enables the APIC through the Spurious-Interrupt Vector
	Register (MSR 0x80F, bit 8; bit 12 suppresses EOI broadcast),
	masks every local-vector-table entry, and dumps the APIC id,
	version, TPR and PPR to the console.
*/
void Local_APIC_init()
{
	unsigned int x,y;
	unsigned int a,b,c,d;
	//check APIC & x2APIC support
	get_cpuid(1,0,&a,&b,&c,&d);
	//void get_cpuid(unsigned int Mop,unsigned int Sop,unsigned int * a,unsigned int * b,unsigned int * c,unsigned int * d)
	color_printk(WHITE,BLACK,"CPUID\t01,eax:%#010x,ebx:%#010x,ecx:%#010x,edx:%#010x\n",a,b,c,d);
	//CPUID.01h:EDX[9] = on-chip APIC, ECX[21] = x2APIC capability
	if((1<<9) & d)
		color_printk(WHITE,BLACK,"HW support APIC&xAPIC\t");
	else
		color_printk(WHITE,BLACK,"HW NO support APIC&xAPIC\t");
	if((1<<21) & c)
		color_printk(WHITE,BLACK,"HW support x2APIC\n");
	else
		color_printk(WHITE,BLACK,"HW NO support x2APIC\n");
	//enable xAPIC & x2APIC
	//IA32_APIC_BASE (MSR 0x1B): bit 11 globally enables the APIC,
	//bit 10 selects x2APIC mode; set both, then read back to verify.
	__asm__ __volatile__( "movq $0x1b, %%rcx \n\t"
				"rdmsr \n\t"
				"bts $10, %%rax \n\t"
				"bts $11, %%rax \n\t"
				"wrmsr \n\t"
				"movq $0x1b, %%rcx \n\t"
				"rdmsr \n\t"
				:"=a"(x),"=d"(y)
				:
				:"memory");
	color_printk(WHITE,BLACK,"eax:%#010x,edx:%#010x\t",x,y);
	if(x&0xc00)
		color_printk(WHITE,BLACK,"xAPIC & x2APIC enabled\n");
	//enable SVR[8]
	//SVR (MSR 0x80F): bit 8 software-enables the APIC, bit 12
	//enables suppression of EOI broadcasts.
	__asm__ __volatile__( "movq $0x80f, %%rcx \n\t"
				"rdmsr \n\t"
				"bts $8, %%rax \n\t"
				"bts $12, %%rax\n\t"
				"wrmsr \n\t"
				"movq $0x80f, %%rcx \n\t"
				"rdmsr \n\t"
				:"=a"(x),"=d"(y)
				:
				:"memory");
	color_printk(WHITE,BLACK,"eax:%#010x,edx:%#010x\t",x,y);
	if(x&0x100)
		color_printk(WHITE,BLACK,"SVR[8] enabled\n");
	if(x&0x1000)
		color_printk(WHITE,BLACK,"SVR[12] enabled\n");
	//get local APIC ID (x2APIC ID register, MSR 0x802)
	__asm__ __volatile__( "movq $0x802, %%rcx \n\t"
				"rdmsr \n\t"
				:"=a"(x),"=d"(y)
				:
				:"memory");
	color_printk(WHITE,BLACK,"eax:%#010x,edx:%#010x\tx2APIC ID:%#010x\n",x,y,x);
	//get local APIC version (version register, MSR 0x803)
	__asm__ __volatile__( "movq $0x803, %%rcx \n\t"
				"rdmsr \n\t"
				:"=a"(x),"=d"(y)
				:
				:"memory");
	color_printk(WHITE,BLACK,"local APIC Version:%#010x,Max LVT Entry:%#010x,SVR(Suppress EOI Broadcast):%#04x\t",x & 0xff,(x >> 16 & 0xff) + 1,x >> 24 & 0x1);
	if((x & 0xff) < 0x10)
		color_printk(WHITE,BLACK,"82489DX discrete APIC\n");
	else if( ((x & 0xff) >= 0x10) && ((x & 0xff) <= 0x15) )
		color_printk(WHITE,BLACK,"Integrated APIC\n");
	//mask all LVT
	//each LVT MSR receives 0x10000 (bit 16 = mask) from rax/rdx
	__asm__ __volatile__( "movq $0x82f, %%rcx \n\t" //CMCI
				"wrmsr \n\t"
				"movq $0x832, %%rcx \n\t" //Timer
				"wrmsr \n\t"
				"movq $0x833, %%rcx \n\t" //Thermal Monitor
				"wrmsr \n\t"
				"movq $0x834, %%rcx \n\t" //Performance Counter
				"wrmsr \n\t"
				"movq $0x835, %%rcx \n\t" //LINT0
				"wrmsr \n\t"
				"movq $0x836, %%rcx \n\t" //LINT1
				"wrmsr \n\t"
				"movq $0x837, %%rcx \n\t" //Error
				"wrmsr \n\t"
				:
				:"a"(0x10000),"d"(0x00)
				:"memory");
	color_printk(GREEN,BLACK,"Mask ALL LVT\n");
	//TPR (Task-Priority Register, MSR 0x808) -- dumped, not modified
	__asm__ __volatile__( "movq $0x808, %%rcx \n\t"
				"rdmsr \n\t"
				:"=a"(x),"=d"(y)
				:
				:"memory");
	color_printk(GREEN,BLACK,"Set LVT TPR:%#010x\t",x);
	//PPR (Processor-Priority Register, MSR 0x80A) -- dumped, not modified
	__asm__ __volatile__( "movq $0x80a, %%rcx \n\t"
				"rdmsr \n\t"
				:"=a"(x),"=d"(y)
				:
				:"memory");
	color_printk(GREEN,BLACK,"Set LVT PPR:%#010x\n",x);
}
/*
	IOAPIC_init - program the I/O APIC itself.
	Sets the I/O APIC ID register (index 0x00), prints the version
	register (index 0x01), then initializes all 24 redirection-table
	entries masked (bit 16 set) and routed to vector 0x20 + pin.
*/
void IOAPIC_init()
{
	int i ;
	// I/O APIC
	// I/O APIC ID
	*ioapic_map.virtual_index_address = 0x00;
	io_mfence();
	*ioapic_map.virtual_data_address = 0x0f000000;
	io_mfence();
	color_printk(GREEN,BLACK,"Get IOAPIC ID REG:%#010x,ID:%#010x\n",*ioapic_map.virtual_data_address, *ioapic_map.virtual_data_address >> 24 & 0xf);
	io_mfence();
	// I/O APIC Version
	*ioapic_map.virtual_index_address = 0x01;
	io_mfence();
	color_printk(GREEN,BLACK,"Get IOAPIC Version REG:%#010x,MAX redirection enties:%#08d\n",*ioapic_map.virtual_data_address ,((*ioapic_map.virtual_data_address >> 16) & 0xff) + 1);
	//RTE
	// 0x10020 + pin = masked entry with vector 0x20 + pin; entries stay
	// masked until a driver unmasks them through IOAPIC_enable()
	for(i = 0x10;i < 0x40;i += 2)
		ioapic_rte_write(i,0x10020 + ((i - 0x10) >> 1));
	color_printk(GREEN,BLACK,"I/O APIC Redirection Table Entries Set Finished.\n");
}
/*
*/
void APIC_IOAPIC_init()
{
// init trap abort fault
int i ;
unsigned int x;
unsigned int * p;
IOAPIC_pagetable_remap();
for(i = 32;i < 56;i++)
{
set_intr_gate(i , 2 , interrupt[i - 32]);
}
//mask 8259A
color_printk(GREEN,BLACK,"MASK 8259A\n");
io_out8(0x21,0xff);
io_out8(0xa1,0xff);
//enable IMCR
io_out8(0x22,0x70);
io_out8(0x23,0x01);
//init local apic
Local_APIC_init();
//init ioapic
IOAPIC_init();
//get RCBA address
io_out32(0xcf8,0x8000f8f0);
x = io_in32(0xcfc);
color_printk(RED,BLACK,"Get RCBA Address:%#010x\n",x);
x = x & 0xffffc000;
color_printk(RED,BLACK,"Get RCBA Address:%#010x\n",x);
//get OIC address
if(x > 0xfec00000 && x < 0xfee00000)
{
p = (unsigned int *)Phy_To_Virt(x + 0x31feUL);
}
//enable IOAPIC
x = (*p & 0xffffff00) | 0x100;
io_mfence();
*p = x;
io_mfence();
memset(interrupt_desc,0,sizeof(irq_desc_T)*NR_IRQS);
//open IF eflages
sti();
}
/*
	do_IRQ - common C entry point for external interrupts.
	@regs: saved register frame of the interrupted context
	@nr:   vector number (external vectors begin at 32)

	Looks up the per-vector descriptor, invokes the registered
	handler (if any), then lets the owning interrupt controller
	acknowledge the interrupt.
*/
void do_IRQ(struct pt_regs * regs,unsigned long nr)
{
	irq_desc_T * desc = &interrupt_desc[nr - 32];

	if(desc->handler != NULL)
	{
		desc->handler(nr,desc->parameter,regs);
	}

	if(desc->controller != NULL)
	{
		if(desc->controller->ack != NULL)
			desc->controller->ack(nr);
	}
}
|
{
"pile_set_name": "Github"
}
|
{
"ar": "FLDPI",
"bg": "FLDPI",
"cs": "FLDPI",
"da": "FLDPI",
"de": "FLDPI",
"el": "FLDPI",
"en": "FLDPI",
"es": "FLDPI",
"es-419": "FLDPI",
"fi": "FLDPI",
"fr": "FLDPI",
"hu": "FLDPI",
"it": "FLDPI",
"ja": "FLDPI",
"ko": "FLDPI",
"nl": "FLDPI",
"no": "FLDPI",
"pl": "FLDPI",
"pt": "FLDPI",
"pt-BR": "FLDPI",
"ro": "FLDPI",
"ru": "FLDPI",
"sv": "FLDPI",
"th": "FLDPI",
"tr": "FLDPI",
"uk": "FLDPI",
"vi": "FLDPI",
"zh-CN": "FLDPI",
"zh-TW": "FLDPI"
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2006, 2020 IBM Corp. and others
This program and the accompanying materials are made available under
the terms of the Eclipse Public License 2.0 which accompanies this
distribution and is available at https://www.eclipse.org/legal/epl-2.0/
or the Apache License, Version 2.0 which accompanies this distribution and
is available at https://www.apache.org/licenses/LICENSE-2.0.
This Source Code may also be made available under the following
Secondary Licenses when the conditions for such availability set
forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
General Public License, version 2 with the GNU Classpath
Exception [1] and GNU General Public License, version 2 with the
OpenJDK Assembly Exception [2].
[1] https://www.gnu.org/software/classpath/license.html
[2] http://openjdk.java.net/legal/assembly-exception.html
SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
-->
<flags xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.ibm.com/j9/builder/flags" xsi:schemaLocation="http://www.ibm.com/j9/builder/flags flags-v1.xsd">
<flag id="arch_aarch64">
<description>This spec targets AArch64 processors.</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="arch_arm"/>
<preclude flag="arch_power"/>
<preclude flag="arch_riscv"/>
<preclude flag="arch_s390"/>
<preclude flag="arch_x86"/>
</precludes>
</flag>
<flag id="arch_arm">
<description>This spec targets ARM processors.</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="arch_aarch64"/>
<preclude flag="arch_power"/>
<preclude flag="arch_riscv"/>
<preclude flag="arch_s390"/>
<preclude flag="arch_x86"/>
</precludes>
</flag>
<flag id="arch_power">
<description>This spec targets PPC processors.</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="arch_aarch64"/>
<preclude flag="arch_arm"/>
<preclude flag="arch_riscv"/>
<preclude flag="arch_s390"/>
<preclude flag="arch_x86"/>
</precludes>
</flag>
<flag id="arch_riscv">
<description>This spec targets RISC-V processors.</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="arch_aarch64"/>
<preclude flag="arch_arm"/>
<preclude flag="arch_power"/>
<preclude flag="arch_s390"/>
<preclude flag="arch_x86"/>
</precludes>
</flag>
<flag id="arch_s390">
<description>This spec targets S390 processors.</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="arch_aarch64"/>
<preclude flag="arch_arm"/>
<preclude flag="arch_power"/>
<preclude flag="arch_riscv"/>
<preclude flag="arch_x86"/>
</precludes>
</flag>
<flag id="arch_x86">
<description>This spec targets x86 processors.</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="arch_aarch64"/>
<preclude flag="arch_arm"/>
<preclude flag="arch_power"/>
<preclude flag="arch_riscv"/>
<preclude flag="arch_s390"/>
</precludes>
</flag>
<flag id="build_SE6_package">
<description></description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_VS12AndHigher">
<description>Windows compiler Visual Studio 12 (2013) or higher will be used.</description>
<ifRemoved>The old Windows compiler will be used which is Visual Studio 10.</ifRemoved>
</flag>
<flag id="build_autobuild">
<description>BuildSpec represents a testing configuration. Autobuilds enabled.</description>
<ifRemoved>Autobuilds disabled.</ifRemoved>
</flag>
<flag id="build_cmake">
<description>Build components using CMake whenever possible</description>
<ifRemoved>UMA will be used to build all components</ifRemoved>
</flag>
<flag id="build_dropToHursley">
<description>Source will be zipped and sent to Hursley.</description>
<ifRemoved>Drop will not be zipped and sent to Hursley.</ifRemoved>
</flag>
<flag id="build_dropToPhoenix">
<description>Source will be zipped and sent to Phoenix.</description>
<ifRemoved>Drop will not be zipped and sent to Phoenix.</ifRemoved>
</flag>
<flag id="build_dropToToronto">
<description>Source will be zipped and sent to Toronto (Host: Iguana).</description>
<ifRemoved>Drop will not be zipped and sent to Toronto (Host: Iguana).</ifRemoved>
</flag>
<flag id="build_fips">
<description>BuildSpec represents a FIPS configuration.</description>
<ifRemoved>BuildSpec represents a non-FIPS configuration.</ifRemoved>
</flag>
<flag id="build_gcContinuous">
<description>This spec will be part of the GC continuous build.</description>
<ifRemoved>This spec will not be built continuously.</ifRemoved>
</flag>
<flag id="build_j2me">
<description>BuildSpec represents a J2ME configuration.</description>
<ifRemoved>BuildSpec represents a non-J2ME configuration.</ifRemoved>
</flag>
<flag id="build_j2se">
<description>BuildSpec represents a J2SE configuration.</description>
<ifRemoved>BuildSpec represents a non-J2SE configuration.</ifRemoved>
</flag>
<flag id="build_j9vmDoc">
<description>Create J9 VM Documentation to be used as part of Eclipse InfoCenter.</description>
    <ifRemoved>Generation of J9 VM Documentation is disabled.</ifRemoved>
</flag>
<flag id="build_java5">
<description>BuildSpec represents a Java 5x configuration.</description>
<ifRemoved>BuildSpec represents a non-Java 5x configuration.</ifRemoved>
<requires>
<require flag="build_j2se"/>
</requires>
</flag>
<flag id="build_java6">
<description>BuildSpec represents a Java 6x configuration.</description>
<ifRemoved>BuildSpec represents a non-Java 6x configuration.</ifRemoved>
<requires>
<require flag="build_j2se"/>
</requires>
</flag>
<flag id="build_java60_26">
<description>BuildSpec represents a Java 6x configuration with J9VM 2.6.</description>
<ifRemoved>BuildSpec represents a non-Java 6x configuration with J9VM 2.6.</ifRemoved>
</flag>
<flag id="build_java6proxy">
<description>Determines if Java 6 vm-proxy builds will be created.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_java7">
<description>BuildSpec represents a Java 7x configuration.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_java70_27">
<description>BuildSpec represents a Java 7x configuration with J9VM 2.7.</description>
<ifRemoved>BuildSpec represents a non-Java 7x configuration with J9VM 2.7.</ifRemoved>
</flag>
<flag id="build_java7basic">
<description>Determines if a basic (IBM recompiled Sun code plus fixes) build is required.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_java7raw">
<description>Determines if a raw (Sun-binary) build is required.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_java8">
<description>BuildSpec represents a Java 8x configuration.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_java8raw">
<description>Determines if a raw (Sun-binary) build is required.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_java9">
<description>BuildSpec represents a Java 9x configuration.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_openj9">
<description>Buildspec compiles sources with openjdk.</description>
    <ifRemoved>OpenJ9 clone and make jobs no longer run on this buildspec.</ifRemoved>
</flag>
<flag id="build_openj9JDK8">
<description>Buildspec compiles sources with openjdk-jdk8.</description>
<ifRemoved>OpenJ9 compile job, sanity and extended tests no longer run on this buildspec.</ifRemoved>
</flag>
<flag id="build_ouncemake">
<description>Buildspec compiles source using ouncemake for appscan.</description>
<ifRemoved>Ouncemake compile jobs will no longer run on this buildspec.</ifRemoved>
</flag>
<flag id="build_product">
<description>BuildSpec represents a shipping product.</description>
<ifRemoved>BuildSpec represents a non-shipping configuration.</ifRemoved>
<requires>
<require flag="build_autobuild"/>
</requires>
</flag>
<flag id="build_realtime">
<description>BuildSpec represents a Realtime Java configuration.</description>
<ifRemoved>BuildSpec represents a non-Realtime Java configuration.</ifRemoved>
</flag>
<flag id="build_stage_ottawa_vmlab">
<description>Controls staging of builds on Ottawa Lab NFS server.</description>
<ifRemoved>No staging at Ottawa VM Lab.</ifRemoved>
</flag>
<flag id="build_stage_toronto_lab">
    <description>Controls staging of builds on the Toronto Lab server.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="build_uma">
<description>UMA will be used to repackage the source tree.</description>
<ifRemoved>UMA repackaging of source disabled.</ifRemoved>
</flag>
<flag id="build_vmContinuous">
<description>This spec will be part of the VM continuous build.</description>
<ifRemoved>This spec will not be built continuously.</ifRemoved>
</flag>
<flag id="compiler_promotion">
<description>for silo dancing between VM/GC to integrate compiling warning fix for Java 8 new GCC level 4.4.6</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="danger_memleaksBroken">
    <description>Indicates that the memleaks test has been excluded, as a result the system may be leaking and repeated calls to CreateJavaVM()/DestroyJavaVM() may run out of memory.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="env_advanceToolchain">
<description>Controls if this platform builds using the advance toolchain.</description>
<ifRemoved>No advance toolchain support</ifRemoved>
</flag>
<flag id="env_callViaTable">
<description>Controls if C calls from builder must be made via a table.</description>
<ifRemoved>Builder calls C as usual.</ifRemoved>
<precludes>
<preclude flag="env_sharedLibsUseGlobalTable"/>
</precludes>
</flag>
<flag id="env_crossbuild">
<description>Use toolchain for cross build.</description>
<ifRemoved>Build uses native build environment.</ifRemoved>
</flag>
<flag id="env_data64">
<description>Default register width is 64-bits.</description>
<ifRemoved>Default register width is 32-bits.</ifRemoved>
</flag>
<flag id="env_dlpar">
<description>Controls if dynamic logical partitioning is enabled.</description>
<ifRemoved>Dynamic logical partitioning is not available.</ifRemoved>
</flag>
<flag id="env_gcc">
<description>Forces this platform to build with GCC.</description>
<ifRemoved>Use default compiler</ifRemoved>
</flag>
<flag id="env_hasFPU">
<description>Target machine has a hardware (or emulated) FPU.</description>
<ifRemoved>Target machine uses software floating-point.</ifRemoved>
</flag>
<flag id="env_littleEndian">
<description>Target machine is little-endian.</description>
<ifRemoved>Target machine is big-endian.</ifRemoved>
</flag>
<flag id="env_sharedLibsCalleeGlobalTableSetup">
<description>Controls if GOT/TOC is set up by the callee.</description>
<ifRemoved>Global table is set up by the caller.</ifRemoved>
</flag>
<flag id="env_sharedLibsUseGlobalTable">
<description>Controls if shared library implementation relies on runtime table references.</description>
<ifRemoved>No runtime table references required</ifRemoved>
<precludes>
<preclude flag="env_callViaTable"/>
</precludes>
</flag>
<flag id="env_sse2SupportDetection">
<description>Used to determine a spec needs to check if the CPU and OS support SSE2.
    Currently only available on linux_x86 and win_x86 32 bit builds.</description>
<ifRemoved>VM may attempt to run instructions not supported by the CPU/OS</ifRemoved>
<precludes>
<preclude flag="env_data64"/>
</precludes>
</flag>
<flag id="env_zTPF">
<description>Used to determine is the platform is zTPF.</description>
<ifRemoved>Assembler cannot be generated for the zTPF platform</ifRemoved>
<requires>
<require flag="env_data64"/>
</requires>
</flag>
<flag id="gc_adaptiveTenuring">
<description>The scavenger will adjust the tenure age based on objects remaining in allocate space</description>
<ifRemoved>Tenure age of the scavenger is constant</ifRemoved>
<requires>
<require flag="gc_generational"/>
</requires>
</flag>
<flag id="gc_alignObjects">
<description>Align all objects to 8 byte boundaries</description>
<ifRemoved>Do not guarantee alignment for objects</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
<precludes>
<preclude flag="env_data64"/>
</precludes>
</flag>
<flag id="gc_allocationTax">
<description>Enable allocation tax capabilities</description>
<ifRemoved>DISABLE allocation tax capabilities</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
<require flag="gc_modronStandard"/>
</requires>
</flag>
<flag id="gc_alwaysCallObjectAccessBarrier">
<description>Always call the out-of-line C access barrier from the VM</description>
<ifRemoved>Common case of access barrier will be inlined</ifRemoved>
<requires>
<require flag="gc_objectAccessBarrier"/>
</requires>
</flag>
<flag id="gc_alwaysCallWriteBarrier">
<description>Always call the write barrier from the VM without checks</description>
<ifRemoved>Short circuit checks will be made in VM before write barrier call</ifRemoved>
</flag>
<flag id="gc_batchClearTLH">
<description>Zero any TLH allocated</description>
<ifRemoved>Do not initial memory allocated as a TLH</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_classesOnHeap">
<description>Enable classes on the heap</description>
<ifRemoved>UNSUPPORTED: disable classes on the heap</ifRemoved>
</flag>
<flag id="gc_combinationSpec">
<description>Set on specs which implement LIR 16325: reduce memory footprint by combining multiple sidecars into the same set of libraries.</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="gc_heapCardTable"/>
<require flag="gc_modronStandard"/>
<require flag="gc_realtime"/>
</requires>
</flag>
<flag id="gc_enableDoubleMap">
<description>Set on double map. Allows LINUX systems to double map arrays that are stored as arraylets.
When enabled, a contiguous block of memory is created for each array which data surpasses the size of a region. This contiguous block represents the array as
if the data was stored in a contiguous region of memory. All of the array data will be stored at their own region (not with spine); hence, all arraylets
become discontiguous whenever this flag is enabled. Since there won’t be any empty arraylet leaves, then arrayoid NULL pointers are no longer required since
all data is stored in their own region. It additionaly reduces footprint, mainly for JNI primitive array critical.</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="gc_vlhgc"/>
</requires>
</flag>
<flag id="gc_compressedPointerBarrier">
<description>VM performs runtime checks for missed access barriers in a compressed pointer sense</description>
<ifRemoved>VM does not check for missed access barriers in a compressed pointer sense</ifRemoved>
<requires>
<require flag="gc_alwaysCallObjectAccessBarrier"/>
<require flag="gc_verifyAccessBarrier"/>
</requires>
<precludes>
<preclude flag="gc_useInlineAllocate"/>
</precludes>
</flag>
<flag id="gc_compressedPointers">
<description>Object fields are compressed to 32-bits</description>
<ifRemoved>Full 64-bit memory space is available</ifRemoved>
<requires>
<require flag="gc_classesOnHeap"/>
<require flag="gc_objectAccessBarrier"/>
</requires>
</flag>
<flag id="gc_concurrentSweep">
<description>Enable concurrent sweep in Modron</description>
<ifRemoved>DISABLE concurrent sweep in Modron</ifRemoved>
<requires>
<require flag="gc_allocationTax"/>
<require flag="gc_modronGC"/>
<require flag="gc_modronStandard"/>
</requires>
</flag>
<flag id="gc_debugAsserts">
<description>Specialized GC assertions are used instead of standard trace asserts for GC assertions</description>
<ifRemoved>GC Assertions during startup may not cause an abort. No messages for GC asserts. </ifRemoved>
</flag>
<flag id="gc_dynamicClassUnloading">
<description>Dynamic class unloading is supported</description>
<ifRemoved>Dynamic class unloading is NOT supported</ifRemoved>
</flag>
<flag id="gc_dynamicNewSpaceSizing">
<description>Enable dynamic resizing of the new space</description>
<ifRemoved>DISABLE dynamic resizing of the new space</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
<require flag="gc_modronScavenger"/>
</requires>
</flag>
<flag id="gc_finalization">
<description>Build a VM that supports finalization</description>
<ifRemoved>VM does not support finalization</ifRemoved>
</flag>
<flag id="gc_fragmentedHeap">
<description>VM memory manager supports fragmented heaps (enable with caution!)</description>
<ifRemoved>VM memory manager DOES NOT support fragmented heaps (enable with caution!)</ifRemoved>
</flag>
<flag id="gc_generational">
<description>Does the VM use a multi-generational collector</description>
<ifRemoved>The VM does NOT use a multi-generational collector</ifRemoved>
</flag>
<flag id="gc_heapCardTable">
<description>Means that the Java heap has a card table which is used to track modifications during the GC cycle (be it concurrent or incremental).</description>
<ifRemoved>The card table will not be used</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_inlinedAllocFields">
<description>Inlined allocation fields</description>
<ifRemoved>Inlined allocation fields not supported</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
<require flag="gc_threadLocalHeap"/>
</requires>
</flag>
<flag id="gc_jniArrayCache">
<description>Enable caching of JNI primitive arrays</description>
<ifRemoved>Disable caching of JNI primitive arrays</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
</requires>
</flag>
<flag id="gc_largeObjectArea">
<description>Enable large object area (LOA) support</description>
<ifRemoved>Disable large object area (LOA) support</ifRemoved>
<requires>
<require flag="gc_modronStandard"/>
</requires>
</flag>
<flag id="gc_leafBits">
<description>Add leaf bit instance descriptions to classes</description>
<ifRemoved>Disable leaf bit instance descriptions</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_minimumObjectSize">
<description>Guarantee a minimum size to all objects allocated</description>
<ifRemoved>No guaranteed minimum size on allocated objects</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_modronCompaction">
<description>Enable compaction in Modron</description>
<ifRemoved>DISABLE compaction in Modron</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_modronConcurrentMark">
<description>Enable concurrent mark in Modron</description>
<ifRemoved>DISABLE concurrent mark in Modron</ifRemoved>
<requires>
<require flag="gc_allocationTax"/>
<require flag="gc_heapCardTable"/>
<require flag="gc_modronGC"/>
<require flag="gc_modronStandard"/>
</requires>
</flag>
<flag id="gc_modronGC">
<description>Build a VM that uses the Modron GC</description>
<ifRemoved>VM does not use the Modron GC</ifRemoved>
</flag>
<flag id="gc_modronScavenger">
<description>Enable scavenger in Modron</description>
<ifRemoved>DISABLE scavenger in Modron</ifRemoved>
<requires>
<require flag="gc_generational"/>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_modronStandard">
<description>Enable Modron standard configuration</description>
<ifRemoved>DISABLE Modron standard configuration</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_modronTrace">
<description>Enable trace facility in Modron</description>
<ifRemoved>DISABLE trace facility in Modron</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_modronVerbose">
<description>Enable verbose facility in Modron</description>
<ifRemoved>DISABLE verbose facility in Modron</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
</requires>
</flag>
<flag id="gc_newSpinlockSupport">
<description></description>
<ifRemoved></ifRemoved>
</flag>
<flag id="gc_nonZeroTLH">
<description>Allocator might use special non-zeroed thread local heap for objects</description>
<ifRemoved>Allocator uses a general thread local heap for objects</ifRemoved>
<requires>
<require flag="gc_threadLocalHeap"/>
</requires>
</flag>
<flag id="gc_objectAccessBarrier">
<description>Enable object access (read and write) barrier</description>
<ifRemoved>Disable object access barrier</ifRemoved>
</flag>
<flag id="gc_realtime">
<description>Realtime Garbage Collection is supported</description>
<ifRemoved>Realtime Garbage Collection is NOT supported</ifRemoved>
<requires>
<require flag="gc_objectAccessBarrier"/>
<require flag="gc_segregatedHeap"/>
</requires>
</flag>
<flag id="gc_segregatedHeap">
<description>Enable Segregated Heap model.</description>
<ifRemoved>The Segregated Heap model will not be used</ifRemoved>
</flag>
<flag id="gc_strictOmr">
<description>Enable Strict OMR compile and link for testing</description>
<ifRemoved>Compile for JAVA</ifRemoved>
</flag>
<flag id="gc_subpools">
<description>Enable subpool allocation system</description>
<ifRemoved>Disable subpool allocation system</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
<require flag="gc_modronStandard"/>
<require flag="gc_subpoolsAlias"/>
</requires>
</flag>
<flag id="gc_subpoolsAlias">
<description>Enable a recognition of -Xgcpolicy:subpool parameter for compatibility with Java 6 options. Subpool allocation system is not supported any more, so make it an alias of -Xgcpolicy:optthruput</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="gc_modronGC"/>
<require flag="gc_modronStandard"/>
</requires>
</flag>
<flag id="gc_threadLocalHeap">
<description>Allocator uses a thread local heap for objects</description>
<ifRemoved>Allocator does NOT use a thread local heap for objects</ifRemoved>
<requires>
<require flag="gc_fragmentedHeap"/>
</requires>
</flag>
<flag id="gc_tiltedNewSpace">
<description>Enable tilting of new space memory</description>
<ifRemoved>DISABLE tilting of new space memory</ifRemoved>
<requires>
<require flag="gc_modronGC"/>
<require flag="gc_modronScavenger"/>
</requires>
</flag>
<flag id="gc_tlhPrefetchFTA">
<description>Enable tlhPrefetchFTA</description>
<ifRemoved>DISABLE tlhPrefetchFTA</ifRemoved>
<requires>
<require flag="gc_threadLocalHeap"/>
</requires>
</flag>
<flag id="gc_useInlineAllocate">
<description>Controls if inline allocates are used in VM code generation (rather than calls).</description>
<ifRemoved>Object allocation is slower.</ifRemoved>
</flag>
<flag id="gc_verifyAccessBarrier">
<description>VM performs runtime checks for missed access barriers</description>
<ifRemoved>VM does not check for missed access barriers</ifRemoved>
<requires>
<require flag="gc_alwaysCallObjectAccessBarrier"/>
</requires>
<precludes>
<preclude flag="gc_useInlineAllocate"/>
</precludes>
</flag>
<flag id="gc_vlhgc">
<description>Enables the Very Large Heap Garbage Collector</description>
<ifRemoved>Another GC policy must be enabled</ifRemoved>
<requires>
<require flag="gc_heapCardTable"/>
</requires>
</flag>
<flag id="graph_cmdLineTester">
<description>Indicates that this spec wishes to run the command line tester suites in the nightly build</description>
<ifRemoved>cmdLineTester suites will not be run</ifRemoved>
</flag>
<flag id="graph_common_jobs">
<description>Indicates that this spec wishes to run the common tests in the nightly build (fib, scheme, etc)</description>
<ifRemoved>Basic sanity testing will not be done</ifRemoved>
</flag>
<flag id="graph_compile">
<description>Indicates that this spec wishes to compile the build</description>
<ifRemoved>The VM build will not be compiled</ifRemoved>
</flag>
<flag id="graph_copyJ2SEToToronto">
<description>Mirror J2SE rebuilds to Toronto</description>
<ifRemoved>Mirroring of J2SE rebuilds to toronto will not be done causing test failures</ifRemoved>
<requires>
<require flag="build_j2se"/>
</requires>
</flag>
<flag id="graph_copyJ2SEViaNovell">
<description>This flag is used by the graph generator to create the jobs which will copy the J2SE rebuilds for Java5 and Java6 from linuxnfs (where they are created) to Novell. This is required for specs which test on Novell (like Win32) or copy from Novell to remote sites (like z/OS and zLinux)</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="build_j2se"/>
</requires>
</flag>
<flag id="graph_copyJ2SEWinFS">
<description>This flag is used by the graph generator to create the jobs which will copy the J2SE rebuilds for Java5 and Java6 from linuxnfs (where they are created) to Novell. This is required for specs which test on Novell (like Win32) or copy from Novell to remote sites (like z/OS and zLinux)</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="build_j2se"/>
</requires>
</flag>
<flag id="graph_ebcdicConversion">
<description>Include the ASCII to EBCDIC conversion job in cs.opensource archive.zip for z/OS unzip jobs</description>
<ifRemoved>The conversion job will not be included</ifRemoved>
</flag>
<flag id="graph_eembc">
<description>Indicates that this spec wishes to run the eembc performance suites in the nightly build</description>
<ifRemoved>eembc suites will not be run</ifRemoved>
</flag>
<flag id="graph_enableBuilderTesting">
<description>Indicates that this spec should run automated JIT sanity testing jobs in the nightly build</description>
<ifRemoved>This spec will not run any tests and will be a "build only" spec</ifRemoved>
</flag>
<flag id="graph_enableModularityTesting">
<description>Indicates that this spec should run automated testing jobs in the nightly build</description>
<ifRemoved>This spec will only apply to Modularity related tests jobs</ifRemoved>
</flag>
<flag id="graph_enableNonModularityTesting">
<description>Indicates that this spec should run automated testing jobs in the nightly build</description>
<ifRemoved>This spec will only apply to Modularity related tests jobs</ifRemoved>
</flag>
<flag id="graph_enableTesting">
<description>Indicates that this spec should run automated testing jobs in the nightly build</description>
<ifRemoved>This spec will not run any tests and will be a "build only" spec</ifRemoved>
</flag>
<flag id="graph_enableTesting_Java8">
<description>Indicates that this spec should run automated testing jobs in the nightly build</description>
<ifRemoved>This spec will not run any tests and will be a "build only" spec</ifRemoved>
</flag>
<flag id="graph_enableTesting_Panama">
<description>Indicates that this spec should run automated testing jobs in the nightly build</description>
<ifRemoved>This spec will not run any tests and will be a "build only" spec</ifRemoved>
</flag>
<flag id="graph_excludeAllocFib">
<description>Indicates that this spec wishes to not run the AllocFib test suite in the nightly build</description>
<ifRemoved>AllocFib test suite will be run in the nightly build, if otherwise included</ifRemoved>
</flag>
<flag id="graph_excludeBVT">
<description></description>
<ifRemoved></ifRemoved>
</flag>
<flag id="graph_excludeCaffeine">
<description>Indicates that this spec wishes to not run the caffeine test in the nightly build.</description>
<ifRemoved>The caffeine test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeCmdLineTestJ2J">
<description>Indicates that this spec wishes to not run the J2J test suite in the nightly build.</description>
<ifRemoved>J2J test suite will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeFloatsanity">
<description>Indicates that this spec wishes to not run the floatsanity(g) test in the nightly build.</description>
<ifRemoved>The floatsanity(g) test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeInvtest">
<description>Indicates that this spec wishes to not run the invtest in the nightly build.</description>
<ifRemoved>The invtest will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeJ9vm">
<description>Indicates that this spec wishes to not run the j9vm test in the nightly build.</description>
<ifRemoved>The j9vm test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeJGrinder">
<description>If set, don't run any JGrinder testing (useful for specs which represent experimental technology which shouldn't consume large amounts of testing resources)</description>
<ifRemoved>JGrinder tests will be run if they apply to the spec (typically means the spec is a J2SE build)</ifRemoved>
</flag>
<flag id="graph_excludeJIT">
<description>Excludes all JIT testing from the build.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="graph_excludeJartester">
<description>Indicates that this spec wishes to not run the jartester test in the nightly build.</description>
<ifRemoved>The jartester test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeJavaHMinimal">
<description>Indicates that this spec wishes to not run the javah_minimal test in the nightly build.</description>
<ifRemoved>The javah_minimal test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeJclrm">
<description>Indicates that this spec wishes to not run the jclrm test suite in the nightly build.</description>
<ifRemoved>The jclrm test suite will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeMemleaks">
<description>Indicates that this spec wishes to not run the memleaks test in the nightly build.</description>
<ifRemoved>The memleaks test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeModena">
<description>Indicates that this spec wishes to not run the modena test suite in the nightly build.</description>
<ifRemoved>The modena test suite will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeSPECjbb2000">
          <description>Indicates that this spec wishes to not run the SPECjbb2000 test suite in the nightly build.</description>
<ifRemoved>The SPECjbb2000 test suite will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeScheme">
<description>Indicates that this spec wishes to not run the scheme test in the nightly build.</description>
<ifRemoved>The scheme test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeUnzipTestSuite390">
          <description>Indicates that the 390 (linux390 or z390) spec wishes to not run the unzip job for a particular test suite.</description>
<ifRemoved>The unzip job will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeVich">
<description>Indicates that this spec wishes to not run the vich test in the nightly build.</description>
<ifRemoved>The vich test will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_excludeXcheckJNI">
<description>Indicates that this spec wishes to not run the XcheckJNI test suite in the nightly build.</description>
<ifRemoved>XcheckJNI test suite will be run in the nightly build, if otherwise included.</ifRemoved>
</flag>
<flag id="graph_includeThrstatetest">
<description>Indicates that this spec should run thrstatetest in the nightly build. thrstatetest is implemented using VM hacks that may not be robust across all VM configurations. The tested functionality is not particularly platform-dependent.</description>
<ifRemoved>thrstatetest will not run in this spec.</ifRemoved>
</flag>
<flag id="graph_j2seSanity">
<description>States that we want to do the basic sanity tests on a J2SE VM. This is required for platforms like z/OS which have no embedded-style J9 VM.</description>
<ifRemoved>The J2SE versions of the basic sanity tests won't be run.</ifRemoved>
</flag>
<flag id="graph_j9Sanity">
<description>States that we want to do the basic sanity tests on a J9 VM. This is required for platforms for which we do want basic sanity tests and we do also have an embedded-style J9 VM. This is not appropriate for J2SE-only platforms like z/OS</description>
<ifRemoved>The J9 versions of the basic sanity tests won't be run.</ifRemoved>
</flag>
<flag id="graph_jclmaxtest">
<description>Indicates that this spec wishes to run jcl MAX testing in the nightly build</description>
<ifRemoved>jclmaxtests will not be run</ifRemoved>
</flag>
<flag id="graph_jgrinder">
<description>Indicates that this spec wishes to run jgrinder stress test suites in the nightly build</description>
<ifRemoved>jgrinder suites will not be run</ifRemoved>
</flag>
<flag id="graph_midpSanity">
<description>States that we want to do the basic sanity tests on a J9 MIDP VM. This is required for platforms for which we do want basic sanity tests and we do also have an embedded-style J9 VM. This is not appropriate for J2SE-only platforms like z/OS</description>
<ifRemoved>The J9 MIDP versions of the basic sanity tests won't be run. NOTE: these tests are also a subset of test_j9Sanity</ifRemoved>
</flag>
<flag id="graph_mirrorToPoughkeepsie">
<description>Mirror J2SE rebuilds to Remote Site</description>
          <ifRemoved>Mirroring of J2SE rebuilds to the remote site will be skipped, blocking testing and delivery for PPCLE</ifRemoved>
<requires>
<require flag="build_j2se"/>
</requires>
</flag>
<flag id="graph_nfsUploadForRebuild">
<description>States whether this spec needs its product to be uploaded to the NFS server before it can be rebuilt as a J2SE VM (all specs other than our local Unix specs require this since the rebuild is done on Unix machines).</description>
<ifRemoved>The "Upload intrel drop for J2SE rebuild" job will be stripped from the build for this spec.</ifRemoved>
</flag>
<flag id="graph_omitConsoleChallengedTests">
<description>Used to not run test that don't work well with CE consoles.</description>
<ifRemoved>The normal set of tests requested for the spec will be run.</ifRemoved>
</flag>
<flag id="graph_omitExpensiveTests">
<description>If set, the spec won't run the tests which have been flagged as requiring a lot of resources. This vague definition is deliberate since the set of tests turned off by this flag is a moving target. Generally, this flag is set on embedded specs since their test machines don't have massive memory, CPU power, and storage space.</description>
<ifRemoved>The normal set of tests requested for the spec will be run</ifRemoved>
</flag>
<flag id="graph_omitJava5Tests">
<description>Used as a temporary measure to restrict redundant testing on the J2SE Java6 shipping platforms so that we get more test time to run the tests we are actually interested in seeing (we will get more Java6 JGrinders if we don't run the Java5 versions)</description>
<ifRemoved>Java 5 J2SE testing will be performed</ifRemoved>
<requires>
<require flag="build_java5"/>
</requires>
</flag>
<flag id="graph_plumhall">
<description>Indicates that this spec wishes to run plumhall test suites in the nightly build</description>
<ifRemoved>plumhall suites will not be run</ifRemoved>
</flag>
<flag id="graph_rtj_aot">
<description>Indicates that this spec wishes to run the RTJ AOT tests in the nightly build</description>
<ifRemoved>RTJ AOT testing will not be run</ifRemoved>
</flag>
<flag id="graph_smalltalkArtifacts">
<description>Indicates that this spec needs to generate smalltalk artifacts</description>
<ifRemoved>No smalltalk artifacts will be generated</ifRemoved>
</flag>
<flag id="graph_tck">
<description>Indicates that this spec wishes to run TCK compliance tests in the nightly build</description>
<ifRemoved>TCK Compliance testing will not be run</ifRemoved>
</flag>
<flag id="graph_toronto">
<description>Indicates that a spec runs in Toronto and requires a Toronto local network filesystem and resources</description>
<ifRemoved>Spec runs in Ottawa</ifRemoved>
</flag>
<flag id="graph_useJTCTestingPlaylist">
<description>Indicates that this spec uses the common JTC Testing Playlists for testing</description>
<ifRemoved>JTC playlist anchors won't be generated and our local testing will be used where there was a JTC equivalent</ifRemoved>
</flag>
<flag id="graph_verification">
<description>Enables JSR verification tests</description>
<ifRemoved>JSR verification tests will not be enabled. We will not perform a cfdump and compare them with "golden" ROM dumps</ifRemoved>
</flag>
<flag id="interp_aotCompileSupport">
<description>Controls if the AOT compilation support is included in the VM</description>
<ifRemoved>No AOT compilation support available</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="interp_aotRuntimeSupport">
<description>Controls if the AOT runtime support is included in the VM</description>
<ifRemoved>No AOT runtime support available</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="interp_atomicFreeJni">
<description>Use the new atomic-free JNI support</description>
<ifRemoved>Atomics used to release/acquire VM access at JNI calls</ifRemoved>
</flag>
<flag id="interp_atomicFreeJniUsesFlush">
<description>Atomic free uses FlushProcessWriteBuffers instead of barriers</description>
<ifRemoved>Atomic free uses barriers</ifRemoved>
<requires>
<require flag="interp_atomicFreeJni"/>
<require flag="interp_twoPassExclusive"/>
</requires>
</flag>
<flag id="interp_bytecodePreverification">
<description>Does this VM support 1st pass bytecode verification (able to dynamically generate pre-verify data)</description>
<ifRemoved>No 1st pass bytecode verification support</ifRemoved>
<requires>
<require flag="interp_bytecodeVerification"/>
</requires>
</flag>
<flag id="interp_bytecodeVerification">
<description>Does this VM support 2nd pass bytecode verification (pre-verify data in .jxe only)</description>
<ifRemoved>No 2nd pass bytecode verification support</ifRemoved>
</flag>
<flag id="interp_compressedObjectHeader">
<description>Flag to indicate that on 64-bit platforms, the monitor slot and class slot in object headers are U32 rather than UDATA.</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="interp_smallMonitorSlot"/>
</requires>
</flag>
<flag id="interp_customSpinOptions">
<description>Enables support for the "-Xthr:customSpinOptions=" option, which is used to specify class-specific spin parameters.</description>
<ifRemoved>Disables support for the "-Xthr:customSpinOptions=" option.</ifRemoved>
</flag>
<flag id="interp_debugSupport">
<description>Controls if a debugging support is included in the VM.</description>
<ifRemoved>No remote debugging/stepping.</ifRemoved>
</flag>
<flag id="interp_enableJitOnDesktop">
<description>TEMPORARY FLAG for arm test environment</description>
<ifRemoved>TEMPORARY FLAG for arm test environment</ifRemoved>
</flag>
<flag id="interp_flagsInClassSlot">
<description>Store object flags in low bits of Class Slot</description>
<ifRemoved>Separate flags slot in object header</ifRemoved>
<requires>
<require flag="gc_minimumObjectSize"/>
</requires>
</flag>
<flag id="interp_floatSupport">
<description>Determines if float and double data types are supported by the VM.</description>
<ifRemoved>Float and double bytecodes and natives are removed.</ifRemoved>
</flag>
<flag id="interp_floatmathTracing">
<description>TRACING FEATURE. Determines if the interpreter produces trace information at every math bytecode. </description>
<ifRemoved>No math trace available.</ifRemoved>
<requires>
<require flag="interp_floatSupport"/>
</requires>
<precludes>
<preclude flag="env_data64"/>
</precludes>
</flag>
<flag id="interp_floatmathlibTracing">
<description>TRACING FEATURE. Determines if the interpreter produces trace information at every math library call. </description>
<ifRemoved>No math lib trace available.</ifRemoved>
<requires>
<require flag="interp_floatSupport"/>
</requires>
<precludes>
<preclude flag="env_data64"/>
</precludes>
</flag>
<flag id="interp_gpHandler">
<description>Determines if protection faults are caught by the VM and handled cleanly.</description>
<ifRemoved>Protection faults in natives cause fatal exits.</ifRemoved>
</flag>
<flag id="interp_growableStacks">
<description>Enables dynamic java stack growing</description>
<ifRemoved>Stack size is fixed</ifRemoved>
</flag>
<flag id="interp_hotCodeReplacement">
<description>Controls if a hot code replacement support is included in the VM.</description>
<ifRemoved>No hot code replacement, space savings in debugger and JNI</ifRemoved>
<requires>
<require flag="interp_debugSupport"/>
<require flag="opt_dynamicLoadSupport"/>
</requires>
</flag>
<flag id="interp_jitOnByDefault">
<description>Turns JIT on by default</description>
<ifRemoved>JIT will be off unless build spec is "desktop"</ifRemoved>
</flag>
<flag id="interp_jniSupport">
<description>Determines if JNI native support is available.</description>
<ifRemoved>JCL won't work!</ifRemoved>
</flag>
<flag id="interp_minimalJCL">
<description>Does the VM support the full J2SE JCL, or merely a minimal subset.</description>
<ifRemoved>VM supports full J2SE.</ifRemoved>
<precludes>
<preclude flag="opt_methodHandle"/>
</precludes>
</flag>
<flag id="interp_minimalJNI">
<description>Does the VM support full JNI, or merely a minimal subset.</description>
<ifRemoved>VM supports all JNI functionality.</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
</requires>
<precludes>
<preclude flag="opt_bigInteger"/>
<preclude flag="opt_reflect"/>
<preclude flag="opt_sidecar"/>
</precludes>
</flag>
<flag id="interp_nativeSupport">
<description>Controls if the native translator is included in the VM.</description>
<ifRemoved>No native translation of code, bytecode interpretation only.</ifRemoved>
</flag>
<flag id="interp_newHeaderShape">
<description>Temporary flag for experimentation with the object header shape.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="interp_profilingBytecodes">
<description>If enabled, profiling versions of branch and call bytecodes are available. These gather additional info for the JIT for optimal code generation</description>
<ifRemoved>No profiling bytecodes available</ifRemoved>
</flag>
<flag id="interp_romableAotSupport">
<description>ROMable AOT Support for TJ Watson</description>
<ifRemoved>NO ROMable AOT Support for TJ Watson</ifRemoved>
</flag>
<flag id="interp_sigQuitThread">
          <description>Controls if a separate thread handles the SIGQUIT signal.</description>
<ifRemoved>platform cannot answer SIGQUIT.</ifRemoved>
<requires>
<require flag="interp_gpHandler"/>
</requires>
</flag>
<flag id="interp_smallMonitorSlot">
<description>Flag to indicate that on 64-bit platforms, the monitor slot in object headers is a U32 rather than a UDATA.</description>
<ifRemoved>The monitor slot is a UDATA.</ifRemoved>
<requires>
<require flag="env_data64"/>
</requires>
</flag>
<flag id="interp_tracing">
<description>DEBUGGING FEATURE. Determines if the interpreter produces debug information at every bytecode.</description>
<ifRemoved>No full trace available.</ifRemoved>
</flag>
<flag id="interp_twoPassExclusive">
<description>Exclusive VM access - Set halt bit in one pass, count responders in another pass</description>
<ifRemoved>Set and count in a single pass</ifRemoved>
</flag>
<flag id="interp_updateVMCTracing">
<description>Debug mode for tracing uses of updateVMStruct</description>
<ifRemoved>No tracing of updateVMStruct helpers</ifRemoved>
</flag>
<flag id="interp_useUnsafeHelper">
<description>If set, use helper functions in UnsafeAPI to access native memory</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="interp_verbose">
<description>Determines if verbose reporting is available.</description>
<ifRemoved>-verbose options don't work</ifRemoved>
</flag>
<flag id="ive_jxeFileRelocator">
<description>Allows JXEs to be loaded from disk</description>
<ifRemoved>JXEs must be loaded by the caller</ifRemoved>
<requires>
<require flag="ive_jxeInPlaceRelocator"/>
<require flag="opt_jxeLoadSupport"/>
</requires>
</flag>
<flag id="ive_jxeInPlaceRelocator">
<description>Allows JXEs to be relocated in place in memory</description>
<ifRemoved>The streaming relocator must be used</ifRemoved>
<requires>
<require flag="opt_jxeLoadSupport"/>
</requires>
</flag>
<flag id="ive_jxeNatives">
<description>Determines if the natives for accessing JXEs are generated</description>
<ifRemoved>JXEs can only be used for bootstrapping (or extreme)</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
<require flag="opt_jxeLoadSupport"/>
</requires>
</flag>
<flag id="ive_jxeOERelocator">
<description>Support for relocating opposite endian JXEs</description>
<ifRemoved>Only native endian JXEs can be loaded</ifRemoved>
<requires>
<require flag="ive_jxeInPlaceRelocator"/>
<require flag="opt_jxeLoadSupport"/>
</requires>
</flag>
<flag id="ive_jxeStreamingRelocator">
<description>Allows JXEs to be streamed into ROM</description>
<ifRemoved>No streaming relocator</ifRemoved>
<requires>
<require flag="opt_jxeLoadSupport"/>
</requires>
</flag>
<flag id="ive_memorySpaceHelpers">
<description>Controls if the VM includes support for memorySpace creation and deletion</description>
<ifRemoved>No memorySpace routines available.</ifRemoved>
</flag>
<flag id="ive_romImageHelpers">
<description>Controls if the VM includes support for romImageLoad and romImageUnload</description>
<ifRemoved>No facility for installing jxes or romImages</ifRemoved>
</flag>
<flag id="ive_rawBuild">
<description>Pure OpenJDK Java code without any OpenJ9 modifications</description>
<ifRemoved>No raw builds</ifRemoved>
</flag>
<flag id="jit_32bitUses64bitRegisters">
<description>Allow the 32 bit JIT to use the 64 bit registers</description>
<ifRemoved></ifRemoved>
<precludes>
<preclude flag="env_data64"/>
<preclude flag="env_littleEndian"/>
</precludes>
</flag>
<flag id="jit_cHelpers">
<description>If set, the C version of JIT helpers is used.</description>
<ifRemoved>Builder version of the helpers is used.</ifRemoved>
</flag>
<flag id="jit_classUnloadRwmonitor">
<description>If set, the GC class unloading monitor will be a RW monitor rather than a normal monitor.</description>
<ifRemoved>GC class unloading monitor is a normal monitor.</ifRemoved>
</flag>
<flag id="jit_dynamicLoopTransfer">
<description>Controls whether Dynamic Loop Transfer (DLT) is enabled in the VM</description>
<ifRemoved>DLT is disabled</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_freeSystemStackPointer">
<description>Free the system stack pointer so the JIT can use it as a GPR.
Only available on zOS</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_fullSpeedDebug">
<description>Controls whether the jit supports debugging.</description>
<ifRemoved>JIT will be disabled if the debugger is active</ifRemoved>
<requires>
<require flag="interp_debugSupport"/>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_gcOnResolveSupport">
<description>Controls if the JIT VM interface supports gcOnResolve.</description>
<ifRemoved>gcOnResolve has no effect, saving space</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_ia32FixedFrame">
<description>Temp flag to enable IA32 fixed frame linkage</description>
<ifRemoved>JIT works as normal</ifRemoved>
</flag>
<flag id="jit_microJit">
          <description>Controls whether microJIT (aka "Vespa") is supported.</description>
<ifRemoved>MicroJIT not supported</ifRemoved>
<requires>
<require flag="gc_threadLocalHeap"/>
</requires>
</flag>
<flag id="jit_nathelpUsesClassObjects">
<description>Temp flag: indicates that nathelp helpers expect class object pointers rather than J9Class*s</description>
<ifRemoved>nathelp helpers expect J9Class*s</ifRemoved>
</flag>
<flag id="jit_needsTrampolines">
<description>Controls if multiple code caches need calls to trampolines .</description>
          <ifRemoved>Platform cannot grow code caches if it needs trampolines.</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_newDualHelpers">
<description>Enables the new-style dual mode helpers</description>
<ifRemoved>Dual mode helpers use the old convention</ifRemoved>
<requires>
<require flag="jit_cHelpers"/>
</requires>
</flag>
<flag id="jit_newInstancePrototype">
<description>Controls whether the JIT will create per-class newInstanceImpl translations.</description>
<ifRemoved>newInstanceImpl is always called, never translated</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_onStackReplacement">
<description>Controls whether On Stack Replacement (OSR) is enabled in the VM</description>
<ifRemoved>OSR is disabled</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_requiresTrapHandler">
<description>Controls if code in the cache requires a gpHandler.</description>
<ifRemoved>No trap or gp handling used in generated code (means explicit NullCheck, DivCheck, ArrayBounds).</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_runtimeInstrumentation">
<description>Controls whether runtime instrumentation on/off checks will be generated at transition points.</description>
<ifRemoved>RI code is not generated.</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
<require flag="port_runtimeInstrumentation"/>
</requires>
</flag>
<flag id="jit_small">
<description>Controls whether the jit should be built for size, not speed.</description>
<ifRemoved>Will be big and fast instead of smaller and slower.</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
<precludes>
<preclude flag="jit_fullSpeedDebug"/>
<preclude flag="jit_newInstancePrototype"/>
</precludes>
</flag>
<flag id="jit_supportsDirectJNI">
<description>Controls if code in the cache can call JNI directly.</description>
<ifRemoved>JNI/INL calls require a transition through the interpreter.</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="jit_transactionDiagnosticThreadBlock">
<description>Controls whether the 256 byte (z/OS) Transaction Diagnostic Block (TDB) is added to J9VMThread</description>
<ifRemoved>The JIT cannot use the TDB for debugging failed transactions</ifRemoved>
<requires>
<require flag="interp_nativeSupport"/>
</requires>
</flag>
<flag id="math_directHelpers">
<description>If on, publishes the direct (not via pointer) floating point math helpers in the VM internal function table. Note that this does not control whether the helpers exist - they always exist in any stream where this flag exists at all.</description>
<ifRemoved>Fields in the internal function table are removed.</ifRemoved>
<requires>
<require flag="interp_floatSupport"/>
</requires>
</flag>
<flag id="module_a2e">
<description>Enables compilation of the a2e module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_algorithm_test">
<description>Enables compilation of the algorithm_test module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_aotrt_common">
<description>Enables compilation of the aotrt_common module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_avl">
<description>Enables compilation of the avl module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_bcproftest">
<description>Enables compilation of the bcproftest module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_bcutil">
<description>Enables compilation of the bcutil module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_bcverify">
<description>Enables compilation of the bcverify module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_buildtools">
<description>Enables compilation of the buildtools module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_cassume">
<description>Enables compilation of the cassume module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_cfdumper">
<description>Enables compilation of the cfdumper module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen">
<description>Enables compilation of the codegen module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_aarch64">
<description>Enables compilation of the codegen_aarch64 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_arm">
<description>Enables compilation of the codegen_arm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_common">
<description>Enables compilation of the codegen_common module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_common_aot">
<description>Enables compilation of the codegen_common_aot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_comsched">
<description>Enables compilation of the codegen_comsched module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_ia32">
<description>Enables compilation of the codegen_ia32 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_ilgen">
<description>Enables compilation of the codegen_ilgen module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_opt">
<description>Enables compilation of the codegen_opt module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_ppc">
<description>Enables compilation of the codegen_ppc module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_riscv">
<description>Enables compilation of the codegen_riscv module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_s390">
<description>Enables compilation of the codegen_s390 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_sched">
<description>Enables compilation of the codegen_sched module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codegen_wcode">
<description>Enables compilation of the codegen_wcode module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_aarch64">
<description>Enables compilation of the codert_aarch64 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_arm">
<description>Enables compilation of the codert_arm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_common">
<description>Enables compilation of the codert_common module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_ia32">
<description>Enables compilation of the codert_ia32 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_ppc">
<description>Enables compilation of the codert_ppc module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_riscv">
<description>Enables compilation of the codert_riscv module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_s390">
<description>Enables compilation of the codert_s390 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_codert_vm">
<description>Enables compilation of the codert_vm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_dbgext">
<description>Enables compilation of the dbgext module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_dbx">
<description>Enables compilation of the dbx module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_dbx_32dbg">
<description>Enables compilation of the dbx_32dbg module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_dbx_64dbg">
<description>Enables compilation of the dbx_64dbg module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_ddr">
<description>Enables compilation of the ddr module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_ddr_dbx_plugin">
<description>Enables compilation of the ddr dbx plugin module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_ddr_gdb_plugin">
<description>Enables compilation of the ddr gdb plugin module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_ddrext">
<description>Enables compilation of the ddr extensions module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_exelib">
<description>Enables compilation of the exelib module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc">
<description>Enables compilation of the gc module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_api">
<description>Enables compilation of the gc_api module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_base">
<description>Enables compilation of the gc_base module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_check">
<description>Enables compilation of the gc_check module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_common">
		<description>Enables compilation of the gc_common module.</description>
		<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_doc">
<description>Enables compilation of the gc_doc module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_include">
<description>Enables compilation of the gc_include module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_modron_base">
<description>Enables compilation of the gc_modron_base module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_modron_standard">
<description>Enables compilation of the gc_modron_standard module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_modron_startup">
<description>Enables compilation of the gc_modron_startup module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_realtime">
<description>Enables compilation of the gc_realtime module</description>
<ifRemoved>The module will not be compiled</ifRemoved>
</flag>
<flag id="module_gc_stats">
<description>Enables compilation of the gc_stats module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_structs">
<description>Enables compilation of the gc_structs module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gc_trace">
<description>Enables compilation of the gc_trace module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gcchk">
<description>Enables compilation of the gcchk module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gdb">
<description>Enables compilation of the gdb module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gdb_plugin">
<description>Enables compilation of the gdb plugin module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_gptest">
<description>Enables compilation of the gptest module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_hashtable">
<description>Enables compilation of the hashtable module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_hookable">
<description>Enables compilation of the hookable module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_ifa">
<description>Enables compilation of the ifa module (permits use of z/OS application assist processors).</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
<requires>
<require flag="opt_javaOffloadSupport"/>
</requires>
</flag>
<flag id="module_include">
<description>Enables compilation of the include module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_j9vm">
<description>Enables compilation of the j9vm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_j9vmtest">
<description>Enables compilation of the j9vmtest module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jcl">
<description>Enables compilation of the jcl module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jextractnatives">
<description>Enables compilation of the jextractnatives module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_aarch64">
<description>Enables compilation of the jit_aarch64 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_arm">
<description>Enables compilation of the jit_arm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_aarch64aot">
<description>Enables compilation of the jit_aarch64aot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_armaot">
<description>Enables compilation of the jit_armaot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_common">
<description>Enables compilation of the jit_common module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_common_aot">
<description>Enables compilation of the jit_common_aot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_ia32">
<description>Enables compilation of the jit_ia32 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_ia32aot">
<description>Enables compilation of the jit_ia32aot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_ppc">
<description>Enables compilation of the jit_ppc module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_ppcaot">
<description>Enables compilation of the jit_ppcaot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_riscv">
<description>Enables compilation of the jit_riscv module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_riscvaot">
<description>Enables compilation of the jit_riscvaot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_s390">
<description>Enables compilation of the jit_s390 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_s390aot">
<description>Enables compilation of the jit_s390aot module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jit_vm">
<description>Enables compilation of the jit_vm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_aarch64">
<description>Enables compilation of the jitrt_aarch64 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_arm">
<description>Enables compilation of the jitrt_arm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_common">
<description>Enables compilation of the jitrt_common module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_ia32">
<description>Enables compilation of the jitrt_ia32 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_ppc">
<description>Enables compilation of the jitrt_ppc module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_riscv">
<description>Enables compilation of the jitrt_riscv module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jitrt_s390">
<description>Enables compilation of the jitrt_s390 module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jniargtests">
<description>Enables compilation of the jniargtests module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jnichk">
<description>Enables compilation of the jnichk module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jniinv">
<description>Enables compilation of the jniinv module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jnitest">
<description>Enables compilation of the jnitest module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jvmti">
<description>Enables compilation of the jvmti module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_jvmtitst">
<description>Enables compilation of the jvmtitst module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_lifecycle_tests">
<description>Test creation and shutdown of the JVM using JNI functions</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="module_masm2gas">
<description>Enables compilation of the masm2gas module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_omr">
<description>Enables compilation of the omr module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_oti">
<description>Enables compilation of the oti module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_pool">
<description>Enables compilation of the pool module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_port">
<description>Enables compilation of the port module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_porttest">
<description>Enables compilation of the porttest module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_rasdump">
<description>Enables compilation of the rasdump module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_rastrace">
<description>Enables compilation of the rastrace module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_redirect">
<description>Enables compilation of the redirect module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_shared">
<description>Enables compilation of the shared module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_shared_common">
<description>Enables compilation of the shared_common module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_shared_servicetest">
<description>Enables compilation of the shared_servicetest module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_shared_test">
<description>Enables compilation of the shared_test module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_shared_util">
<description>Enables compilation of the shared_util module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_simplepool">
<description>Enables compilation of the simplepool module</description>
<ifRemoved>The module will not be compiled</ifRemoved>
</flag>
<flag id="module_srphashtable">
<description>Enables compilation of the srphashtable module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_stackmap">
<description>Enables compilation of the stackmap module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_strictm">
<description>Enables compilation of the strictm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_thread">
<description>Enables compilation of the thread module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_thrtrace">
<description>Enables compilation of the thrtrace module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_util">
<description>Enables compilation of the util module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_util_core">
<description>Enables compilation of the util_core module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_verbose">
<description>Enables compilation of the verbose module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_verutil">
		<description>Enables compilation of the verutil module.</description>
		<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_vm">
<description>Enables compilation of the vm module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_windbg">
<description>Enables compilation of the windbg module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_zip">
<description>Enables compilation of the zip module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="module_zlib">
<description>Enables compilation of the zlib module.</description>
<ifRemoved>The module will not be compiled.</ifRemoved>
</flag>
<flag id="opt_annotations">
<description>VM supports reading JDK 5.0 annotations and natives for accessing them.</description>
<ifRemoved>VM ignores JDK 5.0 annotations</ifRemoved>
</flag>
<flag id="opt_bigInteger">
<description>Controls if the VM includes support for java.math.BigInteger natives.</description>
<ifRemoved>No java.math.BigInteger natives available.</ifRemoved>
</flag>
<flag id="opt_cuda">
<description>Add support for CUDA</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_debugInfoServer">
<description>Controls if the debug info server is included in the VM.</description>
<ifRemoved>No debug info saved by VM, i.e. no line numbers in stack traces</ifRemoved>
</flag>
<flag id="opt_debugJsr45Support">
<description>Support for JSR 45 stratum commands</description>
<ifRemoved>Disable JSR 45 support</ifRemoved>
<requires>
<require flag="interp_debugSupport"/>
</requires>
</flag>
<flag id="opt_deprecatedMethods">
<description>VM supports deprecated APIs.</description>
<ifRemoved>VM does not support deprecated APIs.</ifRemoved>
</flag>
<flag id="opt_dmaNatives">
<description>Include com.ibm.oti.vm.DMA natives in all ME JCLs</description>
<ifRemoved>DMA natives excluded from all JCLs</ifRemoved>
</flag>
<flag id="opt_dynamicLoadSupport">
<description>Determines if the dynamic loader is included.</description>
<ifRemoved>No dynamic loading of classes supported.</ifRemoved>
</flag>
<flag id="opt_fips">
		<description>Add support for FIPS</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_fragmentRamClasses">
		<description>Transitional flag for the GC during the switch to fragmented RAM class allocation</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_inlineJsrs">
<description>Turns on JSR inlining in the dynamic loader.</description>
<ifRemoved>Only pre-inlined classes supported.</ifRemoved>
</flag>
<flag id="opt_invariantInterning">
<description>Support invariant interning at runtime (such as UTF8s)</description>
<ifRemoved>Disable invariant interning at runtime (such as UTF8s)</ifRemoved>
<requires>
<require flag="opt_dynamicLoadSupport"/>
</requires>
</flag>
<flag id="opt_javaOffloadSupport">
<description>Enables Java Offload Processor support - z/OS only.</description>
<ifRemoved>No Java offload processor support will be built.</ifRemoved>
<requires>
<require flag="module_ifa"/>
</requires>
</flag>
<flag id="opt_jitserver">
<description>JITServer support is enabled in the buildspec.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_jvmti">
<description>Support for the JVMTI interface</description>
<ifRemoved>No support for JVMTI</ifRemoved>
<requires>
<require flag="opt_debugInfoServer"/>
</requires>
</flag>
<flag id="opt_jxeLoadSupport">
<description>Controls if main will allow -jxe: and relocate the disk image for you.</description>
<ifRemoved>No -jxe option.</ifRemoved>
<requires>
<require flag="opt_romImageSupport"/>
</requires>
</flag>
<flag id="opt_memoryCheckSupport">
<description>Include support for memory error checking (via the -Xcheck:memory cmdline opt).</description>
<ifRemoved>No -Xcheck:memory support</ifRemoved>
</flag>
<flag id="opt_methodHandle">
<description>Enables support for OpenJ9 MethodHandles. opt_openjdkMethodhandle should be disabled.</description>
<ifRemoved>Disables support for OpenJ9 MethodHandles.</ifRemoved>
</flag>
<flag id="opt_methodHandleCommon">
<description>Enables common dependencies between OpenJ9 and OpenJDK MethodHandles.</description>
<ifRemoved>Disables common dependencies between OpenJ9 and OpenJDK MethodHandles.</ifRemoved>
</flag>
<flag id="opt_module">
<description>Turns on module support</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_multiVm">
<description>Decides if multiple VMs can be created in the same address space</description>
<ifRemoved>Only one VM per address space (process) allowed</ifRemoved>
</flag>
<flag id="opt_nativeCharacterConverter">
<description>Use OS character conversion routines where possible</description>
<ifRemoved>Only use Java character converters</ifRemoved>
</flag>
<flag id="opt_nativeLocaleSupport">
<description>Use OS locale routines where possible</description>
<ifRemoved>Only use Java locale routines</ifRemoved>
</flag>
<flag id="opt_newObjectHash">
<description>TEMPORARY: Transition to new object hashcode implementation</description>
<ifRemoved>Use old (working) hashcode implementation</ifRemoved>
</flag>
<flag id="opt_newRomClassBuilder">
<description>TEMPORARY: being used to transition from old bcutil code to new code.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_noClassloaders">
<description>Disables support for classloaders (other than the system classloader)</description>
<ifRemoved>Enables support for multiple classloaders</ifRemoved>
</flag>
<flag id="opt_nrr">
<description>Enables New Reality Runtime (NRR) features.</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_openjdkMethodhandle">
<description>Enables support for OpenJDK MethodHandles. opt_methodHandle should be disabled.</description>
<ifRemoved>Disables support for OpenJDK MethodHandles.</ifRemoved>
</flag>
<flag id="opt_packed">
<description>Enables support for @Packed types (Structs)</description>
<ifRemoved>No support for @Packed types (Structs)</ifRemoved>
</flag>
<flag id="opt_panama">
<description>Enables support for Project Panama features such as native method handles</description>
<ifRemoved>No support for Project Panama features such as native method handles</ifRemoved>
</flag>
<flag id="opt_valhallaValueTypes">
<description>Enables support for Project Valhalla L-World Value Types</description>
<ifRemoved>No support for Project Valhalla L-World Value Types</ifRemoved>
</flag>
<flag id="opt_phpSupport">
<description>Support for PHP interpreter.</description>
<ifRemoved>No support for PHP.</ifRemoved>
</flag>
<flag id="opt_reflect">
<description>Controls if the VM includes support for java.lang.reflect natives.</description>
<ifRemoved>No java.lang.reflect natives available.</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
</requires>
</flag>
<flag id="opt_romImageSupport">
<description>Controls if the VM includes basic support for linked rom images</description>
<ifRemoved>No facility for finding rom image from jxe pointer</ifRemoved>
</flag>
<flag id="opt_sharedClasses">
<description>Support for class sharing</description>
<ifRemoved>No support for class sharing</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
</requires>
</flag>
<flag id="opt_sidecar">
<description>Determines if sidecar files are built.</description>
<ifRemoved>Standard J9 VM only - no sidecar support</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
</requires>
</flag>
<flag id="opt_srpAvlTreeSupport">
<description>Include support for AVL trees with SRPs</description>
<ifRemoved>Use normal AVL trees with direct pointers</ifRemoved>
</flag>
<flag id="opt_stringCompression">
<description>Compresses strings that contain only characters with values less than 256 into a single byte rather than two</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_switchStacksForSignalHandler">
<description>Applies to x86 only. If enabled, the VM will switch away from the java stack to the C stack before invoking the VM signal handler to avoid overflow and corruption issues.</description>
<ifRemoved>The VM signal handler will run on whichever stack was active when the port library signal handler was invoked.</ifRemoved>
</flag>
<flag id="opt_tempNewInterfaceInvocation">
<description>Temp flag until JIT side of invokeInterface support promotes though to us</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_useFfi">
<description>Use libFFI for native callouts</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="opt_useFfiOnly">
<description>Force the use of libFFI for native callouts</description>
<ifRemoved>FFI is optional</ifRemoved>
<requires>
<require flag="opt_useFfi"/>
</requires>
</flag>
<flag id="opt_useOmrDdr">
<description>Use the ddrgen tool from OMR in support of DDR.</description>
<ifRemoved>If DDR support is enabled, the legacy tools will be used instead.</ifRemoved>
</flag>
<flag id="opt_veeSupport">
<description>Support for multiple Virtual Execution Environments.</description>
<ifRemoved>No support for multiple languages</ifRemoved>
</flag>
<flag id="opt_vmLocalStorage">
<description>Decides if this VM supports the VM local storage interface</description>
<ifRemoved>No VM local storage functions</ifRemoved>
</flag>
<flag id="opt_zero">
<description>Option to enable Java Zero features. Design 1704.</description>
<ifRemoved>No Java Zero features.</ifRemoved>
</flag>
<flag id="opt_zipSupport">
<description>Controls if the VM includes zip reading and caching support.</description>
<ifRemoved>No zip support (which implies no dynamic loading)</ifRemoved>
</flag>
<flag id="opt_zlibCompression">
<description>Controls if the compression routines in zlib are included.</description>
<ifRemoved>zlib compression routines will be unavailable, some JCL may not compile/link.</ifRemoved>
<requires>
<require flag="opt_zlibSupport"/>
</requires>
</flag>
<flag id="opt_zlibSupport">
<description>Controls if the VM includes the zlib compression library.</description>
<ifRemoved>Only uncompressed zips are supported.</ifRemoved>
<requires>
<require flag="opt_zipSupport"/>
</requires>
</flag>
<flag id="port_omrsigSupport">
<description>Handle signals via external OMRSIG library.</description>
<ifRemoved>Handle signals using OS primitives.</ifRemoved>
</flag>
<flag id="port_runtimeInstrumentation">
<description>Controls whether runtime instrumentation support exists on this platform.</description>
<ifRemoved>RI functions will not exist in the port library function table.</ifRemoved>
</flag>
<flag id="port_signalSupport">
<description>At least some signals can be handled by the port library j9sig_ functions</description>
<ifRemoved>Signal functions are all stubs.</ifRemoved>
</flag>
<flag id="port_zosCEEHDLRSupport">
<description>If set, j9sig_protect will include support for registering a handler using CEEHDLR.
Pass J9PORT_SIG_OPTIONS_ZOS_USE_CEEHDLR into j9sig_set_options() before the first call to j9sig_protect() to turn on the use of CEEHDLR at runtime.</description>
<ifRemoved>If removed j9sig_protect will not include support for CEEHDLR.</ifRemoved>
<requires>
<require flag="port_signalSupport"/>
</requires>
</flag>
<flag id="prof_countArgsTemps">
<description>display a count of args/temps usage for loaded methods</description>
<ifRemoved>no report of args/temps usage</ifRemoved>
</flag>
<flag id="prof_eventReporting">
<description>Generic event reporting facilities are included in the VM (internal interface)</description>
<ifRemoved>No internal event reporting facilities</ifRemoved>
</flag>
<flag id="prof_jvmti">
<description>Java Virtual Machine Tool Interface (JVMTI)</description>
<ifRemoved>JVMTI not supported</ifRemoved>
<requires>
<require flag="interp_jniSupport"/>
<require flag="prof_eventReporting"/>
</requires>
</flag>
<flag id="ras_dumpAgents">
<description>Support multiple dump agents</description>
<ifRemoved>Standard console dump</ifRemoved>
</flag>
<flag id="ras_eyecatchers">
<description>Add eyecatcher blocks to key structures</description>
<ifRemoved>No eyecatchers</ifRemoved>
</flag>
<flag id="ras_fatalAssert">
<description>Make assertion tracepoints fatal by default.</description>
<ifRemoved>Assertion tracepoints are not fatal by default - can be enabled via command-line.</ifRemoved>
</flag>
<flag id="size_optimizeSendTargets">
<description>Determines if extra optimized send targets will be generated.</description>
<ifRemoved>Space saving at the cost of some speed.</ifRemoved>
</flag>
<flag id="size_smallCode">
<description>Reduce code size where possible.</description>
<ifRemoved>Normal code paths (normally larger and faster).</ifRemoved>
</flag>
<flag id="size_smallOsStack">
<description>Target machine has a very small OS stack size; reduce stack usage.</description>
<ifRemoved>Normal stack usage.</ifRemoved>
</flag>
<flag id="size_smallRAM">
<description>Target machine has very little free RAM; reduce memory usage.</description>
<ifRemoved>Normal memory usage.</ifRemoved>
</flag>
<flag id="temp_alignClassSlot">
<description>Add padding slot if required to align class slot to 64-bit. TEMPORARY.</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="interp_compressedObjectHeader"/>
<require flag="thr_lockNursery"/>
</requires>
</flag>
<flag id="temp_keepFlagsSlot">
<description>Keep flags slot even all flags are moved to low bits of clazz slot</description>
<ifRemoved></ifRemoved>
<requires>
<require flag="interp_flagsInClassSlot"/>
</requires>
</flag>
<flag id="test_cunit">
<description>C unit test harness is supported on this platform. Requires support for assert.</description>
<ifRemoved>No C unit test harness is available, all C unit tests will be excluded.</ifRemoved>
</flag>
<flag id="test_jvmti">
<description>Indicates availability of JVMTI testing</description>
<ifRemoved>JVMTI testing will not be performed for the given spec</ifRemoved>
<requires>
<require flag="opt_jvmti"/>
</requires>
</flag>
<flag id="thr_asyncNameUpdate">
<description>Some platforms allow an O/S thread name update only to the current thread</description>
<ifRemoved>On such platforms (currently Linux 2.6 and earlier) thread name updates by other threads (java.lang.Thread.setName) will not appear in the O/S</ifRemoved>
</flag>
<flag id="thr_extraChecks">
<description>Check for additional illegal thread states</description>
<ifRemoved>Turn off in production VMs</ifRemoved>
</flag>
<flag id="thr_jlmHst">
<description>VM uses Extended JVMPI for Java Lock Monitor to profile locking behaviours - counts, timings and histograms only</description>
<ifRemoved>Java Lock Monitor cannot be enabled</ifRemoved>
</flag>
<flag id="thr_lockNursery">
<description>object header does not contain the monitor word</description>
<ifRemoved>object header will contain the monitor word</ifRemoved>
</flag>
<flag id="thr_lockNurseryFatArrays">
<description>lock nursery, but arrays always contain the monitor word</description>
<ifRemoved>array object header will not contain the monitor word</ifRemoved>
<requires>
<require flag="thr_lockNursery"/>
</requires>
</flag>
<flag id="thr_lockReservation">
<description>Include support for reserved locks</description>
<ifRemoved>Reserved locks are not supported</ifRemoved>
</flag>
<flag id="thr_preemptive">
<description>The VM thread library allows preemptive context switches</description>
<ifRemoved>Blocking calls may not be used</ifRemoved>
</flag>
<flag id="thr_smartDeflation">
<description>Keep runtime data to determine when a monitor should be deflated</description>
<ifRemoved>Monitor will be deflated when nobody contended on monitor</ifRemoved>
</flag>
<flag id="uma_codeCoverage">
<description>This enables the required compile and link options for using gcov/lcov</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="uma_codeCoverage_bullseye">
<description>This enables the required compile and link options for using bullseye</description>
<ifRemoved></ifRemoved>
</flag>
<flag id="uma_gnuDebugSymbols">
<description></description>
<ifRemoved></ifRemoved>
</flag>
<flag id="uma_supportsIpv6">
<description>Supports IPv6</description>
		<ifRemoved>No IPv6 support (NOTE: this is not universal, only needed in CE/Linux specs)</ifRemoved>
</flag>
<flag id="uma_tracegenc">
<description>Compile tracegenc and run it to generate tracefiles</description>
<ifRemoved>Run java version of tracegenc</ifRemoved>
</flag>
<flag id="uma_windowsRebase">
<description>This enables the rebase targets generation for windows platforms</description>
<ifRemoved></ifRemoved>
</flag>
</flags>
|
{
"pile_set_name": "Github"
}
|
## pydictor API developer document
### *plugin*
#### step 0:
create "name.py" script in /plugins/ folder:
#### step 1:
import the following modules and write the author name:
```
#!/usr/bin/env python
# coding:utf-8
# author: LandGrey
"""
Copyright (c) 2016-2017 LandGrey (https://github.com/LandGrey/pydictor)
License: GNU GENERAL PUBLIC LICENSE Version 3
"""
from __future__ import unicode_literals
from lib.fun.fun import cool
from lib.fun.decorator import magic
from lib.data.data import pyoptions
```
#### step 2:
define "name_magic(*args)" function, and write function usage doc、get args value:
```
def name_magic(*args):
"""[keyword1] [keyword2] ..."""
args = list(args[0])
```
#### step 3:
handle invalid arguments from user input:
```
if len(args) == 1:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.plugins_info.get(args[0]))))
```
#### step 4:
use "magic" decorator, warp "name()" function:
```
@magic
def name():
```
#### step 5:
in "name()" function, generate your wordlist(python list type) or yield the values:
```
results = []
append something to results ...
return results
```
or
```
results is python generator
for r in results:
yield r
```
If you want to add your own weak password wordlist to the final word list,
there are some folders where you can put your wordlist (defined in the /lib/data/data.py script)
| path | variable |
| :---------------- | :------------------- |
| /wordlist | paths.wordlist_path |
| /wordlist/App | paths.applist_path |
| /wordlist/IoT | paths.iotlist_path |
| /wordlist/NiP | paths.niplist_path |
| /wordlist/SEDB | paths.sedblist_path |
| /wordlist/Sys | paths.syslist_path |
| /wordlist/Web | paths.weblist_path |
| /wordlist/WiFi | paths.wifilist_path |
use it like:
```
from lib.data.data import paths
from lib.fun.fun import walks_all_files
@magic
def name():
for _ in walks_all_files(paths.weblist_path):
yield "".join(_)
```
##### now, your script supported all hand functions in pydictor
##### if it is the "/plugins/ftp.py" script, its name must be "ftp"; a simple example:
```
#!/usr/bin/env python
# coding:utf-8
# author: LandGrey
"""
Copyright (c) 2016-2017 LandGrey (https://github.com/LandGrey/pydictor)
License: GNU GENERAL PUBLIC LICENSE Version 3
"""
from __future__ import unicode_literals
from lib.fun.fun import cool
from lib.fun.decorator import magic
from lib.data.data import pyoptions
def ftp_magic(*args):
"""[keyword1] [keyword2] ..."""
args = list(args[0])
if len(args) == 1:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.plugins_info.get(args[0]))))
@magic
def ftp():
results = []
default_password = ('ftp', 'anonymous', 'any@', 'craftpw', 'xbox', 'r@p8p0r+', 'pass', 'admin',
'lampp', 'password', 'Exabyte', 'pbxk1064', 'kilo1987', 'help1954', 'tuxalize')
results += default_password
weak_password = ('root', '123456', '111111', '666666', 'ftppass')
results += weak_password
for r in results:
yield r
tails = ['1', '01', '001', '123', 'abc', '!@#', '!QAZ', '1q2w3e', '!@#$', '!', '#', '.', '@123',
'2016', '2017', '2018', '@2016', '@2017', '@2018', ]
for keyword in args:
for tail in tails:
yield keyword + tail
```
#### call ftp plugin, with command "python pydictor.py -plug ftp \[keyword1\] \[keyword2\] ..."
#
### *tool*
same as plugin api, except
```
place script in "/tools/" folder
```
#### call tool with command "python pydictor.py -tool name \[some_args\]"
#
### *encode*
just write your python script in "/lib/encode/" folder, and
```
1. script file name end with "_encode", like "name_encode.py"
2. function name must same to script file name, like "def name_encode(item)"
3. function "name_encode(item)" next line must be function usage tips and wrap with """
4. return item after your encode
```
#### an example for "base64" encode function:
create "b64_encode.py" file in "/lib/encode/" folder, and write code:
```
#!/usr/bin/env python
# coding:utf-8
#
"""
Copyright (c) 2016-2019 LandGrey (https://github.com/LandGrey/pydictor)
License: GNU GENERAL PUBLIC LICENSE Version 3
"""
from __future__ import unicode_literals
from base64 import b64encode
def b64_encode(item):
"""base64 encode"""
try:
return (b64encode(item.encode('utf-8'))).decode()
except:
return ''
```
#### call encode function with command "python pydictor.py --encode name"
|
{
"pile_set_name": "Github"
}
|
---
title: " ElasticBox introduces ElasticKube to help manage Kubernetes within the enterprise "
date: 2016-03-11
slug: elasticbox-introduces-elastickube-to
url: /blog/2016/03/Elasticbox-Introduces-Elastickube-To
---
Today’s guest post is brought to you by Brannan Matherson, from ElasticBox, who’ll discuss a new open source project to help standardize container deployment and management in enterprise environments. This highlights the advantages of authentication and user management for containerized applications
I’m delighted to share some exciting work that we’re doing at ElasticBox to contribute to the open source community regarding the rapidly changing advancements in container technologies. Our team is kicking off a new initiative called [ElasticKube](http://elastickube.com/) to help solve the problem of challenging container management scenarios within the enterprise. This project is a native container management experience that is specific to Kubernetes and leverages automation to provision clusters for containerized applications based on the latest release of Kubernetes 1.2.
I’ve talked to many enterprise companies, both large and small, and the plethora of cloud offering capabilities is often confusing and makes the evaluation process very difficult, so why Kubernetes? Of the large public cloud players - Amazon Web Services, Microsoft Azure, and Google Cloud Platform - Kubernetes is poised to take an innovative leadership role in framing the container management space. The Kubernetes platform does not restrict or dictate any given technical approach for containers, but encourages the community to collectively solve problems as this container market still takes form. With a proven track record of supporting open source efforts, Kubernetes platform allows my team and me to actively contribute to this fundamental shift in the IT and developer world.
We’ve chosen Kubernetes, not just for the core infrastructure services, but also the agility of Kubernetes to leverage the cluster management layer across any cloud environment - GCP, AWS, Azure, vSphere, and Rackspace. Kubernetes also provides a huge benefit for users to run clusters for containers locally on many popular technologies such as: Docker, Vagrant (and VirtualBox), CoreOS, Mesos and more. This amount of choice enables our team and many others in the community to consider solutions that will be viable for a wide range of enterprise scenarios. In the case of ElasticKube, we’re pleased with Kubernetes 1.2 which includes the full release of the deployment API. This provides the ability for us to perform seamless rolling updates of containerized applications that are running in production. In addition, we’ve been able to support new resource types like ConfigMaps and Horizontal Pod Autoscalers.
Fundamentally, ElasticKube delivers a web console which complements Kubernetes for users managing their clusters. The initial experience incorporates team collaboration, lifecycle management and reporting, so organizations can efficiently manage resources in a predictable manner. Users will see an ElasticKube portal that takes advantage of the infrastructure abstraction that enables users to run a container that has already been built. With ElasticKube assuming the cluster has been deployed, the overwhelming value is to provide visibility into who did what and define permissions for access to the cluster with multiple containers running on them. Secondly, by partitioning clusters into namespaces, authorization management is more effective. Finally, by empowering users to build a set of reusable templates in a modern portal, ElasticKube provides a vehicle for delivering a self-service template catalog that can be stored in GitHub (for instance, using Helm templates) and deployed easily.
ElasticKube enables organizations to accelerate adoption by developers, application operations and traditional IT operations teams and shares a mutual goal of increasing developer productivity, driving efficiency in container management and promoting the use of microservices as a modern application delivery methodology. When leveraging ElasticKube in your environment, users need to ensure the following technologies are configured appropriately to guarantee everything runs correctly:
- Configure Google Container Engine (GKE) for cluster installation and management
- Use Kubernetes to provision the infrastructure and clusters for containers
- Use your existing tools of choice to actually build your containers
- Use ElasticKube to run, deploy and manage your containers and services
[](http://cl.ly/0i3M2L3Q030z/Image%202016-03-11%20at%209.49.12%20AM.png)
Getting Started with Kubernetes and ElasticKube
(this is a 3min walk through video with the following topics)
1. Deploy ElasticKube to a Kubernetes cluster
2. Configuration
3. Admin: Setup and invite a user
4. Deploy an instance
Hear What Others are Saying
“Kubernetes has provided us the level of sophistication required for enterprises to manage containers across complex networking environments and the appropriate amount of visibility into the application lifecycle. Additionally, the community commitment and engagement has been exceptional, and we look forward to being a major contributor to this next wave of modern cloud computing and application management.”
_~Alberto Arias Maestro, Co-founder and Chief Technology Officer, ElasticBox_
_-- Brannan Matherson, Head of Product Marketing, ElasticBox_
|
{
"pile_set_name": "Github"
}
|
// Copyright 2001-2019 Crytek GmbH / Crytek Group. All rights reserved.
#include "StdAfx.h"
#include "RuntimeRegistry.h"
#include "Runtime/RuntimeClass.h"
#ifdef RegisterClass
#undef RegisterClass
#endif
namespace Schematyc
{
// Looks up a registered runtime class by its GUID.
// Returns an empty pointer when no class with that GUID has been registered.
IRuntimeClassConstPtr CRuntimeRegistry::GetClass(const CryGUID& guid) const
{
	const Classes::const_iterator it = m_classes.find(guid);
	if (it == m_classes.end())
	{
		return IRuntimeClassConstPtr();
	}
	return it->second;
}
void CRuntimeRegistry::RegisterClass(const CRuntimeClassConstPtr& pClass)
{
SCHEMATYC_CORE_ASSERT(pClass);
const CryGUID guid = pClass->GetGUID();
if (GUID::IsEmpty(guid) || GetClass(guid))
{
SCHEMATYC_CORE_CRITICAL_ERROR("Unable to register runtime class!");
return;
}
m_classes.insert(Classes::value_type(guid, pClass));
}
// Removes the class registered under the given GUID from the registry.
// Erasing a GUID that is not present is a harmless no-op.
void CRuntimeRegistry::ReleaseClass(const CryGUID& guid)
{
	m_classes.erase(guid);
}
// Internal lookup that returns the concrete class pointer type
// (CRuntimeClassConstPtr) instead of the interface pointer used by GetClass.
CRuntimeClassConstPtr CRuntimeRegistry::GetClassImpl(const CryGUID& guid) const
{
	const Classes::const_iterator it = m_classes.find(guid);
	return (it == m_classes.end()) ? CRuntimeClassConstPtr() : it->second;
}
// Drops every registered class, returning the registry to its initial state.
void CRuntimeRegistry::Reset()
{
	m_classes.clear();
}
} // Schematyc
|
{
"pile_set_name": "Github"
}
|
package cn.tycoding.common.controller;
import org.apache.shiro.authz.annotation.RequiresPermissions;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;
/**
 * Route (view navigation) management.
 * <p>
 * This controller only handles page-navigation requests: each returned String
 * is the name of an HTML template. See the Thymeleaf configuration in
 * application.yml for how template names are resolved to files.
 *
 * @author tycoding
 * @date 2019-01-27
 */
@Controller
public class RouterController {

    @GetMapping("/login")
    public String login() {
        return "login";
    }

    @GetMapping("/")
    public String i() {
        return "index";
    }

    @GetMapping("/index")
    public String index() {
        return "index";
    }

    @GetMapping("/doc")
    public String doc() {
        return "page/doc";
    }

    // Shown when the current subject lacks a required permission.
    @GetMapping("/403")
    public String unAuthorized() {
        return "error/403";
    }

    /**
     * System management
     */
    @GetMapping("/system/user")
    @RequiresPermissions("user:list")
    public String user() {
        return "page/system/user/index";
    }

    @GetMapping("/system/role")
    @RequiresPermissions("role:list")
    public String role() {
        return "page/system/role/index";
    }

    @GetMapping("/system/menu")
    @RequiresPermissions("menu:list")
    public String menu() {
        return "page/system/menu/index";
    }

    // Current user's own profile page; no permission check required.
    @GetMapping("/system/self")
    public String self() {
        return "page/system/my/index";
    }

    @GetMapping("/system/dept")
    @RequiresPermissions("dept:list")
    public String dept() {
        return "page/system/dept/index";
    }

    /**
     * System monitoring
     */
    @GetMapping("/monitor/online")
    @RequiresPermissions("online:list")
    public String online() {
        return "page/monitor/online/index";
    }

    @GetMapping("/monitor/loginlog")
    @RequiresPermissions("loginlog:list")
    public String loginLog() {
        return "page/monitor/loginlog/index";
    }

    @GetMapping("/monitor/log")
    @RequiresPermissions("log:list")
    public String log() {
        return "page/monitor/log/index";
    }

    @GetMapping("/monitor/redis/monitor")
    @RequiresPermissions("redis:list")
    public String monitor() {
        return "page/monitor/redis/index";
    }

    @GetMapping("/monitor/druid")
    @RequiresPermissions("druid:list")
    public String druid() {
        return "page/monitor/druid";
    }

    /**
     * Object storage
     */
    @GetMapping("/storage/qiniu")
    @RequiresPermissions("qiniu:list")
    public String qiniu() {
        return "page/storage/qiniu/index";
    }

    /**
     * Web resources
     */
    @GetMapping("/web/weather")
    @RequiresPermissions("weather:list")
    public String weather() {
        return "page/web/weather";
    }

    @GetMapping("/web/movie")
    @RequiresPermissions("movie:list")
    public String movie() {
        return "page/web/movie";
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.tasks.compile.incremental;
import org.gradle.api.internal.tasks.compile.CleaningJavaCompiler;
import org.gradle.api.internal.tasks.compile.JavaCompileSpec;
import org.gradle.api.internal.tasks.compile.incremental.cache.CompileCaches;
import org.gradle.api.internal.tasks.compile.incremental.deps.ClassSetAnalysis;
import org.gradle.api.internal.tasks.compile.incremental.deps.ClassSetAnalysisData;
import org.gradle.api.internal.tasks.compile.incremental.jar.JarClasspathSnapshotMaker;
import org.gradle.api.internal.tasks.compile.incremental.jar.PreviousCompilation;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.tasks.incremental.IncrementalTaskInputs;
import org.gradle.language.base.internal.compile.Compiler;
/**
 * Chooses, per task execution, between a full ("cleaning") Java compile and a
 * selective incremental compile, and wraps whichever compiler is chosen in an
 * {@link IncrementalCompilationFinalizer} so that the jar classpath snapshot
 * and class set analysis are refreshed after compilation.
 */
public class IncrementalCompilerDecorator {

    private static final Logger LOG = Logging.getLogger(IncrementalCompilerDecorator.class);
    private final JarClasspathSnapshotMaker jarClasspathSnapshotMaker;
    private final CompileCaches compileCaches;
    private final CleaningJavaCompiler cleaningCompiler;
    private final String displayName;
    private final RecompilationSpecProvider staleClassDetecter;
    private final ClassSetAnalysisUpdater classSetAnalysisUpdater;
    private final CompilationSourceDirs sourceDirs;
    private final IncrementalCompilationInitializer compilationInitializer;

    public IncrementalCompilerDecorator(JarClasspathSnapshotMaker jarClasspathSnapshotMaker, CompileCaches compileCaches,
                                        IncrementalCompilationInitializer compilationInitializer, CleaningJavaCompiler cleaningCompiler, String displayName,
                                        RecompilationSpecProvider staleClassDetecter, ClassSetAnalysisUpdater classSetAnalysisUpdater,
                                        CompilationSourceDirs sourceDirs) {
        this.jarClasspathSnapshotMaker = jarClasspathSnapshotMaker;
        this.compileCaches = compileCaches;
        this.compilationInitializer = compilationInitializer;
        this.cleaningCompiler = cleaningCompiler;
        this.displayName = displayName;
        this.staleClassDetecter = staleClassDetecter;
        this.classSetAnalysisUpdater = classSetAnalysisUpdater;
        this.sourceDirs = sourceDirs;
    }

    /**
     * Returns the compiler to use for this execution, wrapped so that
     * analysis state is written out once compilation finishes.
     */
    public Compiler<JavaCompileSpec> prepareCompiler(final IncrementalTaskInputs inputs) {
        final Compiler<JavaCompileSpec> compiler = getCompiler(inputs, sourceDirs);
        return new IncrementalCompilationFinalizer(compiler, jarClasspathSnapshotMaker, classSetAnalysisUpdater);
    }

    /**
     * Falls back to the cleaning (full) compiler unless every precondition for
     * incremental compilation holds; each fallback logs its reason.
     */
    private Compiler<JavaCompileSpec> getCompiler(IncrementalTaskInputs inputs, CompilationSourceDirs sourceDirs) {
        // Gradle reports non-incremental inputs when e.g. outputs changed or
        // there is no previous execution; a full recompile is required then.
        if (!inputs.isIncremental()) {
            LOG.lifecycle("{} - is not incremental (e.g. outputs have changed, no previous execution, etc.).", displayName);
            return cleaningCompiler;
        }
        if (!sourceDirs.areSourceDirsKnown()) {
            LOG.lifecycle("{} - is not incremental. Unable to infer the source directories.", displayName);
            return cleaningCompiler;
        }
        // Class analysis data from the previous build is needed to work out
        // which classes are stale; without it only a full recompile is safe.
        ClassSetAnalysisData data = compileCaches.getLocalClassSetAnalysisStore().get();
        if (data == null) {
            LOG.lifecycle("{} - is not incremental. No class analysis data available from the previous build.", displayName);
            return cleaningCompiler;
        }
        PreviousCompilation previousCompilation = new PreviousCompilation(new ClassSetAnalysis(data), compileCaches.getLocalJarClasspathSnapshotStore(), compileCaches.getJarSnapshotCache());
        return new SelectiveCompiler(inputs, previousCompilation, cleaningCompiler, staleClassDetecter, compilationInitializer, jarClasspathSnapshotMaker);
    }
}
|
{
"pile_set_name": "Github"
}
|
//
// FieldReference.cs
//
// Author:
// Jb Evain (jbevain@gmail.com)
//
// Copyright (c) 2008 - 2011 Jb Evain
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
using System;
namespace Mono.Cecil {
public class FieldReference : MemberReference {

	TypeReference field_type;

	// Type of the value stored in the referenced field.
	public TypeReference FieldType {
		get { return field_type; }
		set { field_type = value; }
	}

	// Metadata-style full name: "<field-type> <declaring-type>::<name>".
	public override string FullName {
		get { return field_type.FullName + " " + MemberFullName (); }
	}

	internal override bool ContainsGenericParameter {
		get { return field_type.ContainsGenericParameter || base.ContainsGenericParameter; }
	}

	// Used by the metadata reader; fields are populated afterwards.
	internal FieldReference ()
	{
		this.token = new MetadataToken (TokenType.MemberRef);
	}

	public FieldReference (string name, TypeReference fieldType)
		: base (name)
	{
		if (fieldType == null)
			throw new ArgumentNullException ("fieldType");

		this.field_type = fieldType;
		this.token = new MetadataToken (TokenType.MemberRef);
	}

	public FieldReference (string name, TypeReference fieldType, TypeReference declaringType)
		: this (name, fieldType)
	{
		if (declaringType == null)
			throw new ArgumentNullException ("declaringType");

		this.DeclaringType = declaringType;
	}

	// Resolves this reference to its FieldDefinition through the owning
	// module's resolver; throws NotSupportedException when detached.
	public virtual FieldDefinition Resolve ()
	{
		var module = this.Module;
		if (module == null)
			throw new NotSupportedException ();

		return module.Resolve (this);
	}
}
}
|
{
"pile_set_name": "Github"
}
|
# Widens the +key+ and +json+ columns of +precomputed_query_docs+ to
# unlimited-length text so that large precomputed documents fit.
class IncreasePrecomputedQueryDocSize < ActiveRecord::Migration[4.2]
  def change
    %i[key json].each do |column|
      change_column :precomputed_query_docs, column, :text, limit: nil
    end
  end
end
|
{
"pile_set_name": "Github"
}
|
-- source include/have_ndb.inc

# Coverage for NDB transaction and locking behaviour: scan isolation,
# LOCK TABLES combined with deletes via pk / unique / ordered index,
# SELECT ... FOR UPDATE and LOCK IN SHARE MODE conflicts, plus
# regressions for Bug#6020, Bug#20390 and Bug#17812.

connect (con1,localhost,root,,);
connect (con2,localhost,root,,);

--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
--enable_warnings

#
# Transaction lock test to show that the NDB
# table handler is working properly with
# transaction locks
#

#
# Testing of scan isolation
#
connection con1;
create table t1 (x integer not null primary key, y varchar(32)) engine = ndb;

insert into t1 values (1,'one'), (2,'two');
select * from t1 order by x;

connection con2;
select * from t1 order by x;

connection con1;
start transaction;
insert into t1 values (3,'three');
select * from t1 order by x;

connection con2;
start transaction;
# Uncommitted insert from con1 must not be visible here.
select * from t1 order by x;

connection con1;
commit;

connection con2;
select * from t1 order by x;
commit;

drop table t1;

###
# Bug#6020
create table t1 (pk integer not null primary key, u int not null, o int not null,
unique(u), key(o)) engine = ndb;
insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);

# Delete under LOCK TABLES via each access path: pk, unique, ordered index.
lock tables t1 write;
delete from t1 where pk = 1;
unlock tables;
select * from t1 order by pk;
insert into t1 values (1,1,1);

lock tables t1 write;
delete from t1 where u = 1;
unlock tables;
select * from t1 order by pk;
insert into t1 values (1,1,1);

lock tables t1 write;
delete from t1 where o = 1;
unlock tables;
select * from t1 order by pk;
insert into t1 values (1,1,1);

drop table t1;

# Lock for update

create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;

insert into t1 values (1,'one',1);

# PK access
connection con1;
begin;
select * from t1 where x = 1 for update;

connection con2;
begin;
--error 1205
select * from t1 where x = 1 for update;
rollback;

connection con1;
rollback;

insert into t1 values (2,'two',2),(3,"three",3);
begin;
select * from t1 where x = 1 for update;

connection con2;
--error 1205
select * from t1 where x = 1 for update;
select * from t1 where x = 2 for update;
rollback;

connection con1;
commit;

# table scan
#
# Note that there are two distinct execution paths in which we unlock
# non-matching rows inspected during table scan - one that is used in
# case of filesort and one that used in rest of cases. Below we cover
# the latter (Bug #20390 "SELECT FOR UPDATE does not release locks of
# untouched rows in full table scans").
connection con1;
begin;
# We can't use "order by x" here as it will cause filesort
--replace_column 1 # 2 # 3 #
select * from t1 where y = 'one' or y = 'three' for update;

connection con2;
begin;
# Have to check with pk access here since scans take locks on
# all rows and then release them in chunks
select * from t1 where x = 2 for update;
--error 1205
select * from t1 where x = 1 for update;
rollback;

connection con1;
commit;

# And now the test for case with filesort
begin;
select * from t1 where y = 'one' or y = 'three' order by x for update;

connection con2;
begin;
select * from t1 where x = 2 for update;
--error 1205
select * from t1 where x = 1 for update;
rollback;

connection con1;
commit;

# index scan
connection con1;
begin;
select * from t1 where z > 1 and z < 3 for update;

connection con2;
begin;
# Have to check with pk access here since scans take locks on
# all rows and then release them in chunks
select * from t1 where x = 1 for update;
--error 1105,1205
select * from t1 where x = 2 for update;
rollback;

connection con1;
commit;

# share locking

# PK access
connection con1;
begin;
select * from t1 where x = 1 lock in share mode;

connection con2;
begin;
# Shared locks are compatible; the exclusive lock below must time out.
select * from t1 where x = 1 lock in share mode;
select * from t1 where x = 2 for update;
--error 1205
select * from t1 where x = 1 for update;
rollback;

connection con1;
commit;

# table scan
connection con1;
begin;
# We can't use "order by x" here as it will cause filesort
--replace_column 1 # 2 # 3 #
select * from t1 where y = 'one' or y = 'three' lock in share mode;

connection con2;
begin;
select * from t1 where y = 'one' lock in share mode;
# Have to check with pk access here since scans take locks on
# all rows and then release them in chunks
select * from t1 where x = 2 for update;
--error 1205
select * from t1 where x = 1 for update;
rollback;

connection con1;
commit;

# And the same test for case with filesort
connection con1;
begin;
select * from t1 where y = 'one' or y = 'three' order by x lock in share mode;

connection con2;
begin;
select * from t1 where y = 'one' lock in share mode;
select * from t1 where x = 2 for update;
--error 1205
select * from t1 where x = 1 for update;
rollback;

connection con1;
commit;

# index scan
connection con1;
begin;
select * from t1 where z > 1 and z < 3 lock in share mode;

connection con2;
begin;
select * from t1 where z = 1 lock in share mode;
# Have to check with pk access here since scans take locks on
# all rows and then release them in chunks
select * from t1 where x = 1 for update;
--error 1205
select * from t1 where x = 2 for update;
rollback;

connection con1;
commit;

drop table t1;

# End of 4.1 tests

#
# Bug #17812 Previous lock table for write causes "stray" lock
# although table is recreated
#
# this creating, locking, and dropping causes a subsequent hang
# on the delete below waiting for table t2 the locking in the
# "other" connection is relevant, as without it there is no problem
#
connection con1;
create table t3 (id2 int) engine=ndb;

connection con2;
lock tables t3 write;
unlock tables;

connection con1;
drop table t3;

connection con1;
create table t2 (id int, j int) engine=ndb;
insert into t2 values (2, 2);
create table t3 (id int) engine=ndb;

connection con2;
lock tables t3 read;

connection con1;
# here we get a hang before bugfix although we shouldn't
delete t2 from t2, t3 where t2.id = t3.id;

connection con2;
unlock tables;

connection con1;
drop table t2, t3;
{
"pile_set_name": "Github"
}
|
libXfont provides the core of the legacy X11 font system, handling the
index files (fonts.dir, fonts.alias, fonts.scale), the various font file
formats, and rasterizing them. It is used by the X servers, the
X Font Server (xfs), and some font utilities (bdftopcf for instance),
but should not be used by normal X11 clients. X11 clients access fonts
via either the new API's in libXft, or the legacy API's in libX11.
libXfont supports a number of compression and font formats, and the
configure script takes various options to enable or disable them:
-- Compression types:
gzip - always enabled, no option to disable, requires libz
bzip2 - disabled by default, enable via --with-bzip2, requires libbz2
-- Font formats:
builtins - copies of the "fixed" & "cursor" fonts required by the
X protocol are built into the library so the X server always
has the fonts it requires to start up. Accessed via the
special 'built-ins' entry in the X server font path.
Enabled by default, disable via --disable-builtins.
freetype - handles scalable font formats including OpenType, FreeType,
and PostScript formats. Requires FreeType2 library.
Can also be used to handle bdf & bitmap pcf font formats.
Enabled by default, disable via --disable-freetype.
bdf bitmap fonts - text file format for distributing fonts, described
in http://www.x.org/docs/BDF/bdf.pdf specification. Normally
not used by the X server at runtime, as the fonts distributed
by X.Org in bdf format are compiled with bdftopcf when
installing/packaging them.
Enabled by default, disable via --disable-bdfformat.
pcf bitmap fonts - standard bitmap font format since X11R5 in 1991,
used for all bitmap fonts installed from X.Org packages.
Compiled format is architecture independent.
As noted above, usually produced by bdftopcf.
Enabled by default, disable via --disable-pcfformat.
snf bitmap fonts - standard bitmap font format prior to X11R5 in 1991,
remains only for backwards compatibility. Unlike pcf, snf files
are architecture specific, and contain less font information
than pcf files. snf fonts are deprecated and support for them
may be removed in future libXfont releases.
Disabled by default, enable via --enable-snfformat.
-- Font services:
xfs font servers - allows retrieving fonts as a client of an xfs server.
Enabled by default, disable via --disable-fc (font client).
If enabled, you can also use the standard libxtrans flags to
configure which transports can be used to connect to xfs:
--enable-unix-transport Enable UNIX domain socket transport
--enable-tcp-transport Enable TCP socket transport (IPv4)
--enable-ipv6 Enable IPv6 support for tcp-transport
--enable-local-transport Enable os-specific local transport
(Change --enable to --disable to force disabling support.)
The default setting is to enable all of the transports the
configure script can find OS support for.
--------------------------------------------------------------------------
Please submit bugs & patches to the Xorg bugzilla:
https://bugs.freedesktop.org/enter_bug.cgi?product=xorg
All questions regarding this software should be directed at the
Xorg mailing list:
http://lists.freedesktop.org/mailman/listinfo/xorg
The master development code repository can be found at:
git://anongit.freedesktop.org/git/xorg/lib/libXfont
http://cgit.freedesktop.org/xorg/lib/libXfont
For more information on the git code manager, see:
http://wiki.x.org/wiki/GitPage
|
{
"pile_set_name": "Github"
}
|
// RUN: %clang_cc1 -emit-llvm %s -o /dev/null

/* Global variables with constant floating-point initializers of
 * different types (double and float). */
double FOO = 17;
double BAR = 12.0;
float XX = 12.0f;

/* File-local string table. */
static char *procnames[] = {
  "EXIT"
};

/* Static initializer that takes the addresses of the globals above. */
void *Data[] = { &FOO, &BAR, &XX };
|
{
"pile_set_name": "Github"
}
|
"use strict";
// Implements Promise.reduce / Promise.prototype.reduce. The same machinery
// also backs Promise.each (signalled via the `_each` parameter below).
module.exports = function(Promise,
                          PromiseArray,
                          apiRejection,
                          tryConvertToPromise,
                          INTERNAL,
                          debug) {
var getDomain = Promise._getDomain;
var util = require("./util");
var tryCatch = util.tryCatch;

// PromiseArray subclass that folds the input values left-to-right through
// `fn`, optionally seeded with `initialValue`.
// `_each` selects the mode: INTERNAL => each-mode collecting accumulator
// values, 0 => each-mode without collection, otherwise plain reduce.
function ReductionPromiseArray(promises, fn, initialValue, _each) {
    this.constructor$(promises);
    var domain = getDomain();
    this._fn = domain === null ? fn : util.domainBind(domain, fn);
    if (initialValue !== undefined) {
        initialValue = Promise.resolve(initialValue);
        // Cancelling the seed promise cancels the whole reduction.
        initialValue._attachCancellationCallback(this);
    }
    this._initialValue = initialValue;
    this._currentCancellable = null;
    if(_each === INTERNAL) {
        this._eachValues = Array(this._length);
    } else if (_each === 0) {
        this._eachValues = null;
    } else {
        this._eachValues = undefined;
    }
    this._promise._captureStackTrace();
    // NOTE: -5 is an internal PromiseArray resolution-mode flag — confirm
    // its meaning against promise_array.js before changing.
    this._init$(undefined, -5);
}
util.inherits(ReductionPromiseArray, PromiseArray);

// Records an accumulator value when running in collecting each-mode.
ReductionPromiseArray.prototype._gotAccum = function(accum) {
    if (this._eachValues !== undefined &&
        this._eachValues !== null &&
        accum !== INTERNAL) {
        this._eachValues.push(accum);
    }
};

// Final step in each-mode: append the last value and expose the collection.
ReductionPromiseArray.prototype._eachComplete = function(value) {
    if (this._eachValues !== null) {
        this._eachValues.push(value);
    }
    return this._eachValues;
};

ReductionPromiseArray.prototype._init = function() {};

// An empty input resolves with the collected values (each-mode) or the seed.
ReductionPromiseArray.prototype._resolveEmptyArray = function() {
    this._resolve(this._eachValues !== undefined ? this._eachValues
                                                 : this._initialValue);
};

ReductionPromiseArray.prototype.shouldCopyValues = function () {
    return false;
};

ReductionPromiseArray.prototype._resolve = function(value) {
    this._promise._resolveCallback(value);
    this._values = null;
};

// Propagates cancellation to whichever intermediate promise is currently
// pending, as well as to the seed promise.
ReductionPromiseArray.prototype._resultCancelled = function(sender) {
    if (sender === this._initialValue) return this._cancel();
    if (this._isResolved()) return;
    this._resultCancelled$();
    if (this._currentCancellable instanceof Promise) {
        this._currentCancellable.cancel();
    }
    if (this._initialValue instanceof Promise) {
        this._initialValue.cancel();
    }
};

// Builds the left-to-right chain: each element is visited via gotAccum,
// threading the accumulator through per-element `ctx` objects.
ReductionPromiseArray.prototype._iterate = function (values) {
    this._values = values;
    var value;
    var i;
    var length = values.length;
    if (this._initialValue !== undefined) {
        value = this._initialValue;
        i = 0;
    } else {
        // No seed: the first element becomes the initial accumulator.
        value = Promise.resolve(values[0]);
        i = 1;
    }

    this._currentCancellable = value;

    if (!value.isRejected()) {
        for (; i < length; ++i) {
            var ctx = {
                accum: null,
                value: values[i],
                index: i,
                length: length,
                array: this
            };
            value = value._then(gotAccum, undefined, undefined, ctx, undefined);
        }
    }

    if (this._eachValues !== undefined) {
        value = value
            ._then(this._eachComplete, undefined, undefined, this, undefined);
    }
    value._then(completed, completed, undefined, value, this);
};

Promise.prototype.reduce = function (fn, initialValue) {
    return reduce(this, fn, initialValue, null);
};

Promise.reduce = function (promises, fn, initialValue, _each) {
    return reduce(promises, fn, initialValue, _each);
};

// Bound with `this` as the final chain promise; settles the array promise
// with its fulfillment value or rejection reason.
function completed(valueOrReason, array) {
    if (this.isFulfilled()) {
        array._resolve(valueOrReason);
    } else {
        array._reject(valueOrReason);
    }
}

function reduce(promises, fn, initialValue, _each) {
    if (typeof fn !== "function") {
        return apiRejection("expecting a function but got " + util.classString(fn));
    }
    var array = new ReductionPromiseArray(promises, fn, initialValue, _each);
    return array.promise();
}

// Receives the settled accumulator; resolves the current element (which may
// itself be a thenable) before handing both to the reducer.
function gotAccum(accum) {
    this.accum = accum;
    this.array._gotAccum(accum);
    var value = tryConvertToPromise(this.value, this.array._promise);
    if (value instanceof Promise) {
        this.array._currentCancellable = value;
        return value._then(gotValue, undefined, undefined, this, undefined);
    } else {
        return gotValue.call(this, value);
    }
}

// Invokes the user callback. In each-mode the callback gets (value, index,
// length); in reduce-mode it gets (accum, value, index, length).
function gotValue(value) {
    var array = this.array;
    var promise = array._promise;
    var fn = tryCatch(array._fn);
    promise._pushContext();
    var ret;
    if (array._eachValues !== undefined) {
        ret = fn.call(promise._boundValue(), value, this.index, this.length);
    } else {
        ret = fn.call(promise._boundValue(),
                      this.accum, value, this.index, this.length);
    }
    if (ret instanceof Promise) {
        array._currentCancellable = ret;
    }
    var promiseCreated = promise._popContext();
    debug.checkForgottenReturns(
        ret,
        promiseCreated,
        array._eachValues !== undefined ? "Promise.each" : "Promise.reduce",
        promise
    );
    return ret;
}
};
|
{
"pile_set_name": "Github"
}
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2015-2015. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_CONTAINER_PMR_UNSYNCHRONIZED_POOL_RESOURCE_HPP
#define BOOST_CONTAINER_PMR_UNSYNCHRONIZED_POOL_RESOURCE_HPP
#if defined (_MSC_VER)
# pragma once
#endif
#include <boost/container/detail/config_begin.hpp>
#include <boost/container/detail/workaround.hpp>
#include <boost/container/detail/auto_link.hpp>
#include <boost/container/pmr/memory_resource.hpp>
#include <boost/container/detail/pool_resource.hpp>
#include <cstddef>
namespace boost {
namespace container {
namespace pmr {
//! An unsynchronized_pool_resource is a general-purpose memory resource having
//! the following qualities:
//!
//! - Each resource owns the allocated memory, and frees it on destruction,
//! even if deallocate has not been called for some of the allocated blocks.
//!
//! - A pool resource consists of a collection of pools, serving
//! requests for different block sizes. Each individual pool manages a
//! collection of chunks that are in turn divided into blocks of uniform size,
//! returned via calls to do_allocate. Each call to do_allocate(size, alignment)
//! is dispatched to the pool serving the smallest blocks accommodating at
//! least size bytes.
//!
//! - When a particular pool is exhausted, allocating a block from that pool
//! results in the allocation of an additional chunk of memory from the upstream
//! allocator (supplied at construction), thus replenishing the pool. With
//! each successive replenishment, the chunk size obtained increases
//! geometrically. [ Note: By allocating memory in chunks, the pooling strategy
//! increases the chance that consecutive allocations will be close together
//! in memory. - end note ]
//!
//! - Allocation requests that exceed the largest block size of any pool are
//! fulfilled directly from the upstream allocator.
//!
//! - A pool_options struct may be passed to the pool resource constructors to
//! tune the largest block size and the maximum chunk size.
//!
//! An unsynchronized_pool_resource class may not be accessed from multiple threads
//! simultaneously and thus avoids the cost of synchronization entirely in
//! single-threaded applications.
class BOOST_CONTAINER_DECL unsynchronized_pool_resource
   : public memory_resource
{
   pool_resource m_resource;

   public:

   //! <b>Requires</b>: `upstream` is the address of a valid memory resource.
   //!
   //! <b>Effects</b>: Constructs a pool resource object that will obtain memory
   //! from upstream whenever the pool resource is unable to satisfy a memory
   //! request from its own internal data structures. The resulting object will hold
   //! a copy of upstream, but will not own the resource to which upstream points.
   //! [ Note: The intention is that calls to upstream->allocate() will be
   //! substantially fewer than calls to this->allocate() in most cases. - end note ]
   //! The behavior of the pooling mechanism is tuned according to the value of
   //! the opts argument.
   //!
   //! <b>Throws</b>: Nothing unless upstream->allocate() throws. It is unspecified if
   //! or under what conditions this constructor calls upstream->allocate().
   unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream) BOOST_NOEXCEPT;

   //! <b>Effects</b>: Same as
   //! `unsynchronized_pool_resource(pool_options(), get_default_resource())`.
   unsynchronized_pool_resource() BOOST_NOEXCEPT;

   //! <b>Effects</b>: Same as
   //! `unsynchronized_pool_resource(pool_options(), upstream)`.
   explicit unsynchronized_pool_resource(memory_resource* upstream) BOOST_NOEXCEPT;

   //! <b>Effects</b>: Same as
   //! `unsynchronized_pool_resource(opts, get_default_resource())`.
   explicit unsynchronized_pool_resource(const pool_options& opts) BOOST_NOEXCEPT;

   #if !defined(BOOST_NO_CXX11_DELETED_FUNCTIONS) || defined(BOOST_CONTAINER_DOXYGEN_INVOKED)
   unsynchronized_pool_resource(const unsynchronized_pool_resource&) = delete;
   // NOTE(review): this operator= is declared to return by value rather than by
   // reference; harmless since it is deleted, but unconventional.
   unsynchronized_pool_resource operator=(const unsynchronized_pool_resource&) = delete;
   #else
   private:
   unsynchronized_pool_resource (const unsynchronized_pool_resource&);
   unsynchronized_pool_resource operator=(const unsynchronized_pool_resource&);
   public:
   #endif

   //! <b>Effects</b>: Calls
   //! `this->release()`.
   virtual ~unsynchronized_pool_resource();

   //! <b>Effects</b>: Calls `upstream_resource()->deallocate()` as necessary
   //! to release all allocated memory. [ Note: memory is released back to
   //! `upstream_resource()` even if deallocate has not been called for some
   //! of the allocated blocks. - end note ]
   void release();

   //! <b>Returns</b>: The value of the upstream argument provided to the
   //! constructor of this object.
   memory_resource* upstream_resource() const;

   //! <b>Returns</b>: The options that control the pooling behavior of this resource.
   //! The values in the returned struct may differ from those supplied to the pool
   //! resource constructor in that values of zero will be replaced with
   //! implementation-defined defaults and sizes may be rounded to unspecified granularity.
   pool_options options() const;

   protected:

   //! <b>Returns</b>: A pointer to allocated storage with a size of at least `bytes`.
   //! The size and alignment of the allocated memory shall meet the requirements for
   //! a class derived from `memory_resource`.
   //!
   //! <b>Effects</b>: If the pool selected for a block of size bytes is unable to
   //! satisfy the memory request from its own internal data structures, it will call
   //! `upstream_resource()->allocate()` to obtain more memory. If `bytes` is larger
   //! than that which the largest pool can handle, then memory will be allocated
   //! using `upstream_resource()->allocate()`.
   //!
   //! <b>Throws</b>: Nothing unless `upstream_resource()->allocate()` throws.
   virtual void* do_allocate(std::size_t bytes, std::size_t alignment);

   //! <b>Effects</b>: Return the memory at p to the pool. It is unspecified if or under
   //! what circumstances this operation will result in a call to
   //! `upstream_resource()->deallocate()`.
   //!
   //! <b>Throws</b>: Nothing.
   virtual void do_deallocate(void* p, std::size_t bytes, std::size_t alignment);

   //! <b>Returns</b>:
   //! `this == dynamic_cast<const unsynchronized_pool_resource*>(&other)`.
   virtual bool do_is_equal(const memory_resource& other) const BOOST_NOEXCEPT;

   //Non-standard observers
   public:

   //! <b>Returns</b>: The number of pools that will be used in the pool resource.
   //!
   //! <b>Note</b>: Non-standard extension.
   std::size_t pool_count() const;

   //! <b>Returns</b>: The index of the pool that will be used to serve the allocation of `bytes`.
   //! Returns `pool_count()` if `bytes` is bigger
   //! than `options().largest_required_pool_block` (no pool will be used to serve this).
   //!
   //! <b>Note</b>: Non-standard extension.
   std::size_t pool_index(std::size_t bytes) const;

   //! <b>Requires</b>: `pool_idx < pool_count()`
   //!
   //! <b>Returns</b>: The number of blocks that will be allocated in the next chunk
   //! from the pool specified by `pool_idx`.
   //!
   //! <b>Note</b>: Non-standard extension.
   std::size_t pool_next_blocks_per_chunk(std::size_t pool_idx) const;

   //! <b>Requires</b>: `pool_idx < pool_count()`
   //!
   //! <b>Returns</b>: The number of bytes of the block that the specified `pool_idx` pool manages.
   //!
   //! <b>Note</b>: Non-standard extension.
   std::size_t pool_block(std::size_t pool_idx) const;

   //! <b>Requires</b>: `pool_idx < pool_count()`
   //!
   //! <b>Returns</b>: The number of blocks that the specified `pool_idx` pool has cached
   //! and will be served without calling the upstream allocator.
   //!
   //! <b>Note</b>: Non-standard extension.
   std::size_t pool_cached_blocks(std::size_t pool_idx) const;
};
} //namespace pmr {
} //namespace container {
} //namespace boost {
#include <boost/container/detail/config_end.hpp>
#endif //BOOST_CONTAINER_PMR_UNSYNCHRONIZED_POOL_RESOURCE_HPP
|
{
"pile_set_name": "Github"
}
|
# -*- mode: sh -*-
# Pre-Processing / stereo_pprc
################################################################
# Pre-alignment options
#
# Available choices are (however not all are supported by all sessions):
# NONE (Recommended for anything map projected)
# EPIPOLAR (Recommended for Pinhole Sessions)
# HOMOGRAPHY (Recommended for ISIS wide-angle shots)
# AFFINEEPIPOLAR (Recommended for ISIS narrow-angle and DG sessions)
alignment-method epipolar
# Intensity Normalization
force-use-entire-range # Use entire input range
# Select a preprocessing filter:
#
# 0 - None
# 1 - Subtracted Mean
# 2 - Laplacian of Gaussian (recommended)
prefilter-mode 2
# Kernel size (1-sigma) for pre-processing
#
# Recommend 1.4 px for Laplacian of Gaussian
# Recommend 25 px for Subtracted Mean
prefilter-kernel-width 1.4
# Integer Correlation / stereo_corr
################################################################
# Select a cost function to use for initialization:
#
# 0 - absolute difference (fast)
# 1 - squared difference (faster .. but usually bad)
# 2 - normalized cross correlation (recommended)
cost-mode 0
# Initialization step: correlation kernel size
corr-kernel 23 23
# Initialization step: correlation window size
#corr-search -100 -100 100 100
# Subpixel Refinement / stereo_rfne
################################################################
# Subpixel step: subpixel modes
#
# 0 - disable subpixel correlation (fastest)
# 1 - parabola fitting (draft mode - not as accurate)
# 2 - affine adaptive window, bayes EM weighting (slower, but much more accurate)
subpixel-mode 1
# Subpixel step: correlation kernel size
subpixel-kernel 25 25
# Post Filtering / stereo_fltr
################################################################
# Hole filling (inpainting of holes up to 100,000 pixels) is disabled here
disable-fill-holes
# Automatic "erode" low confidence pixels
rm-half-kernel 3 3
rm-min-matches 60
rm-threshold 3
rm-cleanup-passes 1
# Triangulation / stereo_tri
################################################################
# Size max of the universe in meters and altitude off the ground.
# Setting both values to zero turns this post-processing step off.
universe-center camera
near-universe-radius 0.7
far-universe-radius 80.0
|
{
"pile_set_name": "Github"
}
|
/* $NetBSD: os.c,v 1.4 2020/05/24 19:46:28 christos Exp $ */
/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
#include <windows.h>
#include <isc/os.h>
/* One-shot initialization flag for the cached SYSTEM_INFO below. */
static BOOL bInit = FALSE;
/* Snapshot of the host system information, filled by initialize_action(). */
static SYSTEM_INFO SystemInfo;
/*
 * Populate the cached SYSTEM_INFO the first time this is called; subsequent
 * calls are no-ops. NOTE(review): there is no locking around bInit —
 * presumably concurrent first calls are tolerated; confirm with callers.
 */
static void
initialize_action(void) {
	if (!bInit) {
		GetSystemInfo(&SystemInfo);
		bInit = TRUE;
	}
}
/*
 * Return the processor count reported by Windows, clamped to at least 1.
 */
unsigned int
isc_os_ncpus(void) {
	long n;

	initialize_action();
	n = (long)SystemInfo.dwNumberOfProcessors;
	return ((unsigned int)(n > 0 ? n : 1));
}
|
{
"pile_set_name": "Github"
}
|
<?php
// Language strings (Japanese) for the error-display skin settings UI.
// 'エラー表示' ~ "error display"; the second string describes choosing the
// skin used when showing error messages.
$lang->message = 'エラー表示';
$lang->about_skin = 'エラーメッセージを表示する際のスキンを指定できます。';
|
{
"pile_set_name": "Github"
}
|
<?php
// Builds an anchor tag from pre-computed href/target attribute fragments and
// the first regex capture group as the link text.
// NOTE(review): ${p_match[1]} is interpolated without escaping — if it can
// carry user-controlled input this is an XSS vector; confirm that upstream
// code sanitizes it before this point.
$data = "<a ${t_url_href}${t_url_target}>${p_match[1]}</a>";
|
{
"pile_set_name": "Github"
}
|
var baseCreate = require('./_baseCreate'),
isObject = require('./isObject');
/**
 * Creates a function that produces an instance of `Ctor` regardless of
 * whether it was invoked as part of a `new` expression or by `call` or `apply`.
 *
 * @private
 * @param {Function} Ctor The constructor to wrap.
 * @returns {Function} Returns the new wrapped function.
 */
function createCtor(Ctor) {
  return function() {
    // The `switch` spells out literal `new Ctor(...)` expressions so that
    // ES2015 class constructors (which throw when invoked without `new`) are
    // handled for up to seven arguments. See
    // http://ecma-international.org/ecma-262/7.0/#sec-ecmascript-function-objects-call-thisargument-argumentslist
    // for more details.
    var argList = arguments;
    switch (argList.length) {
      case 0: return new Ctor;
      case 1: return new Ctor(argList[0]);
      case 2: return new Ctor(argList[0], argList[1]);
      case 3: return new Ctor(argList[0], argList[1], argList[2]);
      case 4: return new Ctor(argList[0], argList[1], argList[2], argList[3]);
      case 5: return new Ctor(argList[0], argList[1], argList[2], argList[3], argList[4]);
      case 6: return new Ctor(argList[0], argList[1], argList[2], argList[3], argList[4], argList[5]);
      case 7: return new Ctor(argList[0], argList[1], argList[2], argList[3], argList[4], argList[5], argList[6]);
    }
    // Eight or more arguments: emulate `new` by hand so the argument list can
    // be applied, then mimic the constructor's `return` behavior.
    // See https://es5.github.io/#x13.2.2 for more details.
    var boundInstance = baseCreate(Ctor.prototype);
    var callResult = Ctor.apply(boundInstance, argList);
    return isObject(callResult) ? callResult : boundInstance;
  };
}

module.exports = createCtor;
|
{
"pile_set_name": "Github"
}
|
../src/main/dist/build/agda/agda ac/AC.agda --ignore-interfaces -iac -v0 +RTS -slogs/livia-20080306-11.59/ac1
1,372,196,000 bytes allocated in the heap
108,936,692 bytes copied during GC (scavenged)
2,741,488 bytes copied during GC (not scavenged)
7,966,720 bytes maximum residency (21 sample(s))
2615 collections in generation 0 ( 0.38s)
21 collections in generation 1 ( 0.27s)
24 Mb total memory in use
INIT time 0.00s ( 0.00s elapsed)
MUT time 1.36s ( 2.00s elapsed)
GC time 0.64s ( 0.94s elapsed)
EXIT time 0.00s ( 0.00s elapsed)
Total time 2.01s ( 2.94s elapsed)
%GC time 32.1% (32.0% elapsed)
Alloc rate 1,005,944,635 bytes per MUT second
Productivity 67.9% of total user, 46.4% of total elapsed
──────────────────────────────────────────────────────────────────
Linux 2.6.22-14-generic (buildd@palmer) (gcc [can't parse]) #??? 1CPU [livia]
Memory: Total Used Free Shared Buffers
Mem: 515844 324552 191292 0 4808
Swap: 489940 208208 281732
Bootup: Sat Mar 1 19:29:30 2008 Load average: 0.52 0.36 0.32 2/187 1822
user : 3:00:26.07 2.7% page in : 12141080 disk 1: 813981r 945679w
nice : 0:10:38.44 0.2% page out: 22159604
system: 1:32:16.78 1.4% page act: 9741569
IOwait: 0:41:20.58 0.6% page dea: 10134509
hw irq: 0:02:06.52 0.0% page flt:170064930
sw irq: 0:01:25.89 0.0% swap in : 1047504
idle : 4d 3:46:02.27 88.7% swap out: 1257676
uptime: 4d 16:30:06.23 context : 98166272
irq 0: 110 timer irq 12: 482175 i8042
irq 1: 832805 i8042 irq 14: 0 libata
irq 3: 1 irq 15: 1410527 libata
irq 4: 1 irq 16: 226089 eth0
irq 6: 5 floppy [2] irq 17: 1572488 ioc0
irq 7: 0 parport0 irq 18: 773 uhci_hcd:usb1, Enson
irq 8: 3 rtc irq 19: 0 ehci_hcd:usb2
irq 9: 0 acpi
|
{
"pile_set_name": "Github"
}
|
// ASM: a very small and fast Java bytecode manipulation framework
// Copyright (c) 2000-2011 INRIA, France Telecom
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
package org.springframework.asm;
/**
* An {@link AnnotationVisitor} that generates a corresponding 'annotation' or 'type_annotation'
* structure, as defined in the Java Virtual Machine Specification (JVMS). AnnotationWriter
* instances can be chained in a doubly linked list, from which Runtime[In]Visible[Type]Annotations
* attributes can be generated with the {@link #putAnnotations} method. Similarly, arrays of such
* lists can be used to generate Runtime[In]VisibleParameterAnnotations attributes.
*
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.16">JVMS
* 4.7.16</a>
* @see <a href="https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.20">JVMS
* 4.7.20</a>
* @author Eric Bruneton
* @author Eugene Kuleshov
*/
final class AnnotationWriter extends AnnotationVisitor {
/** Where the constants used in this AnnotationWriter must be stored. */
private final SymbolTable symbolTable;

/**
 * Whether values are named or not. AnnotationWriter instances used for annotation default and
 * annotation arrays use unnamed values (i.e. they generate an 'element_value' structure for each
 * value, instead of an element_name_index followed by an element_value).
 */
private final boolean useNamedValues;

/**
 * The 'annotation' or 'type_annotation' JVMS structure corresponding to the annotation values
 * visited so far. All the fields of these structures, except the last one - the
 * element_value_pairs array, must be set before this ByteVector is passed to the constructor
 * (num_element_value_pairs can be set to 0, it is reset to the correct value in {@link
 * #visitEnd()}). The element_value_pairs array is filled incrementally in the various visit()
 * methods.
 *
 * <p>Note: as an exception to the above rules, for AnnotationDefault attributes (which contain a
 * single element_value by definition), this ByteVector is initially empty when passed to the
 * constructor, and {@link #numElementValuePairsOffset} is set to -1.
 */
private final ByteVector annotation;

/**
 * The offset in {@link #annotation} where {@link #numElementValuePairs} must be stored (or -1 for
 * the case of AnnotationDefault attributes).
 */
private final int numElementValuePairsOffset;

/** The number of element value pairs visited so far. */
private int numElementValuePairs;

/**
 * The previous AnnotationWriter. This field is used to store the list of annotations of a
 * Runtime[In]Visible[Type]Annotations attribute. It is unused for nested or array annotations
 * (annotation values of annotation type), or for AnnotationDefault attributes.
 */
private final AnnotationWriter previousAnnotation;

/**
 * The next AnnotationWriter. This field is used to store the list of annotations of a
 * Runtime[In]Visible[Type]Annotations attribute. It is unused for nested or array annotations
 * (annotation values of annotation type), or for AnnotationDefault attributes. This link is set
 * by the constructor of the next AnnotationWriter appended to the list.
 */
private AnnotationWriter nextAnnotation;
// -----------------------------------------------------------------------------------------------
// Constructors and factories
// -----------------------------------------------------------------------------------------------
/**
 * Constructs a new {@link AnnotationWriter}.
 *
 * @param symbolTable where the constants used in this AnnotationWriter must be stored.
 * @param useNamedValues whether values are named or not. AnnotationDefault and annotation arrays
 *     use unnamed values.
 * @param annotation where the 'annotation' or 'type_annotation' JVMS structure corresponding to
 *     the visited content must be stored. This ByteVector must already contain all the fields of
 *     the structure except the last one (the element_value_pairs array).
 * @param previousAnnotation the previously visited annotation of the
 *     Runtime[In]Visible[Type]Annotations attribute to which this annotation belongs, or
 *     {@literal null} in other cases (e.g. nested or array annotations).
 */
AnnotationWriter(
    final SymbolTable symbolTable,
    final boolean useNamedValues,
    final ByteVector annotation,
    final AnnotationWriter previousAnnotation) {
  super(/* latest api = */ Opcodes.ASM9);
  this.symbolTable = symbolTable;
  this.useNamedValues = useNamedValues;
  this.annotation = annotation;
  // By hypothesis, num_element_value_pairs is stored in the last unsigned short of 'annotation'.
  this.numElementValuePairsOffset = annotation.length == 0 ? -1 : annotation.length - 2;
  this.previousAnnotation = previousAnnotation;
  if (previousAnnotation != null) {
    // Link the previous writer forward to this one, so that putAnnotations can
    // replay the whole list in visit order.
    previousAnnotation.nextAnnotation = this;
  }
}
/**
 * Creates a new {@link AnnotationWriter} that uses named values and builds a plain
 * 'annotation' JVMS structure (see JVMS 4.7.16).
 *
 * @param symbolTable where the constants used in this AnnotationWriter must be stored.
 * @param descriptor the class descriptor of the annotation class.
 * @param previousAnnotation the previously visited annotation of the
 *     Runtime[In]Visible[Type]Annotations attribute to which this annotation belongs, or
 *     {@literal null} in other cases (e.g. nested or array annotations).
 * @return a new {@link AnnotationWriter} for the given annotation descriptor.
 */
static AnnotationWriter create(
    final SymbolTable symbolTable,
    final String descriptor,
    final AnnotationWriter previousAnnotation) {
  ByteVector annotation = new ByteVector();
  // type_index, then two reserved bytes for num_element_value_pairs (patched
  // later, in visitEnd()).
  annotation.putShort(symbolTable.addConstantUtf8(descriptor));
  annotation.putShort(0);
  return new AnnotationWriter(
      symbolTable, /* useNamedValues = */ true, annotation, previousAnnotation);
}
/**
 * Creates a new {@link AnnotationWriter} that uses named values and builds a
 * 'type_annotation' JVMS structure (see JVMS 4.7.20).
 *
 * @param symbolTable where the constants used in this AnnotationWriter must be stored.
 * @param typeRef a reference to the annotated type. The sort of this type reference must be
 *     {@link TypeReference#CLASS_TYPE_PARAMETER}, {@link
 *     TypeReference#CLASS_TYPE_PARAMETER_BOUND} or {@link TypeReference#CLASS_EXTENDS}. See
 *     {@link TypeReference}.
 * @param typePath the path to the annotated type argument, wildcard bound, array element type, or
 *     static inner type within 'typeRef'. May be {@literal null} if the annotation targets
 *     'typeRef' as a whole.
 * @param descriptor the class descriptor of the annotation class.
 * @param previousAnnotation the previously visited annotation of the
 *     Runtime[In]Visible[Type]Annotations attribute to which this annotation belongs, or
 *     {@literal null} in other cases (e.g. nested or array annotations).
 * @return a new {@link AnnotationWriter} for the given type annotation reference and descriptor.
 */
static AnnotationWriter create(
    final SymbolTable symbolTable,
    final int typeRef,
    final TypePath typePath,
    final String descriptor,
    final AnnotationWriter previousAnnotation) {
  ByteVector typeAnnotation = new ByteVector();
  // target_type, target_info and target_path come first...
  TypeReference.putTarget(typeRef, typeAnnotation);
  TypePath.put(typePath, typeAnnotation);
  // ...then type_index and two reserved bytes for num_element_value_pairs
  // (patched later, in visitEnd()).
  typeAnnotation.putShort(symbolTable.addConstantUtf8(descriptor));
  typeAnnotation.putShort(0);
  return new AnnotationWriter(
      symbolTable, /* useNamedValues = */ true, typeAnnotation, previousAnnotation);
}
// -----------------------------------------------------------------------------------------------
// Implementation of the AnnotationVisitor abstract class
// -----------------------------------------------------------------------------------------------
@Override
public void visit(final String name, final Object value) {
  // Case of an element_value with a const_value_index, class_info_index or array_index field.
  // See https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.16.1.
  ++numElementValuePairs;
  if (useNamedValues) {
    annotation.putShort(symbolTable.addConstantUtf8(name));
  }
  if (value instanceof String) {
    annotation.put12('s', symbolTable.addConstantUtf8((String) value));
  } else if (value instanceof Byte) {
    annotation.put12('B', symbolTable.addConstantInteger(((Byte) value).byteValue()).index);
  } else if (value instanceof Boolean) {
    int booleanValue = ((Boolean) value).booleanValue() ? 1 : 0;
    annotation.put12('Z', symbolTable.addConstantInteger(booleanValue).index);
  } else if (value instanceof Character) {
    annotation.put12('C', symbolTable.addConstantInteger(((Character) value).charValue()).index);
  } else if (value instanceof Short) {
    annotation.put12('S', symbolTable.addConstantInteger(((Short) value).shortValue()).index);
  } else if (value instanceof Type) {
    annotation.put12('c', symbolTable.addConstantUtf8(((Type) value).getDescriptor()));
  } else if (value instanceof byte[]) {
    // Primitive arrays are encoded as an element_value of array type ('['),
    // each element carrying its own primitive tag character.
    byte[] byteArray = (byte[]) value;
    annotation.put12('[', byteArray.length);
    for (byte byteValue : byteArray) {
      annotation.put12('B', symbolTable.addConstantInteger(byteValue).index);
    }
  } else if (value instanceof boolean[]) {
    boolean[] booleanArray = (boolean[]) value;
    annotation.put12('[', booleanArray.length);
    for (boolean booleanValue : booleanArray) {
      annotation.put12('Z', symbolTable.addConstantInteger(booleanValue ? 1 : 0).index);
    }
  } else if (value instanceof short[]) {
    short[] shortArray = (short[]) value;
    annotation.put12('[', shortArray.length);
    for (short shortValue : shortArray) {
      annotation.put12('S', symbolTable.addConstantInteger(shortValue).index);
    }
  } else if (value instanceof char[]) {
    char[] charArray = (char[]) value;
    annotation.put12('[', charArray.length);
    for (char charValue : charArray) {
      annotation.put12('C', symbolTable.addConstantInteger(charValue).index);
    }
  } else if (value instanceof int[]) {
    int[] intArray = (int[]) value;
    annotation.put12('[', intArray.length);
    for (int intValue : intArray) {
      annotation.put12('I', symbolTable.addConstantInteger(intValue).index);
    }
  } else if (value instanceof long[]) {
    long[] longArray = (long[]) value;
    annotation.put12('[', longArray.length);
    for (long longValue : longArray) {
      annotation.put12('J', symbolTable.addConstantLong(longValue).index);
    }
  } else if (value instanceof float[]) {
    float[] floatArray = (float[]) value;
    annotation.put12('[', floatArray.length);
    for (float floatValue : floatArray) {
      annotation.put12('F', symbolTable.addConstantFloat(floatValue).index);
    }
  } else if (value instanceof double[]) {
    double[] doubleArray = (double[]) value;
    annotation.put12('[', doubleArray.length);
    for (double doubleValue : doubleArray) {
      annotation.put12('D', symbolTable.addConstantDouble(doubleValue).index);
    }
  } else {
    // Remaining boxed types: the element_value tag character is derived from
    // the constant's symbol tag via the lookup string (unused slots are '.').
    Symbol symbol = symbolTable.addConstant(value);
    annotation.put12(".s.IFJDCS".charAt(symbol.tag), symbol.index);
  }
}
@Override
public void visitEnum(final String name, final String descriptor, final String value) {
  // Appends an element_value with an enum_const_value field (tag 'e'):
  // type_name_index followed by const_name_index. See JVMS 4.7.16.1,
  // https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.16.1.
  ++numElementValuePairs;
  if (useNamedValues) {
    annotation.putShort(symbolTable.addConstantUtf8(name));
  }
  annotation.put12('e', symbolTable.addConstantUtf8(descriptor));
  annotation.putShort(symbolTable.addConstantUtf8(value));
}
@Override
public AnnotationVisitor visitAnnotation(final String name, final String descriptor) {
  // Appends an element_value with an annotation_value field (tag '@'). See
  // JVMS 4.7.16.1,
  // https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.16.1.
  ++numElementValuePairs;
  if (useNamedValues) {
    annotation.putShort(symbolTable.addConstantUtf8(name));
  }
  // Tag and type_index, plus two reserved bytes for num_element_value_pairs
  // which the nested writer patches in its visitEnd().
  annotation.put12('@', symbolTable.addConstantUtf8(descriptor));
  annotation.putShort(0);
  return new AnnotationWriter(symbolTable, /* useNamedValues = */ true, annotation, null);
}
@Override
public AnnotationVisitor visitArray(final String name) {
  // Case of an element_value with an array_value field.
  // https://docs.oracle.com/javase/specs/jvms/se9/html/jvms-4.html#jvms-4.7.16.1
  ++numElementValuePairs;
  if (useNamedValues) {
    annotation.putShort(symbolTable.addConstantUtf8(name));
  }
  // Write tag, and reserve 2 bytes for num_values. Here we take advantage of the fact that the
  // end of an element_value of array type is similar to the end of an 'annotation' structure: an
  // unsigned short num_values followed by num_values element_value, versus an unsigned short
  // num_element_value_pairs, followed by num_element_value_pairs { element_name_index,
  // element_value } tuples. This allows us to use an AnnotationWriter with unnamed values to
  // visit the array elements. Its num_element_value_pairs will correspond to the number of array
  // elements and will be stored in what is in fact num_values.
  annotation.put12('[', 0);
  // previousAnnotation is null: the nested writer is not part of any
  // Runtime[In]Visible[Type]Annotations list.
  return new AnnotationWriter(symbolTable, /* useNamedValues = */ false, annotation, null);
}
@Override
public void visitEnd() {
  // AnnotationDefault attributes (offset == -1) hold a single element_value
  // and have no num_element_value_pairs field to patch.
  if (numElementValuePairsOffset == -1) {
    return;
  }
  // Patch the reserved unsigned short with the number of pairs visited.
  byte[] data = annotation.data;
  data[numElementValuePairsOffset] = (byte) (numElementValuePairs >>> 8);
  data[numElementValuePairsOffset + 1] = (byte) numElementValuePairs;
}
// -----------------------------------------------------------------------------------------------
// Utility methods
// -----------------------------------------------------------------------------------------------
/**
 * Computes the size, in bytes, of a Runtime[In]Visible[Type]Annotations attribute that would
 * contain this annotation together with all its predecessors (see {@link #previousAnnotation}),
 * including the attribute_name_index and attribute_length fields. As a side effect, registers
 * the attribute name in the constant pool of the class (if not null).
 *
 * @param attributeName one of "Runtime[In]Visible[Type]Annotations", or {@literal null}.
 * @return the attribute size in bytes.
 */
int computeAnnotationsSize(final String attributeName) {
  if (attributeName != null) {
    symbolTable.addConstantUtf8(attributeName);
  }
  // attribute_name_index (2 bytes) + attribute_length (4) + num_annotations (2).
  int attributeSize = 8;
  for (AnnotationWriter writer = this; writer != null; writer = writer.previousAnnotation) {
    attributeSize += writer.annotation.length;
  }
  return attributeSize;
}
/**
 * Computes the total size, in bytes, of the Runtime[In]Visible[Type]Annotations attributes that
 * would contain the given annotations and all their predecessors (see {@link
 * #previousAnnotation}), including the attribute_name_index and attribute_length fields of each
 * attribute. As a side effect, registers the attribute names in the constant pool of the class.
 *
 * @param lastRuntimeVisibleAnnotation the last runtime visible annotation of a field, method or
 *     class (previous ones reachable via {@link #previousAnnotation}), or {@literal null}.
 * @param lastRuntimeInvisibleAnnotation the last runtime invisible annotation, or {@literal
 *     null}.
 * @param lastRuntimeVisibleTypeAnnotation the last runtime visible type annotation, or {@literal
 *     null}.
 * @param lastRuntimeInvisibleTypeAnnotation the last runtime invisible type annotation, or
 *     {@literal null}.
 * @return the combined size in bytes of the corresponding attributes.
 */
static int computeAnnotationsSize(
    final AnnotationWriter lastRuntimeVisibleAnnotation,
    final AnnotationWriter lastRuntimeInvisibleAnnotation,
    final AnnotationWriter lastRuntimeVisibleTypeAnnotation,
    final AnnotationWriter lastRuntimeInvisibleTypeAnnotation) {
  int size = 0;
  if (lastRuntimeVisibleAnnotation != null) {
    size += lastRuntimeVisibleAnnotation.computeAnnotationsSize(
        Constants.RUNTIME_VISIBLE_ANNOTATIONS);
  }
  if (lastRuntimeInvisibleAnnotation != null) {
    size += lastRuntimeInvisibleAnnotation.computeAnnotationsSize(
        Constants.RUNTIME_INVISIBLE_ANNOTATIONS);
  }
  if (lastRuntimeVisibleTypeAnnotation != null) {
    size += lastRuntimeVisibleTypeAnnotation.computeAnnotationsSize(
        Constants.RUNTIME_VISIBLE_TYPE_ANNOTATIONS);
  }
  if (lastRuntimeInvisibleTypeAnnotation != null) {
    size += lastRuntimeInvisibleTypeAnnotation.computeAnnotationsSize(
        Constants.RUNTIME_INVISIBLE_TYPE_ANNOTATIONS);
  }
  return size;
}
/**
* Puts a Runtime[In]Visible[Type]Annotations attribute containing this annotations and all its
* <i>predecessors</i> (see {@link #previousAnnotation} in the given ByteVector. Annotations are
* put in the same order they have been visited.
*
* @param attributeNameIndex the constant pool index of the attribute name (one of
* "Runtime[In]Visible[Type]Annotations").
* @param output where the attribute must be put.
*/
void putAnnotations(final int attributeNameIndex, final ByteVector output) {
int attributeLength = 2; // For num_annotations.
int numAnnotations = 0;
AnnotationWriter annotationWriter = this;
AnnotationWriter firstAnnotation = null;
while (annotationWriter != null) {
// In case the user forgot to call visitEnd().
annotationWriter.visitEnd();
attributeLength += annotationWriter.annotation.length;
numAnnotations++;
firstAnnotation = annotationWriter;
annotationWriter = annotationWriter.previousAnnotation;
}
output.putShort(attributeNameIndex);
output.putInt(attributeLength);
output.putShort(numAnnotations);
annotationWriter = firstAnnotation;
while (annotationWriter != null) {
output.putByteArray(annotationWriter.annotation.data, 0, annotationWriter.annotation.length);
annotationWriter = annotationWriter.nextAnnotation;
}
}
  /**
   * Puts the Runtime[In]Visible[Type]Annotations attributes containing the given annotations and
   * all their <i>predecessors</i> (see {@link #previousAnnotation}) in the given ByteVector.
   * Annotations are put in the same order they have been visited.
   *
   * @param symbolTable where the constants used in the AnnotationWriter instances are stored.
   * @param lastRuntimeVisibleAnnotation The last runtime visible annotation of a field, method or
   *     class. The previous ones can be accessed with the {@link #previousAnnotation} field. May be
   *     {@literal null}.
   * @param lastRuntimeInvisibleAnnotation The last runtime invisible annotation of a field,
   *     method or class. The previous ones can be accessed with the {@link #previousAnnotation}
   *     field. May be {@literal null}.
   * @param lastRuntimeVisibleTypeAnnotation The last runtime visible type annotation of a
   *     field, method or class. The previous ones can be accessed with the {@link
   *     #previousAnnotation} field. May be {@literal null}.
   * @param lastRuntimeInvisibleTypeAnnotation The last runtime invisible type annotation of a
   *     field, method or class. The previous ones can be accessed with the {@link
   *     #previousAnnotation} field. May be {@literal null}.
   * @param output where the attributes must be put.
   */
  static void putAnnotations(
      final SymbolTable symbolTable,
      final AnnotationWriter lastRuntimeVisibleAnnotation,
      final AnnotationWriter lastRuntimeInvisibleAnnotation,
      final AnnotationWriter lastRuntimeVisibleTypeAnnotation,
      final AnnotationWriter lastRuntimeInvisibleTypeAnnotation,
      final ByteVector output) {
    // Each non-null annotation list is written as one attribute; absent (null) lists produce no
    // attribute at all. The attribute name is added to the constant pool on the fly.
    if (lastRuntimeVisibleAnnotation != null) {
      lastRuntimeVisibleAnnotation.putAnnotations(
          symbolTable.addConstantUtf8(Constants.RUNTIME_VISIBLE_ANNOTATIONS), output);
    }
    if (lastRuntimeInvisibleAnnotation != null) {
      lastRuntimeInvisibleAnnotation.putAnnotations(
          symbolTable.addConstantUtf8(Constants.RUNTIME_INVISIBLE_ANNOTATIONS), output);
    }
    if (lastRuntimeVisibleTypeAnnotation != null) {
      lastRuntimeVisibleTypeAnnotation.putAnnotations(
          symbolTable.addConstantUtf8(Constants.RUNTIME_VISIBLE_TYPE_ANNOTATIONS), output);
    }
    if (lastRuntimeInvisibleTypeAnnotation != null) {
      lastRuntimeInvisibleTypeAnnotation.putAnnotations(
          symbolTable.addConstantUtf8(Constants.RUNTIME_INVISIBLE_TYPE_ANNOTATIONS), output);
    }
  }
/**
* Returns the size of a Runtime[In]VisibleParameterAnnotations attribute containing all the
* annotation lists from the given AnnotationWriter sub-array. Also adds the attribute name to the
* constant pool of the class.
*
* @param attributeName one of "Runtime[In]VisibleParameterAnnotations".
* @param annotationWriters an array of AnnotationWriter lists (designated by their <i>last</i>
* element).
* @param annotableParameterCount the number of elements in annotationWriters to take into account
* (elements [0..annotableParameterCount[ are taken into account).
* @return the size in bytes of a Runtime[In]VisibleParameterAnnotations attribute corresponding
* to the given sub-array of AnnotationWriter lists. This includes the size of the
* attribute_name_index and attribute_length fields.
*/
static int computeParameterAnnotationsSize(
final String attributeName,
final AnnotationWriter[] annotationWriters,
final int annotableParameterCount) {
// Note: attributeName is added to the constant pool by the call to computeAnnotationsSize
// below. This assumes that there is at least one non-null element in the annotationWriters
// sub-array (which is ensured by the lazy instantiation of this array in MethodWriter).
// The attribute_name_index, attribute_length and num_parameters fields use 7 bytes, and each
// element of the parameter_annotations array uses 2 bytes for its num_annotations field.
int attributeSize = 7 + 2 * annotableParameterCount;
for (int i = 0; i < annotableParameterCount; ++i) {
AnnotationWriter annotationWriter = annotationWriters[i];
attributeSize +=
annotationWriter == null ? 0 : annotationWriter.computeAnnotationsSize(attributeName) - 8;
}
return attributeSize;
}
  /**
   * Puts a Runtime[In]VisibleParameterAnnotations attribute containing all the annotation lists
   * from the given AnnotationWriter sub-array in the given ByteVector.
   *
   * @param attributeNameIndex constant pool index of the attribute name (one of
   *     Runtime[In]VisibleParameterAnnotations).
   * @param annotationWriters an array of AnnotationWriter lists (designated by their <i>last</i>
   *     element).
   * @param annotableParameterCount the number of elements in annotationWriters to put (elements
   *     [0..annotableParameterCount[ are put).
   * @param output where the attribute must be put.
   */
  static void putParameterAnnotations(
      final int attributeNameIndex,
      final AnnotationWriter[] annotationWriters,
      final int annotableParameterCount,
      final ByteVector output) {
    // The num_parameters field uses 1 byte, and each element of the parameter_annotations array
    // uses 2 bytes for its num_annotations field.
    int attributeLength = 1 + 2 * annotableParameterCount;
    for (int i = 0; i < annotableParameterCount; ++i) {
      AnnotationWriter annotationWriter = annotationWriters[i];
      attributeLength +=
          annotationWriter == null ? 0 : annotationWriter.computeAnnotationsSize(null) - 8;
    }
    output.putShort(attributeNameIndex);
    output.putInt(attributeLength);
    output.putByte(annotableParameterCount);
    for (int i = 0; i < annotableParameterCount; ++i) {
      AnnotationWriter annotationWriter = annotationWriters[i];
      AnnotationWriter firstAnnotation = null;
      int numAnnotations = 0;
      // First walk the list backwards (via previousAnnotation) to count the annotations and find
      // the first visited one, which must be written first.
      while (annotationWriter != null) {
        // In case the user forgot to call visitEnd().
        annotationWriter.visitEnd();
        numAnnotations++;
        firstAnnotation = annotationWriter;
        annotationWriter = annotationWriter.previousAnnotation;
      }
      output.putShort(numAnnotations);
      annotationWriter = firstAnnotation;
      while (annotationWriter != null) {
        output.putByteArray(
            annotationWriter.annotation.data, 0, annotationWriter.annotation.length);
        annotationWriter = annotationWriter.nextAnnotation;
      }
    }
  }
}
|
{
"pile_set_name": "Github"
}
|
#ifndef RAPIDJSON_PRETTYWRITER_H_
#define RAPIDJSON_PRETTYWRITER_H_
#include "writer.h"
namespace rapidjson {
//! Writer with indentation and spacing.
/*!
	\tparam Stream Type of output stream.
\tparam Encoding Encoding of both source strings and output.
\tparam Allocator Type of allocator for allocating memory of stack.
*/
template<typename Stream, typename Encoding = UTF8<>, typename Allocator = MemoryPoolAllocator<> >
class PrettyWriter : public Writer<Stream, Encoding, Allocator> {
public:
    typedef Writer<Stream, Encoding, Allocator> Base;
    typedef typename Base::Ch Ch;

    //! Constructor
    /*! \param stream Output stream.
        \param allocator User supplied allocator. If it is null, it will create a private one.
        \param levelDepth Initial capacity of the stack used to track nesting levels.
    */
    PrettyWriter(Stream& stream, Allocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
        Base(stream, allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {}

    //! Set custom indentation.
    /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\t', '\n', '\r').
        \param indentCharCount Number of indent characters for each indentation level.
        \note The default indentation is 4 spaces.
    */
    PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) {
        RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r');
        indentChar_ = indentChar;
        indentCharCount_ = indentCharCount;
        return *this;
    }

    //@name Implementation of Handler.
    //@{

    // Scalar handlers: write any separator/newline/indentation first (PrettyPrefix),
    // then delegate the raw value to the base Writer.
    PrettyWriter& Null() { PrettyPrefix(kNullType); Base::WriteNull(); return *this; }
    PrettyWriter& Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); Base::WriteBool(b); return *this; }
    PrettyWriter& Int(int i) { PrettyPrefix(kNumberType); Base::WriteInt(i); return *this; }
    PrettyWriter& Uint(unsigned u) { PrettyPrefix(kNumberType); Base::WriteUint(u); return *this; }
    PrettyWriter& Int64(int64_t i64) { PrettyPrefix(kNumberType); Base::WriteInt64(i64); return *this; }
    PrettyWriter& Uint64(uint64_t u64) { PrettyPrefix(kNumberType); Base::WriteUint64(u64); return *this; }
    PrettyWriter& Double(double d) { PrettyPrefix(kNumberType); Base::WriteDouble(d); return *this; }

    PrettyWriter& String(const Ch* str, SizeType length, bool copy = false) {
        (void)copy; // the string is written out immediately, so the copy flag is unused here
        PrettyPrefix(kStringType);
        Base::WriteString(str, length);
        return *this;
    }

    PrettyWriter& StartObject() {
        PrettyPrefix(kObjectType);
        // push a new nesting level; 'false' marks it as an object (not an array)
        new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false);
        Base::WriteStartObject();
        return *this;
    }

    PrettyWriter& EndObject(SizeType memberCount = 0) {
        (void)memberCount;
        RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
        RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray);
        bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;

        // an empty object stays on a single line: {}
        if (!empty) {
            Base::stream_.Put('\n');
            WriteIndent();
        }
        Base::WriteEndObject();
        return *this;
    }

    PrettyWriter& StartArray() {
        PrettyPrefix(kArrayType);
        // push a new nesting level; 'true' marks it as an array
        new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true);
        Base::WriteStartArray();
        return *this;
    }

    PrettyWriter& EndArray(SizeType memberCount = 0) {
        (void)memberCount;
        RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
        RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray);
        bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;

        // an empty array stays on a single line: []
        if (!empty) {
            Base::stream_.Put('\n');
            WriteIndent();
        }
        Base::WriteEndArray();
        return *this;
    }

    //@}

    //! Simpler but slower overload.
    PrettyWriter& String(const Ch* str) { return String(str, internal::StrLen(str)); }

protected:
    //! Write the separator, newline and indentation required before a value of the given type.
    /*! Also checks the value against the current context: in an object, values at even
        positions are member names and must be strings; the root value must be an object
        or an array.
    */
    void PrettyPrefix(Type type) {
        (void)type;
        if (Base::level_stack_.GetSize() != 0) { // this value is not at root
            typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>();
            if (level->inArray) {
                if (level->valueCount > 0) {
                    Base::stream_.Put(','); // add comma if it is not the first element in array
                    Base::stream_.Put('\n');
                }
                else
                    Base::stream_.Put('\n'); // first element: newline after '['
                WriteIndent();
            }
            else { // in object
                // valueCount alternates: even = member name, odd = member value
                if (level->valueCount > 0) {
                    if (level->valueCount % 2 == 0) {
                        Base::stream_.Put(','); // between a value and the next member name
                        Base::stream_.Put('\n');
                    }
                    else {
                        Base::stream_.Put(':'); // between a member name and its value
                        Base::stream_.Put(' ');
                    }
                }
                else
                    Base::stream_.Put('\n'); // first member: newline after '{'

                if (level->valueCount % 2 == 0)
                    WriteIndent(); // indent before a member name only
            }
            if (!level->inArray && level->valueCount % 2 == 0)
                RAPIDJSON_ASSERT(type == kStringType);  // if it's in object, then even number should be a name
            level->valueCount++;
        }
        else
            RAPIDJSON_ASSERT(type == kObjectType || type == kArrayType); // root must be object or array
    }

    //! Write indentation proportional to the current nesting depth.
    void WriteIndent()  {
        size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_;
        PutN(Base::stream_, indentChar_, count);
    }

    Ch indentChar_;            //!< character used for indentation
    unsigned indentCharCount_; //!< number of indentChar_ per nesting level
};
} // namespace rapidjson
#endif // RAPIDJSON_PRETTYWRITER_H_
|
{
"pile_set_name": "Github"
}
|
#include <AP_HAL/AP_HAL.h>
#if HAL_RCINPUT_WITH_AP_RADIO
#include <AP_Math/AP_Math.h>
#include "AP_Radio_cypress.h"
#include <utility>
#include <stdio.h>
#include <StorageManager/StorageManager.h>
#include <AP_HAL/utility/dsm.h>
#include <AP_Math/crc.h>
#include "telem_structure.h"
#include <AP_Notify/AP_Notify.h>
#include <GCS_MAVLink/GCS_MAVLink.h>
/*
driver for CYRF6936 radio
Many thanks to the SuperBitRF project from Paparrazi for their DSM
configuration code and register defines
https://github.com/esden/superbitrf-firmware
*/
#if CONFIG_HAL_BOARD == HAL_BOARD_CHIBIOS
#define TIMEOUT_PRIORITY 181
#define EVT_TIMEOUT EVENT_MASK(0)
#define EVT_IRQ EVENT_MASK(1)
#endif
#ifndef CYRF_SPI_DEVICE
# define CYRF_SPI_DEVICE "cypress"
#endif
#ifndef CYRF_IRQ_INPUT
# define CYRF_IRQ_INPUT (GPIO_INPUT|GPIO_FLOAT|GPIO_EXTI|GPIO_PORTD|GPIO_PIN15)
#endif
#ifndef CYRF_RESET_PIN
# define CYRF_RESET_PIN (GPIO_OUTPUT|GPIO_PUSHPULL|GPIO_EXTI|GPIO_PORTB|GPIO_PIN0)
#endif
extern const AP_HAL::HAL& hal;
#define Debug(level, fmt, args...) do { if ((level) <= get_debug_level()) { gcs().send_text(MAV_SEVERITY_INFO, fmt, ##args); }} while (0)
#define LP_FIFO_SIZE 16 // Physical data FIFO lengths in Radio
/* The SPI interface defines */
enum {
CYRF_CHANNEL = 0x00,
CYRF_TX_LENGTH = 0x01,
CYRF_TX_CTRL = 0x02,
CYRF_TX_CFG = 0x03,
CYRF_TX_IRQ_STATUS = 0x04,
CYRF_RX_CTRL = 0x05,
CYRF_RX_CFG = 0x06,
CYRF_RX_IRQ_STATUS = 0x07,
CYRF_RX_STATUS = 0x08,
CYRF_RX_COUNT = 0x09,
CYRF_RX_LENGTH = 0x0A,
CYRF_PWR_CTRL = 0x0B,
CYRF_XTAL_CTRL = 0x0C,
CYRF_IO_CFG = 0x0D,
CYRF_GPIO_CTRL = 0x0E,
CYRF_XACT_CFG = 0x0F,
CYRF_FRAMING_CFG = 0x10,
CYRF_DATA32_THOLD = 0x11,
CYRF_DATA64_THOLD = 0x12,
CYRF_RSSI = 0x13,
CYRF_EOP_CTRL = 0x14,
CYRF_CRC_SEED_LSB = 0x15,
CYRF_CRC_SEED_MSB = 0x16,
CYRF_TX_CRC_LSB = 0x17,
CYRF_TX_CRC_MSB = 0x18,
CYRF_RX_CRC_LSB = 0x19,
CYRF_RX_CRC_MSB = 0x1A,
CYRF_TX_OFFSET_LSB = 0x1B,
CYRF_TX_OFFSET_MSB = 0x1C,
CYRF_MODE_OVERRIDE = 0x1D,
CYRF_RX_OVERRIDE = 0x1E,
CYRF_TX_OVERRIDE = 0x1F,
CYRF_TX_BUFFER = 0x20,
CYRF_RX_BUFFER = 0x21,
CYRF_SOP_CODE = 0x22,
CYRF_DATA_CODE = 0x23,
CYRF_PREAMBLE = 0x24,
CYRF_MFG_ID = 0x25,
CYRF_XTAL_CFG = 0x26,
CYRF_CLK_OFFSET = 0x27,
CYRF_CLK_EN = 0x28,
CYRF_RX_ABORT = 0x29,
CYRF_AUTO_CAL_TIME = 0x32,
CYRF_AUTO_CAL_OFFSET = 0x35,
CYRF_ANALOG_CTRL = 0x39,
};
#define CYRF_DIR (1<<7) /**< Bit for enabling writing */
// CYRF_MODE_OVERRIDE
#define CYRF_RST (1<<0)
// CYRF_CLK_EN
#define CYRF_RXF (1<<1)
// CYRF_XACT_CFG
enum {
CYRF_MODE_SLEEP = (0x0<<2),
CYRF_MODE_IDLE = (0x1<<2),
CYRF_MODE_SYNTH_TX = (0x2<<2),
CYRF_MODE_SYNTH_RX = (0x3<<2),
CYRF_MODE_RX = (0x4<<2),
};
#define CYRF_FRC_END (1<<5)
#define CYRF_ACK_EN (1<<7)
// CYRF_IO_CFG
#define CYRF_IRQ_GPIO (1<<0)
#define CYRF_SPI_3PIN (1<<1)
#define CYRF_PACTL_GPIO (1<<2)
#define CYRF_PACTL_OD (1<<3)
#define CYRF_XOUT_OD (1<<4)
#define CYRF_MISO_OD (1<<5)
#define CYRF_IRQ_POL (1<<6)
#define CYRF_IRQ_OD (1<<7)
// CYRF_FRAMING_CFG
#define CYRF_LEN_EN (1<<5)
#define CYRF_SOP_LEN (1<<6)
#define CYRF_SOP_EN (1<<7)
// CYRF_RX_STATUS
enum {
CYRF_RX_DATA_MODE_GFSK = 0x00,
CYRF_RX_DATA_MODE_8DR = 0x01,
CYRF_RX_DATA_MODE_DDR = 0x10,
CYRF_RX_DATA_MODE_NV = 0x11,
};
#define CYRF_RX_CODE (1<<2)
#define CYRF_BAD_CRC (1<<3)
#define CYRF_CRC0 (1<<4)
#define CYRF_EOP_ERR (1<<5)
#define CYRF_PKT_ERR (1<<6)
#define CYRF_RX_ACK (1<<7)
// CYRF_TX_IRQ_STATUS
#define CYRF_TXE_IRQ (1<<0)
#define CYRF_TXC_IRQ (1<<1)
#define CYRF_TXBERR_IRQ (1<<2)
#define CYRF_TXB0_IRQ (1<<3)
#define CYRF_TXB8_IRQ (1<<4)
#define CYRF_TXB15_IRQ (1<<5)
#define CYRF_LV_IRQ (1<<6)
#define CYRF_OS_IRQ (1<<7)
// CYRF_RX_IRQ_STATUS
#define CYRF_RXE_IRQ (1<<0)
#define CYRF_RXC_IRQ (1<<1)
#define CYRF_RXBERR_IRQ (1<<2)
#define CYRF_RXB1_IRQ (1<<3)
#define CYRF_RXB8_IRQ (1<<4)
#define CYRF_RXB16_IRQ (1<<5)
#define CYRF_SOPDET_IRQ (1<<6)
#define CYRF_RXOW_IRQ (1<<7)
// CYRF_TX_CTRL
#define CYRF_TXE_IRQEN (1<<0)
#define CYRF_TXC_IRQEN (1<<1)
#define CYRF_TXBERR_IRQEN (1<<2)
#define CYRF_TXB0_IRQEN (1<<3)
#define CYRF_TXB8_IRQEN (1<<4)
#define CYRF_TXB15_IRQEN (1<<5)
#define CYRF_TX_CLR (1<<6)
#define CYRF_TX_GO (1<<7)
// CYRF_RX_CTRL
#define CYRF_RXE_IRQEN (1<<0)
#define CYRF_RXC_IRQEN (1<<1)
#define CYRF_RXBERR_IRQEN (1<<2)
#define CYRF_RXB1_IRQEN (1<<3)
#define CYRF_RXB8_IRQEN (1<<4)
#define CYRF_RXB16_IRQEN (1<<5)
#define CYRF_RSVD (1<<6)
#define CYRF_RX_GO (1<<7)
// CYRF_RX_OVERRIDE
#define CYRF_ACE (1<<1)
#define CYRF_DIS_RXCRC (1<<2)
#define CYRF_DIS_CRC0 (1<<3)
#define CYRF_FRC_RXDR (1<<4)
#define CYRF_MAN_RXACK (1<<5)
#define CYRF_RXTX_DLY (1<<6)
#define CYRF_ACK_RX (1<<7)
// CYRF_TX_OVERRIDE
#define CYRF_TX_INV (1<<0)
#define CYRF_DIS_TXCRC (1<<2)
#define CYRF_OVRD_ACK (1<<3)
#define CYRF_MAN_TXACK (1<<4)
#define CYRF_FRC_PRE (1<<6)
#define CYRF_ACK_TX (1<<7)
// CYRF_RX_CFG
#define CYRF_VLD_EN (1<<0)
#define CYRF_RXOW_EN (1<<1)
#define CYRF_FAST_TURN_EN (1<<3)
#define CYRF_HILO (1<<4)
#define CYRF_ATT (1<<5)
#define CYRF_LNA (1<<6)
#define CYRF_AGC_EN (1<<7)
// CYRF_TX_CFG
enum {
CYRF_PA_M35 = 0x0,
CYRF_PA_M30 = 0x1,
CYRF_PA_M24 = 0x2,
CYRF_PA_M18 = 0x3,
CYRF_PA_M13 = 0x4,
CYRF_PA_M5 = 0x5,
CYRF_PA_0 = 0x6,
CYRF_PA_4 = 0x7,
};
enum {
CYRF_DATA_MODE_GFSK = (0x0 <<3),
CYRF_DATA_MODE_8DR = (0x1 <<3),
CYRF_DATA_MODE_DDR = (0x2 <<3),
CYRF_DATA_MODE_SDR = (0x3 <<3),
};
#define CYRF_DATA_CODE_LENGTH (1<<5)
#define FLAG_WRITE 0x80
#define FLAG_AUTO_INC 0x40
#define DSM_MAX_CHANNEL 0x4F
#define DSM_SCAN_MIN_CH 8
#define DSM_SCAN_MID_CH 40
#define DSM_SCAN_MAX_CH 70
#define FCC_SUPPORT_CW_MODE 0
#define AUTOBIND_CHANNEL 12
// object instance for trampoline
AP_Radio_cypress *AP_Radio_cypress::radio_singleton;
#if CONFIG_HAL_BOARD == HAL_BOARD_CHIBIOS
thread_t *AP_Radio_cypress::_irq_handler_ctx;
#endif
/*
constructor
*/
AP_Radio_cypress::AP_Radio_cypress(AP_Radio &_radio) :
    AP_Radio_backend(_radio)
{
    // link to instance for irq_trampoline: the static IRQ/thread entry points
    // use radio_singleton to reach this object
    radio_singleton = this;
}
/*
initialise radio
*/
bool AP_Radio_cypress::init(void)
{
    // SPI device handle for the CYRF6936
    dev = hal.spi->get_device(CYRF_SPI_DEVICE);
#if CONFIG_HAL_BOARD == HAL_BOARD_CHIBIOS
    if (_irq_handler_ctx != nullptr) {
        AP_HAL::panic("AP_Radio_cypress: double instantiation of irq_handler\n");
    }
    chVTObjectInit(&timeout_vt);
    // dedicated high-priority thread that services radio IRQs and timeouts
    _irq_handler_ctx = chThdCreateFromHeap(NULL,
                                           THD_WORKING_AREA_SIZE(2048),
                                           "radio_cypress",
                                           TIMEOUT_PRIORITY,
                                           irq_handler_thd,
                                           NULL);
#endif
    // restore any previously saved bind information from storage
    load_bind_info();

    return reset();
}
/*
reset radio
*/
bool AP_Radio_cypress::reset(void)
{
    dev->get_semaphore()->take_blocking();

    /*
      to reset radio hold reset high for 0.5s, then low for 0.5s
     */
#if defined(HAL_GPIO_RADIO_RESET)
    hal.scheduler->expect_delay_ms(2000); // avoid main-loop-delay internal error
    hal.gpio->write(HAL_GPIO_RADIO_RESET, 1);
    hal.scheduler->delay(500);
    hal.gpio->write(HAL_GPIO_RADIO_RESET, 0);
    hal.scheduler->delay(500);
#endif
    radio_init();
    dev->get_semaphore()->give();

    // with no stored protocol and autobind disabled, go straight into
    // listening for bind packets
    if (dsm.protocol == DSM_NONE &&
        get_autobind_time() == 0) {
        start_recv_bind();
    }

    return true;
}
/*
return statistics structure from radio
*/
const AP_Radio::stats &AP_Radio_cypress::get_stats(void)
{
    // reference to the live statistics; counters are updated as packets are handled
    return stats;
}
/*
  read one pwm channel from radio
 */
uint16_t AP_Radio_cypress::read(uint8_t chan)
{
    // persist freshly received bind info before returning channel data
    if (dsm.need_bind_save) {
        save_bind_info();
    }
    return chan >= max_channels ? 0 : dsm.pwm_channels[chan];
}
/*
update status - called from main thread
*/
void AP_Radio_cypress::update(void)
{
    // the only periodic main-thread work is sending any pending
    // fw-upload acknowledgement back over MAVLink
    check_fw_ack();
}
/*
print one second debug info
*/
void AP_Radio_cypress::print_debug_info(void)
{
    // the first four fields are deltas since last_stats was last snapshotted
    // (see the once-per-second update in num_channels())
    Debug(2, "recv:%3u bad:%3u to:%3u re:%u N:%2u TXI:%u TX:%u 1:%4u 2:%4u 3:%4u 4:%4u 5:%4u 6:%4u 7:%4u 8:%4u 14:%u\n",
          unsigned(stats.recv_packets - last_stats.recv_packets),
          unsigned(stats.bad_packets - last_stats.bad_packets),
          unsigned(stats.timeouts - last_stats.timeouts),
          unsigned(stats.recv_errors - last_stats.recv_errors),
          num_channels(),
          unsigned(dsm.send_irq_count),
          unsigned(dsm.send_count),
          dsm.pwm_channels[0], dsm.pwm_channels[1], dsm.pwm_channels[2], dsm.pwm_channels[3],
          dsm.pwm_channels[4], dsm.pwm_channels[5], dsm.pwm_channels[6], dsm.pwm_channels[7],
          dsm.pwm_channels[13]);
}
/*
return number of active channels
*/
uint8_t AP_Radio_cypress::num_channels(void)
{
    uint32_t now = AP_HAL::millis();

    // publish a telemetry value on a 1-based channel number, extending the
    // reported channel count if needed; chan==0 means "not mapped to a channel"
    auto set_telem_channel = [this](uint8_t chan, uint16_t value) {
        if (chan > 0) {
            dsm.pwm_channels[chan-1] = value;
            dsm.num_channels = MAX(dsm.num_channels, chan);
        }
    };

    // map local RSSI/packet-rate and the TX-reported equivalents onto
    // user-configured channels (same order as the original explicit code)
    set_telem_channel(get_rssi_chan(), dsm.rssi);
    set_telem_channel(get_pps_chan(), t_status.pps);
    set_telem_channel(get_tx_rssi_chan(), dsm.tx_rssi);
    set_telem_channel(get_tx_pps_chan(), dsm.tx_pps);

    // once per second: optional debug print, then snapshot counters used for
    // both telemetry (t_status) and the next delta computation
    if (now - last_debug_print_ms > 1000) {
        last_debug_print_ms = now;
        if (get_debug_level() > 1) {
            print_debug_info();
        }

        t_status.pps = stats.recv_packets - last_stats.recv_packets;
        t_status.rssi = (uint8_t)dsm.rssi;
        last_stats = stats;
    }

    return dsm.num_channels;
}
/*
send a fwupload ack if needed
*/
void AP_Radio_cypress::check_fw_ack(void)
{
    Debug(4,"check need_ack\n");
    // only proceed when an ack is pending and we can take the semaphore
    // without blocking the main thread
    if (fwupload.need_ack && sem.take_nonblocking()) {
        // ack the send of a DATA96 fw packet to TX
        fwupload.need_ack = false;
        uint8_t data16[16] {};
        uint32_t ack_to = fwupload.offset + fwupload.acked;
        // first 4 bytes of the DATA16 payload carry the acked offset (native byte order)
        memcpy(&data16[0], &ack_to, 4);
        mavlink_msg_data16_send(fwupload.chan, 42, 4, data16);
        Debug(4,"sent ack DATA16\n");
        sem.give();
    }
}
/*
return time of last receive in microseconds
*/
uint32_t AP_Radio_cypress::last_recv_us(void)
{
    // time (microseconds) of the last successful channel parse.
    // we use the parse time, so it matches when channel values are filled in
    return dsm.last_parse_us;
}
/*
  send len bytes as a single packet
 */
bool AP_Radio_cypress::send(const uint8_t *pkt, uint16_t len)
{
    // arbitrary packet transmit is not currently supported on this radio
    (void)pkt;
    (void)len;
    return false;
}
/* The PN codes */
// 5x9 table of 8-byte PN (pseudo-noise) chip codes, indexed [row][column];
// row/column selection is derived from the bind information (see the DSM
// protocol handling elsewhere in this driver — TODO confirm exact mapping)
const uint8_t AP_Radio_cypress::pn_codes[5][9][8] = {
    { /* Row 0 */
        /* Col 0 */ {0x03, 0xBC, 0x6E, 0x8A, 0xEF, 0xBD, 0xFE, 0xF8},
        /* Col 1 */ {0x88, 0x17, 0x13, 0x3B, 0x2D, 0xBF, 0x06, 0xD6},
        /* Col 2 */ {0xF1, 0x94, 0x30, 0x21, 0xA1, 0x1C, 0x88, 0xA9},
        /* Col 3 */ {0xD0, 0xD2, 0x8E, 0xBC, 0x82, 0x2F, 0xE3, 0xB4},
        /* Col 4 */ {0x8C, 0xFA, 0x47, 0x9B, 0x83, 0xA5, 0x66, 0xD0},
        /* Col 5 */ {0x07, 0xBD, 0x9F, 0x26, 0xC8, 0x31, 0x0F, 0xB8},
        /* Col 6 */ {0xEF, 0x03, 0x95, 0x89, 0xB4, 0x71, 0x61, 0x9D},
        /* Col 7 */ {0x40, 0xBA, 0x97, 0xD5, 0x86, 0x4F, 0xCC, 0xD1},
        /* Col 8 */ {0xD7, 0xA1, 0x54, 0xB1, 0x5E, 0x89, 0xAE, 0x86}
    },
    { /* Row 1 */
        /* Col 0 */ {0x83, 0xF7, 0xA8, 0x2D, 0x7A, 0x44, 0x64, 0xD3},
        /* Col 1 */ {0x3F, 0x2C, 0x4E, 0xAA, 0x71, 0x48, 0x7A, 0xC9},
        /* Col 2 */ {0x17, 0xFF, 0x9E, 0x21, 0x36, 0x90, 0xC7, 0x82},
        /* Col 3 */ {0xBC, 0x5D, 0x9A, 0x5B, 0xEE, 0x7F, 0x42, 0xEB},
        /* Col 4 */ {0x24, 0xF5, 0xDD, 0xF8, 0x7A, 0x77, 0x74, 0xE7},
        /* Col 5 */ {0x3D, 0x70, 0x7C, 0x94, 0xDC, 0x84, 0xAD, 0x95},
        /* Col 6 */ {0x1E, 0x6A, 0xF0, 0x37, 0x52, 0x7B, 0x11, 0xD4},
        /* Col 7 */ {0x62, 0xF5, 0x2B, 0xAA, 0xFC, 0x33, 0xBF, 0xAF},
        /* Col 8 */ {0x40, 0x56, 0x32, 0xD9, 0x0F, 0xD9, 0x5D, 0x97}
    },
    { /* Row 2 */
        /* Col 0 */ {0x40, 0x56, 0x32, 0xD9, 0x0F, 0xD9, 0x5D, 0x97},
        /* Col 1 */ {0x8E, 0x4A, 0xD0, 0xA9, 0xA7, 0xFF, 0x20, 0xCA},
        /* Col 2 */ {0x4C, 0x97, 0x9D, 0xBF, 0xB8, 0x3D, 0xB5, 0xBE},
        /* Col 3 */ {0x0C, 0x5D, 0x24, 0x30, 0x9F, 0xCA, 0x6D, 0xBD},
        /* Col 4 */ {0x50, 0x14, 0x33, 0xDE, 0xF1, 0x78, 0x95, 0xAD},
        /* Col 5 */ {0x0C, 0x3C, 0xFA, 0xF9, 0xF0, 0xF2, 0x10, 0xC9},
        /* Col 6 */ {0xF4, 0xDA, 0x06, 0xDB, 0xBF, 0x4E, 0x6F, 0xB3},
        /* Col 7 */ {0x9E, 0x08, 0xD1, 0xAE, 0x59, 0x5E, 0xE8, 0xF0},
        /* Col 8 */ {0xC0, 0x90, 0x8F, 0xBB, 0x7C, 0x8E, 0x2B, 0x8E}
    },
    { /* Row 3 */
        /* Col 0 */ {0xC0, 0x90, 0x8F, 0xBB, 0x7C, 0x8E, 0x2B, 0x8E},
        /* Col 1 */ {0x80, 0x69, 0x26, 0x80, 0x08, 0xF8, 0x49, 0xE7},
        /* Col 2 */ {0x7D, 0x2D, 0x49, 0x54, 0xD0, 0x80, 0x40, 0xC1},
        /* Col 3 */ {0xB6, 0xF2, 0xE6, 0x1B, 0x80, 0x5A, 0x36, 0xB4},
        /* Col 4 */ {0x42, 0xAE, 0x9C, 0x1C, 0xDA, 0x67, 0x05, 0xF6},
        /* Col 5 */ {0x9B, 0x75, 0xF7, 0xE0, 0x14, 0x8D, 0xB5, 0x80},
        /* Col 6 */ {0xBF, 0x54, 0x98, 0xB9, 0xB7, 0x30, 0x5A, 0x88},
        /* Col 7 */ {0x35, 0xD1, 0xFC, 0x97, 0x23, 0xD4, 0xC9, 0x88},
        /* Col 8 */ {0x88, 0xE1, 0xD6, 0x31, 0x26, 0x5F, 0xBD, 0x40}
    },
    { /* Row 4 */
        /* Col 0 */ {0xE1, 0xD6, 0x31, 0x26, 0x5F, 0xBD, 0x40, 0x93},
        /* Col 1 */ {0xDC, 0x68, 0x08, 0x99, 0x97, 0xAE, 0xAF, 0x8C},
        /* Col 2 */ {0xC3, 0x0E, 0x01, 0x16, 0x0E, 0x32, 0x06, 0xBA},
        /* Col 3 */ {0xE0, 0x83, 0x01, 0xFA, 0xAB, 0x3E, 0x8F, 0xAC},
        /* Col 4 */ {0x5C, 0xD5, 0x9C, 0xB8, 0x46, 0x9C, 0x7D, 0x84},
        /* Col 5 */ {0xF1, 0xC6, 0xFE, 0x5C, 0x9D, 0xA5, 0x4F, 0xB7},
        /* Col 6 */ {0x58, 0xB5, 0xB3, 0xDD, 0x0E, 0x28, 0xF1, 0xB0},
        /* Col 7 */ {0x5F, 0x30, 0x3B, 0x56, 0x96, 0x45, 0xF4, 0xA1},
        /* Col 8 */ {0x03, 0xBC, 0x6E, 0x8A, 0xEF, 0xBD, 0xFE, 0xF8}
    },
};
const uint8_t AP_Radio_cypress::pn_bind[] = { 0x98, 0x88, 0x1B, 0xE4, 0x30, 0x79, 0x03, 0x84 };
/*The CYRF initial config, binding config and transfer config */
// base register/value pairs applied once by radio_init()
const AP_Radio_cypress::config AP_Radio_cypress::cyrf_config[] = {
    {CYRF_MODE_OVERRIDE, CYRF_RST},                                          // Reset the device
    {CYRF_CLK_EN, CYRF_RXF},                                                 // Enable the clock
    {CYRF_AUTO_CAL_TIME, 0x3C},                                              // From manual, needed for initialization
    {CYRF_AUTO_CAL_OFFSET, 0x14},                                            // From manual, needed for initialization
    {CYRF_RX_CFG, CYRF_LNA | CYRF_FAST_TURN_EN},                             // Enable low noise amplifier and fast turning
    {CYRF_TX_OFFSET_LSB, 0x55},                                              // From manual, typical configuration
    {CYRF_TX_OFFSET_MSB, 0x05},                                              // From manual, typical configuration
    {CYRF_XACT_CFG, CYRF_MODE_SYNTH_RX | CYRF_FRC_END},                      // Force in Synth RX mode
    {CYRF_TX_CFG, CYRF_DATA_CODE_LENGTH | CYRF_DATA_MODE_SDR | CYRF_PA_4},   // Enable 64 chip codes, SDR mode and amplifier +4dBm
    {CYRF_DATA64_THOLD, 0x0E},                                               // From manual, typical configuration
    {CYRF_XACT_CFG, CYRF_MODE_SYNTH_RX},                                     // Set in Synth RX mode (again, really needed?)
    {CYRF_IO_CFG, CYRF_IRQ_POL},                                             // IRQ active high
};
// overrides presumably applied when listening for bind packets: SDR mode with
// receive CRC checking disabled (confirm against the bind entry path)
const AP_Radio_cypress::config AP_Radio_cypress::cyrf_bind_config[] = {
    {CYRF_TX_CFG, CYRF_DATA_CODE_LENGTH | CYRF_DATA_MODE_SDR | CYRF_PA_4},   // Enable 64 chip codes, SDR mode and amplifier +4dBm
    {CYRF_FRAMING_CFG, CYRF_SOP_LEN | 0xE},                                  // Set SOP CODE to 64 chips and SOP Correlator Threshold to 0xE
    {CYRF_RX_OVERRIDE, CYRF_FRC_RXDR | CYRF_DIS_RXCRC},                      // Force receive data rate and disable receive CRC checker
    {CYRF_EOP_CTRL, 0x02},                                                   // Only enable EOP symbol count of 2
    {CYRF_TX_OVERRIDE, CYRF_DIS_TXCRC},                                      // Disable transmit CRC generate
};
// overrides applied for normal send/receive; radio_init() starts with this configuration
const AP_Radio_cypress::config AP_Radio_cypress::cyrf_transfer_config[] = {
    {CYRF_TX_CFG, CYRF_DATA_CODE_LENGTH | CYRF_DATA_MODE_8DR | CYRF_PA_4},   // Enable 64 chip codes, 8DR mode and amplifier +4dBm
    {CYRF_FRAMING_CFG, CYRF_SOP_EN | CYRF_SOP_LEN | CYRF_LEN_EN | 0xE},      // Set SOP CODE enable, SOP CODE to 64 chips, Packet length enable, and SOP Correlator Threshold to 0xE
    {CYRF_TX_OVERRIDE, 0x00},                                                // Reset TX overrides
    {CYRF_RX_OVERRIDE, 0x00},                                                // Reset RX overrides
};
/*
read radio status, handling the race condition between completion and error
*/
uint8_t AP_Radio_cypress::read_status_debounced(uint8_t adr)
{
    uint8_t ret;

    // keep chip select asserted across both reads so the second raw read
    // continues the same SPI transaction on the same register
    dev->set_chip_select(true);
    ret = read_register(adr);

    // If COMPLETE and ERROR bits mismatch, then re-read register
    if ((ret & (CYRF_RXC_IRQ | CYRF_RXE_IRQ)) != 0
        && (ret & (CYRF_RXC_IRQ | CYRF_RXE_IRQ)) != (CYRF_RXC_IRQ | CYRF_RXE_IRQ)) {
        uint8_t v2;
        dev->read(&v2, 1);
        ret |= v2;   // re-read and make bits sticky
    }
    dev->set_chip_select(false);

    return ret;
}
/*
force the initial state of the radio
*/
void AP_Radio_cypress::force_initial_state(void)
{
    // drive the radio into a known state by issuing FORCE_END and waiting
    // (up to 5ms per attempt) for the radio to clear the bit
    while (true) {
        write_register(CYRF_XACT_CFG, CYRF_FRC_END);
        uint32_t start_ms = AP_HAL::millis();
        do {
            if ((read_register(CYRF_XACT_CFG) & CYRF_FRC_END) == 0) {
                return;                     // FORCE_END done (osc running)
            }
        } while (AP_HAL::millis() - start_ms < 5);

        // FORCE_END failed to complete, implying going SLEEP to IDLE and
        // oscillator failed to start. Recover by returning to SLEEP and
        // trying to start oscillator again.
        write_register(CYRF_XACT_CFG, CYRF_MODE_SLEEP);
    }
}
/*
  set desired channel
 */
void AP_Radio_cypress::set_channel(uint8_t channel)
{
    // a forced channel, when set (!= -1), overrides the requested one
    const auto forced = dsm.forced_channel;
    write_register(CYRF_CHANNEL, forced != -1 ? uint8_t(forced) : channel);
}
void AP_Radio_cypress::radio_set_config(const struct config *conf, uint8_t size)
{
// setup required radio config
for (uint8_t i=0; i<size; i++) {
write_register(conf[i].reg, conf[i].value);
}
}
/*
initialise the radio
*/
void AP_Radio_cypress::radio_init(void)
{
    Debug(1, "Cypress: radio_init starting\n");

    // wait for radio to settle: write the channel register and wait until
    // the value reads back, giving up after 1000 attempts (10s)
    uint16_t i;
    for (i=0; i<1000; i++) {
        uint8_t chan = read_register(CYRF_CHANNEL);
        if (chan == 1) {
            break;
        }
        write_register(CYRF_CHANNEL, 1);
        hal.scheduler->delay(10);
    }
    if (i == 1000) {
        Debug(1, "Cypress: radio_init failed\n");
        return;
    }

    // base config
    radio_set_config(cyrf_config, ARRAY_SIZE(cyrf_config));

    // start with receive config
    radio_set_config(cyrf_transfer_config, ARRAY_SIZE(cyrf_transfer_config));

    if (get_disable_crc()) {
        write_register(CYRF_RX_OVERRIDE, CYRF_DIS_RXCRC);
    }

    dsm_setup_transfer_dsmx();

    write_register(CYRF_XTAL_CTRL,0x80);  // XOUT=BitSerial
    force_initial_state();
    write_register(CYRF_PWR_CTRL,0x20);   // Disable PMU

    // start in RECV state
    state = STATE_RECV;

    Debug(1, "Cypress: radio_init done\n");

    start_receive();

    // setup handler for rising edge of IRQ pin
    hal.gpio->attach_interrupt(HAL_GPIO_RADIO_IRQ, trigger_irq_radio_event, AP_HAL::GPIO::INTERRUPT_RISING);
}
void AP_Radio_cypress::dump_registers(uint8_t n)
{
    // print the first n radio registers for debugging, 16 per output line
    uint8_t printed = 0;
    while (printed < n) {
        printf("%02x:%02x ", printed, read_register(printed));
        printed++;
        if (printed % 16 == 0) {
            printf("\n");
        }
    }
    if (n % 16 != 0) {
        printf("\n");
    }
}
/*
  read one register value
 */
uint8_t AP_Radio_cypress::read_register(uint8_t reg)
{
    // a failed SPI transfer leaves the default value of 0
    uint8_t value = 0;
    (void)dev->read_registers(reg, &value, 1);
    return value;
}
/*
write multiple bytes
*/
void AP_Radio_cypress::write_multiple(uint8_t reg, uint8_t n, const uint8_t *data)
{
    // NOTE(review): variable-length array is a compiler extension, not standard
    // C++; since n is a uint8_t the worst-case stack usage is 256 bytes
    uint8_t pkt[n+1];
    // first byte is the register address with the write-direction bit set
    pkt[0] = reg | FLAG_WRITE;
    memcpy(&pkt[1], data, n);
    dev->transfer(pkt, n+1, nullptr, 0);
}
/*
write one register value
*/
void AP_Radio_cypress::write_register(uint8_t reg, uint8_t value)
{
    // set the direction bit to make this a register write
    dev->write_register(reg | FLAG_WRITE, value);
}
/*
  support all 4 rc input modes by swapping channels.
 */
void AP_Radio_cypress::map_stick_mode(uint16_t *channels)
{
    const auto mode = get_stick_mode();

    // modes 1 and 3 both swap-and-invert channels 1 and 2
    if (mode == 1 || mode == 3) {
        const uint16_t tmp = channels[1];
        channels[1] = 3000 - channels[2];
        channels[2] = 3000 - tmp;
    }

    // modes 3 and 4 both swap channels 0 and 3
    if (mode == 3 || mode == 4) {
        std::swap(channels[0], channels[3]);
    }

    // mode 2 (and any other value) is the transmitter's native layout:
    // nothing to do
}
/*
check if we are the 2nd RX bound to this TX
*/
void AP_Radio_cypress::check_double_bind(void)
{
    // only meaningful when autobind is enabled and the TX reports receiving
    // more telemetry packets than we have ever sent
    if (dsm.tx_pps <= dsm.telem_send_count ||
        get_autobind_time() == 0) {
        return;
    }
    // the TX has received more telemetry packets in the last second
    // than we have ever sent. There must be another RX sending
    // telemetry packets. We will reset our mfg_id and go back waiting
    // for a new bind packet, hopefully with the right TX
    Debug(1,"Double-bind detected\n");
    memset(dsm.mfg_id, 1, sizeof(dsm.mfg_id));
    dsm.last_recv_us = 0;
    dsm_setup_transfer_dsmx();
}
/*
parse channels from a packet
*/
bool AP_Radio_cypress::parse_dsm_channels(const uint8_t *data)
{
    uint16_t num_values = 0;
    uint16_t pwm_channels[max_channels] {};

    // default value for channels above 4 is previous value
    memcpy(&pwm_channels[4], &dsm.pwm_channels[4], (max_channels-4)*sizeof(uint16_t));

    if (!dsm_decode(AP_HAL::micros64(),
                    data,
                    pwm_channels,
                    &num_values,
                    ARRAY_SIZE(pwm_channels))) {
        // invalid packet
        Debug(2, "DSM: bad decode\n");
        return false;
    }
    if (num_values < 5) {
        // too few channels to be a valid frame for our purposes
        Debug(2, "DSM: num_values=%u\n", num_values);
        return false;
    }

    // cope with mode1/mode2
    map_stick_mode(pwm_channels);

    memcpy(dsm.pwm_channels, pwm_channels, num_values*sizeof(uint16_t));

    dsm.last_parse_us = AP_HAL::micros();

    // suppress channel 8 ack values
    dsm.num_channels = num_values==8?7:num_values;

    if (num_values == 8) {
        // decode telemetry ack value and version; the raw 16-bit word lives
        // at a protocol-dependent offset in the packet
        uint16_t d=0;
        if (is_DSM2()) {
            d = data[14] << 8 | data[15];
        } else {
            // see chan_order[] for DSMX
            d = data[10] << 8 | data[11];
        }
        // extra data is sent on channel 8, with 3 bit key and 8 bit data
        uint8_t chan = d>>11;
        uint8_t key = (d >> 8) & 0x7;
        uint8_t v = d & 0xFF;
        if (chan == 7 && key == 0) {
            // got an ack from key 0: v is the fw-upload sequence number the TX acknowledges
            Debug(4, "ack %u seq=%u acked=%u length=%u len=%u\n",
                  v, fwupload.sequence, unsigned(fwupload.acked), unsigned(fwupload.length), fwupload.len);
            if (fwupload.sequence == v && sem.take_nonblocking()) {
                fwupload.sequence++;
                fwupload.acked += fwupload.len;
                if (fwupload.acked == fwupload.length) {
                    // trigger send of DATA16 ack to client
                    fwupload.need_ack = true;
                }
                sem.give();
            }
        }
        if (chan == 7) {
            // extract telemetry extra data, keyed by the 3-bit key field
            switch (key) {
            case 1:
                dsm.tx_firmware_year = v;
                break;
            case 2:
                dsm.tx_firmware_month = v;
                break;
            case 3:
                dsm.tx_firmware_day = v;
                break;
            case 4:
                dsm.tx_rssi = v;
                break;
            case 5:
                dsm.tx_pps = v;
                dsm.have_tx_pps = true;
                check_double_bind();
                break;
            case 6:
                if (v != dsm.tx_bl_version) {
                    if (v == 2) {
                        // TX with new filter gets a default power of 6
                        set_tx_max_power_default(6);
                    }
                }
                dsm.tx_bl_version = v;
                break;
            }
        }
    }
    return true;
}
/*
  process an incoming bind packet

  A valid 16 byte bind packet carries the (inverted) 4 byte mfg_id
  twice (bytes 0-3 and 4-7), two 16 bit checksums, a channel count and
  a protocol byte. On success we store the binding and switch to
  normal receive.
 */
void AP_Radio_cypress::process_bind(const uint8_t *pkt, uint8_t len)
{
    if (len != 16) {
        return;
    }
    // the mfg_id must appear identically in both halves of the header
    bool ok = (len==16 && pkt[0] == pkt[4] && pkt[1] == pkt[5] && pkt[2] == pkt[6] && pkt[3] == pkt[7]);

    // Calculate the first sum
    uint16_t bind_sum = 384 - 0x10;
    for (uint8_t i = 0; i < 8; i++) {
        bind_sum += pkt[i];
    }

    // Check the first sum
    if (pkt[8] != bind_sum >> 8 || pkt[9] != (bind_sum & 0xFF)) {
        ok = false;
    }

    // Calculate second sum
    for (uint8_t i = 8; i < 14; i++) {
        bind_sum += pkt[i];
    }

    // Check the second sum
    if (pkt[14] != bind_sum >> 8 || pkt[15] != (bind_sum & 0xFF)) {
        ok = false;
    }

    if (state == STATE_AUTOBIND) {
        // for autobind require a minimum signal strength so we don't
        // bind to a distant transmitter
        uint8_t rssi = read_register(CYRF_RSSI) & 0x1F;
        Debug(3,"bind RSSI %u\n", rssi);
        if (rssi < get_autobind_rssi()) {
            ok = false;
        }
    }

    if (ok) {
        // the stored mfg_id is the bitwise inverse of the on-air bytes
        uint8_t mfg_id[4] = {uint8_t(~pkt[0]), uint8_t(~pkt[1]), uint8_t(~pkt[2]), uint8_t(~pkt[3])};
        uint8_t num_chan = pkt[11];
        uint8_t protocol = pkt[12];
        (void)num_chan;
        // change to normal receive
        memcpy(dsm.mfg_id, mfg_id, 4);
        state = STATE_RECV;

        radio_set_config(cyrf_transfer_config, ARRAY_SIZE(cyrf_transfer_config));

        if (get_disable_crc()) {
            write_register(CYRF_RX_OVERRIDE, CYRF_DIS_RXCRC);
        }

        dsm.protocol = (enum dsm_protocol)protocol;
        dsm_setup_transfer_dsmx();

        Debug(1, "BIND OK: mfg_id={0x%02x, 0x%02x, 0x%02x, 0x%02x} N=%u P=0x%02x DSM2=%u\n",
              mfg_id[0], mfg_id[1], mfg_id[2], mfg_id[3],
              num_chan,
              protocol,
              is_DSM2());

        dsm.last_recv_us = AP_HAL::micros();

        if (is_DSM2()) {
            // DSM2 needs to find its two hopping channels first
            dsm2_start_sync();
        }

        // persist the binding (done outside IRQ context later)
        dsm.need_bind_save = true;
    }
}
/*
  start DSM2 sync

  DSM2 hops over just two channels. In factory test mode they are
  fixed deterministically from the test number; otherwise a scan is
  started (DSM2_SYNC_A, then DSM2_SYNC_B) to discover them.
 */
void AP_Radio_cypress::dsm2_start_sync(void)
{
    uint8_t factory_test = get_factory_test();
    if (factory_test != 0) {
        // deterministic channel pair derived from the test number
        dsm.channels[0] = (factory_test*7) % DSM_MAX_CHANNEL;
        dsm.channels[1] = (dsm.channels[0] + 5) % DSM_MAX_CHANNEL;
        dsm.sync = DSM2_OK;
    } else {
        Debug(2, "DSM2 start sync\n");
        dsm.sync = DSM2_SYNC_A;
    }
}
/*
  setup a timeout in timeout_ms milliseconds

  arms the virtual timer; on expiry trigger_timeout_event signals
  EVT_TIMEOUT to the radio event thread
 */
void AP_Radio_cypress::setup_timeout(uint32_t timeout_ms)
{
    chVTSet(&timeout_vt, chTimeMS2I(timeout_ms), trigger_timeout_event, nullptr);
}
/*
  process an incoming 16 byte data packet

  Validates the packet against our bound mfg_id, drives DSM2 channel
  sync when still acquiring, decodes channel data, updates link
  statistics/RSSI and decides whether there is time to send a
  telemetry packet before the next hop.
 */
void AP_Radio_cypress::process_packet(const uint8_t *pkt, uint8_t len)
{
    if (len == 16) {
        bool ok;
        const uint8_t *id = dsm.mfg_id;
        uint32_t now = AP_HAL::micros();

        // the first two bytes carry the mfg_id (inverted for DSM2,
        // plain for DSMX); use them to reject foreign packets
        if (is_DSM2()) {
            ok = (pkt[0] == ((~id[2])&0xFF) && pkt[1] == (~id[3]&0xFF));
        } else {
            ok = (pkt[0] == id[2] && pkt[1] == id[3]);
        }
        if (ok && is_DSM2() && dsm.sync < DSM2_OK) {
            // still acquiring the two DSM2 hopping channels
            if (dsm.sync == DSM2_SYNC_A) {
                dsm.channels[0] = dsm.current_rf_channel;
                dsm.sync = DSM2_SYNC_B;
                Debug(2, "DSM2 SYNCA chan=%u\n", dsm.channels[0]);
                dsm.last_recv_us = now;
            } else {
                // second channel must differ from the first
                if (dsm.current_rf_channel != dsm.channels[0]) {
                    dsm.channels[1] = dsm.current_rf_channel;
                    dsm.sync = DSM2_OK;
                    Debug(2, "DSM2 SYNCB chan=%u\n", dsm.channels[1]);
                    dsm.last_recv_us = now;
                }
            }
            return;
        }
        if (ok && (!is_DSM2() || dsm.sync >= DSM2_SYNC_B)) {
            ok = parse_dsm_channels(pkt);
        }
        if (ok) {
            uint32_t packet_dt_us = now - dsm.last_recv_us;

            dsm.last_recv_chan = dsm.current_channel;
            dsm.last_recv_us = now;
            // decay the CRC error count on good packets
            if (dsm.crc_errors > 2) {
                dsm.crc_errors -= 2;
            }

            stats.recv_packets++;

            // sample the RSSI, low-pass filtered
            uint8_t rssi = read_register(CYRF_RSSI) & 0x1F;
            dsm.rssi = 0.95 * dsm.rssi + 0.05 * rssi;

            // record the short (<5ms) and long (5-8ms) packet
            // intervals of the hop cycle for channel prediction
            if (packet_dt_us < 5000) {
                dsm.pkt_time1 = packet_dt_us;
            } else if (packet_dt_us < 8000) {
                dsm.pkt_time2 = packet_dt_us;
            }

            if (get_telem_enable()) {
                if (packet_dt_us < 5000 &&
                    (get_autobind_time() == 0 || dsm.have_tx_pps)) {
                    /*
                      we have just received two packets rapidly, which
                      means we have about 7ms before the next
                      one. That gives time for a telemetry packet. We
                      send it 1ms after we receive the incoming packet

                      If auto-bind is enabled we don't send telemetry
                      till we've received a tx_pps value from the
                      TX. This allows us to detect double binding (two
                      RX bound to the same TX)
                     */
                    state = STATE_SEND_TELEM;
                    setup_timeout(1);
                }
            }
        } else {
            stats.bad_packets++;
        }
    } else {
        stats.bad_packets++;
    }
}
/*
  start packet receive

  picks the next hopping channel, arms the receiver with completion
  and error IRQs enabled, and schedules a state-dependent receive
  timeout
 */
void AP_Radio_cypress::start_receive(void)
{
    dsm_choose_channel();

    // clear the RX-overwrite IRQ and start reception
    write_register(CYRF_RX_IRQ_STATUS, CYRF_RXOW_IRQ);
    write_register(CYRF_RX_CTRL, CYRF_RX_GO | CYRF_RXC_IRQEN | CYRF_RXE_IRQEN);

    dsm.receive_start_us = AP_HAL::micros();
    // bind and autobind listen windows are longer than a normal hop
    if (state == STATE_AUTOBIND) {
        dsm.receive_timeout_msec = 90;
    } else if (state == STATE_BIND) {
        dsm.receive_timeout_msec = 15;
    } else {
        dsm.receive_timeout_msec = 12;
    }
    setup_timeout(dsm.receive_timeout_msec);
}
/*
  handle a receive IRQ

  rx_status: value read from CYRF_RX_IRQ_STATUS. Reads any pending
  packet bytes, dispatches completed packets to the data/bind parsers,
  handles receive errors (including CRC-seed resync) and restarts
  reception unless a telemetry send is pending.
 */
void AP_Radio_cypress::irq_handler_recv(uint8_t rx_status)
{
    if ((rx_status & (CYRF_RXC_IRQ | CYRF_RXE_IRQ)) == 0) {
        // nothing interesting yet
        return;
    }

    uint8_t pkt[16];
    uint8_t rlen = read_register(CYRF_RX_COUNT);
    if (rlen > 16) {
        // clamp to our buffer size
        rlen = 16;
    }
    if (rlen > 0) {
        dev->read_registers(CYRF_RX_BUFFER, pkt, rlen);
    }

    if (rx_status & CYRF_RXE_IRQ) {
        // receive error
        uint8_t reason = read_register(CYRF_RX_STATUS);
        if (reason & CYRF_BAD_CRC) {
            dsm.crc_errors++;
            if (dsm.crc_errors > 20) {
                Debug(2, "Flip CRC\n");
                // flip crc seed, this allows us to resync with transmitter
                dsm.crc_seed = ~dsm.crc_seed;
                dsm.crc_errors = 0;
            }
        }
        // abort the receive and put the synthesizer back in RX mode
        write_register(CYRF_XACT_CFG, CYRF_MODE_SYNTH_RX | CYRF_FRC_END);
        write_register(CYRF_RX_ABORT, 0);
        stats.recv_errors++;
    } else if (rx_status & CYRF_RXC_IRQ) {
        // receive complete: route by current state
        if (state == STATE_RECV) {
            process_packet(pkt, rlen);
        } else {
            process_bind(pkt, rlen);
        }
    }

    if (state == STATE_AUTOBIND) {
        // the autobind window lasts for a single receive attempt
        state = STATE_RECV;
    }

    if (state != STATE_SEND_TELEM) {
        start_receive();
    }
}
/*
  handle a send IRQ

  tx_status: value read from CYRF_TX_IRQ_STATUS. Once the transmit
  completes (or errors) we return to receiving.
 */
void AP_Radio_cypress::irq_handler_send(uint8_t tx_status)
{
    if ((tx_status & (CYRF_TXC_IRQ | CYRF_TXE_IRQ)) == 0) {
        // nothing interesting yet
        return;
    }
    state = STATE_RECV;
    start_receive();
}
/*
  IRQ handler, run in the event thread (not ISR context)

  Reads both RX and TX IRQ status (which also clears the IRQ line) and
  dispatches to the receive or send handler depending on state.
 */
void AP_Radio_cypress::irq_handler(void)
{
    //hal.console->printf("IRQ\n");
    if (!dev->get_semaphore()->take_nonblocking()) {
        // bus busy; we have to wait for timeout instead
        return;
    }
    // always read both rx and tx status. This ensure IRQ is cleared
    uint8_t rx_status = read_status_debounced(CYRF_RX_IRQ_STATUS);
    uint8_t tx_status = read_status_debounced(CYRF_TX_IRQ_STATUS);

    switch (state) {
    case STATE_AUTOBIND:
    // fallthrough
    case STATE_RECV:
    case STATE_BIND:
        irq_handler_recv(rx_status);
        break;

    case STATE_SEND_TELEM:
    case STATE_SEND_TELEM_WAIT:
        irq_handler_send(tx_status);
        break;

    case STATE_SEND_FCC:
        // stop transmit oscillator
        write_register(CYRF_RX_IRQ_STATUS, CYRF_RXOW_IRQ);
        write_register(CYRF_RX_CTRL, CYRF_RX_GO | CYRF_RXC_IRQEN | CYRF_RXE_IRQEN);
        break;

    default:
        break;
    }
    dev->get_semaphore()->give();
}
/*
  called on radio timeout (EVT_TIMEOUT in the event thread)

  Handles entering/leaving FCC test mode, fires pending telemetry or
  FCC packets, and otherwise aborts any stuck receive and restarts
  reception.
 */
void AP_Radio_cypress::irq_timeout(void)
{
    stats.timeouts++;
    if (!dev->get_semaphore()->take_nonblocking()) {
        // bus busy: schedule a new timeout and retry later
        setup_timeout(dsm.receive_timeout_msec);
        return;
    }

    // enter or leave FCC test mode when the parameter changes
    if (get_fcc_test() != 0 && state != STATE_SEND_FCC) {
        Debug(3,"Starting FCC test\n");
        state = STATE_SEND_FCC;
    } else if (get_fcc_test() == 0 && state == STATE_SEND_FCC) {
        Debug(3,"Ending FCC test\n");
        state = STATE_RECV;
    }

    switch (state) {
    case STATE_SEND_TELEM:
        send_telem_packet();
        break;
    case STATE_SEND_FCC:
        send_FCC_test_packet();
        break;
    case STATE_AUTOBIND:
    case STATE_SEND_TELEM_WAIT:
        state = STATE_RECV;
    // fall through
    default:
        // abort any receive in progress and restart reception
        write_register(CYRF_XACT_CFG, CYRF_MODE_SYNTH_RX | CYRF_FRC_END);
        write_register(CYRF_RX_ABORT, 0);
        start_receive();
        break;
    }
    dev->get_semaphore()->give();
}
/*
  radio event handling thread: blocks waiting for EVT_IRQ/EVT_TIMEOUT
  events signalled from ISR context and dispatches them to the
  singleton's handlers. Never returns.
 */
void AP_Radio_cypress::irq_handler_thd(void *arg)
{
    // publish our thread context so the ISR trampolines can signal us
    _irq_handler_ctx = chThdGetSelfX();
    (void)arg;
    while (true) {
        eventmask_t evt = chEvtWaitAny(ALL_EVENTS);
        if (evt & EVT_IRQ) {
            // radio IRQ line fired
            radio_singleton->irq_handler();
        }
        if (evt & EVT_TIMEOUT) {
            // virtual timer expired
            radio_singleton->irq_timeout();
        }
    }
}
/*
  virtual timer callback: signal the event thread that the radio
  timeout expired
 */
void AP_Radio_cypress::trigger_timeout_event(void *arg)
{
    (void)arg;
    //we are called from ISR context
    chSysLockFromISR();
    // the event thread may not have started yet
    if (_irq_handler_ctx) {
        chEvtSignalI(_irq_handler_ctx, EVT_TIMEOUT);
    }
    chSysUnlockFromISR();
}
/*
  radio interrupt trampoline: signal the event thread that the radio
  raised an IRQ
 */
void AP_Radio_cypress::trigger_irq_radio_event()
{
    //we are called from ISR context
    chSysLockFromISR();
    // the event thread may not have started yet
    if (_irq_handler_ctx) {
        chEvtSignalI(_irq_handler_ctx, EVT_IRQ);
    }
    chSysUnlockFromISR();
}
/*
  Set the current DSM channel with SOP, CRC and data code

  channel:  RF channel number
  is_dsm2:  selects the DSM2 vs DSMX PN row mapping
  sop_col:  column of the start-of-packet PN code
  data_col: column of the data PN code
  crc_seed: CRC seed for this hop
 */
void AP_Radio_cypress::dsm_set_channel(uint8_t channel, bool is_dsm2, uint8_t sop_col, uint8_t data_col, uint16_t crc_seed)
{
    //printf("dsm_set_channel: %u\n", channel);
    uint8_t pn_row;
    pn_row = is_dsm2? channel % 5 : (channel-2) % 5;

    // set CRC seed
    write_register(CYRF_CRC_SEED_LSB, crc_seed & 0xff);
    write_register(CYRF_CRC_SEED_MSB, crc_seed >> 8);

    // set start of packet code, skipping the SPI write if unchanged
    if (memcmp(dsm.last_sop_code, pn_codes[pn_row][sop_col], 8) != 0) {
        write_multiple(CYRF_SOP_CODE, 8, pn_codes[pn_row][sop_col]);
        memcpy(dsm.last_sop_code, pn_codes[pn_row][sop_col], 8);
    }

    // set data code, also cached to avoid redundant writes
    if (memcmp(dsm.last_data_code, pn_codes[pn_row][data_col], 16) != 0) {
        write_multiple(CYRF_DATA_CODE, 16, pn_codes[pn_row][data_col]);
        memcpy(dsm.last_data_code, pn_codes[pn_row][data_col], 16);
    }

    // apply CRC-disable and transmit power parameters when they change
    if (get_disable_crc() != dsm.last_discrc) {
        dsm.last_discrc = get_disable_crc();
        Debug(3,"Cypress: DISCRC=%u\n", dsm.last_discrc);
        write_register(CYRF_RX_OVERRIDE, dsm.last_discrc?CYRF_DIS_RXCRC:0);
    }

    if (get_transmit_power() != dsm.last_transmit_power+1) {
        dsm.last_transmit_power = get_transmit_power()-1;
        Debug(3,"Cypress: TXPOWER=%u\n", dsm.last_transmit_power);
        write_register(CYRF_TX_CFG, CYRF_DATA_CODE_LENGTH | CYRF_DATA_MODE_8DR | dsm.last_transmit_power);
    }

    // Change channel
    set_channel(channel);
}
/*
  Generate the DSMX channels from the manufacturer ID

  mfg_id:   4 byte manufacturer ID from the bind packet
  channels: output array of 23 hopping channels

  Uses a linear-congruential generator seeded from the inverted mfg_id
  so that both TX and RX derive the same hopping sequence.
 */
void AP_Radio_cypress::dsm_generate_channels_dsmx(uint8_t mfg_id[4], uint8_t channels[23])
{
    // Calculate the DSMX channels
    int idx = 0;
    uint32_t id = ~((mfg_id[0] << 24) | (mfg_id[1] << 16) |
                    (mfg_id[2] << 8) | (mfg_id[3] << 0));
    uint32_t id_tmp = id;

    // While not all channels are set
    while (idx < 23) {
        int i;
        int count_3_27 = 0, count_28_51 = 0, count_52_76 = 0;

        id_tmp = id_tmp * 0x0019660D + 0x3C6EF35F; // Randomization (LCG step)

        // candidate from byte 1 of the LCG state, in range 3..75
        uint8_t next_ch = ((id_tmp >> 8) % 0x49) + 3;
        // channel parity must differ per bit 0 of the id
        if (((next_ch ^ id) & 0x01 ) == 0) {
            continue;
        }

        // Go through all already set channels
        for (i = 0; i < idx; i++) {
            // Channel is already used
            if (channels[i] == next_ch) {
                break;
            }

            // Count the channel groups
            if (channels[i] <= 27) {
                count_3_27++;
            } else if (channels[i] <= 51) {
                count_28_51++;
            } else {
                count_52_76++;
            }
        }

        // When channel is already used continue
        if (i != idx) {
            continue;
        }

        // Set the channel when channel groups aren't full
        if ((next_ch < 28 && count_3_27 < 8)                      // Channels 3-27: max 8
            || (next_ch >= 28 && next_ch < 52 && count_28_51 < 7) // Channels 28-52: max 7
            || (next_ch >= 52 && count_52_76 < 8)) {              // Channels 52-76: max 8
            channels[idx++] = next_ch;
        }
    }

    Debug(2, "Generated DSMX channels\n");
}
/*
  setup for DSMX transfers: derive the CRC seed, the SOP/data PN code
  columns and the 23 channel hopping sequence from the manufacturer ID
 */
void AP_Radio_cypress::dsm_setup_transfer_dsmx(void)
{
    dsm.current_channel = 0;

    // seed and code columns are simple functions of the mfg_id bytes
    dsm.crc_seed = ~((dsm.mfg_id[0] << 8) + dsm.mfg_id[1]);
    dsm.sop_col = (dsm.mfg_id[0] + dsm.mfg_id[1] + dsm.mfg_id[2] + 2) & 0x07;
    dsm.data_col = 7 - dsm.sop_col;

    dsm_generate_channels_dsmx(dsm.mfg_id, dsm.channels);
}
/*
  choose channel to receive on

  Handles four cases: bind scan, autobind listen, DSM2 channel-pair
  search, and normal hopping where the next channel index is predicted
  from the time since the last good packet.
 */
void AP_Radio_cypress::dsm_choose_channel(void)
{
    uint32_t now = AP_HAL::micros();
    uint32_t dt = now - dsm.last_recv_us;
    // one full hop cycle = short interval + long interval
    const uint32_t cycle_time = dsm.pkt_time1 + dsm.pkt_time2;
    uint8_t next_channel;

    if (state == STATE_BIND) {
        // slow scan while waiting for a bind packet
        if (now - dsm.last_chan_change_us > 15000) {
            // always use odd channel numbers for bind
            dsm.current_rf_channel |= 1;
            dsm.current_rf_channel = (dsm.current_rf_channel+2) % DSM_MAX_CHANNEL;
            dsm.last_chan_change_us = now;
        }
        set_channel(dsm.current_rf_channel);
        return;
    }

    // periodically listen for autobind packets until first contact
    if (get_autobind_time() != 0 &&
        dsm.last_recv_us == 0 &&
        now - dsm.last_autobind_send > 300*1000UL &&
        now > get_autobind_time() * 1000*1000UL &&
        get_factory_test() == 0 &&
        state == STATE_RECV) {
        // try to receive an auto-bind packet
        dsm_set_channel(AUTOBIND_CHANNEL, true, 0, 0, 0);

        state = STATE_AUTOBIND;

        Debug(3,"recv autobind %u\n", unsigned(now - dsm.last_autobind_send));
        dsm.last_autobind_send = now;
        return;
    }

    if (is_DSM2() && dsm.sync == DSM2_SYNC_A) {
        // DSM2 search for the first channel of the pair
        if (now - dsm.last_chan_change_us > 15000) {
            // only even channels for DSM2 scan
            dsm.current_rf_channel &= ~1;
            dsm.current_rf_channel = (dsm.current_rf_channel+2) % DSM_MAX_CHANNEL;
            dsm.last_chan_change_us = now;
        }
        //hal.console->printf("%u chan=%u\n", AP_HAL::micros(), dsm.current_rf_channel);
        dsm_set_channel(dsm.current_rf_channel, is_DSM2(),
                        dsm.sop_col, dsm.data_col,
                        dsm.sync==DSM2_SYNC_B?~dsm.crc_seed:dsm.crc_seed);
        return;
    }

    if (dt < 1000) {
        // normal channel advance
        next_channel = dsm.last_recv_chan + 1;
    } else if (dt > 20*cycle_time) {
        // change channel slowly
        next_channel = dsm.last_recv_chan + (dt / (cycle_time*2));
    } else {
        // predict next channel
        next_channel = dsm.last_recv_chan + 1;
        next_channel += (dt / cycle_time) * 2;
        if (dt % cycle_time > (unsigned)(dsm.pkt_time1 + 1000U)) {
            next_channel++;
        }
    }

    uint8_t chan_count = is_DSM2()?2:23;
    dsm.current_channel = next_channel;
    if (dsm.current_channel >= chan_count) {
        dsm.current_channel %= chan_count;
        if (!is_DSM2()) {
            // each pass over the DSMX channel list flips the CRC seed
            dsm.crc_seed = ~dsm.crc_seed;
        }
    }

    if (is_DSM2() && dsm.sync == DSM2_SYNC_B && dsm.current_channel == 1) {
        // scan to the next even candidate for the second channel,
        // never equal to the first channel
        do {
            dsm.channels[1] &= ~1;
            dsm.channels[1] = (dsm.channels[1]+2) % DSM_MAX_CHANNEL;
        } while (dsm.channels[1] == dsm.channels[0]);
    }

    dsm.current_rf_channel = dsm.channels[dsm.current_channel];

    // odd channel indexes use the inverted CRC seed
    uint16_t seed = dsm.crc_seed;
    if (dsm.current_channel & 1) {
        seed = ~seed;
    }

    if (is_DSM2()) {
        // lost the TX for 5 seconds: restart the channel search
        if (now - dsm.last_recv_us > 5000000) {
            dsm2_start_sync();
        }
    }

    dsm_set_channel(dsm.current_rf_channel, is_DSM2(),
                    dsm.sop_col, dsm.data_col, seed);
}
/*
  setup radio for bind: listen for bind packets using the fixed bind
  configuration, zero CRC seed and the dedicated bind PN codes
 */
void AP_Radio_cypress::start_recv_bind(void)
{
    dev->get_semaphore()->take_blocking();

    Debug(1, "Cypress: start_recv_bind\n");

    // abort any receive in progress
    write_register(CYRF_XACT_CFG, CYRF_MODE_SYNTH_RX | CYRF_FRC_END);
    write_register(CYRF_RX_ABORT, 0);

    state = STATE_BIND;

    radio_set_config(cyrf_bind_config, ARRAY_SIZE(cyrf_bind_config));

    // bind always uses a zero CRC seed
    write_register(CYRF_CRC_SEED_LSB, 0);
    write_register(CYRF_CRC_SEED_MSB, 0);

    write_multiple(CYRF_SOP_CODE, 8, pn_codes[0][0]);

    // bind data code = fixed PN code followed by the bind code
    uint8_t data_code[16];
    memcpy(data_code, pn_codes[0][8], 8);
    memcpy(&data_code[8], pn_bind, 8);
    write_multiple(CYRF_DATA_CODE, 16, data_code);

    dsm.current_rf_channel = 1;

    start_receive();

    dev->get_semaphore()->give();
}
/*
  save bind info to persistent storage so the binding survives a
  reboot
 */
void AP_Radio_cypress::save_bind_info(void)
{
    // access to storage for bind information
    StorageAccess bind_storage(StorageManager::StorageBindInfo);
    struct bind_info info;

    // magic marks the record as valid for load_bind_info
    info.magic = bind_magic;
    memcpy(info.mfg_id, dsm.mfg_id, sizeof(info.mfg_id));
    info.protocol = dsm.protocol;
    // only clear the pending-save flag once the write succeeds
    if (bind_storage.write_block(0, &info, sizeof(info))) {
        dsm.need_bind_save = false;
    }
}
/*
  load bind info from persistent storage, or synthesize one in factory
  test mode
 */
void AP_Radio_cypress::load_bind_info(void)
{
    // access to storage for bind information
    StorageAccess bind_storage(StorageManager::StorageBindInfo);
    struct bind_info info;
    uint8_t factory_test = get_factory_test();

    if (factory_test != 0) {
        // factory test mode: synthetic mfg_id based on the test
        // number, forced DSM2 protocol
        Debug(1, "In factory test %u\n", factory_test);
        memset(dsm.mfg_id, 0, sizeof(dsm.mfg_id));
        dsm.mfg_id[0] = factory_test;
        dsm.protocol = DSM_DSM2_2;
        dsm2_start_sync();
    } else if (bind_storage.read_block(&info, 0, sizeof(info)) && info.magic == bind_magic) {
        // magic value confirms the stored record is valid
        Debug(1, "Loaded mfg_id %02x:%02x:%02x:%02x\n",
              info.mfg_id[0], info.mfg_id[1], info.mfg_id[2], info.mfg_id[3]);
        memcpy(dsm.mfg_id, info.mfg_id, sizeof(info.mfg_id));
        dsm.protocol = info.protocol;
    }
}
/*
  true when operating as DSM2 rather than DSMX. An explicit protocol
  parameter overrides; otherwise the bound protocol decides.
 */
bool AP_Radio_cypress::is_DSM2(void)
{
    const auto proto = get_protocol();
    if (proto == AP_Radio::PROTOCOL_AUTO) {
        // auto mode: decide from the protocol stored at bind time
        return dsm.protocol == DSM_DSM2_1 || dsm.protocol == DSM_DSM2_2;
    }
    return proto == AP_Radio::PROTOCOL_DSM2;
}
/*
  transmit a 16 byte packet
  this is a blind send, not waiting for ack or completion
 */
void AP_Radio_cypress::transmit16(const uint8_t data[16])
{
    write_register(CYRF_TX_LENGTH, 16);
    // clear the transmit buffer before loading the new payload
    write_register(CYRF_TX_CTRL, CYRF_TX_CLR);

    write_multiple(CYRF_TX_BUFFER, 16, data);
    // start transmission with the completion IRQ enabled
    write_register(CYRF_TX_CTRL, CYRF_TX_GO | CYRF_TXC_IRQEN);
    dsm.send_count++;
}
/*
  send a telemetry structure packet

  Normally sends a TELEM_STATUS packet carrying flight status flags.
  When a firmware/tune upload is pending, 7 out of every 8 packets
  carry a chunk of the upload instead.
 */
void AP_Radio_cypress::send_telem_packet(void)
{
    struct telem_packet_cypress pkt;

    // gather current status flags from the notify subsystem
    t_status.flags = 0;
    t_status.flags |= AP_Notify::flags.gps_status >= 3?TELEM_FLAG_GPS_OK:0;
    t_status.flags |= AP_Notify::flags.pre_arm_check?TELEM_FLAG_ARM_OK:0;
    t_status.flags |= AP_Notify::flags.failsafe_battery?0:TELEM_FLAG_BATT_OK;
    t_status.flags |= hal.util->get_soft_armed()?TELEM_FLAG_ARMED:0;
    t_status.flags |= AP_Notify::flags.have_pos_abs?TELEM_FLAG_POS_OK:0;
    t_status.flags |= AP_Notify::flags.video_recording?TELEM_FLAG_VIDEO:0;
    t_status.flight_mode = AP_Notify::flags.flight_mode;
    t_status.tx_max = get_tx_max_power();
    t_status.note_adjust = get_tx_buzzer_adjust();

    // send fw update packet for 7/8 of packets if any data pending
    if (fwupload.length != 0 &&
        fwupload.length > fwupload.acked &&
        ((fwupload.counter++ & 0x07) != 0) &&
        sem.take_nonblocking()) {
        pkt.type = fwupload.fw_type;
        pkt.payload.fw.seq = fwupload.sequence;
        // at most 8 upload bytes fit in one telemetry packet
        uint32_t len = fwupload.length>fwupload.acked?fwupload.length - fwupload.acked:0;
        pkt.payload.fw.len = len<=8?len:8;
        pkt.payload.fw.offset = fwupload.offset+fwupload.acked;
        memcpy(&pkt.payload.fw.data[0], &fwupload.pending_data[fwupload.acked], pkt.payload.fw.len);
        fwupload.len = pkt.payload.fw.len;
        Debug(4, "sent fw seq=%u offset=%u len=%u type=%u\n",
              pkt.payload.fw.seq,
              pkt.payload.fw.offset,
              pkt.payload.fw.len,
              pkt.type);
        sem.give();
        pkt.crc = crc_crc8((const uint8_t *)&pkt.type, 15);
    } else {
        pkt.type = TELEM_STATUS;
        pkt.payload.status = t_status;
        pkt.crc = crc_crc8((const uint8_t *)&pkt.type, 15);
        dsm.telem_send_count++;
    }

    // switch the synthesizer to transmit, then send and wait for
    // completion via irq_handler_send (2ms timeout as backstop)
    write_register(CYRF_XACT_CFG, CYRF_MODE_SYNTH_TX | CYRF_FRC_END);
    write_register(CYRF_RX_ABORT, 0);
    transmit16((uint8_t*)&pkt);

    state = STATE_SEND_TELEM_WAIT;
    setup_timeout(2);
}
/*
  send a FCC test packet

  Test modes 1-3 transmit normal packets on the min/mid/max scan
  channels; modes 4-6 force continuous preamble transmission as a CW
  approximation (when FCC_SUPPORT_CW_MODE is enabled). Mode 0 returns
  to normal operation.
 */
void AP_Radio_cypress::send_FCC_test_packet(void)
{
    uint8_t pkt[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };

    state = STATE_SEND_FCC;

    uint8_t channel=0;

    switch (get_fcc_test()) {
    case 0:
        // switch back to normal operation
        dsm.forced_channel = -1;
        send_telem_packet();
        return;
    case 1:
    case 4:
        channel = DSM_SCAN_MIN_CH;
        break;
    case 2:
    case 5:
        channel = DSM_SCAN_MID_CH;
        break;
    case 3:
    case 6:
    default:
        channel = DSM_SCAN_MAX_CH;
        break;
    }

    Debug(5,"FCC send %u\n", channel);

    if (channel != dsm.forced_channel) {
        // reconfigure the radio when the forced channel changes
        Debug(1,"FCC channel %u\n", channel);
        dsm.forced_channel = channel;

        radio_set_config(cyrf_config, ARRAY_SIZE(cyrf_config));
        radio_set_config(cyrf_transfer_config, ARRAY_SIZE(cyrf_transfer_config));

        set_channel(channel);
    }

#if FCC_SUPPORT_CW_MODE
    if (get_fcc_test() > 3) {
        // continuous preamble transmit is closest approximation to CW
        // that is possible with this chip
        write_register(CYRF_PREAMBLE,0x01);
        write_register(CYRF_PREAMBLE,0x00);
        write_register(CYRF_PREAMBLE,0x00);

        write_register(CYRF_TX_OVERRIDE, CYRF_FRC_PRE);
        write_register(CYRF_TX_CTRL, CYRF_TX_GO);

        setup_timeout(500);
    } else {
        write_register(CYRF_XACT_CFG, CYRF_MODE_SYNTH_TX | CYRF_FRC_END);
        write_register(CYRF_RX_ABORT, 0);

        transmit16(pkt);
        setup_timeout(10);
    }
#else
    write_register(CYRF_XACT_CFG, CYRF_MODE_SYNTH_TX | CYRF_FRC_END);
    write_register(CYRF_RX_ABORT, 0);

    transmit16(pkt);
    setup_timeout(10);
#endif
}
// handle a data96 mavlink packet for fw upload
//
// chan: mavlink channel the message arrived on
// m:    DATA96 message; for firmware chunks the first 4 payload bytes
//       are the little-endian upload offset, the rest is data.
//       m.type == 43 means a tune to play instead of firmware.
void AP_Radio_cypress::handle_data_packet(mavlink_channel_t chan, const mavlink_data96_t &m)
{
    uint32_t ofs=0;
    memcpy(&ofs, &m.data[0], 4);
    Debug(4, "got data96 of len %u from chan %u at offset %u\n", m.len, chan, unsigned(ofs));
    // drop the packet if the upload state is busy
    if (sem.take_nonblocking()) {
        fwupload.chan = chan;
        fwupload.need_ack = false;
        fwupload.offset = ofs;
        fwupload.length = MIN(m.len-4, 92);
        fwupload.acked = 0;
        fwupload.sequence++;
        if (m.type == 43) {
            // sending a tune to play - for development testing
            fwupload.fw_type = TELEM_PLAY;
            fwupload.length = MIN(m.len, 90);
            fwupload.offset = 0;
            memcpy(&fwupload.pending_data[0], &m.data[0], fwupload.length);
        } else {
            // sending a chunk of firmware OTA upload
            fwupload.fw_type = TELEM_FW;
            memcpy(&fwupload.pending_data[0], &m.data[4], fwupload.length);
        }
        sem.give();
    }
}
#endif // HAL_RCINPUT_WITH_AP_RADIO
|
{
"pile_set_name": "Github"
}
|
//
// BAKChannelsStore.m
// BackchannelSDK
//
// Created by Soroush Khanlou on 8/13/15.
// Copyright (c) 2015 Backchannel. All rights reserved.
//
#import "BAKChannelsStore.h"
#import <UIKit/UIKit.h>
#import "BAKCache.h"
#import "BAKSendableRequest.h"
#import "BAKChannelsRequest.h"
// Posted (with the store as the notification object) whenever the channel
// list changes.
NSString *BAKChannelsStoreUpdatedNotification = @"BAKChannelsStoreUpdatedNotification";

@interface BAKChannelsStore ()

// Disk-backed cache used to persist the channel list between launches.
@property (nonatomic) BAKCache *cache;

@end

@implementation BAKChannelsStore

// Designated initializer. Seeds the channel list from the on-disk cache
// (may be nil on first launch) so consumers have data before the first
// network refresh completes.
- (instancetype)initWithConfiguration:(BAKRemoteConfiguration *)configuration {
    self = [super init];
    if (!self) return nil;

    _configuration = configuration;

    // Namespace the cache by host-app bundle id so apps embedding the SDK
    // don't share channel lists.
    NSString *bundleIdentifier = [[NSBundle mainBundle] bundleIdentifier];
    NSString *cacheName = [NSString stringWithFormat:@"io.backchannel.channelsForPicker.%@", bundleIdentifier];
    _cache = [[BAKCache alloc] initWithName:cacheName];

    _channels = (NSArray *)[self.cache fetchObject];

    return self;
}

// Persists the new list and broadcasts the updated notification.
- (void)setChannels:(NSArray *)channels {
    _channels = channels;
    [self.cache saveObject:self.channels];
    [[NSNotificationCenter defaultCenter] postNotificationName:BAKChannelsStoreUpdatedNotification object:self];
}

// Fetches the channel list from the API; on success stores it via the
// setter above (which persists and notifies). Failures are ignored.
// NOTE(review): the success block captures self strongly, keeping the
// store alive until the request completes — confirm this is intended.
- (void)updateFromAPI {
    BAKChannelsRequest *channelsRequest = [[BAKChannelsRequest alloc] initWithConfiguration:self.configuration];
    BAKSendableRequest *sendableRequest = [[BAKSendableRequest alloc] initWithRequestTemplate:channelsRequest];
    [sendableRequest sendRequestWithSuccessBlock:^(id result) {
        self.channels = result;
    } failureBlock:nil];
}

@end
|
{
"pile_set_name": "Github"
}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin
"""Relay core operators."""
# operator defs
from .op import (
get,
register_compute,
register_gradient,
register_pattern,
register_alter_op_layout,
register_legalize,
OpPattern,
OpStrategy,
debug,
register_external_compiler,
)
from . import strategy
# Operators
from .reduce import *
from .tensor import *
from .transform import *
from .algorithm import *
from . import vm
from . import nn
from . import annotation
from . import memory
from . import image
from . import vision
from . import op_attrs
# operator registry
from . import _tensor
from . import _tensor_grad
from . import _transform
from . import _reduce
from . import _algorithm
def _register_op_make():
    """Attach the generated operator constructors (``_make``) to ``relay.expr``.

    The imports are done inside the function body to avoid a circular
    import between ``relay.op`` and ``relay.expr`` at module load time.
    """
    # pylint: disable=import-outside-toplevel
    from . import _make
    from .. import expr

    expr._op_make = _make


# Run the registration as a module-import side effect.
_register_op_make()
|
{
"pile_set_name": "Github"
}
|
import os
from conans import ConanFile, CMake, tools
class TestPackageConan(ConanFile):
    """Conan test package that builds and runs minimal C and C++ consumers."""

    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake", "cmake_find_package"

    def build(self):
        """Configure and build the test executables with CMake."""
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        """Run both test binaries to verify the package links and loads."""
        # target binaries cannot execute on the build host when cross-building
        if not tools.cross_building(self.settings):
            bin_path_c = os.path.join("bin", "test_package_c")
            self.run(bin_path_c, run_environment=True)
            bin_path_cpp = os.path.join("bin", "test_package_cpp")
            self.run(bin_path_cpp, run_environment=True)
|
{
"pile_set_name": "Github"
}
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H
#define EIGEN_KRONECKER_PRODUCT_MODULE_H
#include "../../Eigen/Core"
#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "../../Eigen/src/SparseCore/SparseUtil.h"
namespace Eigen {
/**
* \defgroup KroneckerProduct_Module KroneckerProduct module
*
* This module contains an experimental Kronecker product implementation.
*
* \code
* #include <Eigen/KroneckerProduct>
* \endcode
*/
} // namespace Eigen
#include "src/KroneckerProduct/KroneckerTensorProduct.h"
#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_KRONECKER_PRODUCT_MODULE_H
|
{
"pile_set_name": "Github"
}
|
package packer
// MockArtifact is an implementation of Artifact that can be used for tests.
type MockArtifact struct {
	BuilderIdValue string                 // BuilderId() result; "" means the default "bid"
	FilesValue     []string               // Files() result; nil means the default {"a", "b"}
	IdValue        string                 // Id() result; "" means the default "id"
	StateValues    map[string]interface{} // backing data for State() lookups

	DestroyCalled bool // set to true once Destroy has been invoked
}
// BuilderId returns the configured builder id, defaulting to "bid".
func (a *MockArtifact) BuilderId() string {
	if a.BuilderIdValue != "" {
		return a.BuilderIdValue
	}
	return "bid"
}
// Files returns the configured file list, defaulting to {"a", "b"}.
func (a *MockArtifact) Files() []string {
	if a.FilesValue != nil {
		return a.FilesValue
	}
	return []string{"a", "b"}
}
// Id returns the configured artifact id, defaulting to "id".
func (a *MockArtifact) Id() string {
	if a.IdValue == "" {
		return "id"
	}
	return a.IdValue
}
// String returns a fixed human-readable description of the mock.
func (*MockArtifact) String() string {
	return "string"
}
// State returns the stored state value for name, or nil when the key is
// absent (including when StateValues is nil).
func (a *MockArtifact) State(name string) interface{} {
	// Indexing a (possibly nil) map yields the zero value for missing
	// keys, so no explicit ok-check is needed.
	return a.StateValues[name]
}
// Destroy records that it was invoked and always succeeds.
func (a *MockArtifact) Destroy() error {
	a.DestroyCalled = true
	return nil
}
|
{
"pile_set_name": "Github"
}
|
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package moppydesk;
import java.io.Serializable;
/**
 * Serializable configuration for a single MIDI-channel output: whether it
 * is enabled, how its events are routed, and the device/port it targets.
 *
 * @author Sam
 */
public class OutputSetting implements Serializable{

    /** How this channel's events are routed. */
    public enum OutputType {MOPPY, MIDI};

    /** MIDI channel this setting applies to (immutable after construction). */
    public final int MIDIChannel;
    /** Whether this output is active; disabled by default. */
    public boolean enabled = false;
    /** Output routing; defaults to MOPPY. */
    public OutputType type = OutputType.MOPPY;
    /** Serial port name, used for MOPPY output. */
    public String comPort;
    /** MIDI device name, used for MIDI output. */
    public String midiDeviceName;

    /**
     * Creates a setting bound to the given MIDI channel.
     *
     * @param MIDIChannel the MIDI channel this setting applies to
     */
    public OutputSetting(int MIDIChannel){
        this.MIDIChannel = MIDIChannel;
    }
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
// dictDecoder holds the sliding window buffer plus the read/write cursors
// used while decompressing a DEFLATE stream.
type dictDecoder struct {
	hist []byte // Sliding window history

	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int  // Current output position in buffer
	rdPos int  // Have emitted hist[:rdPos] already
	full  bool // Has a full window length been written yet?
}
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	// Reset all state while keeping the previously allocated history
	// buffer so it can be reused when large enough.
	*dd = dictDecoder{hist: dd.hist}

	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]

	// Only the last len(hist) bytes of the preset dictionary can matter.
	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		// Dictionary filled the window exactly: wrap and mark it full.
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}
// histSize reports the total amount of historical data in the dictionary:
// the whole window once it has wrapped, otherwise only what was written.
func (dd *dictDecoder) histSize() int {
	if !dd.full {
		return dd.wrPos
	}
	return len(dd.hist)
}
// availRead reports the number of bytes that can be flushed by readFlush,
// i.e. bytes written to the window but not yet emitted to the user.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}
// availWrite reports the available amount of output buffer space, i.e. how
// many bytes fit before the window must be flushed and wrapped.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}
// writeSlice returns a slice of the available buffer to write data to.
// The caller commits written bytes afterwards via writeMark.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}
// writeMark advances the writer pointer by cnt, committing bytes that the
// caller already placed in the slice obtained from writeSlice.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}
// writeByte writes a single byte to the dictionary, implementing an LZ77
// literal insertion of one symbol.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist
	endPos := dstPos + length
	// Truncate the copy at the end of the window; the caller retries
	// after a flush for the remainder.
	if endPos > len(dd.hist) {
		endPos = len(dd.hist)
	}

	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}

	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	// Each copy doubles the amount of data available, so the loop runs
	// O(log(length/dist)) times.
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		// The match would wrap around the ring buffer or does not fit before
		// the buffer end; report zero bytes copied — presumably the caller
		// falls back to the general writeCopy path (TODO confirm at call site).
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist
	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}
	dd.wrPos = dstPos
	return dstPos - dstBase
}
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
	out := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		// The window is exhausted: wrap both cursors and record that the
		// full buffer now holds valid history.
		dd.wrPos = 0
		dd.rdPos = 0
		dd.full = true
	}
	return out
}
|
{
"pile_set_name": "Github"
}
|
import wx
from example1 import SketchWindow
class SketchFrame(wx.Frame):
    """Top-level frame hosting a SketchWindow plus a status bar that
    echoes the mouse position as it moves over the sketch area."""

    def __init__(self, parent):
        # Fixed initial window size; title appears in the frame chrome.
        wx.Frame.__init__(self, parent, -1, "Sketch Frame", size=(800, 600))
        self.sketch = SketchWindow(self, -1)
        # Watch mouse motion over the sketch so we can report coordinates.
        self.sketch.Bind(wx.EVT_MOTION, self.OnSketchMotion)
        self.statusbar = self.CreateStatusBar()

    def OnSketchMotion(self, event):
        # Show the (x, y) position tuple in the status bar.
        position = event.GetPositionTuple()
        self.statusbar.SetStatusText(str(position))
        # Allow the sketch window's own motion handler to run as well.
        event.Skip()
if __name__ == '__main__':
    # Standard wxPython bootstrap: create the app, show the frame, run.
    application = wx.App()
    main_frame = SketchFrame(None)
    main_frame.Show(True)
    application.MainLoop()
|
{
"pile_set_name": "Github"
}
|
.. _tut-whatnow:
*********
What Now?
*********
Reading this tutorial has probably reinforced your interest in using Python ---
you should be eager to apply Python to solving your real-world problems. Where
should you go to learn more?
This tutorial is part of Python's documentation set. Some other documents in
the set are:
* :ref:`library-index`:
You should browse through this manual, which gives complete (though terse)
reference material about types, functions, and the modules in the standard
library. The standard Python distribution includes a *lot* of additional code.
There are modules to read Unix mailboxes, retrieve documents via HTTP, generate
random numbers, parse command-line options, write CGI programs, compress data,
and many other tasks. Skimming through the Library Reference will give you an
idea of what's available.
* :ref:`installing-index` explains how to install additional modules written
by other Python users.
* :ref:`reference-index`: A detailed explanation of Python's syntax and
semantics. It's heavy reading, but is useful as a complete guide to the
language itself.
More Python resources:
* https://www.python.org: The major Python Web site. It contains code,
documentation, and pointers to Python-related pages around the Web. This Web
site is mirrored in various places around the world, such as Europe, Japan, and
Australia; a mirror may be faster than the main site, depending on your
geographical location.
* https://docs.python.org: Fast access to Python's documentation.
* https://pypi.org: The Python Package Index, previously also nicknamed
the Cheese Shop [#]_, is an index of user-created Python modules that are available
for download. Once you begin releasing code, you can register it here so that
others can find it.
* https://github.com/ActiveState/code/tree/master/recipes/Python: The Python Cookbook is a
sizable collection of code examples, larger modules, and useful scripts.
Particularly notable contributions are collected in a book also titled Python
Cookbook (O'Reilly & Associates, ISBN 0-596-00797-3.)
* http://www.pyvideo.org collects links to Python-related videos from
conferences and user-group meetings.
* https://scipy.org: The Scientific Python project includes modules for fast
array computations and manipulations plus a host of packages for such
things as linear algebra, Fourier transforms, non-linear solvers,
random number distributions, statistical analysis and the like.
For Python-related questions and problem reports, you can post to the newsgroup
:newsgroup:`comp.lang.python`, or send them to the mailing list at
python-list@python.org. The newsgroup and mailing list are gatewayed, so
messages posted to one will automatically be forwarded to the other. There are
hundreds of postings a day, asking (and
answering) questions, suggesting new features, and announcing new modules.
Mailing list archives are available at https://mail.python.org/pipermail/.
Before posting, be sure to check the list of
:ref:`Frequently Asked Questions <faq-index>` (also called the FAQ). The
FAQ answers many of the questions that come up again and again, and may
already contain the solution for your problem.
.. rubric:: Footnotes
.. [#] "Cheese Shop" is a Monty Python sketch: a customer enters a cheese shop,
but whatever cheese he asks for, the clerk says it's missing.
|
{
"pile_set_name": "Github"
}
|
post-chasing_NN
amy_NP
,_,
a_DT
slew_???
of_IN
love-triangle_NN
movies_NNS
:_:
this_DT
month_NN
we_PP
have_HV
kissing_NN
a_NN
fool_NN
,_,
co-starring_JJ
amy_NP
's_POS
own_DT
lee_NP
,_,
and_CC
april_NP
brings_VBZ
us_PP
the_DT
object_NN
of_IN
my_PP$
affection_NN
,_,
which_WDT
may_MD
as_RB
well_RB
be_BE
titled_VBN
chasing_VBG
allan_NP
,_,
for_IN
it_PP
is_BEZ
the_DT
story_NN
of_IN
a_DT
woman_NN
who_WP
falls_VBZ
in_IN
love_NN
with_IN
her_PP$
gay_JJ
roommate_NN
._.
(_)
to_IN
be_BE
absolutely_RB
six_CD
degrees_NNS
of_IN
kevin_NP
bacon_NP
about_IN
it_PP
,_,
that_CS
film_NN
stars_NNS
schwimmer_NN
's_POS
friend_NN
jennifer_NN
aniston_NP
._.
)_)
if_CS
only_JJ
kevin_NP
smith_NP
could_MD
write_VB
them_PP
all_PDT
._.
._.
._.
schwimmer_NN
stars_NNS
as_IN
womanizing_VBG
chicago_NP
sportscaster_NN
max_NP
,_,
who_WP
falls_VBZ
in_IN
love_NN
with_IN
his_PP$
best_JJS
friend_NN
jay_NN
(_(
lee_NP
)_)
's_BEZ
book_NN
editor_NN
samantha_NP
(_(
avital_NN
)_)
a_DT
mere_JJ
twenty-four_CD
hours_NNS
after_IN
meeting_NN
her_PP
._.
they_PP
are_BER
soon_RB
engaged_VBN
,_,
and_CC
max_NP
,_,
because_CS
of_IN
his_PP$
own_DT
raging_JJ
libido_NN
,_,
grows_VBZ
suspicious_JJ
of_IN
samantha_NP
's_POS
fidelity_NN
._.
he_PP
convinces_VBZ
jay_NN
to_IN
flirt_NN
with_IN
samantha_NP
during_IN
the_DT
development_NN
of_IN
his_PP$
book_NN
,_,
to_IN
"_"
test_NN
her_PP$
"_"
._.
the_DT
trouble_NN
is_BEZ
,_,
jay_NN
might_MD
be_BE
secretly_RB
in_IN
love_NN
with_IN
her_PP$
._.
to_TO
stretch_VB
this_DT
flat_NN
,_,
sitcom_NP
premise_NN
to_IN
feature_NN
length_NN
,_,
the_DT
plot_NN
is_BEZ
framed_VBN
by_IN
a_DT
climactic_JJ
wedding_NN
,_,
at_IN
which_WDT
bonnie_NN
hunt_NP
recounts_VBZ
the_DT
triangular_JJ
tale--the_NN
events_NNS
leading_JJ
up_IN
to_IN
the_DT
nuptials--to_???
an_DT
annoying_NN
fat_NN
man_NN
and_CC
his_PP$
silly_JJ
girlfriend_NN
._.
hunt_NP
has_HVZ
the_DT
best_JJS
comic_NN
timing_NN
of_IN
anyone_PN
in_IN
the_DT
film_NN
;_;
schwimmer_NN
can_MD
spin_NN
bad_JJ
dialogue_NN
into_IN
mildly_RB
humorous_JJ
dialogue_NN
;_;
and_CC
lee_NP
,_,
poor_JJ
lee_NP
,_,
is_BEZ
miscast_NN
._.
so_RB
hysterically_RB
funny_JJ
in_IN
chasing_VBG
amy_NP
,_,
here_RB
he_PP
is_BEZ
forced_VBN
to_IN
repress_NN
his_PP$
comic_NN
instincts_NNS
:_:
to_IN
swear_NN
,_,
to_IN
yell_???
,_,
to_IN
talk_NN
about_IN
oral_JJ
sex_NN
._.
._.
._.
the_DT
script_NN
's_POS
idea_NN
of_IN
a_DT
character_NN
trait_NN
is_BEZ
to_TO
stress_VB
that_DT
jay_NN
is_BEZ
a_DT
"_"
sensitive_JJ
man_NN
"_"
,_,
and_CC
then_RB
show_VB
him_PP
drinking_NN
pepto_???
bismol_NP
when_CS
he_PP
's_BEZ
stewing_VBG
over_IN
his_PP$
girl_NN
trouble_NN
._.
as_CS
for_IN
avital_NN
,_,
an_DT
israeli_JJ
actress_NN
,_,
she_PP
is_BEZ
warm_JJ
and_CC
sweet_JJ
,_,
but_CC
we_PP
don't_DO
know_VB
anything_PN
about_IN
her_PP$
character_NN
other_JJ
than_CS
that_DT
it_PP
takes_VBZ
her_PP$
an_DT
incredibly_RB
long_JJ
time_NN
to_IN
realize_VB
the_DT
most_DT
obvious_JJ
things_NNS
._.
she_PP
also_RB
too_RB
closely_RB
resembles_VBZ
the_DT
stunningly_RB
beautiful_JJ
kari_???
wuhrer_IN
,_,
who_NP
plays_VBZ
schwimmer_NN
's_POS
assistant_NN
and_CC
personal_JJ
temptress_NN
,_,
turning_VBG
that_CS
particular_JJ
subplot_NN
into_IN
an_DT
unintentional_JJ
riff_???
on_IN
vertigo_NN
._.
there_EX
are_BER
a_DT
handful_NN
,_,
a_DT
smattering_NN
,_,
of_IN
good_JJ
scenes_NNS
in_IN
kissing_VBG
a_DT
fool_NN
._.
i_PP
enjoyed_VBD
a_DT
moment_NN
in_IN
a_DT
comedy_NN
club_NN
,_,
during_IN
which_WDT
jay_NN
gets_VBZ
up_RP
and_CC
asks_VBZ
"_"
has_HVZ
anyone_PN
here_RB
ever_RB
hated_VBD
their_PP$
girlfriend_NN
so_RB
much_RB
you_PP
wanted_VBD
to_IN
kill_NN
her_PP
?_?
"_"
over_IN
and_CC
over_IN
until_CS
he_PP
's_BEZ
booted_VBN
off_IN
stage_NN
._.
there_EX
are_BER
also_RB
a_DT
few_DT
obviously_RB
improvised_VBD
lines_NNS
that_CS
are_BER
fresher_RB
than_CS
anything_PN
that_CS
's_BEZ
on_IN
the_DT
page_NN
._.
kissing_VBG
a_DT
fool_NN
is_BEZ
never_RB
as_IN
clever_JJ
as_CS
the_DT
thursday_NP
night_NN
joke-machine_JJ
friends_NNS
that_CS
spawned_VBD
schwimmer_DT
's_BEZ
movie_NN
career_NN
,_,
so_RB
save_VB
yourself_PPX
eight_CD
dollars_NNS
and_CC
watch_VB
three_CD
episodes_NNS
of_IN
that_DT
series_NN
back_NN
to_IN
back_NN
._.
|
{
"pile_set_name": "Github"
}
|
<?php
namespace Fisharebest\Localization\Locale;
use Fisharebest\Localization\Language\LanguageWo;
/**
* Class LocaleWo - Wo
*
* @author Greg Roach <fisharebest@gmail.com>
* @copyright (c) 2019 Greg Roach
* @license GPLv3+
*/
class LocaleWo extends AbstractLocale implements LocaleInterface
{
    /** Native name of the locale. */
    public function endonym()
    {
        return 'Wolof';
    }

    /** Uppercased endonym used for locale-name sorting. */
    public function endonymSortable()
    {
        return 'WOLOF';
    }

    /** The language this locale is based on. */
    public function language()
    {
        return new LanguageWo();
    }

    /** Number formatting: comma as decimal separator, dot for digit grouping. */
    public function numberSymbols()
    {
        return [
            self::DECIMAL => self::COMMA,
            self::GROUP   => self::DOT,
        ];
    }
}
|
{
"pile_set_name": "Github"
}
|
{
"name": "grunt-contrib-uglify",
"description": "Minify files with UglifyJS",
"version": "0.9.2",
"author": {
"name": "Grunt Team",
"url": "http://gruntjs.com/"
},
"repository": {
"type": "git",
"url": "git+https://github.com/gruntjs/grunt-contrib-uglify.git"
},
"license": "MIT",
"engines": {
"node": ">=0.10.0"
},
"scripts": {
"test": "grunt test"
},
"dependencies": {
"chalk": "^1.0.0",
"lodash": "^3.2.0",
"maxmin": "^1.0.0",
"uglify-js": "^2.4.24",
"uri-path": "0.0.2"
},
"devDependencies": {
"grunt": "^0.4.2",
"grunt-cli": "^0.1.13",
"grunt-contrib-clean": "^0.6.0",
"grunt-contrib-internal": "^0.4.12",
"grunt-contrib-jshint": "^0.11.0",
"grunt-contrib-nodeunit": "^0.4.0"
},
"peerDependencies": {
"grunt": ">=0.4.0"
},
"keywords": [
"gruntplugin"
],
"files": [
"tasks"
],
"appveyor_id": "ybtf5vbvtenii561",
"contributors": [
{
"name": "\"Cowboy\" Ben Alman",
"url": "http://benalman.com"
},
{
"name": "Tyler Kellen",
"url": "http://goingslowly.com"
},
{
"name": "Jarrod Overson",
"url": "http://jarrodoverson.com"
}
],
"gitHead": "6e7ff8233562198054d908323f7112fc8e897d8d",
"bugs": {
"url": "https://github.com/gruntjs/grunt-contrib-uglify/issues"
},
"homepage": "https://github.com/gruntjs/grunt-contrib-uglify#readme",
"_id": "grunt-contrib-uglify@0.9.2",
"_shasum": "1a61c6f212410e4abb4f7c89153717b101560260",
"_from": "grunt-contrib-uglify@*",
"_npmVersion": "2.9.1",
"_nodeVersion": "0.10.38",
"_npmUser": {
"name": "vladikoff",
"email": "vlad@vladikoff.com"
},
"dist": {
"shasum": "1a61c6f212410e4abb4f7c89153717b101560260",
"tarball": "http://registry.npmjs.org/grunt-contrib-uglify/-/grunt-contrib-uglify-0.9.2.tgz"
},
"maintainers": [
{
"name": "tkellen",
"email": "tyler@sleekcode.net"
},
{
"name": "cowboy",
"email": "cowboy@rj3.net"
},
{
"name": "shama",
"email": "kyle@dontkry.com"
},
{
"name": "jsoverson",
"email": "jsoverson@gmail.com"
},
{
"name": "jmeas",
"email": "jellyes2@gmail.com"
},
{
"name": "vladikoff",
"email": "vlad@vladikoff.com"
},
{
"name": "sindresorhus",
"email": "sindresorhus@gmail.com"
}
],
"directories": {},
"_resolved": "https://registry.npmjs.org/grunt-contrib-uglify/-/grunt-contrib-uglify-0.9.2.tgz"
}
|
{
"pile_set_name": "Github"
}
|
//
// Mono.Directory.LDAP.LDAPMessage
//
// Author:
// Chris Toshok (toshok@ximian.com)
//
// (C) Ximian, Inc. http://www.ximian.com
//
//
// Just enough (for now) LDAP support to get System.DirectoryServices
// working.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
using System;
using System.Runtime.InteropServices;
namespace Mono.Directory.LDAP
{
public class LDAPMessage {
	// Native message handle and the connection it belongs to.
	IntPtr ldm;
	LDAP ld;

	internal LDAPMessage (LDAP ld, IntPtr ldm) {
		this.ld = ld;
		this.ldm = ldm;
	}

	// Wraps a native message handle, mapping a NULL handle to a null reference.
	LDAPMessage Wrap (IntPtr nm) {
		return nm == IntPtr.Zero ? null : new LDAPMessage (ld, nm);
	}

	// Iteration over the messages in a result chain.
	public LDAPMessage FirstMessage () {
		return Wrap (ldap_first_message (ld.NativeLDAP, ldm));
	}

	public LDAPMessage NextMessage () {
		return Wrap (ldap_next_message (ld.NativeLDAP, ldm));
	}

	public int CountMessages () {
		return ldap_count_messages (ld.NativeLDAP, ldm);
	}

	// Iteration over the entries contained in a search result.
	public LDAPMessage FirstEntry () {
		return Wrap (ldap_first_entry (ld.NativeLDAP, ldm));
	}

	public LDAPMessage NextEntry () {
		return Wrap (ldap_next_entry (ld.NativeLDAP, ldm));
	}

	public int CountEntries () {
		return ldap_count_entries (ld.NativeLDAP, ldm);
	}

	// Distinguished name of this entry, as reported by the native library.
	public string DN {
		get { return ldap_get_dn (ld.NativeLDAP, ldm); }
	}

	[MonoTODO]
	public string[] GetValues (string target) {
		throw new NotImplementedException ();
		/*
		string[] ldap_values;
		Console.WriteLine ("calling ldap_get_values ({0})", target);
		ldap_values = ldap_get_values (ld.NativeLDAP, ldm, target);
		if (ldap_values != null) {
			string[] rv;
			int i;
			rv = new string[ldap_values.Length - 1];
			for (i = 0; i < ldap_values.Length - 1; i ++)
				rv[i] = ldap_values[i];
			return rv;
		}
		else {
			return null;
		}
		*/
	}

	// P/Invoke declarations for the native OpenLDAP client library.
	[DllImport("ldap")]
	extern static string ldap_get_dn (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static IntPtr ldap_first_message (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static IntPtr ldap_next_message (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static int ldap_count_messages (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static IntPtr ldap_first_entry (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static IntPtr ldap_next_entry (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static int ldap_count_entries (IntPtr ld, IntPtr ldm);
	[DllImport("ldap")]
	extern static string ldap_first_attribute (IntPtr ld, IntPtr ldm, out IntPtr ber);
	[DllImport("ldap")]
	extern static string ldap_next_attribute (IntPtr ld, IntPtr ldm, IntPtr ber);
	/*
	[DllImport("ldapglue")]
	extern static void ldapsharp_get_values (IntPtr ld, IntPtr ldm, string target,
	                                         out string[] values, out int count);
	*/
}
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<bpmn2:definitions xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.omg.org/bpmn20" xmlns:bpmn2="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:bpsim="http://www.bpsim.org/schemas/1.0" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:drools="http://www.jboss.org/drools" id="_8Wj84JKGEeSzKuqwkg58Fg" xsi:schemaLocation="http://www.omg.org/spec/BPMN/20100524/MODEL BPMN20.xsd http://www.jboss.org/drools drools.xsd http://www.bpsim.org/schemas/1.0 bpsim.xsd" expressionLanguage="http://www.mvel.org/2.0" targetNamespace="http://www.omg.org/bpmn20" typeLanguage="http://www.java.com/javaTypes">
<bpmn2:itemDefinition id="_employeeItem" structureRef="java.lang.String"/>
<bpmn2:itemDefinition id="_reasonItem" structureRef="java.lang.String"/>
<bpmn2:itemDefinition id="_performanceItem" structureRef="java.lang.String"/>
<bpmn2:signal id="signal" name="signal"/>
<bpmn2:process id="org.jbpm.test.functional.common.HelloWorldProcess2" drools:packageName="org.jbpm.test.functional.common" drools:version="1.1" name="HelloWorldProcess2" isExecutable="true">
<bpmn2:property id="employee" itemSubjectRef="_employeeItem"/>
<bpmn2:property id="reason" itemSubjectRef="_reasonItem"/>
<bpmn2:property id="performance" itemSubjectRef="_performanceItem"/>
<bpmn2:startEvent id="_CAFA8EE6-BCFB-4AD4-AE66-89D3EB5B21F2" drools:bgcolor="#9acd32" drools:selectable="true" name="hello_start">
<bpmn2:outgoing>_26020D52-3E6B-4A7E-B373-B1D45AC44BD0</bpmn2:outgoing>
</bpmn2:startEvent>
<bpmn2:scriptTask id="_1FBF9BD4-C6E4-4411-8CC9-5944CD34183B" drools:selectable="true" name="Hello world (1)" scriptFormat="http://www.java.com/java">
<bpmn2:incoming>_26020D52-3E6B-4A7E-B373-B1D45AC44BD0</bpmn2:incoming>
<bpmn2:outgoing>_5E0C042F-ADF8-4703-B6C0-2CDEA4852EEE</bpmn2:outgoing>
<bpmn2:script><![CDATA[System.out.println("Hello world!");]]></bpmn2:script>
</bpmn2:scriptTask>
<bpmn2:endEvent id="_842D0F60-587F-47FC-9915-27FFF56A9BDD" drools:bgcolor="#ff6347" drools:selectable="true" name="hello_end">
<bpmn2:incoming>_D902F506-30A2-4E76-A866-BE5BAAE1BE55</bpmn2:incoming>
<bpmn2:terminateEventDefinition id="_8Wj84ZKGEeSzKuqwkg58Fg"/>
</bpmn2:endEvent>
<bpmn2:parallelGateway id="_08822C48-0876-44C5-AC17-04504FAE9B78" drools:bgcolor="#f0e68c" drools:selectable="true" drools:bordercolor="#a67f00" name="hello_gateway_out" gatewayDirection="Converging">
<bpmn2:incoming>_CF4C96F9-3131-45DA-9963-35D7F8028424</bpmn2:incoming>
<bpmn2:incoming>_B3D9BDA7-6C05-4833-9FA2-207D25C81415</bpmn2:incoming>
<bpmn2:outgoing>_A1386BB9-4B7D-4CDF-A34E-705F307F6591</bpmn2:outgoing>
</bpmn2:parallelGateway>
<bpmn2:intermediateCatchEvent id="_641304E1-BEEC-46FE-98C7-0D305B0377AC" drools:bgcolor="#f5deb3" drools:selectable="true" drools:bordercolor="#a0522d" drools:boundaryca="false" name="hello_signal">
<bpmn2:incoming>_B6F8DF5F-57AC-44DB-9C92-FC0FF53ABDD5</bpmn2:incoming>
<bpmn2:outgoing>_B3D9BDA7-6C05-4833-9FA2-207D25C81415</bpmn2:outgoing>
<bpmn2:signalEventDefinition id="_8Wj84pKGEeSzKuqwkg58Fg" signalRef="signal"/>
</bpmn2:intermediateCatchEvent>
<bpmn2:scriptTask id="_11581AE9-8DF6-4A41-A9B0-CE3350F63E5D" drools:selectable="true" name="Goodbye world (1)" scriptFormat="http://www.java.com/java">
<bpmn2:incoming>_A1386BB9-4B7D-4CDF-A34E-705F307F6591</bpmn2:incoming>
<bpmn2:outgoing>_D902F506-30A2-4E76-A866-BE5BAAE1BE55</bpmn2:outgoing>
<bpmn2:script><![CDATA[System.out.println("Goodbye world!");]]></bpmn2:script>
</bpmn2:scriptTask>
<bpmn2:sequenceFlow id="_26020D52-3E6B-4A7E-B373-B1D45AC44BD0" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_CAFA8EE6-BCFB-4AD4-AE66-89D3EB5B21F2" targetRef="_1FBF9BD4-C6E4-4411-8CC9-5944CD34183B"/>
<bpmn2:sequenceFlow id="_D902F506-30A2-4E76-A866-BE5BAAE1BE55" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_11581AE9-8DF6-4A41-A9B0-CE3350F63E5D" targetRef="_842D0F60-587F-47FC-9915-27FFF56A9BDD"/>
<bpmn2:sequenceFlow id="_B3D9BDA7-6C05-4833-9FA2-207D25C81415" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_641304E1-BEEC-46FE-98C7-0D305B0377AC" targetRef="_08822C48-0876-44C5-AC17-04504FAE9B78"/>
<bpmn2:sequenceFlow id="_A1386BB9-4B7D-4CDF-A34E-705F307F6591" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_08822C48-0876-44C5-AC17-04504FAE9B78" targetRef="_11581AE9-8DF6-4A41-A9B0-CE3350F63E5D"/>
<bpmn2:sequenceFlow id="_5E0C042F-ADF8-4703-B6C0-2CDEA4852EEE" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_1FBF9BD4-C6E4-4411-8CC9-5944CD34183B" targetRef="_7FC3DAB7-9BA3-4122-9E8E-00AFA30F5396"/>
<bpmn2:parallelGateway id="_7FC3DAB7-9BA3-4122-9E8E-00AFA30F5396" drools:bgcolor="#f0e68c" drools:selectable="true" drools:bordercolor="#a67f00" name="hello_gateway_in" gatewayDirection="Diverging">
<bpmn2:incoming>_5E0C042F-ADF8-4703-B6C0-2CDEA4852EEE</bpmn2:incoming>
<bpmn2:outgoing>_CF4C96F9-3131-45DA-9963-35D7F8028424</bpmn2:outgoing>
<bpmn2:outgoing>_B6F8DF5F-57AC-44DB-9C92-FC0FF53ABDD5</bpmn2:outgoing>
</bpmn2:parallelGateway>
<bpmn2:sequenceFlow id="_CF4C96F9-3131-45DA-9963-35D7F8028424" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_7FC3DAB7-9BA3-4122-9E8E-00AFA30F5396" targetRef="_08822C48-0876-44C5-AC17-04504FAE9B78"/>
<bpmn2:sequenceFlow id="_B6F8DF5F-57AC-44DB-9C92-FC0FF53ABDD5" drools:bgcolor="#000000" drools:selectable="true" sourceRef="_7FC3DAB7-9BA3-4122-9E8E-00AFA30F5396" targetRef="_641304E1-BEEC-46FE-98C7-0D305B0377AC"/>
</bpmn2:process>
<bpmndi:BPMNDiagram id="_8Wj845KGEeSzKuqwkg58Fg">
<bpmndi:BPMNPlane id="_8Wj85JKGEeSzKuqwkg58Fg" bpmnElement="org.jbpm.test.functional.common.HelloWorldProcess2">
<bpmndi:BPMNShape id="_8Wkj8JKGEeSzKuqwkg58Fg" bpmnElement="_CAFA8EE6-BCFB-4AD4-AE66-89D3EB5B21F2">
<dc:Bounds height="30.0" width="30.0" x="90.0" y="99.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_8Wkj8ZKGEeSzKuqwkg58Fg" bpmnElement="_1FBF9BD4-C6E4-4411-8CC9-5944CD34183B">
<dc:Bounds height="48.0" width="134.0" x="155.0" y="90.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_8Wkj8pKGEeSzKuqwkg58Fg" bpmnElement="_842D0F60-587F-47FC-9915-27FFF56A9BDD">
<dc:Bounds height="28.0" width="28.0" x="734.0" y="98.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_8Wkj85KGEeSzKuqwkg58Fg" bpmnElement="_08822C48-0876-44C5-AC17-04504FAE9B78">
<dc:Bounds height="40.0" width="40.0" x="450.0" y="94.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_8Wkj9JKGEeSzKuqwkg58Fg" bpmnElement="_641304E1-BEEC-46FE-98C7-0D305B0377AC">
<dc:Bounds height="30.0" width="30.0" x="405.0" y="165.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_8Wkj9ZKGEeSzKuqwkg58Fg" bpmnElement="_11581AE9-8DF6-4A41-A9B0-CE3350F63E5D">
<dc:Bounds height="48.0" width="168.0" x="527.0" y="90.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="_8Wkj9pKGEeSzKuqwkg58Fg" bpmnElement="_26020D52-3E6B-4A7E-B373-B1D45AC44BD0">
<di:waypoint xsi:type="dc:Point" x="105.0" y="114.0"/>
<di:waypoint xsi:type="dc:Point" x="222.0" y="114.0"/>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="_8Wkj95KGEeSzKuqwkg58Fg" bpmnElement="_D902F506-30A2-4E76-A866-BE5BAAE1BE55">
<di:waypoint xsi:type="dc:Point" x="611.0" y="114.0"/>
<di:waypoint xsi:type="dc:Point" x="748.0" y="112.0"/>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="_8Wkj-JKGEeSzKuqwkg58Fg" bpmnElement="_B3D9BDA7-6C05-4833-9FA2-207D25C81415">
<di:waypoint xsi:type="dc:Point" x="420.0" y="180.0"/>
<di:waypoint xsi:type="dc:Point" x="470.0" y="180.0"/>
<di:waypoint xsi:type="dc:Point" x="470.0" y="114.0"/>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="_8Wkj-ZKGEeSzKuqwkg58Fg" bpmnElement="_A1386BB9-4B7D-4CDF-A34E-705F307F6591">
<di:waypoint xsi:type="dc:Point" x="470.0" y="114.0"/>
<di:waypoint xsi:type="dc:Point" x="611.0" y="114.0"/>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="_8Wkj-pKGEeSzKuqwkg58Fg" bpmnElement="_5E0C042F-ADF8-4703-B6C0-2CDEA4852EEE">
<di:waypoint xsi:type="dc:Point" x="222.0" y="114.0"/>
<di:waypoint xsi:type="dc:Point" x="354.0" y="114.0"/>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_8Wkj-5KGEeSzKuqwkg58Fg" bpmnElement="_7FC3DAB7-9BA3-4122-9E8E-00AFA30F5396">
<dc:Bounds height="40.0" width="40.0" x="334.0" y="94.0"/>
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="_8Wkj_JKGEeSzKuqwkg58Fg" bpmnElement="_CF4C96F9-3131-45DA-9963-35D7F8028424">
<di:waypoint xsi:type="dc:Point" x="354.0" y="114.0"/>
<di:waypoint xsi:type="dc:Point" x="470.0" y="114.0"/>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="_8Wkj_ZKGEeSzKuqwkg58Fg" bpmnElement="_B6F8DF5F-57AC-44DB-9C92-FC0FF53ABDD5">
<di:waypoint xsi:type="dc:Point" x="354.0" y="114.0"/>
<di:waypoint xsi:type="dc:Point" x="354.0" y="180.0"/>
<di:waypoint xsi:type="dc:Point" x="420.0" y="180.0"/>
</bpmndi:BPMNEdge>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
<bpmn2:relationship id="_8Wkj_pKGEeSzKuqwkg58Fg" type="BPSimData">
<bpmn2:extensionElements>
<bpsim:BPSimData>
<bpsim:Scenario xsi:type="bpsim:Scenario" id="default" name="Simulationscenario">
<bpsim:ScenarioParameters xsi:type="bpsim:ScenarioParameters" baseTimeUnit="s"/>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_5E0C042F-ADF8-4703-B6C0-2CDEA4852EEE" id="_8Wkj_5KGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_B3D9BDA7-6C05-4833-9FA2-207D25C81415" id="_8WkkAJKGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_CAFA8EE6-BCFB-4AD4-AE66-89D3EB5B21F2" id="_8WkkAZKGEeSzKuqwkg58Fg">
<bpsim:TimeParameters xsi:type="bpsim:TimeParameters">
<bpsim:ProcessingTime xsi:type="bpsim:Parameter">
<bpsim:UniformDistribution max="10.0" min="5.0"/>
</bpsim:ProcessingTime>
</bpsim:TimeParameters>
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_CF4C96F9-3131-45DA-9963-35D7F8028424" id="_8WkkApKGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_11581AE9-8DF6-4A41-A9B0-CE3350F63E5D" id="_8WkkA5KGEeSzKuqwkg58Fg">
<bpsim:TimeParameters xsi:type="bpsim:TimeParameters">
<bpsim:ProcessingTime xsi:type="bpsim:Parameter">
<bpsim:UniformDistribution max="10.0" min="5.0"/>
</bpsim:ProcessingTime>
</bpsim:TimeParameters>
<bpsim:CostParameters xsi:type="bpsim:CostParameters">
<bpsim:UnitCost xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="0.0"/>
</bpsim:UnitCost>
</bpsim:CostParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_1FBF9BD4-C6E4-4411-8CC9-5944CD34183B" id="_8WkkBJKGEeSzKuqwkg58Fg">
<bpsim:TimeParameters xsi:type="bpsim:TimeParameters">
<bpsim:ProcessingTime xsi:type="bpsim:Parameter">
<bpsim:UniformDistribution max="10.0" min="5.0"/>
</bpsim:ProcessingTime>
</bpsim:TimeParameters>
<bpsim:CostParameters xsi:type="bpsim:CostParameters">
<bpsim:UnitCost xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="0.0"/>
</bpsim:UnitCost>
</bpsim:CostParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_641304E1-BEEC-46FE-98C7-0D305B0377AC" id="_8WkkBZKGEeSzKuqwkg58Fg">
<bpsim:TimeParameters xsi:type="bpsim:TimeParameters">
<bpsim:ProcessingTime xsi:type="bpsim:Parameter">
<bpsim:UniformDistribution max="10.0" min="5.0"/>
</bpsim:ProcessingTime>
</bpsim:TimeParameters>
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_B6F8DF5F-57AC-44DB-9C92-FC0FF53ABDD5" id="_8WkkBpKGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_D902F506-30A2-4E76-A866-BE5BAAE1BE55" id="_8WkkB5KGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_26020D52-3E6B-4A7E-B373-B1D45AC44BD0" id="_8WkkCJKGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_842D0F60-587F-47FC-9915-27FFF56A9BDD" id="_8WkkCZKGEeSzKuqwkg58Fg">
<bpsim:TimeParameters xsi:type="bpsim:TimeParameters">
<bpsim:ProcessingTime xsi:type="bpsim:Parameter">
<bpsim:UniformDistribution max="10.0" min="5.0"/>
</bpsim:ProcessingTime>
</bpsim:TimeParameters>
</bpsim:ElementParameters>
<bpsim:ElementParameters xsi:type="bpsim:ElementParameters" elementRef="_A1386BB9-4B7D-4CDF-A34E-705F307F6591" id="_8WkkCpKGEeSzKuqwkg58Fg">
<bpsim:ControlParameters xsi:type="bpsim:ControlParameters">
<bpsim:Probability xsi:type="bpsim:Parameter">
<bpsim:FloatingParameter value="100.0"/>
</bpsim:Probability>
</bpsim:ControlParameters>
</bpsim:ElementParameters>
</bpsim:Scenario>
</bpsim:BPSimData>
</bpmn2:extensionElements>
<bpmn2:source>_8Wj84JKGEeSzKuqwkg58Fg</bpmn2:source>
<bpmn2:target>_8Wj84JKGEeSzKuqwkg58Fg</bpmn2:target>
</bpmn2:relationship>
</bpmn2:definitions>
|
{
"pile_set_name": "Github"
}
|
// Assembles the nginx ingress controller and kube-cert-manager manifests
// into a single List, all placed in the "nginx-ingress" namespace.
local kube = import "kube.libsonnet";
// items_ appears to be the map of manifest objects exposed by the common
// templates -- TODO confirm against ../common/*.jsonnet.
local ingress = (import "../common/nginx-ingress.jsonnet").items_;
local kcm = (import "../common/kube-cert-manager.jsonnet").items_;
// NOTE(review): object application binds tighter than `+`, so this parses as
// ingress + (kcm { ... }); the namespace fields extend kcm's result before the
// merge with ingress -- confirm that is the intended composition.
local all = ingress + kcm {
namespace: "nginx-ingress",
// The Namespace object itself is emitted alongside the components.
nginx_ingress_ns: kube.Namespace($.namespace),
};
kube.List() { items_+: all }
|
{
"pile_set_name": "Github"
}
|
using System;
using System.Collections.Generic;
using System.Text;
using WorkflowCore.Interface;
using WorkflowCore.Models;
using Xunit;
using FluentAssertions;
using System.Threading;
using WorkflowCore.Testing;
namespace WorkflowCore.IntegrationTests.Scenarios
{
    /// <summary>
    /// Integration test for the fluent <c>Attach</c> primitive: a step inside an
    /// <c>If</c> branch re-attaches the flow back to an earlier step ("step1"),
    /// forming a loop that repeats until the ticker condition becomes false.
    /// </summary>
    public class AttachScenario : WorkflowTest<AttachScenario.GotoWorkflow, AttachScenario.MyDataClass>
    {
        // Execution counters shared between the workflow step lambdas and the
        // test assertions; static so both observe the same values.
        internal static int Step1Ticker = 0;
        internal static int Step2Ticker = 0;

        // Empty workflow data -- this scenario exercises control flow only.
        public class MyDataClass
        {
        }

        /// <summary>
        /// Workflow under test: step1 increments Step1Ticker; while the ticker
        /// is below 4, the branch increments Step2Ticker and attaches back to
        /// "step1", looping the flow.
        /// </summary>
        public class GotoWorkflow : IWorkflow<MyDataClass>
        {
            public string Id => "GotoWorkflow";
            public int Version => 1;

            public void Build(IWorkflowBuilder<MyDataClass> builder)
            {
                builder
                    .StartWith(context =>
                    {
                        Step1Ticker++;
                        return ExecutionResult.Next();
                    })
                    .Id("step1") // named so the Attach below can target it
                    .If(data => Step1Ticker < 4).Do(then => then
                        .StartWith(context =>
                        {
                            Step2Ticker++;
                            return ExecutionResult.Next();
                        })
                        .Attach("step1") // jump back to step1, forming the loop
                    );
            }
        }

        public AttachScenario()
        {
            // Provided by the WorkflowTest base -- presumably starts the host
            // and registers GotoWorkflow; verify against WorkflowCore.Testing.
            Setup();
        }

        [Fact]
        public void Scenario()
        {
            var workflowId = StartWorkflow(new MyDataClass());
            WaitForWorkflowToComplete(workflowId, TimeSpan.FromSeconds(30));
            // step1 runs 4 times; the branch body runs on the 3 passes where
            // Step1Ticker was still below 4 (values 1, 2 and 3).
            Step1Ticker.Should().Be(4);
            Step2Ticker.Should().Be(3);
            GetStatus(workflowId).Should().Be(WorkflowStatus.Complete);
            UnhandledStepErrors.Count.Should().Be(0);
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>BNDL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1</string>
</dict>
</plist>
|
{
"pile_set_name": "Github"
}
|
// Copyright (C) 2018. See AUTHORS.

// Package merge provides a merger for rothko data.
package merge
|
{
"pile_set_name": "Github"
}
|
jest.mock('pertain');
jest.mock('pkg-dir');
jest.mock('webpack-assets-manifest');
jest.mock('../../Utilities/loadEnvironment');
jest.mock('../plugins/RootComponentsPlugin');
jest.mock('../PWADevServer');
jest.mock('../../BuildBus/declare-base');
const path = require('path');
const fs = require('fs');
const stat = jest.spyOn(fs, 'stat');
const { SyncHook } = require('tapable');
const declareBase = require('../../BuildBus/declare-base');
const pertain = require('pertain');
const pkgDir = require('pkg-dir');
const WebpackAssetsManifest = require('webpack-assets-manifest');
const RootComponentsPlugin = require('../plugins/RootComponentsPlugin');
const loadEnvironment = require('../../Utilities/loadEnvironment');
const configureWebpack = require('../configureWebpack');
const BuildBus = require('../../BuildBus');
pertain.mockImplementation((_, subject) => [
{
name: '@magento/pwa-buildpack',
path: `./${subject.split('.').pop()}-base`
}
]);
pkgDir.mockImplementation(x => x);
const specialFeaturesHook = new SyncHook(['special']);
const envVarDefsHook = new SyncHook(['envVarDefs']);
const transformModulesHook = new SyncHook(['addTransform']);
declareBase.mockImplementation(targets => {
targets.declare({
envVarDefinitions: envVarDefsHook,
specialFeatures: specialFeaturesHook,
webpackCompiler: new SyncHook(['compiler']),
transformModules: transformModulesHook
});
});
beforeEach(() => {
pertain.mockClear();
declareBase.mockClear();
BuildBus.clearAll();
});
// Queues a single fs.stat response. The fake stats object reports
// isDirectory()/isFile() per the `dir`/`file` flags; a non-null `err`
// simulates a failing stat call.
const mockStat = (dir, file, err = null) => {
    stat.mockImplementationOnce((_, callback) =>
        callback(err, { isDirectory: () => dir, isFile: () => file })
    );
};

// Queues a single loadEnvironment response whose isProd flag (and the
// devServer.serviceWorkerEnabled section value) follow `prod`.
const mockEnv = prod =>
    loadEnvironment.mockReturnValueOnce({
        env: process.env,
        sections: jest.fn(),
        section: jest.fn(
            key =>
                ({
                    devServer: {
                        serviceWorkerEnabled: !!prod
                    }
                }[key])
        ),
        isProd: prod
    });

// Chainable DSL the tests use to arrange queued mock responses, e.g.
// simulate.statsAsDirectory().statsAsFile().productionEnvironment().
const simulate = {
    // Next fs.stat call reports a directory.
    statsAsDirectory() {
        mockStat(true);
        return this;
    },
    // Next fs.stat call reports a regular file.
    statsAsFile() {
        mockStat(false, true);
        return this;
    },
    // Next fs.stat call fails, as for a missing path.
    statsAsMissing() {
        mockStat(false, false, new Error());
        return this;
    },
    // Next loadEnvironment call reports a production environment.
    productionEnvironment() {
        mockEnv(true);
        return this;
    },
    // Next loadEnvironment call reports a development environment.
    devEnvironment() {
        mockEnv(false);
        return this;
    }
};
test('throws if app root not provided', async () => {
await expect(configureWebpack({})).rejects.toThrow('root directory');
});
test('throws if app root not a directory', async () => {
simulate.statsAsFile();
await expect(configureWebpack({ context: '.' })).rejects.toThrow(
'not a directory'
);
});
test('produces a webpack config and friendly manifest plugin', async () => {
simulate
.statsAsDirectory()
.statsAsFile()
.productionEnvironment();
const config = await configureWebpack({ context: '.' });
expect(config).toMatchObject({
context: '.',
mode: 'production',
optimization: expect.any(Object)
});
expect(config.entry).toHaveProperty('client');
expect(config.module).toHaveProperty('rules');
expect(WebpackAssetsManifest).toHaveBeenCalled();
const { transform } = WebpackAssetsManifest.mock.calls[0][0];
const assets = {
entrypoints: {
client: {
js: ['client1.js', 'client2.js']
}
},
'toLoad.js': 'toLoad.compiled.js',
'RootCmp-FAKE.js': [
'RootCmp-FAKE-1.compiled.js',
'RootCmp-FAKE-2.compiled.js'
],
'RootCmp-FAKER.js': 'RootCmp-FAKER-1.compiled.js',
'image.svg': 'image-hash.svg'
};
transform(assets);
expect(assets.bundles.load).toEqual(['client1.js', 'client2.js']);
expect(assets.bundles.prefetch).toEqual([
'RootCmp-FAKE-1.compiled.js',
'RootCmp-FAKE-2.compiled.js',
'RootCmp-FAKER-1.compiled.js'
]);
expect(assets.js).toHaveProperty('RootCmp-FAKE');
});
test('works if babel.config.js is not present', async () => {
simulate
.statsAsDirectory()
.statsAsMissing()
.productionEnvironment();
await expect(configureWebpack({ context: '.' })).resolves.not.toThrow();
});
test('works in developer mode from cli', async () => {
simulate
.statsAsDirectory()
.statsAsMissing()
.productionEnvironment();
const clientConfig = await configureWebpack({
context: '.',
env: { mode: 'development' }
});
expect(clientConfig).toHaveProperty('mode', 'development');
});
test('works in developer mode from fallback', async () => {
simulate
.statsAsDirectory()
.statsAsMissing()
.devEnvironment();
const clientConfig = await configureWebpack({ context: '.' });
expect(clientConfig).toHaveProperty('mode', 'development');
});
test('errors when mode unrecognized', async () => {
simulate
.statsAsDirectory()
.statsAsMissing()
.productionEnvironment();
await expect(
configureWebpack({ context: '.', env: { mode: 'wuh' } })
).rejects.toThrowError('wuh');
});
test('errors when environment is invalid', async () => {
simulate.statsAsDirectory().statsAsMissing();
loadEnvironment.mockReturnValueOnce({
env: process.env,
envFilePresent: false,
error: new Error('Configuration foo was invalid')
});
await expect(
configureWebpack({ context: '.', env: { mode: 'development' } })
).rejects.toThrowError('foo was invalid');
});
test('handles special flags', async () => {
simulate
.statsAsDirectory()
.statsAsFile()
.productionEnvironment();
const special = {
localModule1: {
esModules: true,
cssModules: true,
graphqlQueries: true,
rootComponents: true,
upward: true
},
depModule1: {
esModules: true,
cssModules: true,
graphqlQueries: true,
rootComponents: false,
upward: true
}
};
// Tapable detects the argument length of the tap provided, so we need to
// declare at least one argument or Tapable won't give us anything.
const specialFeaturesTap = jest.fn(x => x);
specialFeaturesHook.tap('configureWebpack.spec.js', specialFeaturesTap);
const clientConfig = await configureWebpack({
context: path.resolve(__dirname, '__fixtures__/resolverContext'),
vendor: ['jest'],
special
});
const gqlLoader = ({ use }) =>
use.some(({ loader }) => /^graphql\-tag/.test(loader));
expect(clientConfig.module.rules.find(gqlLoader).include).toHaveLength(3);
expect(RootComponentsPlugin).toHaveBeenCalled();
expect(
RootComponentsPlugin.mock.calls[0][0].rootComponentsDirs.some(entry =>
entry.includes('localModule1')
)
).toBeTruthy();
expect(declareBase).toHaveBeenCalledTimes(1);
expect(specialFeaturesTap).toHaveBeenCalledWith(special);
});
test('accepts aliases', async () => {
simulate
.statsAsDirectory()
.statsAsFile()
.productionEnvironment();
await expect(
configureWebpack({
context: path.resolve(__dirname, '__fixtures__/resolverContext'),
alias: {
garner: 'bristow',
cooper: 'tippin'
}
})
).resolves.toMatchObject({
resolve: {
alias: {
garner: 'bristow',
cooper: 'tippin'
}
}
});
});
|
{
"pile_set_name": "Github"
}
|
.\" Automatically generated by Pod::Man 4.11 (Pod::Simple 3.40)
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. \*(C+ will
.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
.\" nothing in troff, for use with C<>.
.tr \(*W-
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
. ds C`
. ds C'
'br\}
.\"
.\" Escape single quotes in literal strings from groff's Unicode transform.
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\"
.\" If the F register is >0, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.\"
.\" Avoid warning from groff about undefined register 'F'.
.de IX
..
.nr rF 0
.if \n(.g .if rF .nr rF 1
.if (\n(rF:(\n(.g==0)) \{\
. if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. if !\nF==2 \{\
. nr % 0
. nr F 2
. \}
. \}
.\}
.rr rF
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "SSL_CTX_NEW 3"
.TH SSL_CTX_NEW 3 "2020-04-21" "1.1.1g" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
TLSv1_2_method, TLSv1_2_server_method, TLSv1_2_client_method, SSL_CTX_new, SSL_CTX_up_ref, SSLv3_method, SSLv3_server_method, SSLv3_client_method, TLSv1_method, TLSv1_server_method, TLSv1_client_method, TLSv1_1_method, TLSv1_1_server_method, TLSv1_1_client_method, TLS_method, TLS_server_method, TLS_client_method, SSLv23_method, SSLv23_server_method, SSLv23_client_method, DTLS_method, DTLS_server_method, DTLS_client_method, DTLSv1_method, DTLSv1_server_method, DTLSv1_client_method, DTLSv1_2_method, DTLSv1_2_server_method, DTLSv1_2_client_method \&\- create a new SSL_CTX object as framework for TLS/SSL or DTLS enabled functions
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/ssl.h>
\&
\& SSL_CTX *SSL_CTX_new(const SSL_METHOD *method);
\& int SSL_CTX_up_ref(SSL_CTX *ctx);
\&
\& const SSL_METHOD *TLS_method(void);
\& const SSL_METHOD *TLS_server_method(void);
\& const SSL_METHOD *TLS_client_method(void);
\&
\& const SSL_METHOD *SSLv23_method(void);
\& const SSL_METHOD *SSLv23_server_method(void);
\& const SSL_METHOD *SSLv23_client_method(void);
\&
\& #ifndef OPENSSL_NO_SSL3_METHOD
\& const SSL_METHOD *SSLv3_method(void);
\& const SSL_METHOD *SSLv3_server_method(void);
\& const SSL_METHOD *SSLv3_client_method(void);
\& #endif
\&
\& #ifndef OPENSSL_NO_TLS1_METHOD
\& const SSL_METHOD *TLSv1_method(void);
\& const SSL_METHOD *TLSv1_server_method(void);
\& const SSL_METHOD *TLSv1_client_method(void);
\& #endif
\&
\& #ifndef OPENSSL_NO_TLS1_1_METHOD
\& const SSL_METHOD *TLSv1_1_method(void);
\& const SSL_METHOD *TLSv1_1_server_method(void);
\& const SSL_METHOD *TLSv1_1_client_method(void);
\& #endif
\&
\& #ifndef OPENSSL_NO_TLS1_2_METHOD
\& const SSL_METHOD *TLSv1_2_method(void);
\& const SSL_METHOD *TLSv1_2_server_method(void);
\& const SSL_METHOD *TLSv1_2_client_method(void);
\& #endif
\&
\& const SSL_METHOD *DTLS_method(void);
\& const SSL_METHOD *DTLS_server_method(void);
\& const SSL_METHOD *DTLS_client_method(void);
\&
\& #ifndef OPENSSL_NO_DTLS1_METHOD
\& const SSL_METHOD *DTLSv1_method(void);
\& const SSL_METHOD *DTLSv1_server_method(void);
\& const SSL_METHOD *DTLSv1_client_method(void);
\& #endif
\&
\& #ifndef OPENSSL_NO_DTLS1_2_METHOD
\& const SSL_METHOD *DTLSv1_2_method(void);
\& const SSL_METHOD *DTLSv1_2_server_method(void);
\& const SSL_METHOD *DTLSv1_2_client_method(void);
\& #endif
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
\&\fBSSL_CTX_new()\fR creates a new \fB\s-1SSL_CTX\s0\fR object as framework to
establish \s-1TLS/SSL\s0 or \s-1DTLS\s0 enabled connections. An \fB\s-1SSL_CTX\s0\fR object is
reference counted. Creating an \fB\s-1SSL_CTX\s0\fR object for the first time increments
the reference count. Freeing it (using SSL_CTX_free) decrements it. When the
reference count drops to zero, any memory or resources allocated to the
\&\fB\s-1SSL_CTX\s0\fR object are freed. \fBSSL_CTX_up_ref()\fR increments the reference count for
an existing \fB\s-1SSL_CTX\s0\fR structure.
.SH "NOTES"
.IX Header "NOTES"
The \s-1SSL_CTX\s0 object uses \fBmethod\fR as connection method.
The methods exist in a generic type (for client and server use), a server only
type, and a client only type.
\&\fBmethod\fR can be of the following types:
.IP "\fBTLS_method()\fR, \fBTLS_server_method()\fR, \fBTLS_client_method()\fR" 4
.IX Item "TLS_method(), TLS_server_method(), TLS_client_method()"
These are the general-purpose \fIversion-flexible\fR \s-1SSL/TLS\s0 methods.
The actual protocol version used will be negotiated to the highest version
mutually supported by the client and the server.
The supported protocols are SSLv3, TLSv1, TLSv1.1, TLSv1.2 and TLSv1.3.
Applications should use these methods, and avoid the version-specific
methods described below, which are deprecated.
.IP "\fBSSLv23_method()\fR, \fBSSLv23_server_method()\fR, \fBSSLv23_client_method()\fR" 4
.IX Item "SSLv23_method(), SSLv23_server_method(), SSLv23_client_method()"
These functions do not exist anymore, they have been renamed to
\&\fBTLS_method()\fR, \fBTLS_server_method()\fR and \fBTLS_client_method()\fR respectively.
Currently, the old function calls are renamed to the corresponding new
ones by preprocessor macros, to ensure that existing code which uses the
old function names still compiles. However, using the old function names
is deprecated and new code should call the new functions instead.
.IP "\fBTLSv1_2_method()\fR, \fBTLSv1_2_server_method()\fR, \fBTLSv1_2_client_method()\fR" 4
.IX Item "TLSv1_2_method(), TLSv1_2_server_method(), TLSv1_2_client_method()"
A \s-1TLS/SSL\s0 connection established with these methods will only understand the
TLSv1.2 protocol. These methods are deprecated.
.IP "\fBTLSv1_1_method()\fR, \fBTLSv1_1_server_method()\fR, \fBTLSv1_1_client_method()\fR" 4
.IX Item "TLSv1_1_method(), TLSv1_1_server_method(), TLSv1_1_client_method()"
A \s-1TLS/SSL\s0 connection established with these methods will only understand the
TLSv1.1 protocol. These methods are deprecated.
.IP "\fBTLSv1_method()\fR, \fBTLSv1_server_method()\fR, \fBTLSv1_client_method()\fR" 4
.IX Item "TLSv1_method(), TLSv1_server_method(), TLSv1_client_method()"
A \s-1TLS/SSL\s0 connection established with these methods will only understand the
TLSv1 protocol. These methods are deprecated.
.IP "\fBSSLv3_method()\fR, \fBSSLv3_server_method()\fR, \fBSSLv3_client_method()\fR" 4
.IX Item "SSLv3_method(), SSLv3_server_method(), SSLv3_client_method()"
A \s-1TLS/SSL\s0 connection established with these methods will only understand the
SSLv3 protocol.
The SSLv3 protocol is deprecated and should not be used.
.IP "\fBDTLS_method()\fR, \fBDTLS_server_method()\fR, \fBDTLS_client_method()\fR" 4
.IX Item "DTLS_method(), DTLS_server_method(), DTLS_client_method()"
These are the version-flexible \s-1DTLS\s0 methods.
Currently supported protocols are \s-1DTLS 1.0\s0 and \s-1DTLS 1.2.\s0
.IP "\fBDTLSv1_2_method()\fR, \fBDTLSv1_2_server_method()\fR, \fBDTLSv1_2_client_method()\fR" 4
.IX Item "DTLSv1_2_method(), DTLSv1_2_server_method(), DTLSv1_2_client_method()"
These are the version-specific methods for DTLSv1.2.
These methods are deprecated.
.IP "\fBDTLSv1_method()\fR, \fBDTLSv1_server_method()\fR, \fBDTLSv1_client_method()\fR" 4
.IX Item "DTLSv1_method(), DTLSv1_server_method(), DTLSv1_client_method()"
These are the version-specific methods for DTLSv1.
These methods are deprecated.
.PP
\&\fBSSL_CTX_new()\fR initializes the list of ciphers, the session cache setting, the
callbacks, the keys and certificates and the options to their default values.
.PP
\&\fBTLS_method()\fR, \fBTLS_server_method()\fR, \fBTLS_client_method()\fR, \fBDTLS_method()\fR,
\&\fBDTLS_server_method()\fR and \fBDTLS_client_method()\fR are the \fIversion-flexible\fR
methods.
All other methods only support one specific protocol version.
Use the \fIversion-flexible\fR methods instead of the version specific methods.
.PP
If you want to limit the supported protocols for the version flexible
methods you can use \fBSSL_CTX_set_min_proto_version\fR\|(3),
\&\fBSSL_set_min_proto_version\fR\|(3), \fBSSL_CTX_set_max_proto_version\fR\|(3) and
\&\fBSSL_set_max_proto_version\fR\|(3) functions.
Using these functions it is possible to choose e.g. \fBTLS_server_method()\fR
and be able to negotiate with all possible clients, but to only
allow newer protocols like \s-1TLS 1.0, TLS 1.1, TLS 1.2\s0 or \s-1TLS 1.3.\s0
.PP
The list of protocols available can also be limited using the
\&\fBSSL_OP_NO_SSLv3\fR, \fBSSL_OP_NO_TLSv1\fR, \fBSSL_OP_NO_TLSv1_1\fR,
\&\fBSSL_OP_NO_TLSv1_2\fR and \fBSSL_OP_NO_TLSv1_3\fR
options of the
\&\fBSSL_CTX_set_options\fR\|(3) or \fBSSL_set_options\fR\|(3) functions, but this approach
is not recommended. Clients should avoid creating \*(L"holes\*(R" in the set of
protocols they support. When disabling a protocol, make sure that you also
disable either all previous or all subsequent protocol versions.
In clients, when a protocol version is disabled without disabling \fIall\fR
previous protocol versions, the effect is to also disable all subsequent
protocol versions.
.PP
The SSLv3 protocol is deprecated and should generally not be used.
Applications should typically use \fBSSL_CTX_set_min_proto_version\fR\|(3) to set
the minimum protocol to at least \fB\s-1TLS1_VERSION\s0\fR.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
The following return values can occur:
.IP "\s-1NULL\s0" 4
.IX Item "NULL"
The creation of a new \s-1SSL_CTX\s0 object failed. Check the error stack to find out
the reason.
.IP "Pointer to an \s-1SSL_CTX\s0 object" 4
.IX Item "Pointer to an SSL_CTX object"
The return value points to an allocated \s-1SSL_CTX\s0 object.
.Sp
\&\fBSSL_CTX_up_ref()\fR returns 1 for success and 0 for failure.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fBSSL_CTX_set_options\fR\|(3), \fBSSL_CTX_free\fR\|(3), \fBSSL_accept\fR\|(3),
\&\fBSSL_CTX_set_min_proto_version\fR\|(3), \fBssl\fR\|(7), \fBSSL_set_connect_state\fR\|(3)
.SH "HISTORY"
.IX Header "HISTORY"
Support for SSLv2 and the corresponding \fBSSLv2_method()\fR,
\&\fBSSLv2_server_method()\fR and \fBSSLv2_client_method()\fR functions where
removed in OpenSSL 1.1.0.
.PP
\&\fBSSLv23_method()\fR, \fBSSLv23_server_method()\fR and \fBSSLv23_client_method()\fR
were deprecated and the preferred \fBTLS_method()\fR, \fBTLS_server_method()\fR
and \fBTLS_client_method()\fR functions were added in OpenSSL 1.1.0.
.PP
All version-specific methods were deprecated in OpenSSL 1.1.0.
.SH "COPYRIGHT"
.IX Header "COPYRIGHT"
Copyright 2000\-2019 The OpenSSL Project Authors. All Rights Reserved.
.PP
Licensed under the OpenSSL license (the \*(L"License\*(R"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file \s-1LICENSE\s0 in the source distribution or at
<https://www.openssl.org/source/license.html>.
|
{
"pile_set_name": "Github"
}
|
; RUN: opt < %s -S -loop-unroll -unroll-count=2 | FileCheck %s
; Checks that "llvm.loop.unroll.disable" is set when
; unroll with count set by user has been applied.
;
; CHECK-LABEL: @foo(
; CHECK: llvm.loop.unroll.disable
; 64-iteration loop that increments each element of %a in place; exercised
; by the -unroll-count=2 RUN line above.
define void @foo(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  ; iv counts 0..63; a[iv] = a[iv] + 1
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.taobao.atlas.dexmerge.dx.util;
import com.taobao.atlas.dex.util.ByteOutput;
/**
 * Interface for a sink for binary output. This is similar to
 * {@code java.util.DataOutput}, but no {@code IOExceptions}
 * are declared, and multibyte output is defined to be little-endian.
 */
public interface Output extends ByteOutput {
    /**
     * Gets the current cursor position. This is the same as the number of
     * bytes written to this instance.
     *
     * @return {@code >= 0;} the cursor position
     */
    public int getCursor();

    /**
     * Asserts that the cursor is the given value.
     *
     * @param expectedCursor the expected cursor value
     * @throws RuntimeException thrown if {@code getCursor() !=
     * expectedCursor}
     */
    public void assertCursor(int expectedCursor);

    /**
     * Writes a {@code byte} to this instance.
     *
     * @param value the value to write; all but the low 8 bits are ignored
     */
    public void writeByte(int value);

    /**
     * Writes a {@code short} to this instance.
     *
     * @param value the value to write; all but the low 16 bits are ignored
     */
    public void writeShort(int value);

    /**
     * Writes an {@code int} to this instance.
     *
     * @param value the value to write
     */
    public void writeInt(int value);

    /**
     * Writes a {@code long} to this instance.
     *
     * @param value the value to write
     */
    public void writeLong(long value);

    /**
     * Writes a DWARFv3-style unsigned LEB128 integer. For details,
     * see the "Dalvik Executable Format" document or DWARF v3 section
     * 7.6.
     *
     * @param value value to write, treated as an unsigned value
     * @return {@code 1..5;} the number of bytes actually written
     */
    public int writeUleb128(int value);

    /**
     * Writes a DWARFv3-style signed LEB128 integer. For details,
     * see the "Dalvik Executable Format" document or DWARF v3 section
     * 7.6.
     *
     * @param value value to write
     * @return {@code 1..5;} the number of bytes actually written
     */
    public int writeSleb128(int value);

    /**
     * Writes a {@link ByteArray} to this instance.
     *
     * @param bytes {@code non-null;} the array to write
     */
    public void write(ByteArray bytes);

    /**
     * Writes a portion of a {@code byte[]} to this instance.
     *
     * @param bytes {@code non-null;} the array to write
     * @param offset {@code >= 0;} offset into {@code bytes} for the first
     * byte to write
     * @param length {@code >= 0;} number of bytes to write
     */
    public void write(byte[] bytes, int offset, int length);

    /**
     * Writes a {@code byte[]} to this instance. This is just
     * a convenient shorthand for {@code write(bytes, 0, bytes.length)}.
     *
     * @param bytes {@code non-null;} the array to write
     */
    public void write(byte[] bytes);

    /**
     * Writes the given number of {@code 0} bytes.
     *
     * @param count {@code >= 0;} the number of zeroes to write
     */
    public void writeZeroes(int count);

    /**
     * Adds extra bytes if necessary (with value {@code 0}) to
     * force alignment of the output cursor as given.
     *
     * @param alignment {@code > 0;} the alignment; must be a power of two
     */
    public void alignTo(int alignment);
}
|
{
"pile_set_name": "Github"
}
|
Changes since 2.17.2:
Enhancement:
- support prefix in prerelease when using smart tag
- support project-wise settings
- make `rebase_default_base_ref` more consistent with project-wise settings
- allow to use "git: blame current file" at blame view
- Opening commit from command line respects setting
Feature:
- git: checkout current file at commit
Fix:
- Pedantic commit checks allow a comment on the 2nd line
- On startup in-memory `instances` cache can be empty
- fix set status on cherry-pick done
Other:
- Emphasize in docs when to use run_in_thread
- Fix subprocess command crashing with OSError
- Include a traceback when an exception happens and we don't handle it
Contributors:
- herr kaste
- Simon
- Randy Lai
- Pavel Savchenko
- joan
- Maarten Nieber
|
{
"pile_set_name": "Github"
}
|
<p align="center">
<br>
<a href="https://musubii.qranoko.jp">
<img src="https://i.gyazo.com/fe915845b753d96c6d539022049e7a02.png" width="90"/>
</a>
</p>
<h1 align="center">MUSUBii</h1>
<h3 align="center">Simple CSS Framework for JP</h3>
<p align="center">
MUSUBiiは日本語サイトのインブラウザデザインを想定したシンプルで薄味のレスポンシブ対応CSSフレームワークです。
</p>
<p align="center">
<a aria-label="Made by QRANOKO" href="https://qranoko.jp">
<img src="https://img.shields.io/badge/MADE%20BY%20QRANOKO-212121.svg?style=for-the-badge&labelColor=212121">
</a>
<a aria-label="NPM version" href="https://www.npmjs.com/package/musubii">
<img alt="" src="https://img.shields.io/npm/v/musubii.svg?style=for-the-badge&labelColor=212121">
</a>
<a aria-label="License" href="https://github.com/qrac/musubii/blob/master/LICENSE">
<img alt="" src="https://img.shields.io/npm/l/musubii.svg?style=for-the-badge&labelColor=212121">
</a>
</p>
## Site & Documentation
- https://musubii.qranoko.jp
## About
_「腹が減ってはコードが書けぬ」_
Web デザインの腹ごなしに、CSS フレームワーク「MUSUBii(むすびー)」をどうぞ。MUSUBii は、日本語サイトのデザイン・コーディングを元気づけます。
- OOCSS を応用したラフな設計
- 日本語フォントの利用を想定
- JavaScript 未使用
## Detail
### Markup
要素 `.(xxxx)` 1 つに対して、状態 `.is-(xxxx)` を複数追加してスタイリングするのが基本です。また、すべてのクラス名は英小文字・数字・ハイフン 1 つで構成されています。
### Layers
CSS のレイヤーは大きく 4 つに分類。「下地にレイアウトを組んでボタンやテキストを置いたら調整する」使い方です。実務で固有のスタイルとなる `components` や `pages` が加わることも想定しています。
| Layer | Detail |
| ----------- | -------------------------------- |
| `bases` | 文字色などの下地 |
| `layouts` | セクション・グリッドシステムなど |
| `elements` | ボタン・テキスト・フォームなど |
| `utilities` | 調整用モディファイア |
### Responsive
CSS は 5 つの画面サイズで可変できるレスポンシブウェブデザインになっています。
| Name | Value |
| --------- | ---------------- |
| `mobile` | `~ 575px` |
| `fablet` | `576px ~ 767px` |
| `tablet` | `768px ~ 991px` |
| `desktop` | `992px ~ 1199px` |
| `wide` | `1200px ~` |
### Unit
CSS の単位は em と px を採用。エレメントの大きさをフォントサイズ変更で一括調整できます。また、すべての値には 16 を割れる数値を用いているため、サイズ変更を行った場合に割り切れない端数が出づらくなっています。
### File size
出力される [CSS ファイル](https://github.com/qrac/musubii/blob/master/dist/musubii.min.css) の容量は [Bootstrap](https://github.com/twbs/bootstrap/blob/master/dist/css/bootstrap.min.css)・[Bulma](https://github.com/jgthms/bulma/blob/master/css/bulma.min.css) の半分以下で、72KB 程度です。
## Install
npm インストールからの SCSS 利用を想定しています。
- [musubii - npm](https://www.npmjs.com/package/musubii)
```
npm install musubii
```
```
yarn add musubii
```
## Support
日本で多く使われているブラウザを基準にバグフィックスを行なっています。
| Chrome | Firefox | IE | Edge | Safari(Mac) |
| ------ | ------- | ----- | ------ | ----------- |
| Newest | Newest | \*11~ | Newest | Newest |
| Safari(iOS) | Chrome(Android) | Browser(Android) |
| ----------- | --------------- | ---------------- |
| Newest | Newest | \*4.4~ |
- \*SCSS で CSS Variables を有効化した場合、 IE11・Android Browser では色プロパティが認識されなくなります。[Ponyfill](https://jhildenbiddle.github.io/css-vars-ponyfill/#/) の導入を検討してください。
- \*PostCSS の設定によっては IE11・Android Browser が非対応となります
## License
- CC0 1.0 Public Domain
## Credit
- Author: [Qrac](https://qrac.jp)
- Organization: [QRANOKO](https://qranoko.jp)
|
{
"pile_set_name": "Github"
}
|
.. _data-loaders-grid-fs:
GridFS Loader
=============
The ``GridFSLoader`` allows you to load your images from `MongoDB GridFS`_.
Configuration
-------------
.. code-block:: yaml
# app/config/config.yml
liip_imagine:
filter_sets:
my_special_style:
data_loader: grid_fs
filters:
my_custom_filter: { }
Define a service for the loader:
.. configuration-block::
.. code-block:: yaml
# app/config/services.yml
services:
liip_imagine.binary.loader.grid_fs:
class: Liip\ImagineBundle\Binary\Loader\GridFSLoader
arguments:
- "@doctrine.odm.mongodb.document_manager"
- Application\ImageBundle\Document\Image
tags:
- { name: "liip_imagine.binary.loader", loader: grid_fs }
.. code-block:: xml
<!-- app/config/services.xml -->
<service id="liip_imagine.binary.loader.grid_fs" class="Liip\ImagineBundle\Binary\Loader\GridFSLoader">
<tag name="liip_imagine.binary.loader" loader="grid_fs" />
<argument type="service" id="doctrine.odm.mongodb.document_manager" />
<argument>Application\ImageBundle\Document\Image</argument>
</service>
Usage
-----
Reference the image by its ``id`` when piping to the template helper:
.. configuration-block::
.. code-block:: html+twig
<img src="{{ image.id | imagine_filter('my_thumb') }}" />
.. code-block:: html+php
<img src="<?php echo $this['imagine']->filter($image->getId(), 'my_thumb') ?>" />
.. _`MongoDB GridFS`: http://docs.mongodb.org/manual/applications/gridfs/
|
{
"pile_set_name": "Github"
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
core "k8s.io/client-go/testing"
)
// Evict records a pod eviction request against the fake clientset.
// It builds a "create" action targeting the "pods" resource with the
// "eviction" subresource in the client's namespace, then routes it
// through the fake's reaction chain so tests can observe and assert
// on the request. The returned error is whatever the registered
// reactors produce.
func (c *FakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error {
	podsResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}

	evictAction := core.CreateActionImpl{}
	evictAction.Verb = "create"
	evictAction.Resource = podsResource
	evictAction.Subresource = "eviction"
	evictAction.Namespace = c.ns
	evictAction.Object = eviction

	_, invokeErr := c.Fake.Invokes(evictAction, eviction)
	return invokeErr
}
|
{
"pile_set_name": "Github"
}
|
var path = require("path");
var fs = require("fs");
var remark = require("remark");
//var tocGenerator = require("mdast-util-toc");
var replaceSection = require("mdast-util-heading-range");
var tostring = require("mdast-util-to-string");
var ejs = require("ejs");
var unist = require("../unistHelpers");
var mdNav = require("../mdNav");
const contentsHeading = "Contents";
const minHeadingsForToc = 8;
const maxTocHeadingDepth = 3;
var templateFolder = path.resolve("tools", "doc", "templates");
module.exports = {
"processDocs": processDocs
}
// Rebuild (or strip) the Contents section of every markdown tree in the cache.
function processDocs(mdCache, aggData, errorMessages) {
    Object.keys(mdCache).forEach(pathname =>
        updateFile(mdCache[pathname].mdOutTree, pathname, aggData, errorMessages)
    );
}
// Locate an existing "Contents" heading or insert an empty one where it belongs.
// Returns the number of headings (depths 2..maxTocHeadingDepth) that would
// appear in a table of contents; the Contents heading itself is excluded
// from this count.
function establishContentsSection(mdTree) {
    var contentsFound = false;
    var firstLevel2Index = -1;
    var tocHeadingCount = 0;

    mdTree.children.forEach((node, index) => {
        if (node.type !== "heading") {
            return;
        }

        if ((node.depth > 1) && (node.depth <= maxTocHeadingDepth)) {
            tocHeadingCount++;
        }

        if (node.depth === 2) {
            // Remember where the first level-2 heading sits so a new
            // Contents heading can be inserted just before it.
            if (firstLevel2Index === -1) {
                firstLevel2Index = index;
            }

            // The Contents heading itself must not be counted in the ToC.
            if (!contentsFound && (node.children[0].value === contentsHeading)) {
                contentsFound = true;
                tocHeadingCount--;
            }
        }
    });

    if (!contentsFound) {
        var contentsNode = unist.makeHeading(unist.makeText(contentsHeading), 2);

        // Insert before the first level-2 heading; in the unlikely case a
        // ToC is wanted but no level-2 heading exists, make it the second
        // element of the document.
        var insertAt = (firstLevel2Index !== -1) ? firstLevel2Index : 1;
        mdTree.children.splice(insertAt, 0, contentsNode);
    }

    return tocHeadingCount;
}
// Regenerate (or remove) the Contents section of a single document tree.
// README and versionIndex files are left untouched. Returns true when the
// file was processed, false when it was skipped.
function updateFile(tree, pathname, _aggData, _errorMessages) {
    var baseName = path.basename(pathname, ".md");

    if (baseName.match(/README|versionIndex/)) {
        return false;
    }

    // Make sure a Contents heading exists (or find the current one) and
    // count how many headings a ToC would contain.
    var tocHeadingCount = establishContentsSection(tree);

    if (tocHeadingCount >= minHeadingsForToc) {
        // Replace the body of the Contents section with a freshly built ToC.
        var toc = makeToc(tree);
        replaceSection(tree, contentsHeading, (start, _section, end) => [start, toc, end]);
    } else {
        // Too few headings: strip any existing Contents section entirely.
        replaceSection(tree, contentsHeading, (_start, _section, end) => [end]);
    }

    return true;
}
// Build a markdown ToC node from the document's headings using the toc.ejs
// template. Headings deeper than maxTocHeadingDepth and the Contents heading
// itself are excluded.
function makeToc(tree) {
    var nav = new mdNav.MDNav(tree);
    var tocHeadings = nav.headings(h =>
        (h.depth > 1) && (h.depth <= maxTocHeadingDepth)
    );

    var templateContext = { headings: [] };

    tocHeadings.forEach(heading => {
        var item = heading.item;

        var isContentsHeading =
            (item.children.length > 0) &&
            (item.children[0].type === "text") &&
            (item.children[0].value === "Contents");

        var title = isContentsHeading ? "" : tostring(item).trim();

        if (title === "") {
            return;
        }

        // Anchor generation mirrors GitHub-style slugs: strip punctuation,
        // turn whitespace into hyphens, drop any trailing hyphens.
        var anchor = "#" + title.toLowerCase()
            .replace(/[^a-z0-9\s\-_]/g, '')
            .replace(/\s/g, "-")
            .replace(/\-+$/, '');

        templateContext.headings.push({
            "level": item.depth - 2,
            "title": title,
            "anchor": anchor
        });
    });

    var templatePath = path.resolve(templateFolder, "toc.ejs");
    var template = ejs.compile(fs.readFileSync(templatePath, "utf8"));
    var tocMarkdown = template(templateContext);

    // Parse the rendered markdown and return its first (and only) node.
    return remark().parse(tocMarkdown).children[0];
}
|
{
"pile_set_name": "Github"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.