repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Fitbit/Social/GetBadges.py | 4313 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetBadges
# Gets a user's badges.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetBadges(Choreography):

    def __init__(self, temboo_session):
        """
        Build a GetBadges Choreo bound to the given TembooSession, which must
        contain a valid set of Temboo credentials.
        """
        Choreography.__init__(self, temboo_session, '/Library/Fitbit/Social/GetBadges')

    def new_input_set(self):
        # Fresh, empty input set for configuring an execution of this Choreo.
        return GetBadgesInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return GetBadgesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle used to track an in-flight run of this Choreo.
        return GetBadgesChoreographyExecution(session, exec_id, path)
class GetBadgesInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetBadges
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccessTokenSecret(self, value):
        """((conditional, string) The Access Token Secret retrieved during the OAuth process.)"""
        self._set_input('AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """((conditional, string) The Access Token retrieved during the OAuth process.)"""
        self._set_input('AccessToken', value)

    def set_ConsumerKey(self, value):
        """((required, string) The Consumer Key provided by Fitbit.)"""
        self._set_input('ConsumerKey', value)

    def set_ConsumerSecret(self, value):
        """((required, string) The Consumer Secret provided by Fitbit.)"""
        self._set_input('ConsumerSecret', value)

    def set_ResponseFormat(self, value):
        """((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)"""
        self._set_input('ResponseFormat', value)

    def set_UserID(self, value):
        """((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)"""
        self._set_input('UserID', value)
class GetBadgesResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetBadges Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the built-in ``str``; it is
        # kept unchanged for backward compatibility with keyword callers.
        return json.loads(str)

    def get_Response(self):
        """Retrieve the "Response" output (the raw response from Fitbit), or None if absent."""
        return self._output.get('Response', None)
class GetBadgesChoreographyExecution(ChoreographyExecution):
    # Execution handle for GetBadges; builds the matching result set when the
    # run completes.
    def _make_result_set(self, response, path):
        return GetBadgesResultSet(response, path)
| gpl-3.0 |
MartyParty21/AwakenDreamsClient | mcp/src/minecraft/net/minecraft/util/MouseHelper.java | 837 | package net.minecraft.util;
import org.lwjgl.input.Mouse;
import org.lwjgl.opengl.Display;
/**
 * Thin wrapper around LWJGL's {@link Mouse} that tracks per-frame mouse
 * movement deltas and grab state. Not thread-safe; intended to be driven
 * from the render/input thread.
 */
public class MouseHelper
{
    /** Mouse delta X this frame */
    public int deltaX;

    /** Mouse delta Y this frame */
    public int deltaY;

    /**
     * Grabs the mouse cursor so it doesn't move and isn't seen.
     * Resets the accumulated deltas so a stale movement isn't applied on the
     * first frame after grabbing.
     */
    public void grabMouseCursor()
    {
        Mouse.setGrabbed(true);
        this.deltaX = 0;
        this.deltaY = 0;
    }

    /**
     * Ungrabs the mouse cursor so it can be moved and sets it to the center of the screen.
     */
    public void ungrabMouseCursor()
    {
        // Center first so the visible cursor doesn't jump to wherever the
        // hidden cursor last was.
        Mouse.setCursorPosition(Display.getWidth() / 2, Display.getHeight() / 2);
        Mouse.setGrabbed(false);
    }

    /** Samples the movement accumulated by LWJGL since the previous call. */
    public void mouseXYChange()
    {
        this.deltaX = Mouse.getDX();
        this.deltaY = Mouse.getDY();
    }
}
| gpl-3.0 |
kingland/consul | logging/log_levels.go | 703 | package logging
import (
"strings"
"github.com/hashicorp/go-hclog"
)
var (
allowedLogLevels = []string{"TRACE", "DEBUG", "INFO", "WARN", "ERR", "ERROR"}
)
// AllowedLogLevels returns a copy of the valid log level names, so callers
// cannot mutate the package-level list.
func AllowedLogLevels() []string {
	// Bug fix: the destination must be allocated before copy; copying into a
	// nil slice copies zero elements, so this previously returned [].
	c := make([]string, len(allowedLogLevels))
	copy(c, allowedLogLevels)
	return c
}
// ValidateLogLevel reports whether minLevel (compared case-insensitively)
// is one of the allowed log level names.
func ValidateLogLevel(minLevel string) bool {
	want := strings.ToUpper(minLevel)
	for i := range allowedLogLevels {
		if allowedLogLevels[i] == want {
			return true
		}
	}
	return false
}
// LevelFromString converts a level name to an hclog.Level, accepting the
// legacy "ERR" spelling as an alias for "ERROR".
func LevelFromString(level string) hclog.Level {
	normalized := strings.ToUpper(level)
	if normalized == "ERR" {
		return hclog.LevelFromString("ERROR")
	}
	return hclog.LevelFromString(level)
}
| mpl-2.0 |
ricardclau/packer | vendor/github.com/google/go-github/v33/github/orgs_members.go | 13027 | // Copyright 2013 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"fmt"
)
// Membership represents the status of a user's membership in an organization
// or team.
type Membership struct {
	// URL is the API URL of this membership resource.
	URL *string `json:"url,omitempty"`

	// State is the user's status within the organization or team.
	// Possible values are: "active", "pending"
	State *string `json:"state,omitempty"`

	// Role identifies the user's role within the organization or team.
	// Possible values for organization membership:
	//     member - non-owner organization member
	//     admin - organization owner
	//
	// Possible values for team membership are:
	//     member - a normal member of the team
	//     maintainer - a team maintainer. Able to add/remove other team
	//                  members, promote other team members to team
	//                  maintainer, and edit the team’s name and description
	Role *string `json:"role,omitempty"`

	// For organization membership, the API URL of the organization.
	OrganizationURL *string `json:"organization_url,omitempty"`

	// For organization membership, the organization the membership is for.
	Organization *Organization `json:"organization,omitempty"`

	// For organization membership, the user the membership is for.
	User *User `json:"user,omitempty"`
}
// String implements fmt.Stringer via the package's shared Stringify helper.
func (m Membership) String() string {
	return Stringify(m)
}
// ListMembersOptions specifies optional parameters to the
// OrganizationsService.ListMembers method.
type ListMembersOptions struct {
	// If true (or if the authenticated user is not an owner of the
	// organization), list only publicly visible members.
	// (url:"-" — this field selects the endpoint and is not sent as a query
	// parameter.)
	PublicOnly bool `url:"-"`

	// Filter members returned in the list. Possible values are:
	// 2fa_disabled, all. Default is "all".
	Filter string `url:"filter,omitempty"`

	// Role filters members returned by their role in the organization.
	// Possible values are:
	//     all - all members of the organization, regardless of role
	//     admin - organization owners
	//     member - non-owner organization members
	//
	// Default is "all".
	Role string `url:"role,omitempty"`

	ListOptions
}
// ListMembers lists the members for an organization. When the authenticated
// user owns the organization this returns both concealed and public members;
// otherwise only public members are returned.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-members
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-public-organization-members
func (s *OrganizationsService) ListMembers(ctx context.Context, org string, opts *ListMembersOptions) ([]*User, *Response, error) {
	// PublicOnly routes to a different endpoint rather than a query param.
	endpoint := fmt.Sprintf("orgs/%v/members", org)
	if opts != nil && opts.PublicOnly {
		endpoint = fmt.Sprintf("orgs/%v/public_members", org)
	}

	endpoint, err := addOptions(endpoint, opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	var members []*User
	resp, err := s.client.Do(ctx, req, &members)
	if err != nil {
		return nil, resp, err
	}
	return members, resp, nil
}
// IsMember checks whether user belongs to org. The result is derived from
// the endpoint's HTTP status code.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#check-organization-membership-for-a-user
func (s *OrganizationsService) IsMember(ctx context.Context, org, user string) (bool, *Response, error) {
	req, err := s.client.NewRequest("GET", fmt.Sprintf("orgs/%v/members/%v", org, user), nil)
	if err != nil {
		return false, nil, err
	}

	resp, err := s.client.Do(ctx, req, nil)
	isMember, err := parseBoolResponse(err)
	return isMember, resp, err
}
// IsPublicMember checks whether user is a public member of org. The result
// is derived from the endpoint's HTTP status code.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#check-public-organization-membership-for-a-user
func (s *OrganizationsService) IsPublicMember(ctx context.Context, org, user string) (bool, *Response, error) {
	req, err := s.client.NewRequest("GET", fmt.Sprintf("orgs/%v/public_members/%v", org, user), nil)
	if err != nil {
		return false, nil, err
	}

	resp, err := s.client.Do(ctx, req, nil)
	isPublic, err := parseBoolResponse(err)
	return isPublic, resp, err
}
// RemoveMember removes a user from all teams of an organization.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-an-organization-member
func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user string) (*Response, error) {
	req, err := s.client.NewRequest("DELETE", fmt.Sprintf("orgs/%v/members/%v", org, user), nil)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// PublicizeMembership publicizes a user's membership in an organization. (A
// user cannot publicize the membership for another user.)
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#set-public-organization-membership-for-the-authenticated-user
func (s *OrganizationsService) PublicizeMembership(ctx context.Context, org, user string) (*Response, error) {
	req, err := s.client.NewRequest("PUT", fmt.Sprintf("orgs/%v/public_members/%v", org, user), nil)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// ConcealMembership conceals (un-publicizes) a user's membership in an
// organization.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-public-organization-membership-for-the-authenticated-user
func (s *OrganizationsService) ConcealMembership(ctx context.Context, org, user string) (*Response, error) {
	req, err := s.client.NewRequest("DELETE", fmt.Sprintf("orgs/%v/public_members/%v", org, user), nil)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// ListOrgMembershipsOptions specifies optional parameters to the
// OrganizationsService.ListOrgMemberships method.
type ListOrgMembershipsOptions struct {
	// Filter memberships to include only those with the specified state.
	// Possible values are: "active", "pending".
	State string `url:"state,omitempty"`

	ListOptions
}
// ListOrgMemberships lists the organization memberships for the
// authenticated user.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-memberships-for-the-authenticated-user
func (s *OrganizationsService) ListOrgMemberships(ctx context.Context, opts *ListOrgMembershipsOptions) ([]*Membership, *Response, error) {
	endpoint, err := addOptions("user/memberships/orgs", opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	var memberships []*Membership
	resp, err := s.client.Do(ctx, req, &memberships)
	if err != nil {
		return nil, resp, err
	}
	return memberships, resp, nil
}
// GetOrgMembership gets the membership for a user in a specified
// organization. Passing an empty string for user returns the membership for
// the authenticated user.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#get-an-organization-membership-for-the-authenticated-user
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#get-organization-membership-for-a-user
func (s *OrganizationsService) GetOrgMembership(ctx context.Context, user, org string) (*Membership, *Response, error) {
	// An empty user selects the authenticated-user endpoint.
	endpoint := fmt.Sprintf("user/memberships/orgs/%v", org)
	if user != "" {
		endpoint = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
	}

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	membership := new(Membership)
	resp, err := s.client.Do(ctx, req, membership)
	if err != nil {
		return nil, resp, err
	}
	return membership, resp, nil
}
// EditOrgMembership edits the membership for user in the specified
// organization. Passing an empty string for user edits the membership for
// the authenticated user.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#update-an-organization-membership-for-the-authenticated-user
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#set-organization-membership-for-a-user
func (s *OrganizationsService) EditOrgMembership(ctx context.Context, user, org string, membership *Membership) (*Membership, *Response, error) {
	// The admin endpoint uses PUT; the self-service endpoint uses PATCH.
	endpoint, verb := fmt.Sprintf("user/memberships/orgs/%v", org), "PATCH"
	if user != "" {
		endpoint, verb = fmt.Sprintf("orgs/%v/memberships/%v", org, user), "PUT"
	}

	req, err := s.client.NewRequest(verb, endpoint, membership)
	if err != nil {
		return nil, nil, err
	}

	updated := new(Membership)
	resp, err := s.client.Do(ctx, req, updated)
	if err != nil {
		return nil, resp, err
	}
	return updated, resp, nil
}
// RemoveOrgMembership removes user from the specified organization. If the
// user has been invited to the organization, this will cancel their
// invitation.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-organization-membership-for-a-user
func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, org string) (*Response, error) {
	req, err := s.client.NewRequest("DELETE", fmt.Sprintf("orgs/%v/memberships/%v", org, user), nil)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// ListPendingOrgInvitations returns a list of pending invitations for org.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-pending-organization-invitations
func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org string, opts *ListOptions) ([]*Invitation, *Response, error) {
	endpoint, err := addOptions(fmt.Sprintf("orgs/%v/invitations", org), opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	var invitations []*Invitation
	resp, err := s.client.Do(ctx, req, &invitations)
	if err != nil {
		return nil, resp, err
	}
	return invitations, resp, nil
}
// CreateOrgInvitationOptions specifies the parameters to the OrganizationService.Invite
// method.
type CreateOrgInvitationOptions struct {
	// GitHub user ID for the person you are inviting. Not required if you provide Email.
	InviteeID *int64 `json:"invitee_id,omitempty"`

	// Email address of the person you are inviting, which can be an existing GitHub user.
	// Not required if you provide InviteeID.
	Email *string `json:"email,omitempty"`

	// Specify role for new member. Can be one of:
	//     admin - Organization owners with full administrative rights to the
	//             organization and complete access to all repositories and teams.
	//     direct_member - Non-owner organization members with ability to see
	//             other members and join teams by invitation.
	//     billing_manager - Non-owner organization members with ability to
	//             manage the billing settings of your organization.
	// Default is "direct_member".
	//
	// NOTE(review): Role and TeamID lack the omitempty tag, so they are
	// always serialized (Role as JSON null when unset) — confirm this is the
	// intended wire behavior before changing.
	Role   *string `json:"role"`
	TeamID []int64 `json:"team_ids"`
}
// CreateOrgInvitation invites people to an organization by their GitHub user
// ID or their email address. The authenticated user must be an organization
// owner.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#create-an-organization-invitation
func (s *OrganizationsService) CreateOrgInvitation(ctx context.Context, org string, opts *CreateOrgInvitationOptions) (*Invitation, *Response, error) {
	endpoint := fmt.Sprintf("orgs/%v/invitations", org)

	req, err := s.client.NewRequest("POST", endpoint, opts)
	if err != nil {
		return nil, nil, err
	}

	var invitation *Invitation
	resp, err := s.client.Do(ctx, req, &invitation)
	if err != nil {
		return nil, resp, err
	}
	return invitation, resp, nil
}
// ListOrgInvitationTeams lists all teams associated with an invitation. The
// authenticated user must be an organization owner to see invitations.
//
// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-invitation-teams
func (s *OrganizationsService) ListOrgInvitationTeams(ctx context.Context, org, invitationID string, opts *ListOptions) ([]*Team, *Response, error) {
	endpoint, err := addOptions(fmt.Sprintf("orgs/%v/invitations/%v/teams", org, invitationID), opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	var teams []*Team
	resp, err := s.client.Do(ctx, req, &teams)
	if err != nil {
		return nil, resp, err
	}
	return teams, resp, nil
}
| mpl-2.0 |
dchapyshev/remote-desktop | source/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc | 18572 | // Copyright (c) 2012 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
#include "mkvmuxer/mkvmuxerutil.h"
#ifdef __ANDROID__
#include <fcntl.h>
#include <unistd.h>
#endif
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <new>
#include "common/webmids.h"
#include "mkvmuxer/mkvmuxer.h"
#include "mkvmuxer/mkvwriter.h"
namespace mkvmuxer {
namespace {
// Date elements are always 8 octets in size.
const int kDateElementSize = 8;
// Writes |frame| as a BlockGroup element: a Block plus the optional
// BlockAdditions, DiscardPadding, ReferenceBlock and BlockDuration children.
// |timecode| is the cluster-relative timecode; |timecode_scale| converts the
// frame's absolute (nanosecond) values into timecode ticks. Returns the total
// number of bytes written, or 0 on failure. (Fix: error paths previously
// used `return false`, which only worked because false converts to 0 — the
// byte-count contract is now explicit.)
uint64 WriteBlock(IMkvWriter* writer, const Frame* const frame, int64 timecode,
                  uint64 timecode_scale) {
  // Pre-compute the sizes of every optional child so the BlockGroup payload
  // size can be written before its contents.
  uint64 block_additional_elem_size = 0;
  uint64 block_addid_elem_size = 0;
  uint64 block_more_payload_size = 0;
  uint64 block_more_elem_size = 0;
  uint64 block_additions_payload_size = 0;
  uint64 block_additions_elem_size = 0;
  if (frame->additional()) {
    block_additional_elem_size =
        EbmlElementSize(libwebm::kMkvBlockAdditional, frame->additional(),
                        frame->additional_length());
    block_addid_elem_size = EbmlElementSize(
        libwebm::kMkvBlockAddID, static_cast<uint64>(frame->add_id()));

    block_more_payload_size =
        block_addid_elem_size + block_additional_elem_size;
    block_more_elem_size =
        EbmlMasterElementSize(libwebm::kMkvBlockMore, block_more_payload_size) +
        block_more_payload_size;
    block_additions_payload_size = block_more_elem_size;
    block_additions_elem_size =
        EbmlMasterElementSize(libwebm::kMkvBlockAdditions,
                              block_additions_payload_size) +
        block_additions_payload_size;
  }

  uint64 discard_padding_elem_size = 0;
  if (frame->discard_padding() != 0) {
    discard_padding_elem_size =
        EbmlElementSize(libwebm::kMkvDiscardPadding,
                        static_cast<int64>(frame->discard_padding()));
  }

  // Non-key frames reference the previous block by (scaled) timestamp.
  const uint64 reference_block_timestamp =
      frame->reference_block_timestamp() / timecode_scale;
  uint64 reference_block_elem_size = 0;
  if (!frame->is_key()) {
    reference_block_elem_size =
        EbmlElementSize(libwebm::kMkvReferenceBlock, reference_block_timestamp);
  }

  const uint64 duration = frame->duration() / timecode_scale;
  uint64 block_duration_elem_size = 0;
  if (duration > 0)
    block_duration_elem_size =
        EbmlElementSize(libwebm::kMkvBlockDuration, duration);

  // Block payload: track number vint (1 byte here), 2-byte timecode,
  // 1-byte flags, then the frame data.
  const uint64 block_payload_size = 4 + frame->length();
  const uint64 block_elem_size =
      EbmlMasterElementSize(libwebm::kMkvBlock, block_payload_size) +
      block_payload_size;

  const uint64 block_group_payload_size =
      block_elem_size + block_additions_elem_size + block_duration_elem_size +
      discard_padding_elem_size + reference_block_elem_size;

  if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockGroup,
                              block_group_payload_size)) {
    return 0;
  }

  if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlock, block_payload_size))
    return 0;

  if (WriteUInt(writer, frame->track_number()))
    return 0;

  if (SerializeInt(writer, timecode, 2))
    return 0;

  // For a Block, flags is always 0.
  if (SerializeInt(writer, 0, 1))
    return 0;

  if (writer->Write(frame->frame(), static_cast<uint32>(frame->length())))
    return 0;

  if (frame->additional()) {
    if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockAdditions,
                                block_additions_payload_size)) {
      return 0;
    }

    if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockMore,
                                block_more_payload_size))
      return 0;

    if (!WriteEbmlElement(writer, libwebm::kMkvBlockAddID,
                          static_cast<uint64>(frame->add_id())))
      return 0;

    if (!WriteEbmlElement(writer, libwebm::kMkvBlockAdditional,
                          frame->additional(), frame->additional_length())) {
      return 0;
    }
  }

  if (frame->discard_padding() != 0 &&
      !WriteEbmlElement(writer, libwebm::kMkvDiscardPadding,
                        static_cast<int64>(frame->discard_padding()))) {
    return 0;  // was `return false` — same value, wrong type
  }

  if (!frame->is_key() && !WriteEbmlElement(writer, libwebm::kMkvReferenceBlock,
                                            reference_block_timestamp)) {
    return 0;  // was `return false`
  }

  if (duration > 0 &&
      !WriteEbmlElement(writer, libwebm::kMkvBlockDuration, duration)) {
    return 0;  // was `return false`
  }

  return EbmlMasterElementSize(libwebm::kMkvBlockGroup,
                               block_group_payload_size) +
         block_group_payload_size;
}
// Writes |frame| as a SimpleBlock element and returns the number of bytes
// written, or 0 on failure. |timecode| is cluster-relative.
uint64 WriteSimpleBlock(IMkvWriter* writer, const Frame* const frame,
                        int64 timecode) {
  if (WriteID(writer, libwebm::kMkvSimpleBlock))
    return 0;

  // Payload: 1-byte track number vint, 2-byte timecode, 1-byte flags, data.
  const int32 block_size = static_cast<int32>(frame->length()) + 4;
  if (WriteUInt(writer, block_size))
    return 0;

  if (WriteUInt(writer, static_cast<uint64>(frame->track_number())))
    return 0;

  if (SerializeInt(writer, timecode, 2))
    return 0;

  // Only the keyframe bit (0x80) is ever set here.
  const uint64 flags = frame->is_key() ? 0x80 : 0;
  if (SerializeInt(writer, flags, 1))
    return 0;

  if (writer->Write(frame->frame(), static_cast<uint32>(frame->length())))
    return 0;

  return GetUIntSize(libwebm::kMkvSimpleBlock) + GetCodedUIntSize(block_size) +
         4 + frame->length();
}
} // namespace
// Returns the number of octets needed to store |value| as an EBML coded
// (vint) unsigned integer. Width k holds values up to 2^(7k) - 2; the
// all-ones pattern of each width is reserved.
int32 GetCodedUIntSize(uint64 value) {
  for (int32 size = 1; size < 8; ++size) {
    if (value < ((1ULL << (7 * size)) - 1))
      return size;
  }
  return 8;
}
// Returns the minimal number of octets needed to store |value| as a plain
// big-endian unsigned integer (1..8).
int32 GetUIntSize(uint64 value) {
  for (int32 size = 1; size < 8; ++size) {
    if (value < (1ULL << (8 * size)))
      return size;
  }
  return 8;
}
// Returns the number of octets needed to store |value| as a signed integer.
int32 GetIntSize(int64 value) {
  // Doubling the requested value ensures positive values with their high bit
  // set are written with 0-padding to avoid flipping the signedness.
  // (value ^ -1LL is the bitwise complement, i.e. -value - 1, mapping
  // negatives onto the equivalent non-negative magnitude.)
  const uint64 v = (value < 0) ? value ^ -1LL : value;
  return GetUIntSize(2 * v);
}
// Size of a master element's header: the EBML ID octets plus the coded
// length of |value| (the payload size field). The payload itself is not
// included.
uint64 EbmlMasterElementSize(uint64 type, uint64 value) {
  return GetUIntSize(type) + GetCodedUIntSize(value);
}
// Total serialized size of a signed-integer element: ID + one size octet +
// the payload bytes.
uint64 EbmlElementSize(uint64 type, int64 value) {
  return GetUIntSize(type) + GetIntSize(value) + 1;
}
// Convenience overload: unsigned element with the minimal (non-fixed) width.
uint64 EbmlElementSize(uint64 type, uint64 value) {
  return EbmlElementSize(type, value, 0);
}
// Total serialized size of an unsigned-integer element: ID + one size octet
// + the payload, which is |fixed_size| octets when non-zero, else minimal.
uint64 EbmlElementSize(uint64 type, uint64 value, uint64 fixed_size) {
  const uint64 data_size = (fixed_size > 0) ? fixed_size : GetUIntSize(value);
  return GetUIntSize(type) + data_size + 1;
}
// Total serialized size of a float element: ID + one size octet + 4 payload
// bytes (floats are always written as IEEE-754 single precision here).
uint64 EbmlElementSize(uint64 type, float /* value */) {
  return GetUIntSize(type) + sizeof(float) + 1;
}
// Total serialized size of a string element, or 0 if |value| is NULL:
// ID + coded size field + the string bytes (no terminator).
uint64 EbmlElementSize(uint64 type, const char* value) {
  if (!value)
    return 0;

  // Hoisted: strlen was previously evaluated twice.
  const uint64 length = strlen(value);
  return GetUIntSize(type) + length + GetCodedUIntSize(length);
}
// Total serialized size of a binary element, or 0 if |value| is NULL:
// ID + coded size field + |size| payload bytes.
uint64 EbmlElementSize(uint64 type, const uint8* value, uint64 size) {
  if (!value)
    return 0;
  return GetUIntSize(type) + size + GetCodedUIntSize(size);
}
// Total serialized size of a date element: ID + one size octet + the fixed
// 8-octet date payload.
uint64 EbmlDateElementSize(uint64 type) {
  return GetUIntSize(type) + kDateElementSize + 1;
}
// Writes the low |size| bytes of |value| big-endian (most significant
// requested byte first). Returns 0 on success, negative on failure.
int32 SerializeInt(IMkvWriter* writer, int64 value, int32 size) {
  if (!writer || size < 1 || size > 8)
    return -1;

  for (int32 shift = (size - 1) * 8; shift >= 0; shift -= 8) {
    const uint8 octet = static_cast<uint8>(value >> shift);
    const int32 status = writer->Write(&octet, 1);
    if (status < 0)
      return status;
  }

  return 0;
}
// Writes |f| as a 4-byte big-endian IEEE-754 value. Returns 0 on success,
// negative on failure.
int32 SerializeFloat(IMkvWriter* writer, float f) {
  if (!writer)
    return -1;
  assert(sizeof(uint32) == sizeof(float));
  // This union is merely used to avoid a reinterpret_cast from float& to
  // uint32& which will result in violation of strict aliasing.
  union U32 {
    uint32 u32;
    float f;
  } value;
  value.f = f;

  // Emit the most significant byte first.
  for (int32 i = 1; i <= 4; ++i) {
    const int32 byte_count = 4 - i;
    const int32 bit_count = byte_count * 8;

    const uint8 byte = static_cast<uint8>(value.u32 >> bit_count);

    const int32 status = writer->Write(&byte, 1);

    if (status < 0)
      return status;
  }

  return 0;
}
// Writes |value| as a minimally-sized EBML coded (vint) unsigned integer.
// Returns 0 on success, negative on failure.
int32 WriteUInt(IMkvWriter* writer, uint64 value) {
  if (!writer)
    return -1;
  return WriteUIntSize(writer, value, GetCodedUIntSize(value));
}
// Writes |value| as an EBML coded (vint) unsigned integer. When |size| is
// non-zero the value is padded to exactly that many octets; when it is 0 the
// minimal width is chosen. Returns 0 on success, negative on failure.
int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size) {
  if (!writer || size < 0 || size > 8)
    return -1;

  if (size > 0) {
    // Width k can hold values up to 2^(7k) - 2; the all-ones pattern is
    // reserved for "unknown size".
    const uint64 bit = 1LL << (size * 7);

    if (value > (bit - 2))
      return -1;

    value |= bit;
  } else {
    size = 1;
    int64 bit;

    // Grow the width until the value fits.
    for (;;) {
      bit = 1LL << (size * 7);
      const uint64 max = bit - 2;

      if (value <= max)
        break;

      ++size;
    }

    // Bug fix: this previously returned `false` (i.e. 0), which callers
    // interpret as success even though nothing was written.
    if (size > 8)
      return -1;

    value |= bit;
  }

  return SerializeInt(writer, value, size);
}
// Writes an EBML element ID, notifying the writer of the element start first
// so indexing (e.g. Cues) can record the position. Returns 0 on success.
int32 WriteID(IMkvWriter* writer, uint64 type) {
  if (!writer)
    return -1;

  writer->ElementStartNotify(type, writer->Position());
  return SerializeInt(writer, type, GetUIntSize(type));
}
// Writes a master element header: the ID followed by the coded payload
// |size|. The caller writes the payload afterwards.
bool WriteEbmlMasterElement(IMkvWriter* writer, uint64 type, uint64 size) {
  if (!writer)
    return false;
  // Both helpers return 0 on success; short-circuit preserves write order.
  return WriteID(writer, type) == 0 && WriteUInt(writer, size) == 0;
}
// Convenience overload: unsigned element with the minimal (non-fixed) width.
bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value) {
  return WriteEbmlElement(writer, type, value, 0);
}
// Writes an unsigned-integer element. When |fixed_size| is non-zero the
// payload is zero-padded to exactly that many octets (it must fit).
bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value,
                      uint64 fixed_size) {
  if (!writer)
    return false;

  if (WriteID(writer, type))
    return false;

  uint64 data_size = GetUIntSize(value);
  if (fixed_size > 0) {
    if (data_size > fixed_size)
      return false;
    data_size = fixed_size;
  }

  if (WriteUInt(writer, data_size))
    return false;

  return SerializeInt(writer, value, static_cast<int32>(data_size)) == 0;
}
// Writes a signed-integer element: ID, minimal coded size, then the value.
bool WriteEbmlElement(IMkvWriter* writer, uint64 type, int64 value) {
  if (!writer)
    return false;

  // Consistency fix: previously `return 0;` — same value as false, but the
  // wrong type for a bool-returning function.
  if (WriteID(writer, type))
    return false;

  const uint64 size = GetIntSize(value);
  if (WriteUInt(writer, size))
    return false;

  if (SerializeInt(writer, value, static_cast<int32>(size)))
    return false;

  return true;
}
// Writes a float element: ID, size octet (always 4), then the IEEE-754
// big-endian payload.
bool WriteEbmlElement(IMkvWriter* writer, uint64 type, float value) {
  if (!writer)
    return false;
  return WriteID(writer, type) == 0 && WriteUInt(writer, 4) == 0 &&
         SerializeFloat(writer, value) == 0;
}
// Writes a string element: ID, coded length, then the bytes of |value|
// (no NUL terminator).
bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const char* value) {
  if (!writer || !value)
    return false;

  if (WriteID(writer, type))
    return false;

  const uint64 length = strlen(value);
  if (WriteUInt(writer, length))
    return false;

  return writer->Write(value, static_cast<uint32>(length)) == 0;
}
// Writes a binary element: ID, coded |size|, then |size| bytes of |value|.
// Empty payloads (size < 1) are rejected.
bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const uint8* value,
                      uint64 size) {
  if (!writer || !value || size < 1)
    return false;

  if (WriteID(writer, type))
    return false;

  if (WriteUInt(writer, size))
    return false;

  return writer->Write(value, static_cast<uint32>(size)) == 0;
}
// Writes a date element; dates are always serialized as a signed 8-octet
// payload.
bool WriteEbmlDateElement(IMkvWriter* writer, uint64 type, int64 value) {
  if (!writer)
    return false;
  return WriteID(writer, type) == 0 &&
         WriteUInt(writer, kDateElementSize) == 0 &&
         SerializeInt(writer, value, kDateElementSize) == 0;
}
// Writes |frame| into |cluster| as either a SimpleBlock or a BlockGroup,
// converting its absolute timestamp into a cluster-relative timecode.
// Returns the number of bytes written, or 0 on failure/invalid input.
uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame,
                  Cluster* cluster) {
  if (!writer || !frame || !frame->IsValid() || !cluster ||
      !cluster->timecode_scale())
    return 0;

  // Technically the timecode for a block can be less than the
  // timecode for the cluster itself (remember that block timecode
  // is a signed, 16-bit integer). However, as a simplification we
  // only permit non-negative cluster-relative timecodes for blocks.
  const int64 relative_timecode = cluster->GetRelativeTimecode(
      frame->timestamp() / cluster->timecode_scale());
  if (relative_timecode < 0 || relative_timecode > kMaxBlockTimecode)
    return 0;

  // Frames needing BlockAdditions, DiscardPadding, etc. cannot be simple.
  return frame->CanBeSimpleBlock()
             ? WriteSimpleBlock(writer, frame, relative_timecode)
             : WriteBlock(writer, frame, relative_timecode,
                          cluster->timecode_scale());
}
// Writes a Void element occupying exactly |size| bytes (header included) as
// zero-filled padding. Returns the bytes written, or 0 on failure.
uint64 WriteVoidElement(IMkvWriter* writer, uint64 size) {
  if (!writer)
    return 0;  // fix: was `return false` in a uint64-returning function

  // Subtract one for the void ID and the coded size.
  uint64 void_entry_size = size - 1 - GetCodedUIntSize(size - 1);
  uint64 void_size = EbmlMasterElementSize(libwebm::kMkvVoid, void_entry_size) +
                     void_entry_size;

  if (void_size != size)
    return 0;

  const int64 payload_position = writer->Position();
  if (payload_position < 0)
    return 0;

  if (WriteID(writer, libwebm::kMkvVoid))
    return 0;

  if (WriteUInt(writer, void_entry_size))
    return 0;

  // Zero-fill the padding payload one byte at a time.
  const uint8 value = 0;
  for (int32 i = 0; i < static_cast<int32>(void_entry_size); ++i) {
    if (writer->Write(&value, 1))
      return 0;
  }

  // Sanity-check that exactly |void_size| bytes landed on the stream.
  const int64 stop_position = writer->Position();
  if (stop_position < 0 ||
      stop_position - payload_position != static_cast<int64>(void_size))
    return 0;

  return void_size;
}
// Reports the mkvmuxer library version (0.3.0.0) through the out-params.
// All four pointers must be non-NULL.
void GetVersion(int32* major, int32* minor, int32* build, int32* revision) {
  *major = 0;
  *minor = 3;
  *build = 0;
  *revision = 0;
}
// Generates a pseudo-random 56-bit UID (7 random bytes; the top byte stays 0
// to avoid problems with 8-byte values). The randomness source is selected
// per platform below; |seed| is only consumed on platforms with rand_r or
// srand. NOTE(review): not cryptographically strong — fine for Matroska
// track/segment UIDs, not for security tokens.
uint64 MakeUID(unsigned int* seed) {
  uint64 uid = 0;

#ifdef __MINGW32__
  srand(*seed);
#endif

  for (int i = 0; i < 7; ++i) {  // avoid problems with 8-byte values
    uid <<= 8;

// TODO(fgalligan): Move random number generation to platform specific code.
#ifdef _MSC_VER
    (void)seed;
    const int32 nn = rand();
#elif __ANDROID__
    // Read raw bytes from the kernel RNG; falls back to 1 if the open fails.
    (void)seed;
    int32 temp_num = 1;
    int fd = open("/dev/urandom", O_RDONLY);
    if (fd != -1) {
      read(fd, &temp_num, sizeof(temp_num));
      close(fd);
    }
    const int32 nn = temp_num;
#elif defined __MINGW32__
    const int32 nn = rand();
#else
    const int32 nn = rand_r(seed);
#endif
    const int32 n = 0xFF & (nn >> 4);  // throw away low-order bits
    uid |= n;
  }

  return uid;
}
// Returns true iff |value| is one of the MatrixCoefficients values
// enumerated in mkvmuxer::Colour.
bool IsMatrixCoefficientsValueValid(uint64_t value) {
  switch (value) {
    case mkvmuxer::Colour::kGbr:
    case mkvmuxer::Colour::kBt709:
    case mkvmuxer::Colour::kUnspecifiedMc:
    case mkvmuxer::Colour::kReserved:
    case mkvmuxer::Colour::kFcc:
    case mkvmuxer::Colour::kBt470bg:
    case mkvmuxer::Colour::kSmpte170MMc:
    case mkvmuxer::Colour::kSmpte240MMc:
    case mkvmuxer::Colour::kYcocg:
    case mkvmuxer::Colour::kBt2020NonConstantLuminance:
    case mkvmuxer::Colour::kBt2020ConstantLuminance:
      return true;
    default:
      return false;
  }
}
// Returns true iff |value| is a valid horizontal chroma siting value.
bool IsChromaSitingHorzValueValid(uint64_t value) {
  switch (value) {
    case mkvmuxer::Colour::kUnspecifiedCsh:
    case mkvmuxer::Colour::kLeftCollocated:
    case mkvmuxer::Colour::kHalfCsh:
      return true;
    default:
      return false;
  }
}
// Returns true iff |value| is a valid vertical chroma siting value.
bool IsChromaSitingVertValueValid(uint64_t value) {
  switch (value) {
    case mkvmuxer::Colour::kUnspecifiedCsv:
    case mkvmuxer::Colour::kTopCollocated:
    case mkvmuxer::Colour::kHalfCsv:
      return true;
    default:
      return false;
  }
}
// Returns true iff |value| is a valid Colour Range value.
bool IsColourRangeValueValid(uint64_t value) {
  switch (value) {
    case mkvmuxer::Colour::kUnspecifiedCr:
    case mkvmuxer::Colour::kBroadcastRange:
    case mkvmuxer::Colour::kFullRange:
    case mkvmuxer::Colour::kMcTcDefined:
      return true;
    default:
      return false;
  }
}
// Returns true iff |value| is one of the TransferCharacteristics values
// enumerated in mkvmuxer::Colour.
bool IsTransferCharacteristicsValueValid(uint64_t value) {
  switch (value) {
    case mkvmuxer::Colour::kIturBt709Tc:
    case mkvmuxer::Colour::kUnspecifiedTc:
    case mkvmuxer::Colour::kReservedTc:
    case mkvmuxer::Colour::kGamma22Curve:
    case mkvmuxer::Colour::kGamma28Curve:
    case mkvmuxer::Colour::kSmpte170MTc:
    case mkvmuxer::Colour::kSmpte240MTc:
    case mkvmuxer::Colour::kLinear:
    case mkvmuxer::Colour::kLog:
    case mkvmuxer::Colour::kLogSqrt:
    case mkvmuxer::Colour::kIec6196624:
    case mkvmuxer::Colour::kIturBt1361ExtendedColourGamut:
    case mkvmuxer::Colour::kIec6196621:
    case mkvmuxer::Colour::kIturBt202010bit:
    case mkvmuxer::Colour::kIturBt202012bit:
    case mkvmuxer::Colour::kSmpteSt2084:
    case mkvmuxer::Colour::kSmpteSt4281Tc:
    case mkvmuxer::Colour::kAribStdB67Hlg:
      return true;
    default:
      return false;
  }
}
// Returns true iff |value| is one of the Primaries values enumerated in
// mkvmuxer::Colour.
bool IsPrimariesValueValid(uint64_t value) {
  switch (value) {
    case mkvmuxer::Colour::kReservedP0:
    case mkvmuxer::Colour::kIturBt709P:
    case mkvmuxer::Colour::kUnspecifiedP:
    case mkvmuxer::Colour::kReservedP3:
    case mkvmuxer::Colour::kIturBt470M:
    case mkvmuxer::Colour::kIturBt470Bg:
    case mkvmuxer::Colour::kSmpte170MP:
    case mkvmuxer::Colour::kSmpte240MP:
    case mkvmuxer::Colour::kFilm:
    case mkvmuxer::Colour::kIturBt2020:
    case mkvmuxer::Colour::kSmpteSt4281P:
    case mkvmuxer::Colour::kJedecP22Phosphors:
      return true;
    default:
      return false;
  }
}
} // namespace mkvmuxer
| mpl-2.0 |
alesstimec/juju | apiserver/hostkeyreporter/shim.go | 509 | // Copyright 2016 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package hostkeyreporter
import (
"github.com/juju/errors"
"github.com/juju/juju/apiserver/facade"
"github.com/juju/juju/state"
)
// newFacade wraps New to express the supplied *state.State as a Backend.
// Any error from New is returned wrapped with a trace.
func newFacade(st *state.State, res facade.Resources, auth facade.Authorizer) (*Facade, error) {
	// Named "f" rather than "facade" so the local does not shadow the
	// imported facade package.
	f, err := New(st, res, auth)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return f, nil
}
| agpl-3.0 |
geothomasp/kcmit | coeus-impl/src/main/java/org/kuali/kra/award/home/ContactUsage.java | 2607 | /*
* Kuali Coeus, a comprehensive research administration system for higher education.
*
* Copyright 2005-2015 Kuali, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.kuali.kra.award.home;
import org.kuali.coeus.common.framework.module.CoeusModule;
import org.kuali.coeus.sys.framework.model.KcPersistableBusinessObjectBase;
/**
* This class represents the ContactUsage business object and is mapped
* to the CONTACT_USAGE table.
*/
public class ContactUsage extends KcPersistableBusinessObjectBase {
/**
* Comment for <code>serialVersionUID</code>
*/
private static final long serialVersionUID = 2198994554339151877L;
// Primary key of the CONTACT_USAGE row.
private Long contactUsageId;
// Foreign key to the contact type lookup table.
private String contactTypeCode;
// Foreign key identifying the Coeus module this usage applies to.
private String moduleCode;
// Reference object resolved from contactTypeCode.
private ContactType contactType;
// Reference object resolved from moduleCode.
private CoeusModule coeusModule;
/**
* Default constructor required by the persistence framework.
*/
public ContactUsage() {
}
/**
* Convenience constructor setting both foreign-key codes.
*
* @param contactTypeCode the contact type code
* @param moduleCode the Coeus module code
*/
public ContactUsage(String contactTypeCode, String moduleCode) {
this.contactTypeCode = contactTypeCode;
this.moduleCode = moduleCode;
}
/** @return the primary key of this usage record */
public Long getContactUsageId() {
return contactUsageId;
}
/** @param contactUsageId the primary key to set */
public void setContactUsageId(Long contactUsageId) {
this.contactUsageId = contactUsageId;
}
/** @return the contact type code */
public String getContactTypeCode() {
return contactTypeCode;
}
/** @param contactTypeCode the contact type code to set */
public void setContactTypeCode(String contactTypeCode) {
this.contactTypeCode = contactTypeCode;
}
/** @return the Coeus module code */
public String getModuleCode() {
return moduleCode;
}
/** @param moduleCode the Coeus module code to set */
public void setModuleCode(String moduleCode) {
this.moduleCode = moduleCode;
}
/** @return the resolved contact type reference object */
public ContactType getContactType() {
return contactType;
}
/** @param contactType the contact type reference to set */
public void setContactType(ContactType contactType) {
this.contactType = contactType;
}
/** @return the resolved Coeus module reference object */
public CoeusModule getCoeusModule() {
return coeusModule;
}
/** @param coeusModule the Coeus module reference to set */
public void setCoeusModule(CoeusModule coeusModule) {
this.coeusModule = coeusModule;
}
}
| agpl-3.0 |
artsmorgan/crmtecnosagot | app/protected/modules/users/tests/unit/UserTest.php | 73940 | <?php
/*********************************************************************************
* Zurmo is a customer relationship management program developed by
* Zurmo, Inc. Copyright (C) 2015 Zurmo Inc.
*
* Zurmo is free software; you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License version 3 as published by the
* Free Software Foundation with the addition of the following permission added
* to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK
* IN WHICH THE COPYRIGHT IS OWNED BY ZURMO, ZURMO DISCLAIMS THE WARRANTY
* OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* Zurmo is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License along with
* this program; if not, see http://www.gnu.org/licenses or write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*
* You can contact Zurmo, Inc. with a mailing address at 27 North Wacker Drive
* Suite 370 Chicago, IL 60606. or at email address contact@zurmo.com.
*
* The interactive user interfaces in original and modified versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Public License version 3.
*
* In accordance with Section 7(b) of the GNU Affero General Public License version 3,
* these Appropriate Legal Notices must retain the display of the Zurmo
* logo and Zurmo copyright notice. If the display of the logo is not reasonably
* feasible for technical reasons, the Appropriate Legal Notices must display the words
* "Copyright Zurmo Inc. 2015. All rights reserved".
********************************************************************************/
class UserTest extends ZurmoBaseTest
{
/**
* One-time fixture setup: creates the 'super' administrator used by
* every test in this class.
*/
public static function setUpBeforeClass()
{
parent::setUpBeforeClass();
SecurityTestHelper::createSuperAdmin();
}
/**
* Per-test setup: run every test as the 'super' user.
*/
public function setUp()
{
parent::setUp();
Yii::app()->user->userModel = User::getByUsername('super');
}
/**
* You can only use setIsSystemUser to set the isSystemUser attribute.
* Setting the attribute directly on the model must throw.
* @expectedException NotSupportedException
*/
public function testCannotSetIsSystemUserDirectlyOnModel()
{
$user = User::getByUsername('super');
Yii::app()->user->userModel = $user;
$user = new User();
$user->isSystemUser = true;
}
/**
* Verifies that a duplicate primary email address fails validation with
* the expected error message, and that saving a user with no email at
* all is still allowed.
*/
public function testEmailUniquenessValidation()
{
$user = User::getByUsername('super');
Yii::app()->user->userModel = $user;
$user = new User();
$user->username = 'usera';
$user->lastName = 'UserA';
$user->setPassword('myuser');
$emailAddress = 'userA@example.com';
$user->primaryEmail->emailAddress = $emailAddress;
$saved = $user->save();
$this->assertTrue($saved);
// Second user re-uses the same email address and must fail to save.
$user2 = new User();
$user2->username = 'userb';
$user2->lastName = 'UserB';
$user2->setPassword('myuser');
$emailAddress = 'userA@example.com';
$user2->primaryEmail->emailAddress = $emailAddress;
$saved = $user2->save();
$this->assertFalse($saved);
$validationErrors = $user2->getErrors();
$this->assertTrue(count($validationErrors) > 0);
// Todo: fix array keys below
$this->assertTrue(isset($validationErrors['primaryEmail']));
$this->assertTrue(isset($validationErrors['primaryEmail']['emailAddress']));
$this->assertEquals('Email address already exists in system.', $validationErrors['primaryEmail']['emailAddress'][0]);
// Try to save user without email address
$user3 = new User();
$user3->username = 'userc';
$user3->lastName = 'UserC';
$user3->setPassword('myuser');
$saved = $user3->save();
$this->assertTrue($saved);
}
/**
* Ensures that custom dropdown data saved for 'Titles' is the data a new
* User's title attribute resolves to.
*/
public function testSetTitleValuesAndRetrieveTitleValuesFromUser()
{
$titles = array('Mr.', 'Mrs.', 'Ms.', 'Dr.', 'Swami');
$customFieldData = CustomFieldData::getByName('Titles');
$customFieldData->serializedData = serialize($titles);
$this->assertTrue($customFieldData->save());
$dropDownArray = unserialize($customFieldData->serializedData);
$this->assertEquals($titles, $dropDownArray);
$user = new User();
$dropDownModel = $user->title;
$dropDownArray = unserialize($dropDownModel->data->serializedData);
$this->assertEquals($titles, $dropDownArray);
}
/**
* Regression guard: saving the currently-logged-in user must not cause
* an endless loop.
*/
public function testSaveCurrentUser()
{
//some endless loop if you are trying to save yourself
$user = User::getByUsername('super');
Yii::app()->user->userModel = $user;
$user->department = 'somethingNew';
$this->assertTrue($user->save());
}
/**
* Creates a user ('bill') and retrieves it again by id. Several later
* tests depend on this user existing.
*/
public function testCreateAndGetUserById()
{
Yii::app()->user->userModel = User::getByUsername('super');
$user = new User();
$user->username = 'bill';
$user->title->value = 'Mr.';
$user->firstName = 'Bill';
$user->lastName = 'Billson';
$user->setPassword('billy');
$this->assertTrue($user->save());
$id = $user->id;
unset($user);
$user = User::getById($id);
$this->assertEquals('bill', $user->username);
}
/**
* Creates a user with a manager relation and verifies both sides of the
* relation are persisted and retrievable.
* @depends testCreateAndGetUserById
*/
public function testCreateUserWithRelatedUser()
{
Yii::app()->user->userModel = User::getByUsername('super');
$manager = new User();
$manager->username = 'bobi';
$manager->title->value = 'Mr.';
$manager->firstName = 'Bob';
$manager->lastName = 'Bobson';
$manager->setPassword('bobii');
$this->assertTrue($manager->save());
$user = new User();
$user->username = 'dick';
$user->title->value = 'Mr.';
$user->firstName = 'Dick';
$user->lastName = 'Dickson';
$user->manager = $manager;
$user->setPassword('dickster');
$this->assertTrue($user->save());
$id = $user->id;
$managerId = $user->manager->id;
unset($user);
$manager = User::getById($managerId);
$this->assertEquals('bobi', $manager->username);
$user = User::getById($id);
$this->assertEquals('dick', $user->username);
$this->assertEquals('bobi', $user->manager->username);
}
/**
* Fetching a non-existent user id must throw NotFoundException.
* @depends testCreateAndGetUserById
* @expectedException NotFoundException
*/
public function testCreateAndGetUserByIdThatDoesntExist()
{
$user = User::getById(123456);
}
/**
* Retrieves the 'bill' user created earlier by username.
* @depends testCreateAndGetUserById
*/
public function testGetByUsername()
{
$user = User::getByUsername('bill');
$this->assertEquals('bill', $user->username);
}
/**
* Checks the singular and plural model labels for User.
* @depends testGetByUsername
*/
public function testGetLabel()
{
$user = User::getByUsername('bill');
$this->assertEquals('User', $user::getModelLabelByTypeAndLanguage('Singular'));
$this->assertEquals('Users', $user::getModelLabelByTypeAndLanguage('Plural'));
}
/**
* Looking up an unknown username must throw NotFoundException.
* @depends testGetByUsername
* @expectedException NotFoundException
*/
public function testGetByUsernameForNonExistentUsername()
{
User::getByUsername('noodles');
}
/**
* Exercises UserSearch::getUsersByPartialFullName with partial first and
* last name fragments, case variations, and multiple matching users.
* Creates additional users 'dison' and 'graham' used by later searches.
* @depends testCreateAndGetUserById
*/
public function testSearchByPartialName()
{
$user1= User::getByUsername('dick');
$users = UserSearch::getUsersByPartialFullName('di', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user1->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$user2 = User::getByUsername('bill');
$users = UserSearch::getUsersByPartialFullName('bi', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user2->id, $users[0]->id);
$this->assertEquals('bill', $users[0]->username);
$this->assertEquals('Bill Billson', $users[0]->getFullName());
$user3 = new User();
$user3->username = 'dison';
$user3->title->value = 'Mr.';
$user3->firstName = 'Dison';
$user3->lastName = 'Smith';
$user3->setPassword('dison');
$this->assertTrue($user3->save());
$user4 = new User();
$user4->username = 'graham';
$user4->title->value = 'Mr.';
$user4->firstName = 'Graham';
$user4->lastName = 'Dillon';
$user4->setPassword('graham');
$this->assertTrue($user4->save());
// 'di' now matches a first name, a last name prefix, and a last name
// containing users ordered by full name.
$users = UserSearch::getUsersByPartialFullName('di', 5);
$this->assertEquals(3, count($users));
$this->assertEquals($user1->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$this->assertEquals($user3->id, $users[1]->id);
$this->assertEquals('dison', $users[1]->username);
$this->assertEquals('Dison Smith', $users[1]->getFullName());
$this->assertEquals($user4->id, $users[2]->id);
$this->assertEquals('graham', $users[2]->username);
$this->assertEquals('Graham Dillon', $users[2]->getFullName());
$users = UserSearch::getUsersByPartialFullName('g', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user4->id, $users[0]->id);
$this->assertEquals('graham', $users[0]->username);
$this->assertEquals('Graham Dillon', $users[0]->getFullName());
// Search must be case-insensitive.
$users = UserSearch::getUsersByPartialFullName('G', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user4->id, $users[0]->id);
$this->assertEquals('graham', $users[0]->username);
$this->assertEquals('Graham Dillon', $users[0]->getFullName());
$users = UserSearch::getUsersByPartialFullName('Dil', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user4->id, $users[0]->id);
$this->assertEquals('graham', $users[0]->username);
$this->assertEquals('Graham Dillon', $users[0]->getFullName());
}
/**
* Searches with a full first name followed by a space and a partial last
* name ('dick d', 'dick di'), including mixed-case variants.
* @depends testSearchByPartialName
*/
public function testSearchByPartialNameWithFirstNamePlusPartialLastName()
{
$user = User::getByUsername('dick');
$users = UserSearch::getUsersByPartialFullName('dick', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$users = UserSearch::getUsersByPartialFullName('dick ', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$users = UserSearch::getUsersByPartialFullName('dick d', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$user = User::getByUsername('dick');
$users = UserSearch::getUsersByPartialFullName('dick di', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$users = UserSearch::getUsersByPartialFullName('Dick di', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
$users = UserSearch::getUsersByPartialFullName('dick Di', 5);
$this->assertEquals(1, count($users));
$this->assertEquals($user->id, $users[0]->id);
$this->assertEquals('dick', $users[0]->username);
$this->assertEquals('Dick Dickson', $users[0]->getFullName());
}
/**
* Creates a user ('jason') with a title, then clears the title by direct
* assignment; the related CustomField remains a non-null model.
* @depends testCreateAndGetUserById
*/
public function testCreateWithTitleThenClearTitleDirectly()
{
$user = new User();
$user->username = 'jason';
$user->title->value = 'Mr.';
$user->firstName = 'Jason';
$user->lastName = 'Jasonson';
$user->setPassword('jason');
$this->assertTrue($user->save());
$id = $user->id;
unset($user);
$user = User::getById($id);
$this->assertEquals('jason', $user->username);
$this->assertEquals('Mr.', strval($user->title));
$user->title = null;
// Assigning null does not null the relation model itself.
$this->assertNotNull($user->title);
$this->assertTrue($user->save());
}
/**
* Clears the title via massive assignment (setAttributes) with an empty
* dropdown value; the title should render as '(None)'.
* @depends testCreateWithTitleThenClearTitleDirectly
*/
public function testCreateWithTitleThenClearTitleWithSetAttributesWithEmptyId()
{
$user = User::getByUsername('jason');
$user->title->value = 'Mr.';
$this->assertEquals('Mr.', strval($user->title));
$this->assertTrue($user->save());
$_FAKEPOST = array(
'User' => array(
'title' => array(
'value' => '',
)
)
);
$user->setAttributes($_FAKEPOST['User']);
$this->assertEquals('(None)', strval($user->title));
$this->assertTrue($user->save());
}
/**
* Same as the empty-id variant: setAttributes with an empty title value
* must leave the title reading '(None)'.
* @depends testCreateWithTitleThenClearTitleWithSetAttributesWithEmptyId
*/
public function testCreateWithTitleThenClearTitleWithSetAttributesWithNullId()
{
$user = User::getByUsername('jason');
$user->title->value = 'Mr.';
$this->assertEquals('Mr.', strval($user->title));
$this->assertTrue($user->save());
$_FAKEPOST = array(
'User' => array(
'title' => array(
'value' => '',
)
)
);
$user->setAttributes($_FAKEPOST['User']);
$this->assertEquals('(None)', strval($user->title));
$this->assertTrue($user->save());
}
/**
* setAttributes with a real title value ('Sir') must update the title.
* @depends testCreateWithTitleThenClearTitleWithSetAttributesWithNullId
*/
public function testCreateWithTitleThenClearTitleWithSetAttributesWithRealId()
{
$user = User::getByUsername('jason');
$user->title->value = 'Mr.';
$this->assertEquals('Mr.', strval($user->title));
$this->assertTrue($user->save());
$_FAKEPOST = array(
'User' => array(
'title' => array(
'value' => 'Sir',
)
)
);
$user->setAttributes($_FAKEPOST['User']);
$this->assertEquals('Sir', strval($user->title));
$this->assertTrue($user->save());
}
/**
* Posting an empty manager id must validate cleanly (no manager is a
* valid state).
*/
public function testSaveUserWithNoManager()
{
$user = UserTestHelper::createBasicUser('Steven');
$_FAKEPOST = array(
'User' => array(
'manager' => array(
'id' => '',
),
),
);
$user->setAttributes($_FAKEPOST['User']);
$user->validate();
$this->assertEquals(array(), $user->getErrors());
}
/**
* Simulates a full edit-form POST (all fields, mostly empty) against an
* existing user and expects a clean validate and save.
* @depends testCreateWithTitleThenClearTitleWithSetAttributesWithRealId
* @depends testSaveUserWithNoManager
*/
public function testSaveExistingUserWithFakePost()
{
$user = User::getByUsername('jason');
$_FAKEPOST = array(
'User' => array(
'title' => array(
'value' => '',
),
'firstName' => 'Jason',
'lastName' => 'Jasonson',
'username' => 'jason',
'jobTitle' => '',
'officePhone' => '',
'manager' => array(
'id' => '',
),
'mobilePhone' => '',
'department' => '',
'primaryEmail' => array(
'emailAddress' => '',
'optOut' => 0,
'isInvalid' => 0,
),
'primaryAddress' => array(
'street1' => '',
'street2' => '',
'city' => '',
'state' => '',
'postalCode' => '',
'country' => '',
)
)
);
$user->setAttributes($_FAKEPOST['User']);
$user->validate();
$this->assertEquals(array(), $user->getErrors());
$this->assertTrue($user->save());
}
/**
* Placeholder for validating that a user cannot be their own manager.
* NOTE(review): the assertions are commented out below, so this test
* currently verifies nothing — the self-manager/recursion checks remain
* to be implemented.
* @depends testSaveExistingUserWithFakePost
*/
public function testSaveExistingUserWithUsersIdAsManagerId()
{
$user = User::getByUsername('jason');
$_FAKEPOST = array(
'User' => array(
'title' => array(
'value' => '',
),
'firstName' => 'Jason',
'lastName' => 'Jasonson',
'username' => 'jason',
'jobTitle' => '',
'officePhone' => '',
'manager' => array(
'id' => $user->id,
),
)
);
/*
$user->setAttributes($_FAKEPOST['User']);
$this->assertFalse($user->save());
$errors = $user->getErrors();
//todo: assert an error is present for manager, assert the error says can't
//select self or something along those lines.
*/
//probably should also check if you are picking a manager that is creating recursion,
//not necessarily yourself, but someone in the chain of yourself already.
}
/**
* Verifies that Person attributes mixed into User (title, names,
* jobTitle) behave as native attributes and survive a save/reload
* round trip.
*/
public function testUserMixingInPerson()
{
// See comments on User::getDefaultMetadata().
$user = new User();
$this->assertTrue($user->isAttribute('username'));
$this->assertTrue($user->isAttribute('title'));
$this->assertTrue($user->isAttribute('firstName'));
$this->assertTrue($user->isAttribute('lastName'));
$this->assertTrue($user->isAttribute('jobTitle'));
$user->username = 'oliver';
$user->title->value = 'Mr.';
$user->firstName = 'Oliver';
$user->lastName = 'Oliverson';
$user->jobTitle = 'Recruiter';
$this->assertEquals('oliver', $user->username);
$this->assertEquals('Oliver Oliverson', strval($user));
$this->assertEquals('Recruiter', $user->jobTitle);
$user->setPassword('oliver');
$this->assertTrue($user->save());
$id = $user->id;
$user->forget();
unset($user);
$user = User::getById($id);
$this->assertEquals('oliver', $user->username);
$this->assertEquals('Oliver Oliverson', strval($user));
$this->assertEquals('Recruiter', $user->jobTitle);
}
/**
* Creates a user through UserPasswordForm in the 'createUser' scenario
* from a simulated POST and verifies the empty optional fields persist
* as null.
*/
public function testCreateNewUserFromPostNoBadValues()
{
$_FAKEPOST = array(
'UserPasswordForm' => array(
'title' => array(
'value' => '',
),
'firstName' => 'Red',
'lastName' => 'Jiambo',
'username' => 'redjiambo',
'newPassword' => '123456',
'newPassword_repeat' => '123456',
'jobTitle' => '',
'officePhone' => '',
'manager' => array(
'id' => '',
),
'mobilePhone' => '',
'department' => '',
'primaryEmail' => array(
'emailAddress' => '',
'optOut' => 0,
'isInvalid' => 0,
),
'primaryAddress' => array(
'street1' => '',
'street2' => '',
'city' => '',
'state' => '',
'postalCode' => '',
'country' => '',
)
)
);
$user = new User();
$user->setScenario('createUser');
$userPasswordForm = new UserPasswordForm($user);
$userPasswordForm->setScenario('createUser');
$userPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$userPasswordForm->validate();
$this->assertEquals(array(), $userPasswordForm->getErrors());
$this->assertTrue($userPasswordForm->save());
$user->forget();
$user = User::getByUsername('redjiambo');
$this->assertEquals('Red', $user->firstName);
$this->assertEquals(null, $user->officePhone);
$this->assertEquals(null, $user->jobTitle);
$this->assertEquals(null, $user->mobilePhone);
$this->assertEquals(null, $user->department);
}
/**
* Deleting a user must cascade-delete owned related models (title
* custom field, primary address, primary email) while leaving shared
* models (the group and the manager user) intact.
* @depends testCreateAndGetUserById
*/
public function testDeleteUserCascadesToDeleteEverythingItShould()
{
$group = new Group();
$group->name = 'Os mais legais do Rio';
$this->assertTrue($group->save());
$user = new User();
$user->username = 'carioca';
$user->title->value = 'Senhor';
$user->firstName = 'José';
$user->lastName = 'Olivereira';
$user->jobTitle = 'Traficante';
$user->primaryAddress->street1 = 'R. das Mulheres, 69';
$user->primaryAddress->street2 = '';
$user->primaryAddress->city = 'Centro';
$user->primaryAddress->state = 'RJ';
$user->primaryAddress->postalCode = '';
$user->primaryAddress->country = 'Brasil';
$user->primaryEmail->emailAddress = 'jose@gmail.com';
$user->primaryEmail->optOut = 1;
$user->primaryEmail->isInvalid = 0;
$user->manager = User::getByUsername('bill');
$user->setPassword('Senhor');
$user->groups->add($group);
$user->save();
$this->assertTrue($user->save());
$titleId = $user->title->id;
$primaryAddressId = $user->primaryAddress->id;
$primaryEmailId = $user->primaryEmail ->id;
$groupId = $group->id;
$user->delete();
unset($user);
unset($group);
// These must still exist (would throw NotFoundException otherwise).
Group::getById($groupId);
User::getByUsername('bill');
try
{
CustomField::getById($titleId);
$this->fail("Title should have been deleted.");
}
catch (NotFoundException $e)
{
}
try
{
Address::getById($primaryAddressId);
$this->fail("Address should have been deleted.");
}
catch (NotFoundException $e)
{
}
try
{
Email::getById($primaryEmailId);
$this->fail("Email should have been deleted.");
}
catch (NotFoundException $e)
{
}
}
/**
* Assigns a role to a user via massive assignment, then removes it with
* an empty role id, checking persistence after each save.
* @depends testCreateAndGetUserById
*/
public function testCanRemoveRoleFromUser()
{
Yii::app()->user->userModel = User::getByUsername('super');
$parentRole = new Role();
$parentRole->name = 'SomeParentRole';
$saved = $parentRole->save();
$this->assertTrue($parentRole->id > 0);
$this->assertTrue($saved);
$role = new Role();
$role->name = 'SomeRole';
$role->role = $parentRole;
$saved = $role->save();
$this->assertTrue($parentRole->id > 0);
$this->assertEquals($parentRole->id, $role->role->id);
$this->assertTrue($role->id > 0);
$this->assertTrue($saved);
$user = User::getByUsername('bill');
$this->assertTrue($user->id > 0);
$this->assertFalse($user->role->id > 0);
$fakePost = array(
'role' => array(
'id' => $role->id,
)
);
$user->setAttributes($fakePost);
$saved = $user->save();
$this->assertTrue($saved);
$user->forget();
unset($user);
$user = User::getByUsername('bill');
$this->assertTrue($user->id > 0);
$this->assertTrue($role->id > 0);
$this->assertEquals($role->id, $user->role->id);
// Now clear the role with an empty id and confirm it sticks.
$fakePost = array(
'role' => array(
'id' => '',
)
);
$user->setAttributes($fakePost);
$this->assertFalse($user->role->id > 0);
$saved = $user->save();
$this->assertTrue($saved);
$user->forget();
unset($user);
$user = User::getByUsername('bill');
$this->assertTrue($user->id > 0);
$this->assertFalse($user->role->id > 0);
}
/**
* End-to-end coverage of username/password policies: minimum lengths,
* strong-password enforcement (uppercase/lowercase/number rules), the
* web-login right, authentication through UserIdentity, and inheritance
* of the Everyone group's policies by new users. Renames 'bill' to
* 'abcdefg' along the way and restores the Everyone defaults at the end.
* @depends testCreateAndGetUserById
*/
public function testPasswordUserNamePolicyChangesValidationAndLogin()
{
$bill = User::getByUsername('bill');
$bill->setScenario('changePassword');
$billPasswordForm = new UserPasswordForm($bill);
$billPasswordForm->setScenario('changePassword');
$this->assertEquals(null, $bill->getEffectivePolicy('UsersModule', UsersModule::POLICY_ENFORCE_STRONG_PASSWORDS));
$this->assertEquals(5, $bill->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_PASSWORD_LENGTH));
$this->assertEquals(3, $bill->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH));
$_FAKEPOST = array(
'UserPasswordForm' => array(
'username' => 'ab',
'newPassword' => 'ab',
'newPassword_repeat' => 'ab',
)
);
$billPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$this->assertFalse($billPasswordForm->save());
$errors = array(
'newPassword' => array(
'The password is too short. Minimum length is 5.',
),
);
$this->assertEquals($errors, $billPasswordForm->getErrors());
$_FAKEPOST = array(
'UserPasswordForm' => array(
'username' => 'abcdefg',
'newPassword' => 'abcdefg',
'newPassword_repeat' => 'abcdefg',
)
);
$billPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$this->assertEquals('abcdefg', $billPasswordForm->username);
$this->assertEquals('abcdefg', $billPasswordForm->newPassword);
$validated = $billPasswordForm->validate();
$this->assertTrue($validated);
$saved = $billPasswordForm->save();
$this->assertTrue($saved);
$bill->setPolicy('UsersModule', UsersModule::POLICY_ENFORCE_STRONG_PASSWORDS, Policy::YES);
// If security is optimized the optimization will see the policy value in the database
// and so wont use it in validating, so the non-strong password wont be validated as
// invalid until the next save.
$this->assertEquals(SECURITY_OPTIMIZED, $billPasswordForm->save());
$_FAKEPOST = array(
'UserPasswordForm' => array(
'newPassword' => 'abcdefg',
'newPassword_repeat' => 'abcdefg',
)
);
$billPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$this->assertFalse($billPasswordForm->save());
$errors = array(
'newPassword' => array(
'The password must have at least one uppercase letter',
'The password must have at least one number and one letter',
),
);
$this->assertEquals($errors, $billPasswordForm->getErrors());
$_FAKEPOST = array(
'UserPasswordForm' => array(
'newPassword' => 'abcdefgN',
'newPassword_repeat' => 'abcdefgN',
)
);
$billPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$this->assertFalse($billPasswordForm->save());
$errors = array(
'newPassword' => array(
'The password must have at least one number and one letter',
),
);
$this->assertEquals($errors, $billPasswordForm->getErrors());
$_FAKEPOST = array(
'UserPasswordForm' => array(
'newPassword' => 'ABCDEFGH',
'newPassword_repeat' => 'ABCDEFGH',
)
);
$billPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$this->assertFalse($billPasswordForm->save());
$errors = array(
'newPassword' => array(
'The password must have at least one lowercase letter',
'The password must have at least one number and one letter',
),
);
$this->assertEquals($errors, $billPasswordForm->getErrors());
$_FAKEPOST = array(
'UserPasswordForm' => array(
'newPassword' => 'abcdefgN4',
'newPassword_repeat' => 'abcdefgN4',
)
);
$billPasswordForm->setAttributes($_FAKEPOST['UserPasswordForm']);
$this->assertTrue($billPasswordForm->save());
$bill->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB);
$this->assertTrue($billPasswordForm->save());
$this->assertEquals(Right::ALLOW, $bill->getEffectiveRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB));
//Now attempt to login as bill
$bill->forget();
$bill = User::getByUsername('abcdefg');
$this->assertEquals($bill, User::authenticate('abcdefg', 'abcdefgN4'));
$identity = new UserIdentity('abcdefg', 'abcdefgN4');
$authenticated = $identity->authenticate();
$this->assertEquals(0, $identity->errorCode);
$this->assertTrue($authenticated);
//Now turn off login via web for bill
Yii::app()->user->userModel = User::getByUsername('super');
$bill = User::getByUsername('abcdefg');
$bill->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB, RIGHT::DENY);
$this->assertTrue($bill->save());
$identity = new UserIdentity('abcdefg', 'abcdefgN4');
$this->assertFalse($identity->authenticate());
$this->assertEquals(UserIdentity::ERROR_NO_RIGHT_WEB_LOGIN, $identity->errorCode);
//Test creating a new user uses the everyone policy
$everyone = Group::getByName(Group::EVERYONE_GROUP_NAME);
$newUser = new User();
$this->assertEquals(null, $everyone->getEffectivePolicy('UsersModule', UsersModule::POLICY_ENFORCE_STRONG_PASSWORDS));
$this->assertEquals(5, $everyone->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_PASSWORD_LENGTH));
$this->assertEquals(3, $everyone->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH));
$this->assertEquals(null, $newUser->getEffectivePolicy('UsersModule', UsersModule::POLICY_ENFORCE_STRONG_PASSWORDS));
$this->assertEquals(5, $newUser->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_PASSWORD_LENGTH));
$this->assertEquals(3, $newUser->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH));
$everyone->setPolicy('UsersModule', UsersModule::POLICY_ENFORCE_STRONG_PASSWORDS, Policy::YES);
$everyone->setPolicy('UsersModule', UsersModule::POLICY_MINIMUM_PASSWORD_LENGTH, 3);
$everyone->setPolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH, 15);
$everyone->save();
$this->assertEquals(Policy::YES, $newUser->getEffectivePolicy('UsersModule', UsersModule::POLICY_ENFORCE_STRONG_PASSWORDS));
$this->assertEquals(3, $newUser->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_PASSWORD_LENGTH));
$this->assertEquals(15, $newUser->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH));
//Make the permission as the default for next tests
$everyone->setPolicy('UsersModule', UsersModule::POLICY_MINIMUM_PASSWORD_LENGTH, 5);
$everyone->setPolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH, 3);
$everyone->save();
}
/**
* @depends testPasswordUserNamePolicyChangesValidationAndLogin
*/
public function testUserNamePolicyValidatesCorrectlyOnDifferentScenarios()
{
    // The minimum-username-length policy (3, inherited from the Everyone
    // group configured by the previous test) must be enforced both when
    // editing an existing user and when creating one. The original spelled
    // the identical check out twice; run it once per scenario instead.
    $expectedErrors = array(
        'username' => array(
            'The username is too short. Minimum length is 3.',
        ),
    );
    foreach (array('editUser', 'createUser') as $scenario)
    {
        $bill = User::getByUsername('abcdefg');
        $bill->setScenario($scenario);
        $this->assertEquals(3, $bill->getEffectivePolicy('UsersModule', UsersModule::POLICY_MINIMUM_USERNAME_LENGTH));
        // Simulate a POST carrying a username below the minimum length.
        $_FAKEPOST = array(
            'User' => array(
                'username' => 'ab',
            )
        );
        $bill->setAttributes($_FAKEPOST['User']);
        $this->assertFalse($bill->save());
        $this->assertEquals($expectedErrors, $bill->getErrors());
    }
}
// Regression check: reading related-user attributes (createdByUser /
// modifiedByUser) before validating must not leave validation errors behind.
public function testValidatingUserAfterGettingAttributeValuesFromRelatedUsers()
{
$super = User::getByUsername('super');
Yii::app()->user->userModel = $super;
$user = UserTestHelper::createBasicUser('notsuper');
$this->assertTrue($user->save());
// The current user is recorded as creator and modifier on save.
$this->assertTrue($user->createdByUser ->isSame($super));
$this->assertTrue($user->modifiedByUser->isSame($super));
// Diagnostic aid: if validation unexpectedly fails, comparing against an
// empty array makes the failure output show the actual errors.
if (!$user->validate())
{
$this->assertEquals(array(), $user->getErrors());
}
// A regular user has a created by and
// modified by user so accessing them is no problem.
$test = $user->createdByUser->id;
$this->assertTrue($user->validate());
$this->assertEquals(array(), $user->getErrors());
}
// The super administrator's createdByUser relation carries a negative
// (unsaved) id, and the super administrator is its own modifier; touching
// those relations must not break validation.
public function testValidatingSuperAdministratorAfterGettingAttributeValuesFromRelatedUsers()
{
    $superUser = User::getByUsername('super');
    $this->assertTrue($superUser->validate());
    $this->assertTrue($superUser->createdByUser->id < 0);
    $this->assertTrue($superUser->modifiedByUser->isSame($superUser));
    // Re-validate after reading the relations above: still clean.
    $this->assertTrue($superUser->validate());
    $this->assertEquals(array(), $superUser->getErrors());
}
/**
 * Re-saving an already persisted user must not create extra (blank)
 * user rows as a side effect.
 * @depends testCreateUserWithRelatedUser
 */
public function testSavingExistingUserDoesntCreateRelatedBlankUsers()
{
    $countBeforeSave = User::getCount();
    $existingUser = User::getByUsername('dick');
    $this->assertTrue($existingUser->save());
    $this->assertEquals($countBeforeSave, User::getCount());
}
/**
 * The User model mixes in attributes and relations from RedBeanModel,
 * Item, Person, Permitable and User itself. Confirm the mixed-in pieces
 * survive a fresh save, a reload from the database, and (when memcache
 * is enabled) a round trip through the model cache. The original
 * repeated the same eight assertions three times; they now live in one
 * private helper.
 */
public function testMixedInPersonInUser()
{
    $user = new User();
    $user->username = 'dude';
    $user->lastName = 'Dude';
    $this->assertTrue($user->save());
    $this->assertUserHasMixedInAttributesAndRelations($user);
    unset($user);
    // Reload from the database and re-check.
    $user = User::getByUsername('dude');
    $this->assertUserHasMixedInAttributesAndRelations($user);
    RedBeanModelsCache::cacheModel($user);
    $modelIdentifier = $user->getModelIdentifier();
    unset($user);
    RedBeanModelsCache::forgetAll(true); // Forget it at the php level.
    RedBeansCache::forgetAll();
    if (MEMCACHE_ON)
    {
        // Rehydrate purely from the cache and re-check.
        $user = RedBeanModelsCache::getModel($modelIdentifier);
        $this->assertUserHasMixedInAttributesAndRelations($user);
    }
}

/**
 * Asserts that a user exposes the attributes and relations contributed
 * by each class in its mixin chain.
 */
private function assertUserHasMixedInAttributesAndRelations(User $user)
{
    $this->assertTrue($user->isAttribute('id'));              // From RedBeanModel.
    $this->assertTrue($user->isAttribute('createdDateTime')); // From Item.
    $this->assertTrue($user->isAttribute('firstName'));       // From Person.
    $this->assertTrue($user->isAttribute('username'));        // From User.
    $this->assertTrue($user->isRelation ('createdByUser'));   // From Item.
    $this->assertTrue($user->isRelation ('rights'));          // From Permitable.
    $this->assertTrue($user->isRelation ('title'));           // From Person.
    $this->assertTrue($user->isRelation ('manager'));         // From User.
}
// NOTE(review): the count assertion expects 3 model classes but only the
// first two entries are verified below — confirm what the third class is
// and assert it as well, or correct the expected count.
public function testGetModelClassNames()
{
$modelClassNames = UsersModule::getModelClassNames();
$this->assertEquals(3, count($modelClassNames));
$this->assertEquals('User', $modelClassNames[0]);
$this->assertEquals('UserSearch', $modelClassNames[1]);
}
// Creating the very first user while no current user is set must make the
// new user the current user (Yii::app()->user->userModel); creating further
// users afterwards must leave the current user unchanged.
public function testLogAuditEventsListForCreatedAndModifedCreatingFirstUser()
{
Yii::app()->user->userModel = null;
$user = new User();
$user->username = 'myuser';
$user->title->value = 'Mr.';
$user->firstName = 'My';
$user->lastName = 'Userson';
$user->setPassword('myuser');
$saved = $user->save();
$this->assertTrue($saved);
// With no current user at save time, the new user becomes current.
$this->assertEquals(Yii::app()->user->userModel, $user);
//Create a second user and confirm the first user is still the current user.
$user2 = new User();
$user2->username = 'myuser2';
$user2->title->value = 'Mr.';
$user2->firstName = 'My';
$user2->lastName = 'Userson2';
$user2->setPassword('myuser2');
$this->assertTrue($user2->save());
$this->assertEquals(Yii::app()->user->userModel, $user);
}
// Exercises the avatar modes a user can have:
// - no avatar data / avatarType 1: generic gravatar (d=mm)
// - avatarType 2: gravatar keyed on the primary email address (d=identicon)
// - avatarType 3: gravatar keyed on a custom avatar email address
public function testAvatarForUser()
{
//Create a new user and confirm that gets the default avatar
$user = new User();
$user->username = 'avatar';
$user->lastName = 'User';
$this->assertTrue($user->save());
$this->assertContains('width="250" height="250" src="//www.gravatar.com/avatar/?s=250&r=g&d=mm', // Not Coding Standard
$user->getAvatarImage());
//A different size argument yields the correspondingly sized gravatar URL.
$this->assertContains('width="50" height="50" src="//www.gravatar.com/avatar/?s=50&r=g&d=mm', // Not Coding Standard
$user->getAvatarImage(50));
unset($user);
//Add avatar info to the user and confirm it gets saved
$user = User::getByUsername('avatar');
$avatar = array('avatarType' => 1);
$user->serializeAndSetAvatarData($avatar);
// Avatar settings are stored as one serialized array.
$this->assertEquals(serialize($avatar), $user->serializedAvatarData);
$this->assertTrue($user->save());
unset($user);
// avatarType 1 still renders the generic (d=mm) gravatar.
$user = User::getByUsername('avatar');
$this->assertContains('width="250" height="250" src="//www.gravatar.com/avatar/?s=250&r=g&d=mm', // Not Coding Standard
$user->getAvatarImage());
$this->assertContains('width="50" height="50" src="//www.gravatar.com/avatar/?s=50&r=g&d=mm', // Not Coding Standard
$user->getAvatarImage(50));
unset($user);
//Change avatar to primary email address
$user = new User();
$user->username = 'avatar2';
$user->lastName = 'User';
$emailAddress = 'avatar@zurmo.org';
$user->primaryEmail->emailAddress = $emailAddress;
$user->primaryEmail->optOut = 1;
$user->primaryEmail->isInvalid = 0;
$avatar = array('avatarType' => 2);
$user->serializeAndSetAvatarData($avatar);
// NOTE(review): assertContains here where the avatarType 1 case used
// assertEquals — presumably equivalent; consider assertEquals for consistency.
$this->assertContains(serialize($avatar), $user->serializedAvatarData);
$this->assertTrue($user->save());
unset($user);
// Gravatar hash is the md5 of the lower-cased, trimmed email address.
$user = User::getByUsername('avatar2');
$avatarUrl = 'width="250" height="250" src="//www.gravatar.com/avatar/' .
md5(strtolower(trim($emailAddress))) .
'?s=250&r=g&d=identicon'; // Not Coding Standard
$this->assertContains($avatarUrl, $user->getAvatarImage());
$avatarUrl = 'width="5" height="5" src="//www.gravatar.com/avatar/' .
md5(strtolower(trim($emailAddress))) .
'?s=5&r=g&d=identicon'; // Not Coding Standard
$this->assertContains($avatarUrl, $user->getAvatarImage(5));
unset($user);
//Change avatar to custom avatar email address
$user = new User();
$user->username = 'avatar3';
$user->lastName = 'User';
$emailAddress = 'avatar-custom@zurmo.org';
$avatar = array('avatarType' => 3, 'customAvatarEmailAddress' => $emailAddress);
$user->serializeAndSetAvatarData($avatar);
$this->assertEquals(serialize($avatar), $user->serializedAvatarData);
$this->assertTrue($user->save());
unset($user);
$user = User::getByUsername('avatar3');
$avatarUrl = 'width="250" height="250" src="//www.gravatar.com/avatar/' .
md5(strtolower(trim($emailAddress))) .
"?s=250&r=g&d=identicon"; // Not Coding Standard
$this->assertContains($avatarUrl, $user->getAvatarImage());
// Arbitrary sizes (even very large) are passed straight through to the URL.
$avatarUrl = 'width="2500" height="2500" src="//www.gravatar.com/avatar/' .
md5(strtolower(trim($emailAddress))) .
"?s=2500&r=g&d=identicon"; // Not Coding Standard
$this->assertContains($avatarUrl, $user->getAvatarImage(2500));
unset($user);
}
/**
 * Deleting the last member of the Super Administrators group must be
 * rejected with a NotSupportedException.
 * @expectedException NotSupportedException
 */
public function testDeleteLastUserInSuperAdministratorsGroup()
{
Yii::app()->user->userModel = User::getByUsername('super');
$superAdminGroup = Group::getByName(Group::SUPER_ADMINISTRATORS_GROUP_NAME);
//At this point the super administrator is part of this group
$this->assertEquals(1, $superAdminGroup->users->count());
//Now try to delete super user, It should not work
$this->assertFalse(Yii::app()->user->userModel->delete());
// Safety net: if delete() neither threw (satisfying the annotation) nor
// returned true, fail explicitly so the test cannot pass by accident.
$this->fail();
}
/**
 * test for checking isActive attribute.
 * isActive is derived from the RIGHT_LOGIN_VIA_WEB right: denying the
 * right deactivates the user on save, allowing it reactivates them.
 */
public function testIsActiveOnUserSave()
{
$user = new User();
$user->username = 'activeuser';
$user->title->value = 'Mr.';
$user->firstName = 'My';
$user->lastName = 'activeuserson';
$user->setPassword('myuser');
$this->assertTrue($user->save());
unset($user);
// A freshly created user is active by default.
$user = User::getByUsername('activeuser');
$this->assertEquals(1, $user->isActive);
unset($user);
//Change the user's status to inactive and confirm the changes in rights and isActive attribute.
$user = User::getByUsername('activeuser');
$user->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB, RIGHT::DENY);
$this->assertTrue($user->save());
$this->assertEquals(0, $user->isActive);
unset($user);
//Now change the user's status back to active.
$user = User::getByUsername('activeuser');
$user->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB, RIGHT::ALLOW);
$this->assertTrue($user->save());
$this->assertEquals(1, $user->isActive);
unset($user);
}
// A user's locale drives Yii date/time formatting: the default (null)
// locale renders US-style short dates, while en_gb renders day-first.
public function testUserLocaleSettings()
{
$user = new User();
$user->username = 'userforlocaletest';
$user->title->value = 'Mr.';
$user->firstName = 'Locale';
$user->lastName = 'User';
$user->setPassword('localeuser');
$this->assertTrue($user->save());
// NOTE(review): fetched with different casing than was saved — this
// relies on getByUsername being case-insensitive; confirm.
$user = User::getByUsername('userForLocaleTest');
$this->assertNull($user->locale);
Yii::app()->user->userModel = $user;
$this->assertEquals('12/1/13 12:00:00 AM',
Yii::app()->dateFormatter->formatDateTime('2013-12-01', 'short'))
;
$user->locale = 'en_gb';
$this->assertTrue($user->save());
$user = User::getByUsername('userForLocaleTest');
// The stored locale must be one of the selectable locale ids.
$this->assertContains($user->locale, ZurmoLocale::getSelectableLocaleIds());
Yii::app()->user->userModel = $user;
$this->assertEquals('01/12/2013 00:00:00',
Yii::app()->dateFormatter->formatDateTime('2013-12-01', 'short'));
}
// lastLoginDateTime is unset until the user authenticates, and is stamped
// at (approximately) the current time on successful authentication.
public function testLastLoginDateTimeAttribute()
{
$user = new User();
$user->username = 'lastloginuser';
$user->title->value = 'Mr.';
$user->firstName = 'myFirstName';
$user->lastName = 'myLastName';
$user->setPassword('lastlogin');
$user->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB);
$this->assertTrue($user->save());
$user = User::getByUsername('lastloginuser');
$this->assertNull($user->lastLoginDateTime);
unset($user);
$now = time();
User::authenticate('lastloginuser', 'lastlogin');
$user = User::getByUsername('lastloginuser');
// NOTE(review): this subtraction assumes lastLoginDateTime is numeric
// (or numerically castable); if it is a datetime string the comparison
// is fragile — confirm the attribute's storage format.
$this->assertLessThanOrEqual(5, $user->lastLoginDateTime - $now);
}
// Surrounding whitespace in a username must be stripped when saving.
public function testTrimUsername()
{
    $newUser = new User();
    $newUser->username = ' trimusername ';
    $newUser->title->value = 'Mr.';
    $newUser->firstName = 'trim';
    $newUser->lastName = 'username';
    $newUser->setPassword('trimusername');
    $this->assertTrue($newUser->save());
    // The persisted record is retrievable by, and stores, the trimmed name.
    $persisted = User::getByUsername('trimusername');
    $this->assertEquals('trimusername', $persisted->username);
}
/**
 * test for checking hideFromSelecting attribute.
 * A user flagged hideFromSelecting keeps the flag across a save/reload
 * and is excluded from partial-full-name searches until it is cleared.
 */
public function testHideFromSelectingOnUserSave()
{
    $model = new User();
    $model->username = 'hidefromselectuser';
    $model->title->value = 'Mr.';
    $model->firstName = 'My';
    $model->lastName = 'hidefromselectuser';
    $model->hideFromSelecting = true;
    $model->setPassword('myuser');
    $this->assertTrue($model->save());
    unset($model);
    $model = User::getByUsername('hidefromselectuser');
    $this->assertEquals(1, $model->hideFromSelecting);
    unset($model);
    // Hidden users do not show up in partial-name searches.
    $matches = UserSearch::getUsersByPartialFullName('hide', 20);
    $this->assertEquals(0, count($matches));
    // Clearing the flag makes the user searchable again.
    $model = User::getByUsername('hidefromselectuser');
    $model->hideFromSelecting = false;
    $this->assertTrue($model->save());
    unset($model);
    $model = User::getByUsername('hidefromselectuser');
    $this->assertEquals(0, $model->hideFromSelecting);
    unset($model);
    $matches = UserSearch::getUsersByPartialFullName('hide', 20);
    $this->assertEquals(1, count($matches));
}
/**
 * test for checking hideFromLeaderboard attribute.
 * A user flagged hideFromLeaderboard still earns points but is excluded
 * from leaderboard queries until the flag is cleared.
 */
public function testHideFromLeaderboardOnUserSave()
{
$user = new User();
$user->username = 'leaderboard';
$user->title->value = 'Mr.';
$user->firstName = 'My';
$user->lastName = 'leaderboard';
$user->hideFromLeaderboard = true;
$user->setPassword('myuser');
$this->assertTrue($user->save());
unset($user);
// Award points to the hidden user; points are deferred until processed.
$user = User::getByUsername('leaderboard');
Yii::app()->user->userModel = $user;
$pointTypeAndValueData = array('some type' => 400);
GamePointUtil::addPointsByPointData(Yii::app()->user->userModel, $pointTypeAndValueData);
Yii::app()->gameHelper->processDeferredPoints();
$user = User::getByUsername('leaderboard');
$this->assertEquals(1, $user->hideFromLeaderboard);
unset($user);
// The hidden user does not appear even though they have points.
$userSet = GamePointUtil::getUserLeaderboardData(GamePointUtil::LEADERBOARD_TYPE_OVERALL);
$this->assertEquals(0, count($userSet));
$user = User::getByUsername('leaderboard');
$user->hideFromLeaderboard = false;
$this->assertTrue($user->save());
unset($user);
$user = User::getByUsername('leaderboard');
$this->assertEquals(0, $user->hideFromLeaderboard);
unset($user);
// With the flag cleared the user shows up on the leaderboard.
$userSet = GamePointUtil::getUserLeaderboardData(GamePointUtil::LEADERBOARD_TYPE_OVERALL);
$this->assertTrue(count($userSet) > 0);
}
/**
 * test for checking isRootUser attribute: only one root user may exist,
 * and a root-flagged user is hidden from callers that disallow root access.
 * (The original docblock said "hideFromSelecting" — a copy-paste error.)
 */
public function testIsRootUserOnUserSave()
{
    $user = new User();
    $user->username = 'rootuser';
    $user->title->value = 'Mr.';
    $user->firstName = 'My';
    $user->lastName = 'rootuser';
    $user->setPassword('myuser');
    $this->assertTrue($user->save());
    unset($user);
    // isRootUser defaults to null (not a root user).
    $user = User::getByUsername('rootuser');
    $this->assertNull($user->isRootUser);
    unset($user);
    $superUser = User::getByUsername('leaderboard');
    Yii::app()->user->userModel = $superUser;
    $user = User::getByUsername('rootuser');
    $this->assertTrue(UserAccessUtil::resolveCanCurrentUserAccessRootUser($user));
    $user->setIsRootUser();
    $this->assertTrue($user->save());
    unset($user);
    // Once flagged as root, access is denied when root users are not permitted.
    $user = User::getByUsername('rootuser');
    $this->assertFalse(UserAccessUtil::resolveCanCurrentUserAccessRootUser($user, false));
    $user = new User();
    $user->username = 'rootuser2';
    $user->title->value = 'Mr.';
    $user->firstName = 'My';
    $user->lastName = 'rootuser2';
    $user->setPassword('myuser');
    $this->assertTrue($user->save());
    unset($user);
    //Get root user count
    $this->assertEquals(1, User::getRootUserCount());
    //Take care that only root user could be there
    $user = User::getByUsername('rootuser2');
    try
    {
        $user->setIsRootUser();
        $secondRootUserWasRejected = false;
    }
    catch (Exception $e)
    {
        $secondRootUserWasRejected = true;
        $this->assertEquals('ExistingRootUserException', get_class($e));
    }
    // Bug fix: the original try/catch passed silently when no exception was
    // thrown; assert explicitly that promoting a second root user is rejected.
    $this->assertTrue($secondRootUserWasRejected, 'ExistingRootUserException was expected but not thrown.');
}
/**
 * test for checking isSystemUser attribute and its effect on the active
 * user count (system users are excluded from the count).
 * (Original docblock said "hideFromSelecting" — a copy-paste error.)
 */
public function testIsSystemUserAndActiveUserCountOnUserSave()
{
$user = new User();
$user->username = 'sysuser';
$user->title->value = 'Mr.';
$user->firstName = 'My';
$user->lastName = 'sysuser';
$user->setPassword('myuser');
$this->assertTrue($user->save());
unset($user);
// isSystemUser defaults to null (not a system user).
$user = User::getByUsername('sysuser');
$this->assertNull($user->isSystemUser);
unset($user);
//Check active user count
$activeUserCount = User::getActiveUserCount();
$this->assertEquals(26, $activeUserCount);
$user = User::getByUsername('sysuser');
$this->assertTrue(UserAccessUtil::resolveAccessingASystemUser($user));
$user->setIsSystemUser();
$this->assertTrue($user->save());
unset($user);
// Once flagged, access is denied when system users are not permitted.
$user = User::getByUsername('sysuser');
$this->assertFalse(UserAccessUtil::resolveAccessingASystemUser($user, false));
//As the user has been made a system user so count should reduce
$activeUserCount = User::getActiveUserCount();
$this->assertEquals(25, $activeUserCount);
// NOTE(review): relies on 'rootuser' created (and root-flagged) by
// testIsRootUserOnUserSave running first — consider an explicit @depends.
$user = User::getByUsername('rootuser');
$user->setIsNotRootUser();
$this->assertTrue($user->save());
unset($user);
//As the user removed from root user so count should increase
$activeUserCount = User::getActiveUserCount();
$this->assertEquals(26, $activeUserCount);
}
/**
 * test getUsersByEmailAddress: hidden users are returned only when
 * hidden results are not excluded.
 */
public function testGetUsersByEmailAddress()
{
    $hiddenUser = UserTestHelper::createBasicUserWithEmailAddress("emailhideuser");
    $hiddenUser->hideFromSelecting = true;
    $this->assertTrue($hiddenUser->save());
    unset($hiddenUser);
    // Not excluding hidden users: the (hidden) match is returned.
    $found = UserSearch::getUsersByEmailAddress("emailhideuser@zurmo.com", null, false);
    $this->assertEquals(true, (bool)$found[0]->hideFromSelecting);
    $this->assertEquals(1, count($found));
    // Excluding hidden users: no match.
    $found = UserSearch::getUsersByEmailAddress("emailhideuser@zurmo.com", null, true);
    $this->assertEquals(0, count($found));
}
/**
 * test getUsersByPartialFullName: users flagged hideFromSelecting are
 * excluded from partial-name search results.
 */
public function testGetUsersByPartialFullName()
{
    $hiddenUser = UserTestHelper::createBasicUserWithEmailAddress("partialhideuser");
    $hiddenUser->hideFromSelecting = true;
    $this->assertTrue($hiddenUser->save());
    unset($hiddenUser);
    $matches = UserSearch::getUsersByPartialFullName("partial", 1);
    $this->assertEquals(0, count($matches));
    // Clearing the flag makes the user findable again.
    $visibleUser = User::getByUsername('partialhideuser');
    $visibleUser->hideFromSelecting = false;
    $this->assertTrue($visibleUser->save());
    unset($visibleUser);
    $matches = UserSearch::getUsersByPartialFullName("partial", 1);
    $this->assertEquals(1, count($matches));
}
/**
 * Test structure and clauses for NonSystemUsersStateMetadataAdapter.
 */
public function testNonSystemUsersStateMetadataAdapter()
{
    // With no prior structure the adapter emits only its own clause group.
    $adapterWithEmptyStructure = new NonSystemUsersStateMetadataAdapter(array('clauses' => array(), 'structure' => ''));
    $adapted = $adapterWithEmptyStructure->getAdaptedDataProviderMetadata();
    $this->assertEquals('(1 or 2)', $adapted['structure']);
    // An existing structure is parenthesized and AND-ed with the new group.
    $adapterWithExistingStructure = new NonSystemUsersStateMetadataAdapter(array('clauses' => array(), 'structure' => 'x and y'));
    $adapted = $adapterWithExistingStructure->getAdaptedDataProviderMetadata();
    $this->assertEquals('(x and y) and (1 or 2)', $adapted['structure']);
}
// Only members of the super administrators group report true.
public function testIsSuperAdministrator()
{
    $superAdministrator = User::getByUsername('super');
    $regularUser = User::getByUsername('dick');
    $this->assertTrue($superAdministrator->isSuperAdministrator());
    $this->assertFalse($regularUser->isSuperAdministrator());
}
// Creating a system user must not change the active-user totals.
public function testInactiveUsers()
{
    // 28 active users are expected from the cumulative fixture state of
    // the preceding tests.
    $activeUserCount = User::getActiveUserCount();
    $this->assertEquals(28, $activeUserCount);
    $this->assertCount(28, User::getActiveUsers());
    $user = new User();
    $user->username = 'inactiveuser';
    $user->title->value = 'Mr.';
    $user->firstName = 'My';
    $user->lastName = 'inactiveuser';
    $user->setPassword('myuser');
    $user->setIsSystemUser();
    $this->assertTrue($user->save());
    // Bug fix: the original re-asserted the stale $activeUserCount captured
    // before the save, which could never detect a regression; re-query.
    $this->assertEquals(28, User::getActiveUserCount());
    $this->assertCount(28, User::getActiveUsers());
}
// makeActiveUsersQuerySearchAttributeData() excludes root users by default
// and with an explicit false argument; passing true drops the isRootUser
// clauses (and their structure group) so root users are included.
public function testMakeActiveUsersQuerySearchAttributeData()
{
    // Expected metadata when root users are excluded — produced both by
    // the default call and by an explicit false argument.
    $compareDataExcludingRootUsers = array(
        'clauses' => array(
            1 => array(
                "attributeName" => "isActive",
                "operatorType" => "equals",
                "value" => true
            ),
            2 => array(
                "attributeName" => "isSystemUser",
                "operatorType" => "equals",
                "value" => 0
            ),
            3 => array(
                "attributeName" => "isSystemUser",
                "operatorType" => "isNull",
                "value" => null
            ),
            4 => array(
                "attributeName" => "isRootUser",
                "operatorType" => "equals",
                "value" => 0
            ),
            5 => array(
                "attributeName" => "isRootUser",
                "operatorType" => "isNull",
                "value" => null
            )
        ),
        'structure' => "1 and (2 or 3) and (4 or 5)"
    );
    // Expected metadata when root users are included (true argument).
    $compareDataIncludingRootUsers = array(
        'clauses' => array(
            1 => array(
                "attributeName" => "isActive",
                "operatorType" => "equals",
                "value" => true
            ),
            2 => array(
                "attributeName" => "isSystemUser",
                "operatorType" => "equals",
                "value" => 0
            ),
            3 => array(
                "attributeName" => "isSystemUser",
                "operatorType" => "isNull",
                "value" => null
            ),
        ),
        'structure' => "1 and (2 or 3)"
    );
    // The original spelled the first expected array out twice verbatim;
    // comparing against one shared copy keeps the two cases provably in sync.
    $this->assertEquals($compareDataExcludingRootUsers, User::makeActiveUsersQuerySearchAttributeData());
    $this->assertEquals($compareDataExcludingRootUsers, User::makeActiveUsersQuerySearchAttributeData(false));
    $this->assertEquals($compareDataIncludingRootUsers, User::makeActiveUsersQuerySearchAttributeData(true));
}
// getActiveUserCount()/getActiveUsers() exclude root users by default and
// when passed false; passing true includes them.
public function testActiveUsers()
{
$activeUserCount = User::getActiveUserCount();
$this->assertEquals(28, $activeUserCount);
$this->assertCount(28, User::getActiveUsers());
$activeUserCount = User::getActiveUserCount(false);
$this->assertEquals(28, $activeUserCount);
$this->assertCount(28, User::getActiveUsers(false));
$activeUserCount = User::getActiveUserCount(true);
$this->assertEquals(28, $activeUserCount);
$this->assertCount(28, User::getActiveUsers(true));
// Flag a user as root: default/false counts drop, true still counts him.
$user = User::getByUsername('rootuser');
$this->assertTrue(UserAccessUtil::resolveCanCurrentUserAccessRootUser($user));
$user->setIsRootUser();
$this->assertTrue($user->save());
unset($user);
$activeUserCount = User::getActiveUserCount();
$this->assertEquals(27, $activeUserCount);
$this->assertCount(27, User::getActiveUsers());
$activeUserCount = User::getActiveUserCount(false);
$this->assertEquals(27, $activeUserCount);
$this->assertCount(27, User::getActiveUsers(false));
$activeUserCount = User::getActiveUserCount(true);
$this->assertEquals(28, $activeUserCount);
$this->assertCount(28, User::getActiveUsers(true));
}
// Toggling a user's active state (via the web-login right) must write an
// 'Item Modified' audit event each time.
public function testLogAuditEventsForIsActive()
{
$user = new User();
$user->username = 'testlogauditforisactive';
$user->title->value = 'Mr.';
$user->firstName = 'My';
$user->lastName = 'testlogauditforisactive';
$user->setPassword('testlogauditforisactive');
$this->assertTrue($user->save());
unset($user);
$user = User::getByUsername('testlogauditforisactive');
$this->assertEquals(1, $user->isActive);
unset($user);
// Start from a clean audit log so the counts below are exact.
AuditEvent::deleteAll();
//Change the user's status to inactive and confirm new audit event is created
$user = User::getByUsername('testlogauditforisactive');
$user->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB, RIGHT::DENY);
$this->assertTrue($user->save());
$this->assertEquals(0, $user->isActive);
$auditEvents = AuditEvent::getAll();
$this->assertCount(1, $auditEvents);
$this->assertContains('Item Modified', strval($auditEvents[0]));
unset($user);
//Now change the user's status back to active and confirm new audit event is created
$user = User::getByUsername('testlogauditforisactive');
$user->setRight('UsersModule', UsersModule::RIGHT_LOGIN_VIA_WEB, RIGHT::ALLOW);
$this->assertTrue($user->save());
$this->assertEquals(1, $user->isActive);
$auditEvents = AuditEvent::getAll();
$this->assertCount(2, $auditEvents);
$this->assertContains('Item Modified', strval($auditEvents[1]));
unset($user);
}
/**
 * setMetadata updates must propagate to subsequent getMetadata calls, per
 * mixed-in class section ('Person' and 'User'). The original repeated the
 * same 12-line fetch-and-assert block four times; it now lives in one
 * private helper.
 */
public function testSetMetadata()
{
    // Baseline: Person contributes 7 members, User 12.
    list($personMetaData, $userMetaData) = $this->assertMetadataMemberCounts(7, 12);
    // unset a member from person, update metadata
    unset($personMetaData['members'][0]);
    User::setMetadata(array('Person' => $personMetaData));
    // ensure metadata update has propagated
    list($personMetaData, $userMetaData) = $this->assertMetadataMemberCounts(6, 12);
    // unset a member from User, update metadata
    unset($userMetaData['members'][0]);
    User::setMetadata(array('User' => $userMetaData));
    list($personMetaData, $userMetaData) = $this->assertMetadataMemberCounts(6, 11);
    // unset a member from User and Person, update metadata
    unset($userMetaData['members'][1]);
    unset($personMetaData['members'][1]);
    User::setMetadata(array('Person' => $personMetaData, 'User' => $userMetaData));
    $this->assertMetadataMemberCounts(5, 10);
}

/**
 * Fetches User metadata, asserts the Person and User sections exist,
 * are non-empty, and hold the expected number of members, then returns
 * both sections as array($personMetaData, $userMetaData).
 */
private function assertMetadataMemberCounts($expectedPersonMembers, $expectedUserMembers)
{
    $metadata = User::getMetadata();
    $this->assertArrayHasKey('Person', $metadata);
    $this->assertNotEmpty($metadata['Person']);
    $this->assertArrayHasKey('User', $metadata);
    $this->assertNotEmpty($metadata['User']);
    $personMetaData = $metadata['Person'];
    $userMetaData = $metadata['User'];
    $this->assertArrayHasKey('members', $personMetaData);
    $this->assertCount($expectedPersonMembers, $personMetaData['members']);
    $this->assertArrayHasKey('members', $userMetaData);
    $this->assertCount($expectedUserMembers, $userMetaData['members']);
    return array($personMetaData, $userMetaData);
}
}
?>
| agpl-3.0 |
rashikpolus/MIT_KC | coeus-impl/src/main/java/org/kuali/kra/iacuc/committee/service/IacucCommitteeBatchCorrespondenceService.java | 1058 | /*
* Kuali Coeus, a comprehensive research administration system for higher education.
*
* Copyright 2005-2015 Kuali, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.kuali.kra.iacuc.committee.service;
import org.kuali.coeus.common.committee.impl.service.CommitteeBatchCorrespondenceServiceBase;
/**
 * Batch correspondence service for IACUC committees.
 *
 * <p>Marker specialization of {@link CommitteeBatchCorrespondenceServiceBase}:
 * it declares no additional methods — presumably it exists so IACUC-specific
 * wiring can target its own service type; confirm against the module's
 * Spring configuration.</p>
 */
public interface IacucCommitteeBatchCorrespondenceService extends CommitteeBatchCorrespondenceServiceBase {
}
| agpl-3.0 |
cswhite2000/ProjectAres | Util/core/src/main/java/tc/oc/time/PeriodConverters.java | 2359 | package tc.oc.time;
import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalUnit;
import java.util.Arrays;
import java.util.Collection;
import tc.oc.commons.core.util.TimeUtils;
/**
 * Static factory methods producing {@link PeriodConverter} strategies that
 * express a {@link java.time.Duration} as a {@link TimePeriod} in different
 * ways. Not instantiable.
 */
public final class PeriodConverters {

    /** Shared converter returning a fully normalized period. */
    private static final PeriodConverter NORMALIZED_CONVERTER = TimePeriod::normalized;

    /** Shared converter expressing the duration in whole seconds. */
    private static final PeriodConverter SECONDS_CONVERTER = unit(ChronoUnit.SECONDS);

    /** Shared largest-precise-unit converter reporting zero durations in seconds. */
    private static final PeriodConverter LARGEST_PRECISE_SECONDS = largestPreciseUnit(ChronoUnit.SECONDS);

    /** Shared largest-approximate-unit converter requiring at least 2 of the unit. */
    private static final PeriodConverter LARGEST_APPROXIMATE_TWO = largestApproximateUnit(2);

    private PeriodConverters() {}

    /** Converter producing a fully normalized period. */
    public static PeriodConverter normalized() {
        return NORMALIZED_CONVERTER;
    }

    /** Converter expressing the whole duration in the single given unit. */
    public static PeriodConverter unit(TemporalUnit unit) {
        return duration -> TimePeriod.inUnit(duration, unit);
    }

    /** Converter expressing the duration using the given collection of units. */
    public static PeriodConverter units(Collection<TemporalUnit> units) {
        return duration -> TimePeriod.inUnits(duration, units);
    }

    /** Varargs convenience overload of {@link #units(Collection)}. */
    public static PeriodConverter units(TemporalUnit... units) {
        return units(Arrays.asList(units));
    }

    /** Converter expressing the duration in whole seconds. */
    public static PeriodConverter seconds() {
        return SECONDS_CONVERTER;
    }

    /** Shared {@link #largestPreciseUnit(TemporalUnit)} instance using seconds for zero. */
    public static PeriodConverter largestPreciseUnit() {
        return LARGEST_PRECISE_SECONDS;
    }

    /**
     * Converter choosing the largest friendly unit that represents the
     * duration exactly (no remainder); a zero duration is reported as
     * zero of {@code zeroUnit}.
     */
    public static PeriodConverter largestPreciseUnit(TemporalUnit zeroUnit) {
        return duration -> {
            if(duration.isZero()) {
                return TimePeriod.ofUnit(0, zeroUnit);
            }
            for(FriendlyUnits candidate : FriendlyUnits.descending()) {
                final long quantity = TimeUtils.toUnit(candidate.unit, duration);
                // Exact if subtracting the whole-unit quantity leaves nothing.
                if(duration.minus(quantity, candidate.unit).isZero()) {
                    return TimePeriod.inUnit(duration, candidate.unit);
                }
            }
            // Unreachable in practice: the smallest friendly unit should
            // always divide the duration exactly.
            throw new IllegalStateException();
        };
    }

    /** Shared {@link #largestApproximateUnit(long)} instance with a minimum of 2. */
    public static PeriodConverter largestApproximateUnit() {
        return LARGEST_APPROXIMATE_TWO;
    }

    /**
     * Converter choosing the largest friendly unit of which the duration
     * contains at least {@code minQuantity}, falling back to the smallest
     * friendly unit when none qualifies.
     */
    public static PeriodConverter largestApproximateUnit(long minQuantity) {
        return duration -> {
            for(FriendlyUnits candidate : FriendlyUnits.descending()) {
                if(minQuantity <= TimeUtils.toUnit(candidate.unit, duration)) {
                    return TimePeriod.inUnit(duration, candidate.unit);
                }
            }
            return TimePeriod.inUnit(duration, FriendlyUnits.smallest().unit);
        };
    }
}
| agpl-3.0 |
systems-rebooter/music | l10n/lv.php | 289 | <?php
// Latvian (lv) UI translations, keyed by the English source string.
$TRANSLATIONS = array(
"Description" => "Apraksts",
"Music" => "Mūzika",
"Next" => "Nākamā",
"Pause" => "Pauzēt",
"Play" => "Atskaņot",
"Previous" => "Iepriekšējā",
"Repeat" => "Atkārtot"
);
// Gettext-style plural rule for Latvian: three forms, selected on n%10/n%100.
$PLURAL_FORMS = "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);";
| agpl-3.0 |
sbriseid/GoTools | parametrization/src/PrPrmSurface.C | 7807 | /*
* Copyright (C) 1998, 2000-2007, 2010, 2011, 2012, 2013 SINTEF ICT,
* Applied Mathematics, Norway.
*
* Contact information: E-mail: tor.dokken@sintef.no
* SINTEF ICT, Department of Applied Mathematics,
* P.O. Box 124 Blindern,
* 0314 Oslo, Norway.
*
* This file is part of GoTools.
*
* GoTools is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* GoTools is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with GoTools. If not, see
* <http://www.gnu.org/licenses/>.
*
* In accordance with Section 7(b) of the GNU Affero General Public
* License, a covered work must retain the producer line in every data
* file that is created or manipulated using GoTools.
*
* Other Usage
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the GoTools library without
* disclosing the source code of your own applications.
*
* This file may be used in accordance with the terms contained in a
* written agreement between you and SINTEF ICT.
*/
#include "GoTools/parametrization/PrPrmSurface.h"
#include "GoTools/parametrization/PrParamUtil.h"
#include "GoTools/utils/Values.h"
#ifdef _WIN32
#define M_PI 3.14159265358979
#endif
#ifdef __BORLANDC__
using std::cos;
using std::sin;
#endif
// PRIVATE METHODS
//-----------------------------------------------------------------------------
bool
PrPrmSurface::makeWeights(int i)
//-----------------------------------------------------------------------------
// Calculate shape-preserving weights for the
// interior node i of the graph.
// It is assumed here that the indices of the neighbours of i
// have already been stored in neighbours_.
// This is the third parametrization in the article:
// M. S. Floater, "Parametrization and smooth approximation of
// surface triangulations", to appear in CAGD, 1997.
// On return weights_ holds one weight per neighbour, summing to 1.
// Always returns true.
{
weights_.clear();
weightsold_.clear();
int n = (int)neighbours_.size();
weights_.resize(n, 0.0);
// Find local u,v's
// First, weights derived from the 3D geometry around node i...
localParamXYZ(i);
makeWeights(u_,v_,weights_);
// ...then weights derived from the current (u,v) parametrization.
localParamUV(i);
makeWeights(uold_,vold_,weightsold_);
double sum = 0.0;
int j;
for(j=0; j<n; j++)
{
//weights_[j] = weightsold_[j] / sqrt(weights_[j]);  // rejected variant, kept for reference
// Combine: parameter-domain weight scaled by the inverse geometric weight.
// NOTE(review): assumes the geometric weight is nonzero here — confirm
// makeWeights(u_, v_, ...) can never produce a zero entry.
weights_[j] = weightsold_[j] / weights_[j];
sum += weights_[j];
}
// Scale the weights so that they sum to 1.
for(j=0; j<n; j++) weights_[j] /= sum;
return true;
}
//-----------------------------------------------------------------------------
bool
PrPrmSurface::makeWeights(vector<double>& u,
			  vector<double>& v,
			  vector<double>& weights)
//-----------------------------------------------------------------------------
// Compute, for one interior node placed at the local origin, a weight for
// each of its n neighbours whose local coordinates are given in (u,v).
// Each neighbour j contributes the barycentric coordinates of the origin
// with respect to the triangle (j,k,kk), where (k,kk) is the polygon edge
// cut by the line through the origin and neighbour j.
// On return the weights are scaled so that they sum to 1.
{
  int n = (int)u.size();
  weights.clear();
  weights.resize(n, 0.0);
  int j;
  for(j=0; j<n; j++)
  {
    /* Given the j-th neighbour of node i,
       find the two neighbours by intersecting the
       line through nodes i and j with all segments of the polygon
       made by the neighbours. Take the two neighbours on
       either side. Only one segment intersects this line. */
    int k;
    for(k=0; k<n; k++)
    {
      // kk is the next polygon vertex after k (wrapping around).
      int kk = (k == (n-1) ? 0 : k+1);
      if(k == j || kk == j) continue;
      // Opposite signs of the two cross products mean segment (k,kk)
      // straddles the line through the origin and neighbour j.
      double cross1 = det(u[j],v[j],u[k],v[k]);
      double cross2 = det(u[j],v[j],u[kk],v[kk]);
      if(cross1 * cross2 <= 0.0)
      {
        double tau0,tau1,tau2;
        baryCoords0(u[j],v[j],u[k],v[k],u[kk],v[kk],tau0,tau1,tau2);
        weights[j]  += tau0;
        weights[k]  += tau1;
        weights[kk] += tau2;
        break;
      }
    }
  }
  // Scale the weights so that they sum to 1.
  // Before this, they sum to n.
  double ratio = 1.0 / (double)n;
  for(j=0; j<n; j++) weights[j] *= ratio;
  return true;
}
//-----------------------------------------------------------------------------
bool
PrPrmSurface::localParamXYZ(int i)
//-----------------------------------------------------------------------------
// Make a local parametrization for the interior
// node i of the given graph.
// This is based on a discretization of the geodesic polar map.
// The distances of the neighbours from i are preserved
// and the ratios of any two interior angles are also preserved
// It is assumed here that the indices of the neighbours of i
// have already been stored in neighbours_.
// M.F. Mar. 97
{
  alpha_.clear();
  len_.clear();
  u_.clear();
  v_.clear();
  int n = (int)neighbours_.size();
  double alpha_sum = 0.0;
  double alpha;
  int j;
  for (j=0; j<n; j++)
  {
    int j1 = neighbours_[j];
    Vector3D v1 = g_->get3dNode(j1) - g_->get3dNode(i);
    // get previous node
    int jprev = (j == 0 ? n-1 : j-1);
    int j2 = neighbours_[jprev];
    Vector3D v2 = g_->get3dNode(j2) - g_->get3dNode(i);
    // Preserve the 3D edge length from i to this neighbour...
    len_.push_back(v1.length());
    //len_[j] = sqrt(v1.norm()); // alternative
    //len_[j] = 1.0; // alternative
    // ...and the angle between consecutive neighbour edges.
    alpha = v1.angle(v2);
    alpha_.push_back(alpha);
    //alpha_[j] = sqrt(v1.angle(v2)); // alternative
    //alpha_[j] = 1.0; // alternative
    alpha_sum += alpha;
  }
  // Normalize the angles so they sum to 2*pi around node i.
  double factor = 2.0 * M_PI / alpha_sum;
  for(j=0; j<n; j++) alpha_[j] *= factor;
  // Turn per-edge angles into cumulative polar angles, starting at 0.
  alpha_[0] = 0.0;
  for(j=1; j<n; j++) alpha_[j] += alpha_[j-1];
  // Polar -> Cartesian: place each neighbour at distance len_[j] and
  // angle alpha_[j] in the local (u,v) plane.
  for(j=0; j<n; j++)
  {
    //v_[j].init(len_[j] * cos(alpha_[j]), len_[j] * sin(alpha_[j]));
    //v_.push_back(CgVector2d(len_[j] * cos(alpha_[j]), len_[j] * sin(alpha_[j])));
    u_.push_back(len_[j] * cos(alpha_[j]));
    v_.push_back(len_[j] * sin(alpha_[j]));
  }
  return true;
}
//-----------------------------------------------------------------------------
bool
PrPrmSurface::localParamUV(int i)
//-----------------------------------------------------------------------------
// Build the local parametrization of node i's neighbourhood in the current
// (u,v) parameter domain: each neighbour's parameter point is expressed
// relative to node i, which is placed at the origin.
// Assumes neighbours_ already holds the neighbour indices of node i.
// M.F. Mar. 97
{
  uold_.clear();
  vold_.clear();
  const int num_nbrs = (int)neighbours_.size();
  uold_.reserve(num_nbrs);
  vold_.reserve(num_nbrs);
  for (int k = 0; k < num_nbrs; ++k)
  {
    const int nbr = neighbours_[k];
    // Translate so that node i sits at the origin of the local frame.
    uold_.push_back(g_->getU(nbr) - g_->getU(i));
    vold_.push_back(g_->getV(nbr) - g_->getV(i));
  }
  return true;
}
// PUBLIC METHODS
//-----------------------------------------------------------------------------
PrPrmSurface::PrPrmSurface()
: PrParametrizeInt()
//-----------------------------------------------------------------------------
// Default constructor; all state is managed by the PrParametrizeInt base.
{
}

//-----------------------------------------------------------------------------
PrPrmSurface::~PrPrmSurface()
//-----------------------------------------------------------------------------
// Destructor; nothing to release beyond what the base class handles.
{
}
| agpl-3.0 |
studio666/gnu-social | lib/imagefile.php | 26441 | <?php
/**
* StatusNet, the distributed open-source microblogging tool
*
* Abstraction for an image file
*
* PHP version 5
*
* LICENCE: This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* @category Image
* @package StatusNet
* @author Evan Prodromou <evan@status.net>
* @author Zach Copley <zach@status.net>
* @copyright 2008-2009 StatusNet, Inc.
* @license http://www.fsf.org/licensing/licenses/agpl-3.0.html GNU Affero General Public License version 3.0
* @link http://status.net/
*/
if (!defined('GNUSOCIAL')) { exit(1); }
/**
* A wrapper on uploaded files
*
* Makes it slightly easier to accept an image file from upload.
*
* @category Image
* @package StatusNet
* @author Evan Prodromou <evan@status.net>
* @author Zach Copley <zach@status.net>
* @license http://www.fsf.org/licensing/licenses/agpl-3.0.html GNU Affero General Public License version 3.0
* @link http://status.net/
*/
class ImageFile
{
    var $id;        // File record id this image is attached to (may be null)
    var $filepath;  // local filesystem path of the image source file
    var $filename;  // basename of $filepath
    var $type;      // IMAGETYPE_* constant reported by getimagesize()
    var $height;    // pixel height of the source image
    var $width;     // pixel width of the source image
    var $rotate=0; // degrees to rotate for properly oriented image (extrapolated from EXIF etc.)
    var $animated = null; // Animated image? (has more than 1 frame). null means untested
    var $mimetype = null; // The _ImageFile_ mimetype, _not_ the originating File object
    protected $fileRecord = null; // backing File DB row, looked up when $id is given
    /**
     * @param int|null $id       File record id to attach, or null for an
     *                           image with no DB-backed File (fresh upload).
     * @param string   $filepath local filesystem path of the image source
     *
     * @throws NoResultException         if $id is set but no File row exists
     * @throws UnsupportedMediaException if the file is not a supported image
     */
    function __construct($id, $filepath)
    {
        $this->id = $id;

        if (!empty($this->id)) {
            $this->fileRecord = new File();
            $this->fileRecord->id = $this->id;
            if (!$this->fileRecord->find(true)) {
                // If we have set an ID, we need that ID to exist!
                throw new NoResultException($this->fileRecord);
            }
        }

        // These do not have to be the same as fileRecord->filename for example,
        // since we may have generated an image source file from something else!
        $this->filepath = $filepath;
        $this->filename = basename($filepath);

        // NOTE(review): if getimagesize() fails $info is false and the
        // $info[2] accesses below evaluate to null, which falls through to
        // the UnsupportedMediaException -- intentional but subtle.
        $info = @getimagesize($this->filepath);

        // Accept only types that getimagesize() recognizes AND that this
        // PHP/GD build can decode (BMP is handled by the fallback function
        // defined at the bottom of this file).
        if (!(
            ($info[2] == IMAGETYPE_GIF && function_exists('imagecreatefromgif')) ||
            ($info[2] == IMAGETYPE_JPEG && function_exists('imagecreatefromjpeg')) ||
            $info[2] == IMAGETYPE_BMP ||
            ($info[2] == IMAGETYPE_WBMP && function_exists('imagecreatefromwbmp')) ||
            ($info[2] == IMAGETYPE_XBM && function_exists('imagecreatefromxbm')) ||
            ($info[2] == IMAGETYPE_PNG && function_exists('imagecreatefrompng')))) {
            // TRANS: Exception thrown when trying to upload an unsupported image file format.
            throw new UnsupportedMediaException(_('Unsupported image format.'), $this->filepath);
        }

        $this->width = $info[0];
        $this->height = $info[1];
        $this->type = $info[2];
        $this->mimetype = $info['mime'];

        if ($this->type == IMAGETYPE_JPEG && function_exists('exif_read_data')) {
            // Orientation value to rotate thumbnails properly
            $exif = exif_read_data($this->filepath);
            if (is_array($exif) && isset($exif['Orientation'])) {
                switch ((int)$exif['Orientation']) {
                case 1: // top is top
                    $this->rotate = 0;
                    break;
                case 3: // top is bottom
                    $this->rotate = 180;
                    break;
                case 6: // top is right
                    $this->rotate = -90;
                    break;
                case 8: // top is left
                    $this->rotate = 90;
                    break;
                }
                // If we ever write this back, Orientation should be set to '1'
            }
        } elseif ($this->type === IMAGETYPE_GIF) {
            $this->animated = $this->isAnimatedGif();
        }

        // Let plugins fill in/override metadata (e.g. video thumbnailers).
        Event::handle('FillImageFileMetadata', array($this));
    }
    /**
     * Create an ImageFile from a File DB record, generating or locating a
     * local image source for it via the CreateFileImageThumbnailSource event.
     *
     * @param File $file
     * @return ImageFile
     * @throws UnsupportedMediaException for non-image media (SVG is special-
     *         cased via UseFileAsThumbnailException)
     * @throws ServerException if no local source image is available
     */
    public static function fromFileObject(File $file)
    {
        $imgPath = null;
        $media = common_get_mime_media($file->mimetype);
        // A plugin may set $imgPath and return false to supply its own
        // thumbnail source; otherwise fall back to the file itself below.
        if (Event::handle('CreateFileImageThumbnailSource', array($file, &$imgPath, $media))) {
            if (empty($file->filename)) {
                throw new UnsupportedMediaException(_('File without filename could not get a thumbnail source.'));
            }

            // First some mimetype specific exceptions
            switch ($file->mimetype) {
            case 'image/svg+xml':
                throw new UseFileAsThumbnailException($file->id);
            }

            // And we'll only consider it an image if it has such a media type
            switch ($media) {
            case 'image':
                $imgPath = $file->getPath();
                break;
            default:
                throw new UnsupportedMediaException(_('Unsupported media format.'), $file->getPath());
            }
        }

        if (!file_exists($imgPath)) {
            throw new ServerException(sprintf('Image not available locally: %s', $imgPath));
        }

        try {
            $image = new ImageFile($file->id, $imgPath);
        } catch (UnsupportedMediaException $e) {
            // Avoid deleting the original
            if ($imgPath != $file->getPath()) {
                unlink($imgPath);
            }
            throw $e;
        }
        return $image;
    }
public function getPath()
{
if (!file_exists($this->filepath)) {
throw new FileNotFoundException($this->filepath);
}
return $this->filepath;
}
    /**
     * Build an ImageFile from an uploaded file in $_FILES.
     *
     * @param string $param index into $_FILES
     * @return ImageFile wrapping the upload's temporary file
     * @throws Exception on upload errors (size, partial, unknown)
     * @throws UnsupportedMediaException if the upload is not a readable image
     */
    static function fromUpload($param='upload')
    {
        switch ($_FILES[$param]['error']) {
        case UPLOAD_ERR_OK: // success, jump out
            break;
        case UPLOAD_ERR_INI_SIZE:
        case UPLOAD_ERR_FORM_SIZE:
            // TRANS: Exception thrown when too large a file is uploaded.
            // TRANS: %s is the maximum file size, for example "500b", "10kB" or "2MB".
            throw new Exception(sprintf(_('That file is too big. The maximum file size is %s.'), ImageFile::maxFileSize()));
        case UPLOAD_ERR_PARTIAL:
            @unlink($_FILES[$param]['tmp_name']);
            // TRANS: Exception thrown when uploading an image and that action could not be completed.
            throw new Exception(_('Partial upload.'));
        case UPLOAD_ERR_NO_FILE:
            // No file; probably just a non-AJAX submission.
            // (intentional fall-through to the generic error below)
        default:
            common_log(LOG_ERR, __METHOD__ . ": Unknown upload error " . $_FILES[$param]['error']);
            // TRANS: Exception thrown when uploading an image fails for an unknown reason.
            throw new Exception(_('System error uploading file.'));
        }

        $info = @getimagesize($_FILES[$param]['tmp_name']);

        if (!$info) {
            @unlink($_FILES[$param]['tmp_name']);
            // TRANS: Exception thrown when uploading a file as image that is not an image or is a corrupt file.
            throw new UnsupportedMediaException(_('Not an image or corrupt file.'), '[deleted]');
        }

        // The constructor re-validates the detected image type.
        return new ImageFile(null, $_FILES[$param]['tmp_name']);
    }
    /**
     * Copy the image file to the given destination.
     *
     * This function may modify the resulting file. Please use the
     * returned ImageFile object to read metadata (width, height etc.)
     *
     * @param string $outpath full local filesystem target path
     * @return ImageFile the image stored at target path
     */
    function copyTo($outpath)
    {
        // resizeTo() with no box copies (or re-encodes/rotates) at full size.
        return new ImageFile(null, $this->resizeTo($outpath));
    }
    /**
     * Create and save a thumbnail image.
     *
     * @param string $outpath full local filesystem target filename
     * @param array $box width, height, boundary box (x,y,w,h) defaults to full image
     * @return string full local filesystem filename
     * @throws Exception if the source file has disappeared from disk
     * @throws UseFileAsThumbnailException if no output file was produced
     */
    function resizeTo($outpath, array $box=array())
    {
        // Fill in missing box values: default is the full, unscaled image.
        $box['width'] = isset($box['width']) ? intval($box['width']) : $this->width;
        $box['height'] = isset($box['height']) ? intval($box['height']) : $this->height;
        $box['x'] = isset($box['x']) ? intval($box['x']) : 0;
        $box['y'] = isset($box['y']) ? intval($box['y']) : 0;
        $box['w'] = isset($box['w']) ? intval($box['w']) : $this->width;
        $box['h'] = isset($box['h']) ? intval($box['h']) : $this->height;

        if (!file_exists($this->filepath)) {
            // TRANS: Exception thrown during resize when image has been registered as present, but is no longer there.
            throw new Exception(_('Lost our file.'));
        }

        // Don't rotate/crop/scale if it isn't necessary
        if ($box['width'] === $this->width
                && $box['height'] === $this->height
                && $box['x'] === 0
                && $box['y'] === 0
                && $box['w'] === $this->width
                && $box['h'] === $this->height
                && $this->type == $this->preferredType()) {
            if ($this->rotate == 0) {
                // No rotational difference, just copy it as-is
                @copy($this->filepath, $outpath);
                return $outpath;
            } elseif (abs($this->rotate) == 90) {
                // Box is rotated 90 degrees in either direction,
                // so we have to redefine x to y and vice versa.
                $tmp = $box['width'];
                $box['width'] = $box['height'];
                $box['height'] = $tmp;
                $tmp = $box['x'];
                $box['x'] = $box['y'];
                $box['y'] = $tmp;
                $tmp = $box['w'];
                $box['w'] = $box['h'];
                $box['h'] = $tmp;
            }
        }

        // Plugins (e.g. ImageMagick) may take over the actual resize here by
        // returning false from the event handler.
        if (Event::handle('StartResizeImageFile', array($this, $outpath, $box))) {
            $this->resizeToFile($outpath, $box);
        }

        if (!file_exists($outpath)) {
            throw new UseFileAsThumbnailException($this->id);
        }

        return $outpath;
    }
    /**
     * GD-based implementation of the actual resize/crop/rotate, writing the
     * result to $outpath in this image's preferredType() format.
     *
     * @param string $outpath full local filesystem target filename
     * @param array $box width/height (target size) and x/y/w/h (source crop)
     * @throws Exception on unknown source or target image type
     */
    protected function resizeToFile($outpath, array $box)
    {
        switch ($this->type) {
        case IMAGETYPE_GIF:
            $image_src = imagecreatefromgif($this->filepath);
            break;
        case IMAGETYPE_JPEG:
            $image_src = imagecreatefromjpeg($this->filepath);
            break;
        case IMAGETYPE_PNG:
            $image_src = imagecreatefrompng($this->filepath);
            break;
        case IMAGETYPE_BMP:
            // Uses the fallback defined at the bottom of this file when GD
            // does not provide imagecreatefrombmp() natively.
            $image_src = imagecreatefrombmp($this->filepath);
            break;
        case IMAGETYPE_WBMP:
            $image_src = imagecreatefromwbmp($this->filepath);
            break;
        case IMAGETYPE_XBM:
            $image_src = imagecreatefromxbm($this->filepath);
            break;
        default:
            // TRANS: Exception thrown when trying to resize an unknown file type.
            throw new Exception(_('Unknown file type'));
        }

        // Compensate for EXIF orientation before cropping/scaling.
        if ($this->rotate != 0) {
            $image_src = imagerotate($image_src, $this->rotate, 0);
        }

        $image_dest = imagecreatetruecolor($box['width'], $box['height']);

        // Preserve transparency for formats that can carry it.
        if ($this->type == IMAGETYPE_GIF || $this->type == IMAGETYPE_PNG || $this->type == IMAGETYPE_BMP) {
            $transparent_idx = imagecolortransparent($image_src);

            if ($transparent_idx >= 0) {
                // Palette transparency: re-allocate the same color in the target.
                $transparent_color = imagecolorsforindex($image_src, $transparent_idx);
                $transparent_idx = imagecolorallocate($image_dest, $transparent_color['red'], $transparent_color['green'], $transparent_color['blue']);
                imagefill($image_dest, 0, 0, $transparent_idx);
                imagecolortransparent($image_dest, $transparent_idx);
            } elseif ($this->type == IMAGETYPE_PNG) {
                // Full alpha channel for PNG targets.
                imagealphablending($image_dest, false);
                $transparent = imagecolorallocatealpha($image_dest, 0, 0, 0, 127);
                imagefill($image_dest, 0, 0, $transparent);
                imagesavealpha($image_dest, true);
            }
        }

        // Crop (x,y,w,h) from the source and resample into the full target.
        imagecopyresampled($image_dest, $image_src, 0, 0, $box['x'], $box['y'], $box['width'], $box['height'], $box['w'], $box['h']);

        switch ($this->preferredType()) {
        case IMAGETYPE_GIF:
            imagegif($image_dest, $outpath);
            break;
        case IMAGETYPE_JPEG:
            imagejpeg($image_dest, $outpath, common_config('image', 'jpegquality'));
            break;
        case IMAGETYPE_PNG:
            imagepng($image_dest, $outpath);
            break;
        default:
            // TRANS: Exception thrown when trying resize an unknown file type.
            throw new Exception(_('Unknown file type'));
        }

        imagedestroy($image_src);
        imagedestroy($image_dest);
    }
/**
* Several obscure file types should be normalized to PNG on resize.
*
* @fixme consider flattening anything not GIF or JPEG to PNG
* @return int
*/
function preferredType()
{
if($this->type == IMAGETYPE_BMP) {
//we don't want to save BMP... it's an inefficient, rare, antiquated format
//save png instead
return IMAGETYPE_PNG;
} else if($this->type == IMAGETYPE_WBMP) {
//we don't want to save WBMP... it's a rare format that we can't guarantee clients will support
//save png instead
return IMAGETYPE_PNG;
} else if($this->type == IMAGETYPE_XBM) {
//we don't want to save XBM... it's a rare format that we can't guarantee clients will support
//save png instead
return IMAGETYPE_PNG;
}
return $this->type;
}
    /**
     * Delete the image source file from disk. Failures (e.g. file already
     * gone) are deliberately suppressed; this is best-effort cleanup.
     */
    function unlink()
    {
        @unlink($this->filepath);
    }
    /**
     * Human-readable upper limit for image uploads, derived from the
     * effective PHP ini limits (see maxFileSizeInt()).
     *
     * @return string localized size string, e.g. "2MB", "500kB" or "42B"
     */
    static function maxFileSize()
    {
        $value = ImageFile::maxFileSizeInt();

        if ($value > 1024 * 1024) {
            $value = $value/(1024*1024);
            // TRANS: Number of megabytes. %d is the number.
            return sprintf(_m('%dMB','%dMB',$value),$value);
        } else if ($value > 1024) {
            $value = $value/1024;
            // TRANS: Number of kilobytes. %d is the number.
            return sprintf(_m('%dkB','%dkB',$value),$value);
        } else {
            // TRANS: Number of bytes. %d is the number.
            return sprintf(_m('%dB','%dB',$value),$value);
        }
    }
    /**
     * Effective maximum upload size in bytes: the most restrictive of
     * post_max_size, upload_max_filesize and memory_limit.
     *
     * @return int bytes
     */
    static function maxFileSizeInt()
    {
        return min(ImageFile::strToInt(ini_get('post_max_size')),
                   ImageFile::strToInt(ini_get('upload_max_filesize')),
                   ImageFile::strToInt(ini_get('memory_limit')));
    }
static function strToInt($str)
{
$unit = substr($str, -1);
$num = substr($str, 0, -1);
switch(strtoupper($unit)){
case 'G':
$num *= 1024;
case 'M':
$num *= 1024;
case 'K':
$num *= 1024;
}
return $num;
}
    /**
     * Compute scaling/cropping values for this image, taking its
     * EXIF-derived rotation into account.
     *
     * @see ImageFile::getScalingValues() for parameter/return semantics
     */
    public function scaleToFit($maxWidth=null, $maxHeight=null, $crop=null)
    {
        return self::getScalingValues($this->width, $this->height,
                                      $maxWidth, $maxHeight, $crop, $this->rotate);
    }
    /*
     * Gets scaling values for images of various types. Cropping can be enabled.
     *
     * Values will scale _up_ to fit max values if cropping is enabled!
     * With cropping disabled, the max value of each axis will be respected.
     *
     * @param $width int Original width
     * @param $height int Original height
     * @param $maxW int Resulting max width (null: thumbnail config default)
     * @param $maxH int Resulting max height (null: square crop of $maxW)
     * @param $crop int Crop to the size (not preserving aspect ratio)
     * @param $rotate int Degrees of rotation; +/-90 swaps the axes first
     *
     * @return array ($rw, $rh, $cx, $cy, $cw, $ch): target size followed by
     *               the crop box in original-image coordinates
     * @throws ServerException on bad size parameters
     */
    public static function getScalingValues($width, $height,
                                            $maxW=null, $maxH=null,
                                            $crop=null, $rotate=0)
    {
        $maxW = $maxW ?: common_config('thumbnail', 'width');
        $maxH = $maxH ?: common_config('thumbnail', 'height');

        if ($maxW < 1 || ($maxH !== null && $maxH < 1)) {
            throw new ServerException('Bad parameters for ImageFile::getScalingValues');
        } elseif ($maxH === null) {
            // if maxH is null, we set maxH to equal maxW and enable crop
            $maxH = $maxW;
            $crop = true;
        }

        // Because GD doesn't understand EXIF orientation etc.
        if (abs($rotate) == 90) {
            $tmp = $width;
            $width = $height;
            $height = $tmp;
        }

        // Cropping data (for original image size). Default values, 0 and null,
        // imply no cropping and with preserved aspect ratio (per axis).
        $cx = 0;    // crop x
        $cy = 0;    // crop y
        $cw = null; // crop area width
        $ch = null; // crop area height

        if ($crop) {
            $s_ar = $width / $height;
            $t_ar = $maxW / $maxH;

            $rw = $maxW;
            $rh = $maxH;

            // Source aspect ratio differs from target, recalculate crop points!
            if ($s_ar > $t_ar) {
                // Source is wider than target: trim left/right, keep height.
                $cx = floor($width / 2 - $height * $t_ar / 2);
                $cw = ceil($height * $t_ar);
            } elseif ($s_ar < $t_ar) {
                // Source is taller than target: trim top/bottom, keep width.
                $cy = floor($height / 2 - $width / $t_ar / 2);
                $ch = ceil($width / $t_ar);
            }
        } else {
            // Fit within (maxW, maxH) preserving aspect ratio.
            $rw = $maxW;
            $rh = ceil($height * $rw / $width);

            // Scaling caused too large height, decrease to max accepted value
            if ($rh > $maxH) {
                $rh = $maxH;
                $rw = ceil($width * $rh / $height);
            }
        }

        return array(intval($rw), intval($rh),
                     intval($cx), intval($cy),
                     is_null($cw) ? $width : intval($cw),
                     is_null($ch) ? $height : intval($ch));
    }
    /**
     * Animated GIF test, courtesy of frank at huddler dot com et al:
     * http://php.net/manual/en/function.imagecreatefromgif.php#104473
     * Modified so avoid landing inside of a header (and thus not matching our regexp).
     *
     * @return bool true if the file contains at least two GIF frames
     */
    protected function isAnimatedGif()
    {
        if (!($fh = @fopen($this->filepath, 'rb'))) {
            // Unreadable file: treat as not animated rather than erroring.
            return false;
        }

        $count = 0;
        //an animated gif contains multiple "frames", with each frame having a
        //header made up of:
        // * a static 4-byte sequence (\x00\x21\xF9\x04)
        // * 4 variable bytes
        // * a static 2-byte sequence (\x00\x2C)
        // In total the header is maximum 10 bytes.

        // We read through the file til we reach the end of the file, or we've
        // found at least 2 frame headers (no need to count beyond that).
        while(!feof($fh) && $count < 2) {
            $chunk = fread($fh, 1024 * 100); //read 100kb at a time
            $count += preg_match_all('#\x00\x21\xF9\x04.{4}\x00\x2C#s', $chunk, $matches);
            // rewind in case we ended up in the middle of the header, but avoid
            // infinite loop (i.e. don't rewind if we're already in the end).
            if (!feof($fh) && ftell($fh) >= 9) {
                fseek($fh, -9, SEEK_CUR);
            }
        }

        fclose($fh);
        return $count > 1;
    }
    /**
     * Get or generate a File_thumbnail for this image at the given size.
     *
     * An existing matching thumbnail record is returned directly; otherwise
     * the image is resized/cropped into the thumbnail directory and a new
     * File_thumbnail row is saved.
     *
     * @param int|null $width  target width (null: use thumbnail config)
     * @param int|null $height target height (null: square crop of $width)
     * @param bool $crop       crop to exact size instead of fitting
     * @return File_thumbnail
     * @throws ServerException if no File record is attached or the computed
     *         size parameters are out of range
     */
    public function getFileThumbnail($width, $height, $crop)
    {
        if (!$this->fileRecord instanceof File) {
            throw new ServerException('No File object attached to this ImageFile object.');
        }

        if ($width === null) {
            $width = common_config('thumbnail', 'width');
            $height = common_config('thumbnail', 'height');
            $crop = common_config('thumbnail', 'crop');
        }

        if ($height === null) {
            $height = $width;
            $crop = true;
        }

        // Get proper aspect ratio width and height before lookup
        // We have to do it through an ImageFile object because of orientation etc.
        // Only other solution would've been to rotate + rewrite uploaded files
        // which we don't want to do because we like original, untouched data!
        list($width, $height, $x, $y, $w, $h) = $this->scaleToFit($width, $height, $crop);

        $thumb = File_thumbnail::pkeyGet(array(
                                            'file_id'=> $this->fileRecord->id,
                                            'width' => $width,
                                            'height' => $height,
                                        ));
        if ($thumb instanceof File_thumbnail) {
            // Cache hit: a thumbnail of this exact size already exists.
            return $thumb;
        }

        $filename = $this->fileRecord->filehash ?: $this->filename;    // Remote files don't have $this->filehash
        $extension = File::guessMimeExtension($this->mimetype);
        $outname = "thumb-{$this->fileRecord->id}-{$width}x{$height}-{$filename}." . $extension;
        $outpath = File_thumbnail::path($outname);

        // The boundary box for our resizing
        $box = array('width'=>$width, 'height'=>$height,
                     'x'=>$x,        'y'=>$y,
                     'w'=>$w,        'h'=>$h);

        // Doublecheck that parameters are sane and integers.
        if ($box['width'] < 1 || $box['width'] > common_config('thumbnail', 'maxsize')
                || $box['height'] < 1 || $box['height'] > common_config('thumbnail', 'maxsize')
                || $box['w'] < 1 || $box['x'] >= $this->width
                || $box['h'] < 1 || $box['y'] >= $this->height) {
            // Fail on bad width parameter. If this occurs, it's due to algorithm in ImageFile->scaleToFit
            common_debug("Boundary box parameters for resize of {$this->filepath} : ".var_export($box,true));
            throw new ServerException('Bad thumbnail size parameters.');
        }

        common_debug(sprintf('Generating a thumbnail of File id==%u of size %ux%u', $this->fileRecord->id, $width, $height));

        // Perform resize and store into file
        $this->resizeTo($outpath, $box);

        // Avoid deleting the original
        if ($this->getPath() != File_thumbnail::path($this->filename)) {
            $this->unlink();
        }

        return File_thumbnail::saveThumbnail($this->fileRecord->id,
                                             File_thumbnail::url($outname),
                                             $width, $height,
                                             $outname);
    }
}
//PHP doesn't (as of 2/24/2010) have an imagecreatefrombmp so conditionally define one
if(!function_exists('imagecreatefrombmp')){
    //taken shamelessly from http://www.php.net/manual/en/function.imagecreatefromwbmp.php#86214
    /**
     * Minimal BMP loader: reads the header for dimensions, then walks the
     * pixel data bottom-up in BGR byte order into a truecolor GD image.
     *
     * NOTE(review): this assumes an uncompressed 24-bit BMP whose pixel data
     * starts right after a fixed-size header (108 hex chars); palette, RLE
     * and 16/32-bit variants are not handled. Also, $width/$height stay
     * undefined when the "424d" magic check fails -- confirm callers only
     * pass files already identified as BMP by getimagesize().
     *
     * @param string $p_sFile path to a .bmp file
     * @return resource GD truecolor image
     */
    function imagecreatefrombmp($p_sFile)
    {
        // Load the image into a string
        $file = fopen($p_sFile,"rb");
        $read = fread($file,10);
        while(!feof($file)&&($read<>""))
            $read .= fread($file,1024);
        // Work on a hex dump of the file (2 hex chars per byte).
        $temp = unpack("H*",$read);
        $hex = $temp[1];
        $header = substr($hex,0,108);

        // Process the header
        // Structure: http://www.fastgraph.com/help/bmp_header_format.html
        if (substr($header,0,4)=="424d")  // "BM" magic bytes
        {
            // Cut it in parts of 2 bytes
            $header_parts = str_split($header,2);

            // Get the width        4 bytes (little-endian)
            $width = hexdec($header_parts[19].$header_parts[18]);

            // Get the height        4 bytes (little-endian)
            $height = hexdec($header_parts[23].$header_parts[22]);

            // Unset the header params
            unset($header_parts);
        }

        // Define starting X and Y
        $x = 0;
        $y = 1;

        // Create newimage
        $image = imagecreatetruecolor($width,$height);

        // Grab the body from the image
        $body = substr($hex,108);

        // Calculate if padding at the end-line is needed
        // Divided by two to keep overview.
        // 1 byte = 2 HEX-chars
        $body_size = (strlen($body)/2);
        $header_size = ($width*$height);

        // Use end-line padding? Only when needed
        $usePadding = ($body_size>($header_size*3)+4);

        // Using a for-loop with index-calculation instaid of str_split to avoid large memory consumption
        // Calculate the next DWORD-position in the body
        for ($i=0;$i<$body_size;$i+=3)
        {
            // Calculate line-ending and padding
            if ($x>=$width)
            {
                // If padding needed, ignore image-padding
                // Shift i to the ending of the current 32-bit-block
                if ($usePadding)
                    $i += $width%4;

                // Reset horizontal position
                $x = 0;

                // Raise the height-position (bottom-up)
                $y++;

                // Reached the image-height? Break the for-loop
                if ($y>$height)
                    break;
            }

            // Calculation of the RGB-pixel (defined as BGR in image-data)
            // Define $i_pos as absolute position in the body
            $i_pos = $i*2;
            $r = hexdec($body[$i_pos+4].$body[$i_pos+5]);
            $g = hexdec($body[$i_pos+2].$body[$i_pos+3]);
            $b = hexdec($body[$i_pos].$body[$i_pos+1]);

            // Calculate and draw the pixel
            $color = imagecolorallocate($image,$r,$g,$b);
            imagesetpixel($image,$x,$height-$y,$color);

            // Raise the horizontal position
            $x++;
        }

        // Unset the body / free the memory
        unset($body);

        // Return image-object
        return $image;
    }
} // if(!function_exists('imagecreatefrombmp'))
| agpl-3.0 |
HotChalk/canvas-lms | gems/canvas_i18nliner/js/generate_js.js | 1211 | var I18nliner = require("i18nliner");
var Commands = I18nliner.Commands;
var Check = Commands.Check;
var mkdirp = require("mkdirp");
var fs = require("fs");
/*
 * GenerateJs determines what needs to go into each i18n js bundle (one
 * per "i18n!scope"), based on the I18n.t calls in the code
 *
 * outputs a json file containing a mapping of scopes <-> translation keys,
 * e.g.
 *
 *   {
 *     "users": [
 *       "users.title",
 *       "users.labels.foo",
 *       "foo_bar_baz" // could be from a different scope, if called within the users scope
 *     ],
 *     "groups": [
 *       ...
 *     ],
 *     ...
 *   }
 */
// Constructor: delegates to the i18nliner Check command, which scans the
// source tree for I18n.t calls and collects the translations.
function GenerateJs(options) {
  Check.call(this, options)
}

// Classic prototypal inheritance from Check.
GenerateJs.prototype = Object.create(Check.prototype);
GenerateJs.prototype.constructor = GenerateJs;

// Runs the underlying Check; on success, writes the scope -> translation-key
// mapping as JSON to outputFile (default under config/locales/generated).
// Returns false if the check failed, true otherwise.
GenerateJs.prototype.run = function() {
  var success = Check.prototype.run.call(this);
  if (!success) return false;
  var keysByScope = this.translations.keysByScope();
  this.outputFile = './' + (this.options.outputFile || "config/locales/generated/js_bundles.json");
  // Ensure the containing directory exists (strip the trailing filename).
  mkdirp.sync(this.outputFile.replace(/\/[^\/]+$/, ''));
  fs.writeFileSync(this.outputFile, JSON.stringify(keysByScope));
  return true;
};

module.exports = GenerateJs;
| agpl-3.0 |
Aca-jov/superdesk-client-core | scripts/apps/search/directives/index.js | 821 | export {ItemList} from './ItemList';
export {SearchPanel} from './SearchPanel';
export {SearchTags} from './SearchTags';
export {SearchResults} from './SearchResults';
export {SearchContainer} from './SearchContainer';
export {SearchParameters} from './SearchParameters';
export {SaveSearch} from './SaveSearch';
export {ItemContainer} from './ItemContainer';
export {ItemPreview} from './ItemPreview';
export {ItemGlobalSearch} from './ItemGlobalSearch';
export {ItemSearchbar} from './ItemSearchbar';
export {ItemRepo} from './ItemRepo';
export {ItemSortbar} from './ItemSortbar';
export {SavedSearchSelect} from './SavedSearchSelect';
export {SavedSearches} from './SavedSearches';
export {MultiActionBar} from './MultiActionBar';
export {RawSearch} from './RawSearch';
export {SearchFilters} from './SearchFilters';
| agpl-3.0 |
venturehive/canvas-lms | app/models/quizzes/quiz_user_finder.rb | 1911 | #
# Copyright (C) 2014 - present Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
module Quizzes
  # Resolves which students have or have not taken a quiz, honoring the
  # viewing user's visibility and differentiated-assignment rules.
  class QuizUserFinder
    extend Forwardable

    attr_reader :quiz, :user

    def_delegators :@quiz, :context, :quiz_submissions, :differentiated_assignments_applies?

    # quiz - the Quizzes::Quiz being inspected
    # user - the user whose visibility scopes the student lists
    def initialize(quiz, user)
      @quiz = quiz
      @user = user
    end

    # Visible students who have at least one real (non-preview,
    # non-settings-only) submission for the quiz.
    def submitted_students
      all_students_with_visibility.where(id: non_preview_user_ids)
    end

    # Visible students without a real submission for the quiz.
    def unsubmitted_students
      # `where('users.id NOT IN (?)', subquery)` expands to `NOT IN (NULL)`
      # when the subquery is empty, which matches *no* rows -- i.e. nobody
      # appeared unsubmitted before anyone had submitted. `where.not`
      # generates correct SQL for both the empty and non-empty cases.
      all_students_with_visibility.where.not(id: non_preview_user_ids)
    end

    # All students visible to the user (including inactive enrollments),
    # ordered by sortable name.
    def all_students
      context.students_visible_to(user, include: :inactive).order_by_sortable_name.group('users.id')
    end

    # Same as all_students, additionally filtered by per-student quiz
    # visibility when differentiated assignments apply to this quiz.
    def all_students_with_visibility
      if differentiated_assignments_applies?
        all_students.able_to_see_quiz_in_course_with_da(@quiz.id, context.id)
      else
        all_students
      end
    end

    def non_preview_quiz_submissions
      # This could optionally check for temporary_user_code<>NULL, but
      # that's not indexed and we're checking user_id anyway in the queries above.
      quiz_submissions.where('quiz_submissions.user_id IS NOT NULL')
    end

    private

    # Subquery of user ids that hold a real submission for the quiz.
    def non_preview_user_ids
      non_preview_quiz_submissions.not_settings_only.select(:user_id)
    end
  end
end
| agpl-3.0 |
subutai/nupic.core | src/nupic/math/Convolution.hpp | 3680 | /* ---------------------------------------------------------------------
* Numenta Platform for Intelligent Computing (NuPIC)
* Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
* with Numenta, Inc., for a separate license for this software code, the
* following terms and conditions apply:
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero Public License for more details.
*
* You should have received a copy of the GNU Affero Public License
* along with this program. If not, see http://www.gnu.org/licenses.
*
* http://numenta.org/licenses/
* ---------------------------------------------------------------------
*/
/** @file
* Declarations for convolutions
*/
#ifndef NTA_CONVOLUTION_HPP
#define NTA_CONVOLUTION_HPP
//--------------------------------------------------------------------------------
/**
 * Computes convolutions in 2D, for separable kernels.
 *
 * Call init() once with the image size and the two 1D filter tap arrays,
 * then compute() per image. Pass 1 correlates each row with f1 into an
 * internal scratch buffer; pass 2 correlates each buffer column with f2
 * into the output. Border pixels where a filter does not fully fit are
 * left untouched in the output.
 *
 * The filter arrays are referenced, not copied, and must outlive compute().
 * The scratch buffer is owned; the class is deliberately non-copyable
 * (copying would double-free buffer_).
 */
template <typename T> struct SeparableConvolution2D {
  typedef size_t size_type;
  typedef T value_type;

  size_type nrows_;     // image height
  size_type ncols_;     // image width
  size_type f1_size_;   // number of taps in the horizontal filter f1
  size_type f2_size_;   // number of taps in the vertical filter f2
  size_type f1_end_j_;  // number of valid f1 positions per row
  size_type f2_end_i_;  // number of valid f2 positions per column
  size_type f1_middle_; // f1 half-width, used to center the row output
  size_type f2_middle_; // f2 half-width, used to center the column output
  T *f1_;               // horizontal filter taps (not owned)
  T *f2_;               // vertical filter taps (not owned)
  T *f1_end_;           // one past the last tap of f1
  T *f2_end_;           // one past the last tap of f2
  T *buffer_;           // owned nrows*ncols scratch for the row-pass result

  /**
   * nrows is the number of rows in the original image, and ncols
   * is the number of columns. Safe to call more than once: the previous
   * scratch buffer is released (it used to leak on re-init).
   */
  inline void init(size_type nrows, size_type ncols, size_type f1_size,
                   size_type f2_size, T *f1, T *f2)
  {
    nrows_ = nrows;
    ncols_ = ncols;
    f1_size_ = f1_size;
    f2_size_ = f2_size;
    f1_end_j_ = ncols - f1_size + 1;
    f2_end_i_ = nrows - f2_size + 1;
    f1_middle_ = f1_size / 2;
    f2_middle_ = f2_size / 2;
    f1_ = f1;
    f2_ = f2;
    f1_end_ = f1 + f1_size;
    f2_end_ = f2 + f2_size;
    delete[] buffer_; // release any buffer from a previous init()
    buffer_ = new T[nrows * ncols];
  }

  inline SeparableConvolution2D() : buffer_(NULL) {}

  inline ~SeparableConvolution2D() {
    delete[] buffer_;
    buffer_ = NULL;
  }

  /**
   * Computes the convolution of an image in data with the two 1D
   * filters f1 and f2, and puts the result in convolved.
   * The rotated45 flag is currently unused.
   *
   * Down-sampling?
   */
  inline void compute(T *data, T *convolved, bool rotated45 = false) {
    // Pass 1: correlate each row with f1, writing centered into buffer_.
    for (size_type i = 0; i != nrows_; ++i) {
      T *b = buffer_ + i * ncols_ + f1_middle_, *d_row = data + i * ncols_;
      for (size_type j = 0; j != f1_end_j_; ++j) {
        T dot = 0, *f = f1_, *d = d_row + j;
        while (f != f1_end_)
          dot += *f++ * *d++;
        *b++ = dot;
      }
    }
    // Pass 2: correlate each buffer column with f2, writing centered rows
    // into convolved (stride ncols_ walks down a column).
    for (size_type i = 0; i != f2_end_i_; ++i) {
      T *c = convolved + (i + f2_middle_) * ncols_,
        *b_row = buffer_ + i * ncols_;
      for (size_type j = 0; j != ncols_; ++j) {
        T dot = 0, *f = f2_, *b = b_row + j;
        while (f != f2_end_) {
          dot += *f++ * *b;
          b += ncols_;
        }
        *c++ = dot;
      }
    }
  }

private:
  // Owns buffer_: copying would double-free, so copying is disallowed
  // (declared but not defined -- C++03-compatible idiom).
  SeparableConvolution2D(const SeparableConvolution2D &);
  SeparableConvolution2D &operator=(const SeparableConvolution2D &);
};
//--------------------------------------------------------------------------------
#endif // NTA_CONVOLUTION_HPP
| agpl-3.0 |
mrcarlberg/cappuccino | Objective-J/Bootstrap.js | 5807 | /*
* Bootstrap.js
* Objective-J
*
* Created by Francisco Tolmasky.
* Copyright 2010, 280 North, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef COMMONJS
// Command-line (CommonJS) environment: the main bundle is simply the
// current working directory.
var mainBundleURL = new CFURL("file:" + require("file").cwd()).asDirectoryPathURL();
#elif defined(BROWSER)
// This is automatic when importing, but we'd like these important URLs to
// be taken into consideration in the cache as well.
enableCFURLCaching();
// To determine where our application lives, start with the current URL of the page.
var pageURL = new CFURL(window.location.href),
    // Look for any <base> tags and choose the last one (which is the one that will take effect).
    DOMBaseElements = document.getElementsByTagName("base"),
    DOMBaseElementsCount = DOMBaseElements.length;
if (DOMBaseElementsCount > 0)
{
    var DOMBaseElement = DOMBaseElements[DOMBaseElementsCount - 1],
        DOMBaseElementHref = DOMBaseElement && DOMBaseElement.getAttribute("href");
    // If we have one, use it instead.
    if (DOMBaseElementHref)
        pageURL = new CFURL(DOMBaseElementHref, pageURL);
}
// Set compiler flags
// Translate the page-level OBJJ_COMPILER_FLAGS string array into the option
// object understood by FileExecutable; unknown flag names are silently ignored.
if (typeof OBJJ_COMPILER_FLAGS !== 'undefined')
{
    var flags = {};
    for (var i = 0; i < OBJJ_COMPILER_FLAGS.length; i++)
    {
        switch (OBJJ_COMPILER_FLAGS[i])
        {
            case "IncludeDebugSymbols":
                flags.includeMethodFunctionNames = true;
                break;
            case "IncludeTypeSignatures":
                flags.includeIvarTypeSignatures = true;
                flags.includeMethodArgumentTypeSignatures = true;
                break;
            case "InlineMsgSend":
                flags.inlineMsgSendFunctions = true;
                break;
        }
    }
    FileExecutable.setCurrentCompilerFlags(flags);
}
// Turn the main file into a URL.
var mainFileURL = new CFURL(window.OBJJ_MAIN_FILE || "main.j"),
    // The main bundle is the containing folder of the main file.
    mainBundleURL = new CFURL(".", new CFURL(mainFileURL, pageURL)).absoluteURL(),
    // We assume the "first part" of the path is completely resolved.
    assumedResolvedURL = new CFURL("..", mainBundleURL).absoluteURL();
// .. doesn't work if we're already at root, so "go back" one more level to the scheme and authority.
if (mainBundleURL === assumedResolvedURL)
    assumedResolvedURL = new CFURL(assumedResolvedURL.schemeAndAuthority());
// Register the resolved ancestor as a known static resource root.
StaticResource.resourceAtURL(assumedResolvedURL, YES);
exports.pageURL = pageURL;
// Public entry point: kick off resolution of the main bundle and, from
// there, the whole application.
exports.bootstrap = function()
{
    resolveMainBundleURL();
}
// Resolves the main bundle resource, imports the main file, then - once the
// document has loaded - parses URL hash ("/"-separated positional args) and
// query string (named args) and invokes the application's main(args, namedArgs).
function resolveMainBundleURL()
{
    StaticResource.resolveResourceAtURL(mainBundleURL, YES, function(/*StaticResource*/ aResource)
    {
        // Pre-register any extra include URLs under the resolved bundle.
        var includeURLs = StaticResource.includeURLs(),
            index = 0,
            count = includeURLs.length;
        for (; index < count; ++index)
            aResource.resourceAtURL(includeURLs[index], YES);
        // Import the main file (e.g. main.j) relative to the main bundle.
        Executable.fileImporterForURL(mainBundleURL)(mainFileURL.lastPathComponent(), YES, function()
        {
            // Caching was only needed during bootstrap (see enableCFURLCaching above).
            disableCFURLCaching();
            afterDocumentLoad(function()
            {
                // Positional arguments come from the fragment: #a/b/c -> ["a","b","c"].
                var hashString = window.location.hash.substring(1),
                    args = [];
                if (hashString.length)
                {
                    args = hashString.split("/");
                    for (var i = 0, count = args.length; i < count; i++)
                        args[i] = decodeURIComponent(args[i]);
                }
                // Named arguments come from the query string: ?k=v&flag -> {k: "v", flag: true}.
                var namedArgsArray = window.location.search.substring(1).split("&"),
                    namedArgs = new CFMutableDictionary();
                for (var i = 0, count = namedArgsArray.length; i < count; i++)
                {
                    var thisArg = namedArgsArray[i].split("=");
                    if (!thisArg[0])
                        continue;
                    // A bare key with no "=" becomes a boolean true flag.
                    if (thisArg[1] == null)
                        thisArg[1] = true;
                    namedArgs.setValueForKey(decodeURIComponent(thisArg[0]), decodeURIComponent(thisArg[1]));
                }
                main(args, namedArgs);
            });
        });
    });
}
var documentLoaded = NO;
// Runs aFunction once the document has finished loading.  If it already has,
// the function is invoked immediately and its result returned; otherwise it is
// queued on the window "load" event (with an attachEvent fallback for legacy IE).
function afterDocumentLoad(/*Function*/ aFunction)
{
    if (documentLoaded)
        return aFunction();

    if (document.readyState === "complete")
        return aFunction();

    if (window.addEventListener)
        window.addEventListener("load", aFunction, NO);
    else if (window.attachEvent)
        window.attachEvent("onload", aFunction);
}
// Record that the document has loaded so later afterDocumentLoad() callers
// run synchronously.
afterDocumentLoad(function()
{
    documentLoaded = YES;
});
// Bootstrap automatically unless the page explicitly opts out by setting
// OBJJ_AUTO_BOOTSTRAP to a falsy value.
if (typeof OBJJ_AUTO_BOOTSTRAP === "undefined" || OBJJ_AUTO_BOOTSTRAP)
    exports.bootstrap();
#endif
// Returns aURL unchanged when it is already an absolute CFURL (i.e. it has a
// scheme); otherwise resolves it against the main bundle URL.
function makeAbsoluteURL(/*CFURL|String*/ aURL)
{
    var alreadyAbsolute = (aURL instanceof CFURL) && aURL.scheme();

    return alreadyAbsolute ? aURL : new CFURL(aURL, mainBundleURL);
}
// Expose the bundle-relative import/execute helpers as globals.
GLOBAL(objj_importFile) = Executable.fileImporterForURL(mainBundleURL);
GLOBAL(objj_executeFile) = Executable.fileExecuterForURL(mainBundleURL);
// Deprecated alias kept for backwards compatibility: warns, then forwards all
// arguments to objj_importFile.
GLOBAL(objj_import) = function()
{
    CPLog.warn("objj_import is deprecated, use objj_importFile instead");
    objj_importFile.apply(this, arguments);
}
| lgpl-2.1 |
zebrafishmine/intermine | intermine/web/main/src/org/intermine/webservice/server/output/JSONResultsIterator.java | 18465 | package org.intermine.webservice.server.output;
/*
* Copyright (C) 2002-2016 FlyMine
*
* This code may be freely distributed and modified under the
* terms of the GNU Lesser General Public Licence. This should
* be distributed with the code. See the LICENSE file for more
* information or http://www.gnu.org/copyleft/lesser.html.
*
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.intermine.api.results.ExportResultsIterator;
import org.intermine.api.results.ResultElement;
import org.intermine.metadata.ClassDescriptor;
import org.intermine.metadata.MetaDataException;
import org.intermine.metadata.Model;
import org.intermine.metadata.ReferenceDescriptor;
import org.intermine.objectstore.query.ClobAccess;
import org.intermine.pathquery.ConstraintValueParser;
import org.intermine.pathquery.Path;
import org.json.JSONObject;
/**
 * A class to produce a sequence of JSONObjects from a set of database rows. This requires
 * that the view be set up in a specific way: consecutive rows belonging to the same
 * top-level object (identified by the id in the first column) are merged into one
 * JSONObject.
 * @author Alexis Kalderimis
 *
 */
public class JSONResultsIterator implements Iterator<JSONObject>
{

    private static final String CLASS_KEY = "class";
    private static final String ID_KEY = "objectId";
    private static final String ID_FIELD = "id";

    private final ExportResultsIterator subIter;
    // A row read past the current object's boundary; replayed by the next call to next().
    private List<ResultElement> holdOver;
    private final List<Path> viewPaths = new ArrayList<Path>();
    protected transient Map<String, Object> currentMap;
    protected transient List<Map<String, Object>> currentArray;
    private Model model;

    /**
     * Constructor. The JSON Iterator sits on top of the basic export results iterator.
     * @param it An ExportResultsIterator
     */
    public JSONResultsIterator(ExportResultsIterator it) {
        this.subIter = it;
        init();
    }

    // Caches the model and the view paths from the underlying query.
    private void init() {
        model = subIter.getQuery().getModel();
        viewPaths.addAll(subIter.getViewPaths());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean hasNext() {
        // There is another object either if the underlying iterator has more rows,
        // or if a row was read ahead and is waiting to be consumed.
        return subIter.hasNext() || holdOver != null;
    }

    /**
     * {@inheritDoc}
     *
     * Consumes rows from the underlying iterator until the object id in the first
     * column changes, merging all rows for one object into a single JSONObject.
     * NOTE(review): when the sequence is exhausted this returns an empty JSONObject
     * rather than throwing NoSuchElementException - callers are expected to check
     * hasNext() first.
     */
    @Override
    public JSONObject next() {
        Map<String, Object> nextJsonMap = new HashMap<String, Object>();
        Integer lastId = null;

        if (holdOver != null) {
            // Replay the row read ahead by the previous call.
            lastId = holdOver.get(0).getId();
            addRowToJsonMap(holdOver, nextJsonMap);
            holdOver = null;
        }
        while (subIter.hasNext()) {
            List<ResultElement> result = subIter.next();
            Integer currentId = result.get(0).getId(); // id is guarantor of
                                                       // object identity
            if (lastId != null && !lastId.equals(currentId)) {
                // This row belongs to the next object - keep it for the next call.
                holdOver = result;
                lastId = currentId;
                break;
            } else {
                addRowToJsonMap(result, nextJsonMap);
                lastId = currentId;
            }
        }
        JSONObject nextObj = new JSONObject(nextJsonMap);
        return nextObj;
    }

    // Merges one result row into the JSON map being built for the current object.
    private void addRowToJsonMap(List<ResultElement> results,
            Map<String, Object> jsonMap) {
        setOrCheckClassAndId(results.get(0), viewPaths.get(0), jsonMap);

        for (int i = 0; i < results.size(); i++) {
            ResultElement cell = results.get(i);
            if (cell == null || cell.getType() == null) {
                continue; // outer-joined cell with no data
            }
            Path columnPath = viewPaths.get(i);
            addCellToJsonMap(cell, columnPath, jsonMap);
        }
    }

    /**
     * Test whether a result element matches the type of its path.
     * @param cell The Result element
     * @param path The path which represents the view column
     * @return true if the cell is null, or contains null information, extends/implements the
     * type of path.
     */
    protected boolean isCellValidForPath(ResultElement cell, Path path) {
        if (cell == null || cell.getType() == null) {
            return true;
        }
        return aIsaB(cell.getType(), path.getLastClassDescriptor().getName());
    }

    /**
     * Test whether a class named "A" is, or descends from, a class named "B".
     * @param a The name of a class
     * @param b The name of a class
     * @return True if a isa b
     * @throws IllegalArgumentException if the names are not valid class names
     */
    protected boolean aIsaB(String a, String b) {
        if (a == null && b == null) {
            return true;
        }
        ClassDescriptor aCls = model.getClassDescriptorByName(a);
        ClassDescriptor bCls = model.getClassDescriptorByName(b);
        if (aCls == null || bCls == null) {
            throw new IllegalArgumentException(
                    "These names are not valid classes: a=" + a + ",b=" + b);
        }
        if (aCls.equals(bCls)) {
            return true;
        }
        return aDescendsFromB(aCls.getName(), bCls.getName());
    }

    /**
     * Test whether a class named "a" descends from a class named "b".
     * @param a the name of a class
     * @param b the name of a class
     * @return True if a descends from b
     * @throws JSONFormattingException if we can't get the super classes for a
     */
    protected boolean aDescendsFromB(String a, String b) {
        Set<String> supers;
        try {
            supers = ClassDescriptor.findSuperClassNames(model, a);
        } catch (MetaDataException e) {
            throw new JSONFormattingException("Problem getting supers for " + a, e);
        }
        return supers.contains(b);
    }

    /**
     * Sets the basic information (class and objectId) on the jsonMap provided. If the map already
     * has values, it makes sure that these are compatible with those of the result element given.
     * @param cell The result element
     * @param path The path representing the column
     * @param jsonMap The map to put the field on
     * @throws JSONFormattingException if the details do not match
     */
    protected void setOrCheckClassAndId(ResultElement cell, Path path,
            Map<String, Object> jsonMap) {
        setOrCheckClass(cell, path, jsonMap);
        setOrCheckId(cell, jsonMap);
    }

    /**
     * Set the class, or if one is already set on the map, check that this one is valid for it.
     * @param cell The result element
     * @param path The path it represents
     * @param jsonMap The map to set it onto
     */
    protected void setOrCheckClass(ResultElement cell, Path path, Map<String, Object> jsonMap) {
        String thisType = path.getLastClassDescriptor().getUnqualifiedName();
        if (jsonMap.containsKey(CLASS_KEY)) {
            String storedType = (String) jsonMap.get(CLASS_KEY);
            if (!aIsaB(cell.getType(), storedType)) {
                throw new JSONFormattingException(
                        "This result element (" + cell + ") does not belong on this map (" + jsonMap
                        + ") - classes don't match (" + cell.getType() + " ! isa "
                        + jsonMap.get(CLASS_KEY) + ")");
            }
        }
        if (isCellValidForPath(cell, path)) {
            jsonMap.put(CLASS_KEY, cell.getType());
        } else {
            throw new JSONFormattingException(
                    "This result element (" + cell + ") does not match its column because: "
                    + "classes not compatible " + "(" + thisType + " is not a superclass of "
                    + cell.getType() + ")");
        }
    }

    /**
     * Set the id, or if one is already set, check that it matches the one on the map.
     * @param cell The result element.
     * @param jsonMap The map to set it onto,
     */
    protected void setOrCheckId(ResultElement cell, Map<String, Object> jsonMap) {
        Integer cellId = cell.getId();
        if (jsonMap.containsKey(ID_KEY)) {
            Object mapId = jsonMap.get(ID_KEY);
            if (cellId != null && mapId != null && !jsonMap.get(ID_KEY).equals(cell.getId())) {
                throw new JSONFormattingException(
                        "This result element (" + cell + ") does not belong on this map (" + jsonMap
                        + ") - objectIds don't match (" + jsonMap.get(ID_KEY) + " != " + cell.getId()
                        + ")");
            }
        } else {
            // If these are simple objects, then just cross our fingers and pray...
            // TODO: fix this abomination, and actually handle simple objects properly.
            jsonMap.put(ID_KEY, cell.getId());
        }
    }

    // Dispatches a cell either to attribute handling (single field) or to the
    // reference/collection trail handling.
    private void addCellToJsonMap(ResultElement cell, Path column,
            Map<String, Object> rootMap) {
        if (column.isOnlyAttribute()) {
            addFieldToMap(cell, column, rootMap);
        } else {
            addReferencedCellToJsonMap(cell, column, rootMap);
        }
    }

    /**
     * Adds the attributes contained in the cell to the map given. It will not set the id attribute,
     * as that is handled by setOrCheckClassAndId.
     * @param cell The result element
     * @param column The path representing the view column
     * @param objectMap The map to put the values on
     * @throws JSONFormattingException if the map already has values for this attribute
     * and they are different to the ones in the cell
     */
    protected void addFieldToMap(ResultElement cell, Path column,
            Map<String, Object> objectMap) {
        setOrCheckClassAndId(cell, column, objectMap);
        String key = column.getLastElement();
        if (ID_FIELD.equals(key)) {
            return; // the id is stored under ID_KEY, never as a plain field
        }

        Object newValue;
        // Normalise values that don't serialise cleanly to JSON as-is.
        if (cell.getField() instanceof Date) {
            newValue = ConstraintValueParser.ISO_DATE_FORMAT.format(cell.getField());
        } else if (cell.getField() instanceof ClobAccess) {
            newValue = cell.getField().toString();
        } else {
            newValue = cell.getField();
        }
        if (newValue instanceof CharSequence) {
            newValue = newValue.toString();
        }

        if (objectMap.containsKey(key)) {
            Object current = objectMap.get(key);
            if (current == null) {
                if (newValue != null) {
                    throw new JSONFormattingException(
                            "Trying to set key " + key + " as " + newValue
                            + " in " + objectMap + " but it already has the value "
                            + current
                    );
                }
            } else {
                if (!current.equals(newValue)) {
                    throw new JSONFormattingException(
                            "Trying to set key " + key + " as " + newValue
                            + " in " + objectMap + " but it already has the value "
                            + current);
                }
            }
        } else {
            objectMap.put(key, newValue);
        }
    }

    /**
     * Adds the information from a cell representing a reference to the map given.
     * @param cell A cell representing the end of a trail of references.
     * @param column The view column.
     * @param objectMap The map to put the nested trail of object onto
     * @throws JSONFormattingException if the paths that make up the trail
     * contain one that is not an attribute, collection or reference (the only known types at
     * present)
     */
    protected void addReferencedCellToJsonMap(ResultElement cell, Path column,
            Map<String, Object> objectMap) {

        currentMap = objectMap;
        // Walk the decomposed path, descending through the nested maps/arrays.
        List<Path> columnSections = column.decomposePath();
        for (Path section : columnSections) {
            if (section.isRootPath()) {
                continue;
            } else if (section.endIsAttribute()) {
                addAttributeToCurrentNode(section, cell);
            } else if (section.endIsReference()) {
                addReferenceToCurrentNode(section);
            } else if (section.endIsCollection()) {
                addCollectionToCurrentNode(section);
            } else {
                throw new JSONFormattingException(
                        "Bad path type: " + section.toString());
            }
        }
    }

    /**
     * Finds the object we should be dealing with by getting it from the current array. A search
     * is made by looking for a map which has the objectId attribute set to the same value as the
     * result element we have. If no such map exists, a new one is added to the array.
     * @param cell A result element
     * @throws JSONFormattingException if the current array is null, or contains bad entries.
     */
    protected void setCurrentMapFromCurrentArray(ResultElement cell) {
        if (currentArray == null) {
            throw new JSONFormattingException("Nowhere to put this field");
        }
        boolean foundMap = false;
        for (Map<String, Object> obj : currentArray) {
            if (obj == null) {
                throw new JSONFormattingException("null map found in current array");
            }
            if (cell == null) {
                throw new JSONFormattingException("trying to add null cell to current array");
            }
            if (cell.getId() == null) {
                foundMap = obj.get(ID_KEY) == null;
            } else {
                // Compare from the known-non-null side: obj.get(ID_KEY) may be null,
                // and calling equals on it would throw an NPE.
                foundMap = cell.getId().equals(obj.get(ID_KEY));
            }
            if (foundMap) {
                currentMap = obj;
                break;
            }
        }
        if (!foundMap) {
            Map<String, Object> newMap = new HashMap<String, Object>();
            currentArray.add(newMap);
            currentMap = newMap;
        }
        currentArray = null;
    }

    /**
     * Sets the current map to work with by getting the last one from the current array.
     * @throws JSONFormattingException if the array is null, or empty
     */
    protected void setCurrentMapFromCurrentArray() {
        try {
            currentMap = currentArray.get(currentArray.size() - 1);
        } catch (NullPointerException e) {
            throw new JSONFormattingException(
                    "Nowhere to put this reference - the current array is null", e);
        } catch (IndexOutOfBoundsException e) {
            throw new JSONFormattingException(
                    "This array is empty - is the view in the wrong order?", e);
        }
        currentArray = null;
    }

    // Adds an attribute value to the node we are currently positioned on,
    // resolving the node from the current array first if necessary.
    private void addAttributeToCurrentNode(Path attributePath, ResultElement cell) {
        if (currentMap == null) {
            try {
                setCurrentMapFromCurrentArray(cell);
            } catch (JSONFormattingException e) {
                throw new JSONFormattingException("While adding processing " + attributePath, e);
            }
        }
        addFieldToMap(cell, attributePath, currentMap);
    }

    /**
     * Adds an intermediate reference to the current node.
     * @param referencePath The path representing the reference.
     * @throws JSONFormattingException if the node has this key set to an incompatible value.
     */
    protected void addReferenceToCurrentNode(Path referencePath) {

        if (currentMap == null) {
            setCurrentMapFromCurrentArray();
        }

        String key = referencePath.getLastElement();
        if (currentMap.containsKey(key)) {
            Object storedItem = currentMap.get(key);
            boolean storedItemIsMap = (storedItem instanceof Map<?, ?>);
            if (!storedItemIsMap) {
                throw new JSONFormattingException("Trying to set a reference on " + key
                        + ", but this node " + currentMap + " already "
                        + "has this key set, and to something other than a map "
                        + "(" + storedItem.getClass().getName() + ": " + storedItem + ")");
            }
            @SuppressWarnings("unchecked") // the checking happens just above.
            Map<String, Object> foundMap = (Map<String, Object>) currentMap.get(key);
            if (!foundMap.containsKey(ID_KEY)) {
                throw new JSONFormattingException(
                        "This node is not fully initialised: it has no objectId");
            }
            currentMap = foundMap;
        } else {
            // First time this reference is seen: create the nested node and
            // record its declared class.
            Map<String, Object> newMap = new HashMap<String, Object>();
            ReferenceDescriptor refDesc =
                (ReferenceDescriptor) referencePath.getEndFieldDescriptor();
            newMap.put(CLASS_KEY, refDesc.getReferencedClassDescriptor().getUnqualifiedName());
            currentMap.put(key, newMap);
            currentMap = newMap;
        }
    }

    /**
     * Adds a new list, representing a collection to the current node (map)
     * @param collectionPath The path representing the collection.
     * @throws JSONFormattingException if the current node is not initialised, or is already set
     * with an incompatible value.
     */
    @SuppressWarnings("unchecked")
    protected void addCollectionToCurrentNode(Path collectionPath) {

        if (currentMap == null) {
            setCurrentMapFromCurrentArray();
        }
        String key = collectionPath.getLastElement();
        if (!currentMap.containsKey(ID_KEY)) {
            throw new JSONFormattingException(
                    "This node is not properly initialised (it doesn't have an objectId) - "
                    + "is the view in the right order?");
        }
        if (currentMap.containsKey(key)) {
            Object storedValue = currentMap.get(key);
            if (!(storedValue instanceof List<?>)) {
                throw new JSONFormattingException("Trying to set a collection on " + key
                        + ", but this node " + currentMap + " already "
                        + "has this key set to something other than a list "
                        + "(" + storedValue.getClass().getName() + ": " + storedValue + ")");
            }
        } else {
            List<Map<String, Object>> newArray = new ArrayList<Map<String, Object>>();
            currentMap.put(key, newArray);
        }
        currentArray = (List<Map<String, Object>>) currentMap.get(key);
        currentMap = null;
    }

    /**
     * Remove is not supported.
     */
    @Override
    public void remove() {
        throw new UnsupportedOperationException("Remove is not implemented in this class");
    }
}
| lgpl-2.1 |
sunblithe/qt-everywhere-opensource-src-4.7.1 | src/3rdparty/webkit/WebCore/generated/JSConsole.cpp | 14822 | /*
This file is part of the WebKit open source project.
This file has been generated by generate-bindings.pl. DO NOT MODIFY!
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public License
along with this library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "JSConsole.h"
#include "Console.h"
#include "ScriptCallStack.h"
#include <runtime/Error.h>
#include <wtf/GetPtr.h>
using namespace JSC;
namespace WebCore {
ASSERT_CLASS_FITS_IN_CELL(JSConsole);
/* Hash table */
static const HashTableValue JSConsoleTableValues[2] =
{
{ "profiles", DontDelete|ReadOnly, (intptr_t)static_cast<PropertySlot::GetValueFunc>(jsConsoleProfiles), (intptr_t)0 },
{ 0, 0, 0, 0 }
};
static JSC_CONST_HASHTABLE HashTable JSConsoleTable =
#if ENABLE(PERFECT_HASH_SIZE)
{ 0, JSConsoleTableValues, 0 };
#else
{ 2, 1, JSConsoleTableValues, 0 };
#endif
/* Hash table for prototype */
static const HashTableValue JSConsolePrototypeTableValues[18] =
{
{ "debug", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionDebug), (intptr_t)0 },
{ "error", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionError), (intptr_t)0 },
{ "info", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionInfo), (intptr_t)0 },
{ "log", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionLog), (intptr_t)0 },
{ "warn", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionWarn), (intptr_t)0 },
{ "dir", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionDir), (intptr_t)0 },
{ "dirxml", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionDirxml), (intptr_t)0 },
{ "trace", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionTrace), (intptr_t)0 },
{ "assert", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionAssert), (intptr_t)1 },
{ "count", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionCount), (intptr_t)0 },
{ "markTimeline", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionMarkTimeline), (intptr_t)0 },
{ "profile", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionProfile), (intptr_t)1 },
{ "profileEnd", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionProfileEnd), (intptr_t)1 },
{ "time", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionTime), (intptr_t)1 },
{ "timeEnd", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionTimeEnd), (intptr_t)1 },
{ "group", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionGroup), (intptr_t)0 },
{ "groupEnd", DontDelete|Function, (intptr_t)static_cast<NativeFunction>(jsConsolePrototypeFunctionGroupEnd), (intptr_t)0 },
{ 0, 0, 0, 0 }
};
static JSC_CONST_HASHTABLE HashTable JSConsolePrototypeTable =
#if ENABLE(PERFECT_HASH_SIZE)
{ 511, JSConsolePrototypeTableValues, 0 };
#else
{ 65, 63, JSConsolePrototypeTableValues, 0 };
#endif
const ClassInfo JSConsolePrototype::s_info = { "ConsolePrototype", 0, &JSConsolePrototypeTable, 0 };
JSObject* JSConsolePrototype::self(ExecState* exec, JSGlobalObject* globalObject)
{
return getDOMPrototype<JSConsole>(exec, globalObject);
}
bool JSConsolePrototype::getOwnPropertySlot(ExecState* exec, const Identifier& propertyName, PropertySlot& slot)
{
return getStaticFunctionSlot<JSObject>(exec, &JSConsolePrototypeTable, this, propertyName, slot);
}
bool JSConsolePrototype::getOwnPropertyDescriptor(ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)
{
return getStaticFunctionDescriptor<JSObject>(exec, &JSConsolePrototypeTable, this, propertyName, descriptor);
}
const ClassInfo JSConsole::s_info = { "Console", 0, &JSConsoleTable, 0 };
JSConsole::JSConsole(NonNullPassRefPtr<Structure> structure, JSDOMGlobalObject* globalObject, PassRefPtr<Console> impl)
: DOMObjectWithGlobalPointer(structure, globalObject)
, m_impl(impl)
{
}
JSConsole::~JSConsole()
{
forgetDOMObject(this, impl());
}
JSObject* JSConsole::createPrototype(ExecState* exec, JSGlobalObject* globalObject)
{
return new (exec) JSConsolePrototype(JSConsolePrototype::createStructure(globalObject->objectPrototype()));
}
bool JSConsole::getOwnPropertySlot(ExecState* exec, const Identifier& propertyName, PropertySlot& slot)
{
return getStaticValueSlot<JSConsole, Base>(exec, &JSConsoleTable, this, propertyName, slot);
}
bool JSConsole::getOwnPropertyDescriptor(ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)
{
return getStaticValueDescriptor<JSConsole, Base>(exec, &JSConsoleTable, this, propertyName, descriptor);
}
JSValue jsConsoleProfiles(ExecState* exec, JSValue slotBase, const Identifier&)
{
JSConsole* castedThis = static_cast<JSConsole*>(asObject(slotBase));
return castedThis->profiles(exec);
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionDebug(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->debug(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionError(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->error(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionInfo(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->info(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionLog(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->log(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionWarn(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->warn(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionDir(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->dir(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionDirxml(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->dirxml(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionTrace(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->trace(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionAssert(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 1);
bool condition = args.at(0).toBoolean(exec);
imp->assertCondition(condition, &callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionCount(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->count(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionMarkTimeline(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 0);
imp->markTimeline(&callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionProfile(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 1);
const UString& title = valueToStringWithUndefinedOrNullCheck(exec, args.at(0));
imp->profile(title, &callStack);
return jsUndefined();
}
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionProfileEnd(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
UNUSED_PARAM(args);
if (!thisValue.inherits(&JSConsole::s_info))
return throwError(exec, TypeError);
JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
Console* imp = static_cast<Console*>(castedThisObj->impl());
ScriptCallStack callStack(exec, args, 1);
const UString& title = valueToStringWithUndefinedOrNullCheck(exec, args.at(0));
imp->profileEnd(title, &callStack);
return jsUndefined();
}
// Host function for console.time(title): starts a named timer. No call stack is
// captured here; the matching console.timeEnd() captures it.
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionTime(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSConsole::s_info))
        return throwError(exec, TypeError);
    JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
    Console* imp = static_cast<Console*>(castedThisObj->impl());
    const UString& title = valueToStringWithUndefinedOrNullCheck(exec, args.at(0));
    imp->time(title);
    return jsUndefined();
}
// Host function for console.timeEnd(title): stops the named timer started by
// console.time() and reports the elapsed time together with the call stack.
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionTimeEnd(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSConsole::s_info))
        return throwError(exec, TypeError);
    JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
    Console* imp = static_cast<Console*>(castedThisObj->impl());
    ScriptCallStack callStack(exec, args, 1);
    const UString& title = valueToStringWithUndefinedOrNullCheck(exec, args.at(0));
    imp->timeEnd(title, &callStack);
    return jsUndefined();
}
// Host function for console.group(): opens a new logging group labeled from the
// captured call stack.
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionGroup(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSConsole::s_info))
        return throwError(exec, TypeError);
    JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
    Console* imp = static_cast<Console*>(castedThisObj->impl());
    ScriptCallStack callStack(exec, args, 0);
    imp->group(&callStack);
    return jsUndefined();
}
// Host function for console.groupEnd(): closes the current logging group. Takes no
// arguments, so no call stack is captured.
JSValue JSC_HOST_CALL jsConsolePrototypeFunctionGroupEnd(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
    UNUSED_PARAM(args);
    if (!thisValue.inherits(&JSConsole::s_info))
        return throwError(exec, TypeError);
    JSConsole* castedThisObj = static_cast<JSConsole*>(asObject(thisValue));
    Console* imp = static_cast<Console*>(castedThisObj->impl());
    imp->groupEnd();
    return jsUndefined();
}
// Wraps a native Console in its (possibly cached) JSConsole JS wrapper.
JSC::JSValue toJS(JSC::ExecState* exec, JSDOMGlobalObject* globalObject, Console* object)
{
    return getDOMObjectWrapper<JSConsole>(exec, globalObject, object);
}
// Unwraps a JS value back to its native Console. Any value that is not a JSConsole
// wrapper (including non-objects) yields 0.
Console* toConsole(JSC::JSValue value)
{
    if (!value.inherits(&JSConsole::s_info))
        return 0;
    JSConsole* wrapper = static_cast<JSConsole*>(asObject(value));
    return wrapper->impl();
}
}
| lgpl-2.1 |
halfspiral/tuxguitar | TuxGuitar/src/org/herac/tuxguitar/app/view/dialog/documentation/TGDocumentationDialogController.java | 369 | package org.herac.tuxguitar.app.view.dialog.documentation;
import org.herac.tuxguitar.app.view.controller.TGOpenViewController;
import org.herac.tuxguitar.app.view.controller.TGViewContext;
public class TGDocumentationDialogController implements TGOpenViewController {

	/**
	 * Creates the documentation dialog for the given view context and shows it.
	 */
	public void openView(TGViewContext context) {
		TGDocumentationDialog dialog = new TGDocumentationDialog(context);
		dialog.show();
	}
}
| lgpl-2.1 |
liuwenf/moose | test/src/kernels/Advection0.C | 2054 | /****************************************************************/
/* DO NOT MODIFY THIS HEADER */
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* (c) 2010 Battelle Energy Alliance, LLC */
/* ALL RIGHTS RESERVED */
/* */
/* Prepared by Battelle Energy Alliance, LLC */
/* Under Contract No. DE-AC07-05ID14517 */
/* With the U. S. Department of Energy */
/* */
/* See COPYRIGHT for full restrictions */
/****************************************************************/
#include "Advection0.h"
// Declares the input parameters for the Advection0 kernel: the six coefficients of
// the prescribed linear velocity field v = (Au + Bu*x + Cu*y, Av + Bv*x + Cv*y, 0).
// NOTE(review): params.set<Real>(...) presets the values (marking them as set),
// rather than declaring them with addParam defaults — TODO confirm this is intended.
template <>
InputParameters
validParams<Advection0>()
{
  InputParameters params = validParams<Kernel>();
  params.set<Real>("Au") = 1.0;
  params.set<Real>("Bu") = 1.0;
  params.set<Real>("Cu") = 1.0;
  params.set<Real>("Av") = 1.0;
  params.set<Real>("Bv") = 1.0;
  params.set<Real>("Cv") = 1.0;
  return params;
}
// Caches the six velocity-field coefficients from the input parameters.
Advection0::Advection0(const InputParameters & parameters) : Kernel(parameters)
{
  _Au = getParam<Real>("Au");
  _Bu = getParam<Real>("Bu");
  _Cu = getParam<Real>("Cu");
  _Av = getParam<Real>("Av");
  _Bv = getParam<Real>("Bv");
  _Cv = getParam<Real>("Cv");
}
Real
Advection0::computeQpResidual()
{
  // Evaluate the prescribed linear velocity at this quadrature point:
  // v = (Au + Bu*x + Cu*y, Av + Bv*x + Cv*y, 0).
  const Real x = _q_point[_qp](0);
  const Real y = _q_point[_qp](1);
  VectorValue<Number> vel(_Au + _Bu * x + _Cu * y,
                          _Av + _Bv * x + _Cv * y,
                          0.0);
  // Weak-form advection residual: -(test_i, v . grad(u)) at this qp.
  return -_test[_i][_qp] * vel * _grad_u[_qp];
}
Real
Advection0::computeQpJacobian()
{
  // Same prescribed velocity field as the residual, evaluated at this qp.
  const Real x = _q_point[_qp](0);
  const Real y = _q_point[_qp](1);
  VectorValue<Number> vel(_Au + _Bu * x + _Cu * y,
                          _Av + _Bv * x + _Cv * y,
                          0.0);
  // Jacobian contribution: derivative of the residual w.r.t. dof j, i.e.
  // -(test_i, v . grad(phi_j)) at this qp.
  return -_test[_i][_qp] * vel * _grad_phi[_j][_qp];
}
| lgpl-2.1 |
wildfly/wildfly | security/subsystem/src/main/java/org/jboss/as/security/SecuritySubsystemParser_2_0.java | 9559 | /*
* Copyright 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.as.security;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.OP_ADDR;
import static org.jboss.as.controller.parsing.ParseUtils.invalidAttributeValue;
import static org.jboss.as.controller.parsing.ParseUtils.missingRequired;
import static org.jboss.as.controller.parsing.ParseUtils.requireNoAttributes;
import static org.jboss.as.controller.parsing.ParseUtils.requireNoContent;
import static org.jboss.as.controller.parsing.ParseUtils.requireNoNamespaceAttribute;
import static org.jboss.as.controller.parsing.ParseUtils.unexpectedAttribute;
import static org.jboss.as.controller.parsing.ParseUtils.unexpectedElement;
import static org.jboss.as.security.Constants.ELYTRON_KEY_MANAGER;
import static org.jboss.as.security.Constants.ELYTRON_KEY_STORE;
import static org.jboss.as.security.Constants.ELYTRON_REALM;
import static org.jboss.as.security.Constants.ELYTRON_TRUST_MANAGER;
import static org.jboss.as.security.Constants.ELYTRON_TRUST_STORE;
import static org.jboss.as.security.elytron.ElytronIntegrationResourceDefinitions.APPLY_ROLE_MAPPERS;
import static org.jboss.as.security.elytron.ElytronIntegrationResourceDefinitions.LEGACY_JAAS_CONFIG;
import static org.jboss.as.security.elytron.ElytronIntegrationResourceDefinitions.LEGACY_JSSE_CONFIG;
import java.util.EnumSet;
import java.util.List;
import javax.xml.stream.XMLStreamException;
import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.PathElement;
import org.jboss.as.controller.operations.common.Util;
import org.jboss.dmr.ModelNode;
import org.jboss.staxmapper.XMLExtendedStreamReader;
/**
* This class implements a parser for the 2.0 version of legacy security subsystem. It extends the {@link SecuritySubsystemParser}
* and adds support for the {@code elytron-integration} section of the schema.
*
* @author <a href="mailto:sguilhen@redhat.com">Stefan Guilhen</a>
*/
class SecuritySubsystemParser_2_0 extends SecuritySubsystemParser {
/** Package-visible: instances are created by the subsystem's parser registration, not by clients. */
protected SecuritySubsystemParser_2_0() {
}
/**
 * Handles the elements added in schema version 2.0. The {@code elytron-integration}
 * element (with its {@code security-realms} and {@code tls} children) is parsed here;
 * every other element is delegated to the 1.x parser in the superclass.
 */
@Override
protected void readElement(final XMLExtendedStreamReader reader, final Element element, final List<ModelNode> operations,
                           final PathAddress subsystemPath, final ModelNode subsystemNode) throws XMLStreamException {
    switch(element) {
        case ELYTRON_INTEGRATION: {
            // elytron-integration carries no attributes, only child elements.
            requireNoAttributes(reader);
            while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {
                final Element innerElement = Element.forName(reader.getLocalName());
                switch (innerElement) {
                    case SECURITY_REALMS: {
                        parseSecurityRealms(reader, operations, subsystemPath);
                        break;
                    }
                    case TLS: {
                        parseTLS(reader, operations, subsystemPath);
                        break;
                    }
                    default: {
                        throw unexpectedElement(reader);
                    }
                }
            }
            break;
        }
        default: {
            // Elements unchanged since 1.x are handled by the base parser.
            super.readElement(reader, element, operations, subsystemPath, subsystemNode);
        }
    }
}
/**
 * Parses the {@code security-realms} element: a sequence of {@code elytron-realm}
 * children, each of which produces one add operation.
 */
protected void parseSecurityRealms(final XMLExtendedStreamReader reader, final List<ModelNode> operations,
                                   final PathAddress subsystemPath) throws XMLStreamException {
    requireNoAttributes(reader);
    while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {
        final Element element = Element.forName(reader.getLocalName());
        switch (element) {
            case ELYTRON_REALM: {
                parseElytronRealm(reader, operations, subsystemPath);
                break;
            }
            default: {
                throw unexpectedElement(reader);
            }
        }
    }
}
/**
 * Parses one {@code elytron-realm} element and appends the corresponding
 * {@code elytron-realm=NAME} add operation. The {@code name} and
 * {@code legacy-jaas-config} attributes are required; {@code apply-role-mappers}
 * is optional. The element must have no content.
 */
protected void parseElytronRealm(final XMLExtendedStreamReader reader, final List<ModelNode> operations,
                                 final PathAddress subsystemPath) throws XMLStreamException {
    final ModelNode elytronRealmAddOperation = Util.createAddOperation();
    PathElement elytronRealmPath = null;
    // Attributes still in this set after the loop were missing from the XML.
    final EnumSet<Attribute> required = EnumSet.of(Attribute.NAME, Attribute.LEGACY_JAAS_CONFIG);
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        requireNoNamespaceAttribute(reader, i);
        final String value = reader.getAttributeValue(i);
        final Attribute attribute = Attribute.forName(reader.getAttributeLocalName(i));
        required.remove(attribute);
        switch (attribute) {
            case NAME: {
                if (value == null || value.length() == 0) {
                    throw invalidAttributeValue(reader, i);
                }
                // name becomes the last element of the resource address.
                elytronRealmPath = PathElement.pathElement(ELYTRON_REALM, value);
                break;
            }
            case LEGACY_JAAS_CONFIG: {
                LEGACY_JAAS_CONFIG.parseAndSetParameter(value, elytronRealmAddOperation, reader);
                break;
            }
            case APPLY_ROLE_MAPPERS: {
                APPLY_ROLE_MAPPERS.parseAndSetParameter(value, elytronRealmAddOperation, reader);
                break;
            }
            default:
                throw unexpectedAttribute(reader, i);
        }
    }
    if (!required.isEmpty()) {
        throw missingRequired(reader, required);
    }
    elytronRealmAddOperation.get(OP_ADDR).set(subsystemPath.append(elytronRealmPath).toModelNode());
    operations.add(elytronRealmAddOperation);
    requireNoContent(reader);
}
/**
 * Parses the {@code tls} element: each child maps to one TLS entity resource type
 * (key-store, trust-store, key-manager, trust-manager), all sharing the same
 * attribute shape and therefore the same {@link #parseTLSEntity} logic.
 */
protected void parseTLS(final XMLExtendedStreamReader reader, final List<ModelNode> operations,
                        final PathAddress subsystemPath) throws XMLStreamException {
    requireNoAttributes(reader);
    while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {
        final Element element = Element.forName(reader.getLocalName());
        switch (element) {
            case ELYTRON_KEY_STORE: {
                parseTLSEntity(reader, operations, subsystemPath, ELYTRON_KEY_STORE);
                break;
            }
            case ELYTRON_TRUST_STORE: {
                parseTLSEntity(reader, operations, subsystemPath, ELYTRON_TRUST_STORE);
                break;
            }
            case ELYTRON_KEY_MANAGER: {
                parseTLSEntity(reader, operations, subsystemPath, ELYTRON_KEY_MANAGER);
                break;
            }
            case ELYTRON_TRUST_MANAGER: {
                parseTLSEntity(reader, operations, subsystemPath, ELYTRON_TRUST_MANAGER);
                break;
            }
            default: {
                throw unexpectedElement(reader);
            }
        }
    }
}
/**
 * Parses one TLS entity element ({@code tlsEntityName} selects the resource type in
 * the address) and appends its add operation. {@code name} and
 * {@code legacy-jsse-config} are required attributes; the element has no content.
 */
protected void parseTLSEntity(final XMLExtendedStreamReader reader, final List<ModelNode> operations,
                              final PathAddress subsystemPath, final String tlsEntityName) throws XMLStreamException {
    final ModelNode elytronTLSEntityAddOperation = Util.createAddOperation();
    PathElement elytronTLSEntityPath = null;
    // Attributes still in this set after the loop were missing from the XML.
    final EnumSet<Attribute> required = EnumSet.of(Attribute.NAME, Attribute.LEGACY_JSSE_CONFIG);
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        requireNoNamespaceAttribute(reader, i);
        final String value = reader.getAttributeValue(i);
        final Attribute attribute = Attribute.forName(reader.getAttributeLocalName(i));
        required.remove(attribute);
        switch (attribute) {
            case NAME: {
                if (value == null || value.length() == 0) {
                    throw invalidAttributeValue(reader, i);
                }
                elytronTLSEntityPath = PathElement.pathElement(tlsEntityName, value);
                break;
            }
            case LEGACY_JSSE_CONFIG: {
                LEGACY_JSSE_CONFIG.parseAndSetParameter(value, elytronTLSEntityAddOperation, reader);
                break;
            }
            default:
                throw unexpectedAttribute(reader, i);
        }
    }
    if (!required.isEmpty()) {
        throw missingRequired(reader, required);
    }
    elytronTLSEntityAddOperation.get(OP_ADDR).set(subsystemPath.append(elytronTLSEntityPath).toModelNode());
    operations.add(elytronTLSEntityAddOperation);
    requireNoContent(reader);
}
} | lgpl-2.1 |
ftahmed/LIA_RAL | LIA_SpkTools/src/AccumulateStat.cpp | 21093 | /*
This file is part of LIA_RAL which is a set of software based on ALIZE
toolkit for speaker recognition. ALIZE toolkit is required to use LIA_RAL.
LIA_RAL project is a development project was initiated by the computer
science laboratory of Avignon / France (Laboratoire Informatique d'Avignon -
LIA) [http://lia.univ-avignon.fr <http://lia.univ-avignon.fr/>]. Then it
was supported by two national projects of the French Research Ministry:
- TECHNOLANGUE program [http://www.technolangue.net]
- MISTRAL program [http://mistral.univ-avignon.fr]
LIA_RAL is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or any later version.
LIA_RAL is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with LIA_RAL.
If not, see [http://www.gnu.org/licenses/].
The LIA team as well as the LIA_RAL project team wants to highlight the
limits of voice authentication in a forensic context.
The "Person Authentification by Voice: A Need of Caution" paper
proposes a good overview of this point (cf. "Person
Authentification by Voice: A Need of Caution", Bonastre J.F.,
Bimbot F., Boe L.J., Campbell J.P., Douglas D.A., Magrin-
chagnolleau I., Eurospeech 2003, Genova].
The conclusion of the paper of the paper is proposed bellow:
[Currently, it is not possible to completely determine whether the
similarity between two recordings is due to the speaker or to other
factors, especially when: (a) the speaker does not cooperate, (b) there
is no control over recording equipment, (c) recording conditions are not
known, (d) one does not know whether the voice was disguised and, to a
lesser extent, (e) the linguistic content of the message is not
controlled. Caution and judgment must be exercised when applying speaker
recognition techniques, whether human or automatic, to account for these
uncontrolled factors. Under more constrained or calibrated situations,
or as an aid for investigative purposes, judicious application of these
techniques may be suitable, provided they are not considered as infallible.
At the present time, there is no scientific process that enables one to
uniquely characterize a persones voice or to identify with absolute
certainty an individual from his or her voice.]
Copyright (C) 2004-2010
Laboratoire d'informatique d'Avignon [http://lia.univ-avignon.fr]
LIA_RAL admin [alize@univ-avignon.fr]
Jean-Francois Bonastre [jean-francois.bonastre@univ-avignon.fr]
*/
#if !defined(ALIZE_GeneralTools_cpp)
#define ALIZE_GeneralTools_cpp
#include <iostream>
#include <fstream> // pour outFile
#include <cstdio> // pour printf()
#include <cassert> // pour le debug pratique
#include <cmath>
#include <liatools.h>
#if defined(THREAD)
#include <pthread.h>
#endif
//-------------------------------------------------------------------------
//-- Accumulate the log likelihood for the selected frames and a given model
// Accumulates the log-likelihood of nbFrames consecutive frames (starting at
// idxBeginFrame) into llkAcc for the accumulator's model.
void accumulateStatLLK(StatServer &ss,FeatureServer &fs,MixtureStat &llkAcc, unsigned long idxBeginFrame,unsigned long nbFrames,
		       Config &config)
{
  // Position the feature server on the first requested frame (loads the buffer if needed).
  fs.seekFeature(idxBeginFrame);
  for (unsigned long n=0;n<nbFrames;n++){
    Feature f;
    if (!fs.readFeature(f))
      cout<<"No more features"<<endl;
    double frameLLK=llkAcc.computeAndAccumulateLLK(f);
    if (debug) cout << "likelihood Frame["<<idxBeginFrame+n<<"]="<<frameLLK<<endl;
  }
}
// one a Segment
// Segment overload: converts the segment's file-relative begin index into a
// buffer-global index, then accumulates LLK over the segment's frames.
void accumulateStatLLK(StatServer &ss,FeatureServer &fs,MixtureStat &llkAcc,Seg* seg,Config &config)
{
  unsigned long begin=seg->begin()+fs.getFirstFeatureIndexOfASource(seg->sourceName()); // Find the index of the first frame of the file in the buffer
  accumulateStatLLK(ss,fs,llkAcc,begin,seg->length(),config);
}
// One on Cluster
// Cluster overload: accumulates LLK over every segment in the cluster.
void accumulateStatLLK(StatServer &ss,FeatureServer &fs,MixtureStat &llkAcc,SegCluster &selectedSegments,Config &config)
{
  Seg* seg;                                                     // reset the reader at the begin of the input stream
  selectedSegments.rewind();
  while((seg=selectedSegments.getSeg())!=NULL)                  // For each of the selected segments
    accumulateStatLLK(ss,fs,llkAcc,seg,config);
}
//-------------------------------------------------------------------------
//-- accumulate the statistic for EM, using a current accumulator (wordl)
//-- CAUTION: THE ACCUMULATOR SHOULD BE INITIALIZED (resetEM) BEFORE THE CALL
//-- A GET CALL SHOULD BE DONE AFTER THE CALL
// One a part of the feature stream
// Accumulates EM sufficient statistics over nbFrames frames starting at
// idxBeginFrame and returns the summed per-frame log-likelihood.
// The caller must have called resetEM() before and getEM() after this call.
double accumulateStatEM(StatServer &ss,FeatureServer &fs,MixtureStat &emAcc,unsigned long idxBeginFrame,unsigned long nbFrames,Config &config){
  double llkSum=0.0;
  fs.seekFeature(idxBeginFrame);  // position on the first frame (loads buffer if needed)
  for (unsigned long n=0;n<nbFrames;n++){
    Feature f;
    fs.readFeature(f);
    // computeAndAccumulateEM returns the frame likelihood; fold its log into the sum.
    llkSum+=log(emAcc.computeAndAccumulateEM(f));
  }
  return llkSum;
}
// one a Segment
// Segment overload: maps the segment to buffer-global indices and delegates.
double accumulateStatEM(StatServer &ss,FeatureServer &fs,MixtureStat &emAcc,Seg* seg,Config &config){
  unsigned long begin=seg->begin()+fs.getFirstFeatureIndexOfASource(seg->sourceName()); // Find the index of the first frame of the file in the buffer
  return accumulateStatEM(ss,fs,emAcc,begin,seg->length(),config);
}
// One on Cluster
// Single-threaded EM accumulation over all segments of a cluster; returns the
// total log-likelihood.
double accumulateStatEMUnThreaded(StatServer &ss,FeatureServer &fs,MixtureStat &emAcc,SegCluster &selectedSegments,Config &config){
  double llkAcc=0.0;
  Seg* seg;                                                     // reset the reader at the begin of the input stream
  selectedSegments.rewind();
  while((seg=selectedSegments.getSeg())!=NULL)                  // For each of the selected segments
    llkAcc+=accumulateStatEM(ss,fs,emAcc,seg,config);
  return llkAcc;
}
// Choose if its threaded or not here
// Dispatcher: uses the threaded EM accumulation when compiled with THREAD support
// and config requests numThread > 0; otherwise falls back to the serial version.
double accumulateStatEM(StatServer &ss,FeatureServer &fs,MixtureStat &emAcc,SegCluster &selectedSegments,Config &config){
  double llkAcc=0.0;
#if defined(THREAD)
  if (config.existsParam("numThread") && config.getParam("numThread").toLong() >0) llkAcc=accumulateStatEMThreaded(ss,fs,emAcc,selectedSegments,config); // Compute EM statistics
  else llkAcc=accumulateStatEMUnThreaded(ss,fs,emAcc,selectedSegments,config);
#else
  llkAcc= accumulateStatEMUnThreaded(ss,fs,emAcc,selectedSegments,config);          // Compute EM statistics
#endif
  return llkAcc;
}
// With weight on a cluster
// Weighted EM accumulation over a cluster: the weight is serialized into the config
// (parameter "weightedEM") so the worker code — including the threaded path, which
// reads it back via config.getParam("weightedEM").toDouble() — can apply it to every
// frame. Returns weight * total log-likelihood.
double accumulateStatEM(StatServer &ss,FeatureServer &fs,MixtureStat &emAcc,SegCluster &selectedSegments,double & weight, Config &config){
  double llkAcc=0.0;
  char cW[200];
  // snprintf bounds the write (the %f expansion easily fits in 200 bytes anyway).
  snprintf(cW,sizeof(cW),"%f",weight);
  String sW(cW);
  // BUGFIX: the parameter value must be the serialized weight itself. The previous
  // code passed the literal string "weight", which downstream .toDouble() could not
  // parse, so the intended weight was never applied.
  config.setParam("weightedEM",sW);
  if (verbose) cout << "Weighted EM with weight" << sW << endl;
  llkAcc= accumulateStatEM(ss,fs,emAcc,selectedSegments,config);
  return (weight*llkAcc);
}
// *****************************************************************************************************
// ****************************** Threaded Version of Accumulate StatEM *********************************
// **************************** Nicolas Scheffer, 16/02/2007 **********************************************
#if defined(THREAD)
pthread_mutex_t mutexsum; // lock variable: serializes feature reading across worker threads
bool stop=false; // flag variable once end of SegCluster is reached
//******************** Data strucutre of thread **************************
// Per-thread argument block passed to EMthread(). Each worker gets its own
// MixtureStat accumulator and llkAcc slot; the SegCluster/FeatureServer/Config
// are shared and must only be touched under mutexsum.
struct EMthread_data{
  SegCluster *selectedSegments;
  MixtureStat *emAcc;
  FeatureServer *fs;
  Config *config;
  double *llkAcc;
  // NOTE(review): featThreadBuff appears unused — EMthread() declares its own
  // local buffer of the same name; candidate for removal after verification.
  RefVector <Feature> *featThreadBuff;
  unsigned long nThread;
};
// *********************** Routine **************************************
// Worker routine: repeatedly (a) takes the lock, pulls the next segment from the
// shared cluster and copies its frames into a private buffer, then (b) releases the
// lock and accumulates EM statistics on the copy. Terminates when the cluster is
// exhausted (sets the shared 'stop' flag so siblings exit too).
static void *EMthread(void *threadarg) {
  struct EMthread_data *my_data;
  my_data = (struct EMthread_data *) threadarg;
  SegCluster &selectedSegments=*(my_data->selectedSegments);
  MixtureStat &emAcc=*(my_data->emAcc);
  FeatureServer &fs=*(my_data->fs);
  Config &config=*(my_data->config);
  unsigned long nT=my_data->nThread;
  // Per-frame weight, read back from the config set by the weighted entry point.
  double weight=1.0;
  if (config.existsParam("weightedEM")) weight=config.getParam("weightedEM").toDouble();
  // **************** Main loop
  Seg* seg;
  unsigned long cnt=0;
  while(!stop) {                                         // For each of the selected segments
    // ***************** Reading features is locked
    // NOTE(review): this local buffer shadows the (unused) featThreadBuff field in
    // EMthread_data — presumably intentional, but worth confirming.
    RefVector <Feature> featThreadBuff;
    pthread_mutex_lock (&mutexsum);
    // Re-check under the lock: another thread may have hit end-of-cluster meanwhile.
    if (stop) {pthread_mutex_unlock (&mutexsum);break;}
    cnt++;
    if ((seg=selectedSegments.getSeg())==NULL) {
      pthread_mutex_unlock (&mutexsum);
      if (verboseLevel >1) cout<<"Thread:"<<nT << " broke" << endl;
      stop=true;
      break;
    }
    unsigned long begin=seg->begin()+fs.getFirstFeatureIndexOfASource(seg->sourceName());
    fs.seekFeature(begin);
    for (unsigned long j=0;j<seg->length();j++){
      featThreadBuff.addObject(*(new Feature),j);
      fs.readFeature(featThreadBuff[j]);
    }
    pthread_mutex_unlock (&mutexsum);
    // ***************** End lock
    // ***************** Accumulate EM on the frameTab (lock-free: private data only)
    for (unsigned long j=0;j<seg->length();j++)
      (*(my_data->llkAcc))+=log(emAcc.computeAndAccumulateEM(featThreadBuff[j],weight));
    featThreadBuff.deleteAllObjects();
  }
  if (verboseLevel > 2) cout << "(AccumulateStatEM) Number of segments treated by thread ["<<nT<<"]="<<cnt<<endl;
  pthread_exit((void*) 0);
  return(void*)0;
}
// Split selected cluster in nSplit clusters
// Distributes the segments of selectedSegments across nSplit clusters of roughly
// equal total frame count (the last cluster absorbs any overflow). Segments are
// copied, never moved.
void splitSegCluster(SegCluster & selectedSegments,unsigned long nSplit,RefVector <SegCluster> &vSelected) {
  Seg* seg;
  unsigned long t=0;
  unsigned long offset=(totalFrame(selectedSegments))/nSplit;
  if (verbose) cout << "(AccumulateStatEM) Splitting cluster: Total["<<totalFrame(selectedSegments)<<"] "<<offset<<" frames/cluster"<<endl;
  unsigned long limit=offset-1;
  selectedSegments.rewind();
  unsigned long cnt=0;
  while((seg=selectedSegments.getSeg())!=NULL) {
    cnt+=seg->length();
    if (cnt>=limit) {
      if (verbose) cout<<"SegCluster["<<t<<"] full with "<<totalFrame(vSelected[t])<<" frames"<<endl;
      // FIX(readability): the original one-liner "if (t!=nSplit-1) t++;cnt=0;" made
      // cnt=0 look conditional; it always runs when the limit is reached. Behavior
      // is unchanged, only the formatting is made explicit.
      if (t!=nSplit-1)
        t++;
      cnt=0;
    }
    vSelected[t].addCopy(*seg);
  }
}
//***************************Threaded version ***************************
//***************************Threaded version ***************************
// Spawns numThread workers that pull segments from the shared cluster, each
// accumulating into a private MixtureStat; the per-thread accumulators and
// log-likelihoods are fused into emAcc / the returned total after joining.
double accumulateStatEMThreaded(StatServer &ss,FeatureServer &fs,MixtureStat &emAcc,SegCluster &selectedSegments,Config &config){
  unsigned long NUM_THREADS=1;
  if(config.existsParam("numThread")) NUM_THREADS = config.getParam("numThread").toLong();
  stop=false;
  if (verbose) cout << "(AccumulateStatEM) Threaded version of EM with "<<NUM_THREADS<<" threads"<<endl;
  double llkAcc=0.0;
  SegServer segServer;
  RefVector <MixtureStat> vEmAcc;
  RealVector <double> vLlkAcc;
  vLlkAcc.setSize(NUM_THREADS);
  vLlkAcc.setAllValues(0.0);
  selectedSegments.rewind();
  // One private EM accumulator per worker, all over the same mixture.
  for(unsigned long t=0; t<NUM_THREADS; t++){
    vEmAcc.addObject(ss.createAndStoreMixtureStat(emAcc.getMixture()),t);
    vEmAcc[t].resetEM();
  }
  struct EMthread_data *thread_data_array = new EMthread_data[NUM_THREADS];
  pthread_t *threads = new pthread_t[NUM_THREADS];
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
  pthread_mutex_init(&mutexsum, NULL);
  int rc;
  // FIX: pthread_join writes a void*; the original passed (void**)&status with an
  // int status, which clobbers adjacent stack memory on LP64 platforms.
  void *threadStatus;
  for(unsigned long t=0; t<NUM_THREADS; t++){
    thread_data_array[t].selectedSegments=&selectedSegments;
    thread_data_array[t].emAcc=&vEmAcc[t];
    thread_data_array[t].fs=&fs;
    thread_data_array[t].config=&config;
    thread_data_array[t].llkAcc=&vLlkAcc[t];
    thread_data_array[t].nThread=t;
    if (verbose) cout<<"(AccumulateStatEM) Creating thread n["<< t<< "]"<<endl;
    rc = pthread_create(&threads[t], &attr, EMthread, (void *)&thread_data_array[t]);
    if (rc) throw Exception("ERROR; return code from pthread_create() is ",__FILE__,rc);
  }
  if (verbose) cout<<"(AccumulateStatEM) Computing on thread"<<endl;
  pthread_attr_destroy(&attr);
  for(unsigned long t=0; t<NUM_THREADS; t++) {
    rc = pthread_join(threads[t], &threadStatus);
    if (rc) throw Exception("ERROR; return code from pthread_join() is ",__FILE__,rc);
    if (verbose) cout <<"(AccumulateStatEM) Completed join with thread ["<<t<<"] status["<<(long)threadStatus<<"]"<<endl;
  }
  if (verbose) cout <<"(AccumulateStatEM) Fuse EM Accs "<<endl;
  unsigned long total=0;
  for(unsigned long t=0; t<NUM_THREADS; t++) {
    if (verbose) cout << "Number of frames treated by thread ["<<t<<"]="<<vEmAcc[t].getEMFeatureCount()<<endl;
    total+=vEmAcc[t].getEMFeatureCount();
    emAcc.addAccEM(vEmAcc[t]);
    ss.deleteMixtureStat(vEmAcc[t]);
    llkAcc+=vLlkAcc[t];
  }
  cout << "Total Number of frames in threads: "<<total<<endl;
  pthread_mutex_destroy(&mutexsum);
  // FIX: these arrays were allocated with new[]; releasing them with free() is
  // undefined behavior. Use delete[] to match the allocation.
  delete [] thread_data_array;
  delete [] threads;
  return llkAcc;
}
#endif
// **************************** End ********************************
/*Alex Preti things
double accumulateStatEM(StatServer & ss, FeatureServer & fs,
MixtureStat & emAcc, unsigned long idxBeginFrame, unsigned long nbFrames,
double &weight, Config & config)
{
double llkAcc = 0.0;
fs.seekFeature(idxBeginFrame); // go to the frame in the buffer (and load it if needed)
for (unsigned long n = 0; n < nbFrames; n++)
{
Feature f;
fs.readFeature(f);
llkAcc += log(emAcc.computeAndAccumulateEM(f, weight));
}
return (llkAcc * weight);
}
// one a Segment
double accumulateStatEM(StatServer & ss, FeatureServer & fs,
MixtureStat & emAcc, Seg * seg, double &weight, Config & config)
{
unsigned long begin = seg->begin() + fs.getFirstFeatureIndexOfASource(seg->sourceName()); // Find the index of the first frame of the file in the buffer
return accumulateStatEM(ss, fs, emAcc, begin, seg->length(), weight,
config);
}
// One on Cluster
double accumulateStatEM(StatServer & ss, FeatureServer & fs,
MixtureStat & emAcc, SegCluster & selectedSegments, double &weight,
Config & config)
{
double llkAcc = 0.0;
Seg *seg; // reset the reader at the begin of the input stream
selectedSegments.rewind();
while ((seg = selectedSegments.getSeg()) != NULL) // For each of the selected segments
llkAcc += accumulateStatEM(ss, fs, emAcc, seg, weight, config);
return llkAcc;
}*/
//-------------------------------------------------------------------------
//-- Accumulate the log likelihood for the selected frames and a given model, support weighted Feature Server (A.P)
//-------------------------------------------------------------------------
//-- Accumulate the log likelihood for the selected frames and a given model,
//-- applying the given per-frame weight (weighted Feature Server variant, A.P).
void accumulateStatLLK(StatServer & ss, FeatureServer & fs,
  MixtureStat & llkAcc, unsigned long idxBeginFrame, unsigned long nbFrames,
  double &weight, Config & config)
{
  fs.seekFeature(idxBeginFrame);  // go to the frame in the buffer (and load it if needed)
  for (unsigned long n = 0; n < nbFrames; n++)
    {
      Feature f;
      if (fs.readFeature(f) == false)
	cout << "No more features" << endl;
      double toto = llkAcc.computeAndAccumulateLLK(f, weight);
      if (debug)
	cout << "likelihood Frame[" << idxBeginFrame +
	  n << "]=" << toto << endl;
    }
}
// one a Segment, support weighted Feature Server (A.P)
// Weighted segment overload: maps the segment to buffer-global indices and delegates.
void accumulateStatLLK(StatServer & ss, FeatureServer & fs,
  MixtureStat & llkAcc, Seg * seg, double &weight, Config & config)
{
  unsigned long begin = seg->begin() + fs.getFirstFeatureIndexOfASource(seg->sourceName());	// Find the index of the first frame of the file in the buffer
  accumulateStatLLK(ss, fs, llkAcc, begin, seg->length(), weight, config);
}
// One on Cluster, support weighted Feature Server (A.P)
// Weighted cluster overload: accumulates weighted LLK over every segment.
void accumulateStatLLK(StatServer & ss, FeatureServer & fs,
  MixtureStat & llkAcc, SegCluster & selectedSegments, double &weight,
  Config & config)
{
  Seg *seg;			// reset the reader at the begin of the input stream
  selectedSegments.rewind();
  while ((seg = selectedSegments.getSeg()) != NULL)	// For each of the selected segments
    accumulateStatLLK(ss, fs, llkAcc, seg, weight, config);
}
//-------------------------------------------------------------------------
//-- accumulate the statistic on the frames (mean and cov), using a current
//-- FrameAcc
//--
//-- CAUTION: A COMPUTE_ALL AND A GET CALL SHOULD BE DONE AFTER THE CALLS
// Accumulates raw frame statistics (mean/covariance accumulator) over nbFrames
// consecutive frames starting at idxBeginFrame. Callers must run the accumulator's
// compute/get steps afterwards.
void accumulateStatFrame(FrameAcc & frameAcc,FeatureServer &fs,
		    unsigned long idxBeginFrame,unsigned long nbFrames,Config &config)
{
  fs.seekFeature(idxBeginFrame);  // position on the first frame (loads buffer if needed)
  Feature current;
  unsigned long remaining=nbFrames;
  while (remaining-- > 0){
    fs.readFeature(current);
    frameAcc.accumulate(current);
  }
}
// one a Segment
// Segment overload: maps the segment to buffer-global indices and delegates.
void accumulateStatFrame(FrameAcc & frameAcc,FeatureServer &fs,Seg* seg,Config &config)
{
  unsigned long begin=seg->begin()+fs.getFirstFeatureIndexOfASource(seg->sourceName()); // Find the index of the first frame of the file in the buffer
  accumulateStatFrame(frameAcc,fs,begin,seg->length(),config);
}
// One on Cluster
// Cluster overload: accumulates frame statistics over every segment.
void accumulateStatFrame(FrameAcc &frameAcc,FeatureServer &fs,SegCluster &selectedSegments,Config &config)
{
  Seg* seg;                                                     // reset the reader at the begin of the input stream
  selectedSegments.rewind();
  while((seg=selectedSegments.getSeg())!=NULL)                  // For each of the selected segments
    accumulateStatFrame(frameAcc,fs,seg,config);
}
//-------------------------------------------------------------------------
//-- accumulate the statistic on the frames raw distribution of each coefficient
//-- CAUTION:
// *THE ACCUMULATOR SHOULD BE INITIALIZED BEFORE THE FIRST CALL
// initHistoTab()
// *THE HISTO SHOULD BE COMPUTED BEFORE TO USE THE STAT
// computeHistoTab()
// *The histoTab should be freezen after use
// freezeHistoTab();
//
// Init the Histo array (one by coeff)
// Area of one histogram bin: bin height (count) times bin width.
double areaHisto(const Histo & histo,unsigned long bin)
{
  double width=histo.higherBound(bin)-histo.lowerBound(bin);
  return histo.count(bin)*width;
}
// Bin area scaled by the observed mass: nonObserved is the fraction of
// probability mass assumed to fall outside the histogram.
double areaHisto(const Histo & histo,unsigned long bin, double nonObserved)
{
  return areaHisto(histo,bin)*(1-nonObserved) ;
}
// Fraction of the interval [lower, higher] covered by val. Degenerate (or
// inverted) intervals, where higher-lower falls below a tiny epsilon, map to 1.
double linearInterpolation(double val,double lower,double higher){
  static const double kEps=1e-21;
  const double span=higher-lower;
  if (span<kEps)
    return 1;
  return (val-lower)/span;
}
// Releases the per-coefficient histogram array allocated by initHistoTab().
void freezeHistoTab(Histo* &histoT)
{
  delete []histoT;
  // Null the caller's pointer so a stale handle cannot be dereferenced or
  // double-freed after release.
  histoT=NULL;
}
// Allocates one nbBins-bin histogram per feature coefficient (size of them) into
// histoT. Must be paired with freezeHistoTab() to release the array.
void initHistoTab(Histo* &histoT,unsigned long size, unsigned long nbBins)
{
  Histo tmp(nbBins);
  histoT=new Histo[size];
  for (unsigned long i=0;i<size;i++) histoT[i]=tmp;  // copy-assign the nbBins configuration into each slot
}
// Finalizes every per-coefficient histogram; required before reading counts/bounds.
void computeHistoTab(Histo* histoT,unsigned long size)
{
  for (unsigned long i=0;i<size;i++) histoT[i].computeHisto();
}
// Routes each coefficient of nbFrames consecutive frames (from idxBeginFrame)
// into its per-coefficient histogram histoT[c].
void accumulateHistoFrame(Histo *histoT,FeatureServer &fs,
		    unsigned long idxBeginFrame,unsigned long nbFrames,Config &config)
{
  fs.seekFeature(idxBeginFrame);  // position on the first frame (loads buffer if needed)
  const unsigned long dim=fs.getVectSize();
  Feature frame;
  for (unsigned long n=0;n<nbFrames;n++){
    fs.readFeature(frame);
    for (unsigned long c=0;c<dim;c++)
      histoT[c].accumulateValue(frame[c]);
  }
}
// one a Segment
// Segment overload: maps the segment to buffer-global indices and delegates.
void accumulateHistoFrame(Histo *histoT,FeatureServer &fs,Seg* seg,Config &config)
{
  unsigned long begin=seg->begin()+fs.getFirstFeatureIndexOfASource(seg->sourceName());// Find the index of the first frame of the file in the buffer
  accumulateHistoFrame(histoT,fs,begin,seg->length(),config);
}
// One on Cluster
// Cluster overload: accumulates histogram statistics over every segment.
void accumulateHistoFrame(Histo *histoT,FeatureServer &fs,SegCluster &selectedSegments,Config &config)
{
  Seg* seg;                                                     // reset the reader at the begin of the input stream
  selectedSegments.rewind();
  while((seg=selectedSegments.getSeg())!=NULL)                  // For each of the selected segments
    accumulateHistoFrame(histoT,fs,seg,config);
}
#endif //!defined(ALIZE_AccumulateStat_cpp)
| lgpl-3.0 |
Hemofektik/Druckwelle | 3rdparty/GDAL/include/xercesc/util/XMLRegisterCleanup.hpp | 3088 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Id: XMLRegisterCleanup.hpp 568078 2007-08-21 11:43:25Z amassari $
*/
#if !defined(XMLREGISTERCLEANUP_HPP)
#define XMLREGISTERCLEANUP_HPP
#include <xercesc/util/Mutexes.hpp>
XERCES_CPP_NAMESPACE_BEGIN
//
// For internal use only.
//
// This class is used by the platform utilities class to support
// reinitialisation of global/static data which is lazily created.
// Since that data is widely spread out the platform utilities
// class cannot know about them directly. So, the code that creates such
// objects creates an registers a cleanup for the object. The platform
// termination call will iterate the list and delete the objects.
//
// N.B. These objects need to be statically allocated. I couldn't think
// of a neat way of ensuring this - can anyone else?
// Registry node for a single lazily-initialised static object's cleanup
// callback. Instances are linked into an intrusive doubly-linked list
// (m_nextCleanup/m_prevCleanup) that is walked at XMLPlatformUtils::Terminate.
class XMLUTIL_EXPORT XMLRegisterCleanup
{
public :
    // The cleanup function to be called on XMLPlatformUtils::Terminate()
    typedef void (*XMLCleanupFn)();

    // Invokes the registered cleanup callback; presumably a no-op when no
    // callback is registered (implementation not visible here — confirm).
    void doCleanup();

    // This function is called during initialisation of static data to
    // register a function to be called on XMLPlatformUtils::Terminate.
    // It gives an object that uses static data an opportunity to reset
    // such data.
    void registerCleanup(XMLCleanupFn cleanupFn);

    // This function can be called either from XMLPlatformUtils::Terminate
    // to state that the cleanup has been performed and should not be
    // performed again, or from code that you have written that determines
    // that cleanup is no longer necessary.
    void unregisterCleanup();

    // The default constructor sets a state that ensures that this object
    // will do nothing
    XMLRegisterCleanup();

private:
    // -----------------------------------------------------------------------
    //  Unimplemented constructors and operators
    //  (declared private and left undefined to forbid copying — pre-C++11
    //  idiom equivalent to "= delete")
    // -----------------------------------------------------------------------
    XMLRegisterCleanup(const XMLRegisterCleanup&);
    XMLRegisterCleanup& operator=(const XMLRegisterCleanup&);

    // This is the cleanup function to be called
    XMLCleanupFn m_cleanupFn;

    // These are list pointers to the next/prev cleanup function to be called
    XMLRegisterCleanup *m_nextCleanup, *m_prevCleanup;

    // This function reinitialises the object to the default state
    void resetCleanup();
};
XERCES_CPP_NAMESPACE_END
#endif
| unlicense |
diegopacheco/scala-playground | caliban-graphql-fun/src/main/resources/gateway/node_modules/@apollo/protobufjs/index.js | 85 | // full library entry point.
"use strict";
module.exports = require("./src/index");
| unlicense |
nyuszika7h/youtube-dl | youtube_dl/extractor/nytimes.py | 8904 | # coding: utf-8
from __future__ import unicode_literals
import hmac
import hashlib
import base64
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
js_to_json,
mimetype2ext,
parse_iso8601,
remove_start,
)
class NYTimesBaseIE(InfoExtractor):
    """Shared extraction logic for nytimes.com videos.

    Fetches metadata for a numeric video id from the NYTimes "VHS" video
    API (signed v3 endpoint first, unauthenticated v2 as fallback) and
    converts it into a youtube-dl info dict.
    """

    # Signing key for the v3 API; reverse engineered from the player JS
    # (see the comment inside _extract_video_from_id).
    _SECRET = b'pX(2MbU2);4N{7J8)>YwKRJ+/pQ3JkiU2Q^V>mFYv6g6gYvt6v'

    def _extract_video_from_id(self, video_id):
        """Download metadata for `video_id` and build the info dict."""
        # Authorization generation algorithm is reverse engineered from `signer` in
        # http://graphics8.nytimes.com/video/vhs/vhs-2.x.min.js
        path = '/svc/video/api/v3/video/' + video_id
        # HMAC-SHA512 over "<path>:vhs" with the shared secret, hex-encoded;
        # the hex string is then base64'd into the Authorization header below.
        hm = hmac.new(self._SECRET, (path + ':vhs').encode(), hashlib.sha512).hexdigest()
        video_data = self._download_json('http://www.nytimes.com' + path, video_id, 'Downloading video JSON', headers={
            'Authorization': 'NYTV ' + base64.b64encode(hm.encode()).decode(),
            'X-NYTV': 'vhs',
        }, fatal=False)
        if not video_data:
            # v3 failed (non-fatal above): fall back to the older v2 API,
            # which raises on failure.
            video_data = self._download_json(
                'http://www.nytimes.com/svc/video/api/v2/video/' + video_id,
                video_id, 'Downloading video JSON')
        title = video_data['headline']

        def get_file_size(file_size):
            # The API reports file sizes either as a plain int or as a
            # {'value': ...} dict depending on the rendition.
            if isinstance(file_size, int):
                return file_size
            elif isinstance(file_size, dict):
                return int(file_size.get('value', 0))
            else:
                return None

        urls = []  # de-duplication of rendition URLs
        formats = []
        for video in video_data.get('renditions', []):
            video_url = video.get('url')
            format_id = video.get('type')
            if not video_url or format_id == 'thumbs' or video_url in urls:
                continue
            urls.append(video_url)
            ext = mimetype2ext(video.get('mimetype')) or determine_ext(video_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id=format_id or 'hls', fatal=False))
            elif ext == 'mpd':
                # DASH manifests are deliberately skipped (extraction kept
                # below, commented out, for reference).
                continue
            # formats.extend(self._extract_mpd_formats(
            #     video_url, video_id, format_id or 'dash', fatal=False))
            else:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'vcodec': video.get('videoencoding') or video.get('video_codec'),
                    'width': int_or_none(video.get('width')),
                    'height': int_or_none(video.get('height')),
                    'filesize': get_file_size(video.get('file_size') or video.get('fileSize')),
                    'tbr': int_or_none(video.get('bitrate'), 1000) or None,
                    'ext': ext,
                })
        self._sort_formats(formats, ('height', 'width', 'filesize', 'tbr', 'fps', 'format_id'))

        thumbnails = []
        for image in video_data.get('images', []):
            image_url = image.get('url')
            if not image_url:
                continue
            thumbnails.append({
                # NOTE(review): assumes image_url is site-relative without a
                # leading slash — confirm against current API responses.
                'url': 'http://www.nytimes.com/' + image_url,
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            })

        publication_date = video_data.get('publication_date')
        # The [:-8] slice strips a trailing portion of the date string before
        # ISO-8601 parsing; presumably a fixed-width timezone/suffix — verify.
        timestamp = parse_iso8601(publication_date[:-8]) if publication_date else None

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('summary'),
            'timestamp': timestamp,
            'uploader': video_data.get('byline'),
            'duration': float_or_none(video_data.get('duration'), 1000),
            'formats': formats,
            'thumbnails': thumbnails,
        }
class NYTimesIE(NYTimesBaseIE):
    """Extractor for direct nytimes.com video pages and legacy graphics8 embeds.

    The numeric video id is captured straight from the URL, so no page
    download is needed — extraction is delegated to the base class.
    """
    _VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263',
        'md5': 'd665342765db043f7e225cff19df0f2d',
        'info_dict': {
            'id': '100000002847155',
            'ext': 'mov',
            'title': 'Verbatim: What Is a Photocopier?',
            'description': 'md5:93603dada88ddbda9395632fdc5da260',
            'timestamp': 1398631707,
            'upload_date': '20140427',
            'uploader': 'Brett Weiner',
            'duration': 419,
        }
    }, {
        'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The id in the URL is all the shared base-class extractor needs.
        video_id = self._match_id(url)
        return self._extract_video_from_id(video_id)
class NYTimesArticleIE(NYTimesBaseIE):
    """Extractor for nytimes.com article pages that embed a video or podcast.

    Downloads the article HTML, looks for an embedded video id first, and
    otherwise falls back to parsing inline podcast (FlexTypes) JSON.
    """
    # The (.(?<!video)) group rejects /video/ URLs so NYTimesIE handles them.
    _VALID_URL = r'https?://(?:www\.)?nytimes\.com/(.(?<!video))*?/(?:[^/]+/)*(?P<id>[^.]+)(?:\.html)?'
    _TESTS = [{
        'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0',
        'md5': 'e2076d58b4da18e6a001d53fd56db3c9',
        'info_dict': {
            'id': '100000003628438',
            'ext': 'mov',
            'title': 'New Minimum Wage: $70,000 a Year',
            'description': 'Dan Price, C.E.O. of Gravity Payments, surprised his 120-person staff by announcing that he planned over the next three years to raise the salary of every employee to $70,000 a year.',
            'timestamp': 1429033037,
            'upload_date': '20150414',
            'uploader': 'Matthew Williams',
        }
    }, {
        'url': 'http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html',
        'md5': 'e0d52040cafb07662acf3c9132db3575',
        'info_dict': {
            'id': '100000004709062',
            'title': 'The Run-Up: ‘He Was Like an Octopus’',
            'ext': 'mp3',
            'description': 'md5:fb5c6b93b12efc51649b4847fe066ee4',
            'series': 'The Run-Up',
            'episode': '‘He Was Like an Octopus’',
            'episode_number': 20,
            'duration': 2130,
        }
    }, {
        'url': 'http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html',
        'info_dict': {
            'id': '100000004709479',
            'title': 'The Rise of Hitler',
            'ext': 'mp3',
            'description': 'md5:bce877fd9e3444990cb141875fab0028',
            'creator': 'Pamela Paul',
            'duration': 3475,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
        'only_matching': True,
    }]

    def _extract_podcast_from_json(self, json, page_id, webpage):
        """Build an audio info dict from the page's inline podcast JSON.

        `json` is the raw (JS-flavoured) object literal scraped from the
        page; `webpage` is used only as a fallback description source.
        """
        podcast_audio = self._parse_json(
            json, page_id, transform_source=js_to_json)
        audio_data = podcast_audio['data']
        track = audio_data['track']
        episode_title = track['title']
        video_url = track['source']
        description = track.get('description') or self._html_search_meta(
            ['og:description', 'twitter:description'], webpage)
        podcast_title = audio_data.get('podcast', {}).get('title')
        title = ('%s: %s' % (podcast_title, episode_title)
                 if podcast_title else episode_title)
        episode = audio_data.get('podcast', {}).get('episode') or ''
        # Episode strings look like "Episode 20" — pull out the number.
        episode_number = int_or_none(self._search_regex(
            r'[Ee]pisode\s+(\d+)', episode, 'episode number', default=None))
        return {
            # Target ids carry an "FT" prefix that is stripped for the id.
            'id': remove_start(podcast_audio.get('target'), 'FT') or page_id,
            'url': video_url,
            'title': title,
            'description': description,
            'creator': track.get('credit'),
            'series': podcast_title,
            'episode': episode_title,
            'episode_number': episode_number,
            'duration': int_or_none(track.get('duration')),
        }

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)
        # Prefer an embedded video if the article has one.
        video_id = self._search_regex(
            r'data-videoid=["\'](\d+)', webpage, 'video id',
            default=None, fatal=False)
        if video_id is not None:
            return self._extract_video_from_id(video_id)
        # Otherwise look for inline podcast data (raises if absent).
        podcast_data = self._search_regex(
            (r'NYTD\.FlexTypes\.push\s*\(\s*({.+?})\s*\)\s*;\s*</script',
             r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'),
            webpage, 'podcast data')
        return self._extract_podcast_from_json(podcast_data, page_id, webpage)
| unlicense |
szegedim/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java | 6758 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
import com.google.protobuf.GeneratedMessage;
/**
* {@code PlacementConstraintToProtoConverter} generates a
* {@link PlacementConstraintProto} given a
* {@link PlacementConstraint.AbstractConstraint}.
*/
@Private
public class PlacementConstraintToProtoConverter
implements PlacementConstraint.Visitor<GeneratedMessage> {
private PlacementConstraint placementConstraint;
public PlacementConstraintToProtoConverter(
PlacementConstraint placementConstraint) {
this.placementConstraint = placementConstraint;
}
public PlacementConstraintProto convert() {
return (PlacementConstraintProto) placementConstraint.getConstraintExpr()
.accept(this);
}
@Override
public GeneratedMessage visit(SingleConstraint constraint) {
SimplePlacementConstraintProto.Builder sb =
SimplePlacementConstraintProto.newBuilder();
if (constraint.getScope() != null) {
sb.setScope(constraint.getScope());
}
sb.setMinCardinality(constraint.getMinCardinality());
sb.setMaxCardinality(constraint.getMaxCardinality());
if (constraint.getTargetExpressions() != null) {
for (TargetExpression target : constraint.getTargetExpressions()) {
sb.addTargetExpressions(
(PlacementConstraintTargetProto) target.accept(this));
}
}
SimplePlacementConstraintProto sProto = sb.build();
// Wrap around PlacementConstraintProto object.
PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
pb.setSimpleConstraint(sProto);
return pb.build();
}
@Override
public GeneratedMessage visit(TargetExpression target) {
PlacementConstraintTargetProto.Builder tb =
PlacementConstraintTargetProto.newBuilder();
tb.setTargetType(ProtoUtils.convertToProtoFormat(target.getTargetType()));
if (target.getTargetKey() != null) {
tb.setTargetKey(target.getTargetKey());
}
if (target.getTargetValues() != null) {
tb.addAllTargetValues(target.getTargetValues());
}
return tb.build();
}
@Override
public GeneratedMessage visit(TargetConstraint constraint) {
throw new YarnRuntimeException("Unexpected TargetConstraint found.");
}
@Override
public GeneratedMessage visit(CardinalityConstraint constraint) {
throw new YarnRuntimeException("Unexpected CardinalityConstraint found.");
}
private GeneratedMessage visitAndOr(
CompositeConstraint<AbstractConstraint> composite, CompositeType type) {
CompositePlacementConstraintProto.Builder cb =
CompositePlacementConstraintProto.newBuilder();
cb.setCompositeType(type);
for (AbstractConstraint c : composite.getChildren()) {
cb.addChildConstraints((PlacementConstraintProto) c.accept(this));
}
CompositePlacementConstraintProto cProto = cb.build();
// Wrap around PlacementConstraintProto object.
PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
pb.setCompositeConstraint(cProto);
return pb.build();
}
@Override
public GeneratedMessage visit(And constraint) {
return visitAndOr(constraint, CompositeType.AND);
}
@Override
public GeneratedMessage visit(Or constraint) {
return visitAndOr(constraint, CompositeType.OR);
}
@Override
public GeneratedMessage visit(DelayedOr constraint) {
CompositePlacementConstraintProto.Builder cb =
CompositePlacementConstraintProto.newBuilder();
cb.setCompositeType(CompositeType.DELAYED_OR);
for (TimedPlacementConstraint c : constraint.getChildren()) {
cb.addTimedChildConstraints(
(TimedPlacementConstraintProto) c.accept(this));
}
CompositePlacementConstraintProto cProto = cb.build();
// Wrap around PlacementConstraintProto object.
PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
pb.setCompositeConstraint(cProto);
return pb.build();
}
@Override
public GeneratedMessage visit(TimedPlacementConstraint constraint) {
TimedPlacementConstraintProto.Builder tb =
TimedPlacementConstraintProto.newBuilder();
tb.setDelayUnit(ProtoUtils.convertToProtoFormat(constraint.getDelayUnit()));
tb.setSchedulingDelay(constraint.getSchedulingDelay());
tb.setPlacementConstraint(
(PlacementConstraintProto) constraint.getConstraint().accept(this));
return tb.build();
}
}
| apache-2.0 |
xq262144/hue | apps/pig/src/pig/static/pig/js/pig.ko.js | 20273 | // Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Knockout model wrapping one script resource entry (type + value).
var Resource = function (resource) {
  this.type = ko.observable(resource.type);
  this.value = ko.observable(resource.value);
};
// Knockout model wrapping one name/value Hadoop configuration property.
var HadoopProperty = function (property) {
  this.name = ko.observable(property.name);
  this.value = ko.observable(property.value);
};

// Pig parameters share the same name/value shape, so reuse the constructor.
var PigParameter = HadoopProperty;
// Knockout view model for a single Pig script: editable fields, run state,
// and the scraping logic that derives submission parameters from the
// script text.
var PigScript = function (pigScript) {
  var self = this;

  self.id = ko.observable(pigScript.id);
  self.docId = ko.observable(pigScript.docId);
  self.isDesign = ko.observable(pigScript.isDesign);
  self.name = ko.observable(pigScript.name);
  self.can_write = ko.observable(pigScript.can_write);
  self.script = ko.observable(pigScript.script);
  // Short plain-text preview for list views (non-word chars collapsed).
  self.scriptSumup = ko.observable(pigScript.script.replace(/\W+/g, ' ').substring(0, 100));
  self.isRunning = ko.observable(false);
  self.selected = ko.observable(false);
  self.watchUrl = ko.observable("");
  self.actions = ko.observableArray([]);

  // Row click handler: toggles selection (bound with the row as `this`).
  self.handleSelect = function (row, e) {
    this.selected(!this.selected());
  };
  self.hovered = ko.observable(false);
  self.toggleHover = function (row, e) {
    this.hovered(!this.hovered());
  };

  self.parameters = ko.observableArray([]);
  ko.utils.arrayForEach(pigScript.parameters, function (parameter) {
    self.parameters.push(new PigParameter({name: parameter.name, value: parameter.value}));
  });
  self.addParameter = function () {
    self.parameters.push(new PigParameter({name: '', value: ''}));
    self.updateParentModel();
  };
  self.removeParameter = function () {
    self.parameters.remove(this);
    self.updateParentModel();
  };

  // Script text with single-line /* ... */ comments stripped, so commented
  // out $variables are not picked up as parameters.
  self.scriptContent = ko.computed(function() {
    return self.script().replace(/\/\* .+? \*\//g, ''); // Trim comments, no multiline
  });

  // Scrapes the script text for $variables, removes those covered by
  // %declare / macro definitions, applies %default values, then overlays
  // the explicitly configured parameters. Returns a name->value map.
  self.getParameters = function () {
    var params = {};
    var variables = self.scriptContent().match(/([^\\]|^)\$[^\d'"](\w*)/g);
    var declares = self.scriptContent().match(/%declare +([^ ])+/gi);
    var defaults = self.scriptContent().match(/%default +([^;])+/gi);
    var macro_defines = self.scriptContent().match(/define [^ ]+ *\(([^\)]*)\)/gi); // no multiline
    var macro_returns = self.scriptContent().match(/returns +([^\{]*)/gi); // no multiline

    if (variables) {
      $.each(variables, function(index, param) {
        var p = param.substring(param.indexOf('$') + 1);
        params[p] = '';
      });
    }
    if (declares) {
      // %declare'd names are resolved inside the script; not user inputs.
      $.each(declares, function(index, param) {
        param = param.match(/(\w+)/g);
        if (param && param.length >= 2) {
          delete params[param[1]];
        }
      });
    }
    if (defaults) {
      // %default gives a pre-filled value for the parameter.
      $.each(defaults, function(index, param) {
        var line = param.match(/(\w+)/g);
        if (line && line.length >= 2) {
          var name = line[1];
          params[name] = param.substring(param.indexOf(name) + name.length + 1);
        }
      });
    }
    if (macro_defines) {
      // Macro formal parameters are bound at macro call sites, not by the user.
      $.each(macro_defines, function(index, params_line) {
        var param_line = params_line.match(/(\w+)/g);
        if (param_line && param_line.length > 2) {
          $.each(param_line, function(index, param) {
            if (index >= 2) { // Skips define NAME
              delete params[param];
            }
          });
        }
      });
    }
    if (macro_returns) {
      $.each(macro_returns, function(index, params_line) {
        var param_line = params_line.match(/(\w+)/g);
        if (param_line) {
          $.each(param_line, function(index, param) {
            if (index >= 1) { // Skip returns
              delete params[param];
            }
          });
        }
      });
    }

    // Explicitly configured parameters win over anything scraped above.
    $.each(self.parameters(), function(index, param) {
      params[param.name()] = param.value();
    });
    return params;
  };

  self.hadoopProperties = ko.observableArray([]);
  ko.utils.arrayForEach(pigScript.hadoopProperties, function (property) {
    self.hadoopProperties.push(new HadoopProperty({name: property.name, value: property.value}));
  });
  self.addHadoopProperties = function () {
    self.hadoopProperties.push(new HadoopProperty({name: '', value: ''}));
    self.updateParentModel();
  };
  self.removeHadoopProperties = function () {
    self.hadoopProperties.remove(this);
    self.updateParentModel();
  };

  self.resources = ko.observableArray([]);
  ko.utils.arrayForEach(pigScript.resources, function (resource) {
    self.resources.push(new Resource({type: resource.type, value: resource.value}));
  });
  self.addResource = function () {
    self.resources.push(new Resource({type: 'file', value: ''}));
    self.updateParentModel();
  };
  self.removeResource = function () {
    self.resources.remove(this);
    self.updateParentModel();
  };

  // Back-reference to the owning PigViewModel, used to flag unsaved edits.
  self.parentModel = pigScript.parentModel;
  self.updateParentModel = function () {
    if (typeof self.parentModel != "undefined" && self.parentModel != null) {
      self.parentModel.isDirty(true);
    }
  }

  // Renaming a script marks the owning model dirty.
  self.name.subscribe(function (name) {
    self.updateParentModel();
  });
}
// Maps a raw workflow object from the dashboard JSON into the plain
// (non-observable) structure the dashboard templates bind against;
// rows are replaced wholesale on each refresh, so observables are not needed.
var Workflow = function (wf) {
  return {
    id: wf.id,
    scriptId: wf.scriptId,
    scriptContent: wf.scriptContent,
    lastModTime: wf.lastModTime,
    endTime: wf.endTime,
    status: wf.status,
    // getStatusClass (defined elsewhere) maps a workflow status to a
    // Bootstrap label/progress-bar CSS suffix.
    statusClass: "label " + getStatusClass(wf.status),
    isRunning: wf.isRunning,
    duration: wf.duration,
    appName: wf.appName,
    progress: wf.progress,
    progressPercent: wf.progressPercent,
    progressClass: "bar " + getStatusClass(wf.status, "bar-"),
    user: wf.user,
    absoluteUrl: wf.absoluteUrl,
    watchUrl: wf.watchUrl,
    canEdit: wf.canEdit,
    killUrl: wf.killUrl,
    created: wf.created,
    run: wf.run
  }
}
// Top-level Knockout view model for the Pig editor app. Holds the script
// list, the currently edited script, dashboard state, and all AJAX calls
// (save/run/stop/copy/delete) against the URLs passed in via `props`.
var PigViewModel = function (props) {
  var self = this;

  // Localized labels and server endpoint URLs injected by the page.
  self.LABELS = props.labels;
  self.LIST_SCRIPTS = props.listScripts;
  self.SAVE_URL = props.saveUrl;
  self.RUN_URL = props.runUrl;
  self.STOP_URL = props.stopUrl;
  self.COPY_URL = props.copyUrl;
  self.DELETE_URL = props.deleteUrl;

  self.isLoading = ko.observable(false);
  self.allSelected = ko.observable(false);
  self.submissionVariables = ko.observableArray([]);

  self.scripts = ko.observableArray([]);
  self.filteredScripts = ko.observableArray(self.scripts());

  self.runningScripts = ko.observableArray([]);
  self.completedScripts = ko.observableArray([]);
  self.isDashboardLoaded = false;

  // True when the current script has unsaved edits.
  self.isDirty = ko.observable(false);

  // Template for a brand-new, unsaved script (id -1 means "not persisted").
  var _defaultScript = {
    id: -1,
    docId: -1,
    name: self.LABELS.NEW_SCRIPT_NAME,
    script: self.LABELS.NEW_SCRIPT_CONTENT,
    parameters: self.LABELS.NEW_SCRIPT_PARAMETERS,
    resources: self.LABELS.NEW_SCRIPT_RESOURCES,
    hadoopProperties: self.LABELS.NEW_SCRIPT_HADOOP_PROPERTIES,
    parentModel: self,
    can_write: true
  };

  self.currentScript = ko.observable(new PigScript(_defaultScript));
  // Script pending display while the "discard changes?" dialog is open.
  self.loadingScript = null;
  self.currentDeleteType = ko.observable("");

  self.selectedScripts = ko.computed(function () {
    return ko.utils.arrayFilter(self.scripts(), function (script) {
      return script.selected();
    });
  }, self);

  self.selectedScript = ko.computed(function () {
    return self.selectedScripts()[0];
  }, self);

  self.selectAll = function () {
    self.allSelected(! self.allSelected());
    ko.utils.arrayForEach(self.scripts(), function (script) {
      script.selected(self.allSelected());
    });
    return true;
  };

  self.getScriptById = function (id) {
    var _s = null;
    ko.utils.arrayForEach(self.scripts(), function (script) {
      if (script.id() == id) {
        _s = script;
      }
    });
    return _s;
  }

  // Case-insensitive name filter over design scripts only.
  self.filterScripts = function (filter) {
    self.filteredScripts(ko.utils.arrayFilter(self.scripts(), function (script) {
      return script.isDesign() && script.name().toLowerCase().indexOf(filter.toLowerCase()) > -1
    }));
  };

  self.loadScript = function (id) {
    var _s = self.getScriptById(id);
    if (_s != null) {
      self.currentScript(_s);
    }
    else {
      self.currentScript(new PigScript(_defaultScript));
    }
  }

  // Ask for confirmation before discarding unsaved edits.
  self.confirmNewScript = function () {
    if (self.isDirty()) {
      showConfirmModal();
    }
    else {
      self.newScript();
    }
  };

  // Called from the confirmation dialog: proceed to the pending script
  // if one was queued, otherwise start a fresh one.
  self.confirmScript = function () {
    if (self.loadingScript != null){
      self.viewScript(self.loadingScript);
    }
    else {
      self.newScript();
    }
  };

  self.newScript = function () {
    self.loadingScript = null;
    self.currentScript(new PigScript(_defaultScript));
    self.isDirty(false);
    $("#confirmModal").modal("hide");
    $(document).trigger("loadEditor");
    $(document).trigger("showEditor");
    $(document).trigger("clearLogs");
  };

  self.editScript = function (script) {
    $(document).trigger("showEditor");
  };

  self.editScriptProperties = function (script) {
    $(document).trigger("showProperties");
  };

  self.showScriptLogs = function (script) {
    $(document).trigger("showLogs");
  };

  self.confirmViewScript = function (script) {
    if (self.isDirty()) {
      self.loadingScript = script;
      showConfirmModal();
    }
    else {
      self.viewScript(script);
    }
  };

  self.viewScript = function (script) {
    self.loadingScript = null;
    self.currentScript(script);
    self.isDirty(false);
    $("#confirmModal").modal("hide");
    $(document).trigger("loadEditor");
    $(document).trigger("showEditor");
  };

  // Saving a script that still has the placeholder name first prompts
  // for a real name.
  self.saveScript = function () {
    if (self.LABELS.NEW_SCRIPT_NAME == self.currentScript().name()){
      showNameModal();
    }
    else {
      $("#nameModal").modal("hide");
      callSave(self.currentScript());
      self.isDirty(false);
    }
  };

  self.runScript = function () {
    $("#withLogs").empty();
    callRun(self.currentScript());
  };

  self.copyScript = function () {
    callCopy(self.currentScript());
    // NOTE(review): uses the global `viewModel` rather than `self` — works
    // only because the page exposes this instance globally; confirm.
    viewModel.isDirty(true);
  };

  self.confirmDeleteScript = function () {
    self.currentDeleteType("single");
    showDeleteModal();
  };

  self.stopScript = function () {
    callStop(self.currentScript());
  };

  // "Run" from the list view: promote the selection to current, then run.
  self.listRunScript = function () {
    self.currentScript(self.selectedScript());
    self.runOrShowSubmissionModal();
  };

  self.listCopyScript = function () {
    callCopy(self.selectedScript());
  };

  self.listConfirmDeleteScripts = function () {
    self.currentDeleteType("multiple");
    showDeleteModal();
  };

  self.deleteScripts = function () {
    var ids = [];
    if (self.currentDeleteType() == "single") {
      ids.push(self.currentScript().id());
    }
    if (self.currentDeleteType() == "multiple") {
      $(self.selectedScripts()).each(function (index, script) {
        ids.push(script.id());
      });
    }
    callDelete(ids);
  };

  // Reloads the script list from the server and rebuilds the models.
  self.updateScripts = function () {
    $.getJSON(self.LIST_SCRIPTS, function (data) {
      self.scripts(ko.utils.arrayMap(data, function (script) {
        script.parentModel = self;
        return new PigScript(script);
      }));
      self.filteredScripts(self.scripts());
      $(document).trigger("scriptsRefreshed");
    });
  };

  // Splits the dashboard payload into running vs. completed workflows.
  self.updateDashboard = function (workflows) {
    self.isDashboardLoaded = true;
    var koWorkflows = ko.utils.arrayMap(workflows, function (wf) {
      return new Workflow(wf);
    });
    self.runningScripts(ko.utils.arrayFilter(koWorkflows, function (wf) {
      return wf.isRunning
    }));
    self.completedScripts(ko.utils.arrayFilter(koWorkflows, function (wf) {
      return !wf.isRunning
    }));
  }

  // If the script declares parameters, collect values in a modal first;
  // otherwise run immediately.
  self.runOrShowSubmissionModal = function runOrShowSubmissionModal() {
    var script = self.currentScript();
    if (! $.isEmptyObject(script.getParameters())) {
      self.submissionVariables.removeAll();
      $.each(script.getParameters(), function (key, value) {
        self.submissionVariables.push({'name': key, 'value': value});
      });
      $("#runScriptBtn").button("reset");
      $("#runScriptBtn").attr("data-loading-text", $("#runScriptBtn").text() + " ...");
      $("#submitModal").modal({
        keyboard: true,
        show: true
      });
    } else {
      self.runScript();
    }
  };

  self.showStopModal = function showStopModal() {
    $("#stopScriptBtn").button("reset");
    $("#stopScriptBtn").attr("data-loading-text", $("#stopScriptBtn").text() + " ...");
    $("#stopModal").modal({
      keyboard: true,
      show: true
    });
  }

  // Opens the HDFS file chooser; `this` is the bound resource-path observable.
  self.showFileChooser = function showFileChooser() {
    var inputPath = this;
    var path = inputPath.value().substr(0, inputPath.value().lastIndexOf("/"));
    $("#filechooser").jHueFileChooser({
      initialPath: path,
      onFileChoose: function (filePath) {
        inputPath.value(filePath);
        $("#chooseFile").modal("hide");
      },
      createFolder: false
    });
    $("#chooseFile").modal("show");
  };

  function showDeleteModal() {
    $(".deleteMsg").addClass("hide");
    if (self.currentDeleteType() == "single") {
      $(".deleteMsg.single").removeClass("hide");
    }
    if (self.currentDeleteType() == "multiple") {
      if (self.selectedScripts().length > 1) {
        $(".deleteMsg.multiple").removeClass("hide");
      }
      else {
        $(".deleteMsg.single").removeClass("hide");
      }
    }
    $("#deleteModal").modal({
      keyboard: true,
      show: true
    });
  }

  // NOTE(review): this private declaration shadows the earlier
  // self.showStopModal for bare showStopModal() calls, and it reads
  // self.currentStopType which is never defined on this model — looks
  // like dead or broken code; confirm before removing.
  function showStopModal() {
    $(".stopMsg").addClass("hide");
    if (self.currentStopType() == "single") {
      $(".stopMsg.single").removeClass("hide");
    }
    if (self.currentStopType() == "multiple") {
      if (self.selectedScripts().length > 1) {
        $(".stopMsg.multiple").removeClass("hide");
      } else {
        $(".stopMsg.single").removeClass("hide");
      }
    }
    $("#stopModal").modal({
      keyboard: true,
      show: true
    });
  }

  function showConfirmModal() {
    $("#confirmModal").modal({
      keyboard: true,
      show: true
    });
  }

  function showNameModal() {
    $("#nameModal").modal({
      keyboard: true,
      show: true
    });
  }

  // Copies the server-assigned ids onto a freshly saved script model.
  function updateScript(script, data) {
    script.id(data.id);
    script.docId(data.docId);
  }

  // POSTs the script to the save endpoint and refreshes the list.
  function callSave(script) {
    $(document).trigger("saving");
    $.post(self.SAVE_URL,
      {
        id: script.id(),
        name: script.name(),
        script: script.script(),
        parameters: ko.toJSON(script.parameters()),
        resources: ko.toJSON(script.resources()),
        hadoopProperties: ko.toJSON(script.hadoopProperties()),
      },
      function (data) {
        updateScript(self.currentScript(), data);
        self.updateScripts();
        $(document).trigger("saved");
      }, "json").fail( function(xhr, textStatus, errorThrown) {
        $(document).trigger("error", xhr.responseText);
      });
  }

  // Submits the script for execution and starts the log-refresh cycle.
  function callRun(script) {
    self.currentScript(script);
    $(document).trigger("clearLogs");
    script.isRunning(true);
    script.actions([]);
    $(document).trigger("showLogs");
    $(document).trigger("running");
    $("#submitModal").modal("hide");
    $.post(self.RUN_URL,
      {
        id: script.id(),
        name: script.name(),
        script: script.script(),
        parameters: ko.toJSON(script.parameters()),
        submissionVariables: ko.utils.stringifyJson(self.submissionVariables()),
        resources: ko.toJSON(script.resources()),
        hadoopProperties: ko.toJSON(script.hadoopProperties())
      },
      function (data) {
        // Running an unsaved script persists it server-side first; pick up
        // the newly assigned id.
        if (data.id && self.currentScript().id() != data.id){
          updateScript(self.currentScript(), data);
          $(document).trigger("loadEditor");
        }
        script.isRunning(true);
        script.watchUrl(data.watchUrl);
        $(document).trigger("startLogsRefresh");
        self.updateScripts();
      }, "json").fail( function(xhr, textStatus, errorThrown) {
        $(document).trigger("error", xhr.responseText);
      });
  }

  function callStop(script) {
    $(document).trigger("stopping");
    $.post(self.STOP_URL, {
      id: script.id()
    },
    function (data) {
      $(document).trigger("stopped");
      $("#stopModal").modal("hide");
    }, "json"
    ).fail(function () {
      self.currentScript().isRunning(false);
      $(document).trigger("stopError");
      $(document).trigger("stopped");
      $("#stopModal").modal("hide");
    });
  }

  // Server-side copy; the copy becomes the current script in the editor.
  function callCopy(script) {
    $.post(self.COPY_URL,
      {
        id: script.id()
      },
      function (data) {
        data.parentModel = self;
        self.currentScript(new PigScript(data));
        $(document).trigger("loadEditor");
        self.updateScripts();
      }, "json");
  }

  function callDelete(ids) {
    // If the open script is among the deleted, reset the editor first.
    if (ids.indexOf(self.currentScript().id()) > -1) {
      self.currentScript(new PigScript(_defaultScript));
      $(document).trigger("loadEditor");
    }
    $.post(self.DELETE_URL, {
      ids: ids.join(",")
    },
    function (data) {
      self.updateScripts();
      $("#deleteModal").modal("hide");
      viewModel.isDirty(false);
    }, "json");
  }

  // Opens a previously submitted workflow in the editor with live logs.
  self.viewSubmittedScript = function (workflow) {
    self.loadScript(workflow.scriptId);
    self.currentScript().script(workflow.scriptContent);
    self.currentScript().isRunning(true);
    self.currentScript().watchUrl(workflow.watchUrl);
    $(document).trigger("loadEditor");
    $(document).trigger("clearLogs");
    $(document).trigger("startLogsRefresh");
    $(document).trigger("showLogs");
  };

  self.showLogsInterval = -1;
  // Auto-scroll flag: stays true while the user is at (or near) the bottom.
  self.showLogsAtEnd = true;

  // Polls the workflow watch URL every second and appends new log lines
  // to the logs modal while the workflow is running.
  self.showLogs = function (workflow) {
    window.clearInterval(self.showLogsInterval);
    $("#logsModal pre").scroll(function () {
      self.showLogsAtEnd = $(this).scrollTop() + $(this).height() + 20 >= $(this)[0].scrollHeight;
    });
    if (workflow.isRunning) {
      $("#logsModal i").removeClass("hide");
      $("#logsModal pre").addClass("hide");
      $("#logsModal").modal({
        keyboard: true,
        show: true
      });
      $("#logsModal").on("hide", function () {
        window.clearInterval(self.showLogsInterval);
      });
      self.showLogsInterval = window.setInterval(function () {
        $.getJSON(workflow.watchUrl, function (data) {
          if (data.workflow && !data.workflow.isRunning) {
            window.clearInterval(self.showLogsInterval);
          }
          if (data.logs.pig) {
            $("#logsModal i").addClass("hide");
            $("#logsModal pre").removeClass("hide");
            var _logsEl = $("#logsModal pre");
            // Append only lines not already rendered (diff by <br> count).
            var newLines = data.logs.pig.split("\n").slice(_logsEl.html().split("<br>").length);
            if (newLines.length > 0){
              _logsEl.html(_logsEl.html() + newLines.join("<br>") + "<br>");
            }
            if (self.showLogsAtEnd) {
              _logsEl.scrollTop(_logsEl[0].scrollHeight - _logsEl.height());
            }
          }
        });
      }, 1000);
    }
  };
};
| apache-2.0 |
abhishek24509/aribaweb | src/widgets/ariba/ui/outline/BindingNames.java | 1706 | /*
Copyright 1996-2008 Ariba, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
$Id: //ariba/platform/ui/widgets/ariba/ui/outline/BindingNames.java#8 $
*/
package ariba.ui.outline;
/**
 * Binding-name constants for the {@code ariba.ui.outline} components; each
 * constant's value is the attribute name the binding is looked up by.
 * Extends the shared widgets {@code BindingNames} so the common names stay
 * available to outline code as well.
 */
public final class BindingNames extends ariba.ui.widgets.BindingNames
{
    public static final String hasChildren = "hasChildren";
    public static final String children = "children";
    public static final String selectionPath = "selectionPath";
    public static final String selectedObject = "selectedObject";
    public static final String selectAction = "selectAction";
    public static final String expandAll = "expandAll";
    public static final String expandCurrentItem = "expandCurrentItem";
    public static final String showExpansionControl = "showExpansionControl";
    public static final String maxLevels = "maxLevels";
    public static final String outlineState = "outlineState";
    public static final String outlineIndex = "outlineIndex";
    public static final String sortOrderings = "sortOrderings";
    public static final String indentationPerLevel = "indentationPerLevel";
    public static final String renderAsTable = "renderAsTable";
}
| apache-2.0 |
dsdinter/oryx | framework/oryx-lambda-serving/src/main/java/com/cloudera/oryx/lambda/serving/CSVMessageBodyWriter.java | 2914 | /*
* Copyright (c) 2014, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.oryx.lambda.serving;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import com.cloudera.oryx.api.serving.HasCSV;
/**
* A {@link MessageBodyWriter} that can produce CSV ({@code text/csv}). If given an
* {@link Iterable}, produces a line of CSV for each element. If an element implements
* {@link HasCSV} then {@link HasCSV#toCSV()} is used to compute its CSV representation; otherwise
* {@link Object#toString()} is.
*/
@Produces({MediaType.TEXT_PLAIN, "text/csv"})
public final class CSVMessageBodyWriter implements MessageBodyWriter<Object> {

  /** Cached instance for the "text/csv" media type. */
  private static final MediaType TEXT_CSV_TYPE = new MediaType("text", "csv");

  /** Handles exactly the two media types advertised by {@code @Produces}. */
  @Override
  public boolean isWriteable(Class<?> type,
                             Type genericType,
                             Annotation[] annotations,
                             MediaType mediaType) {
    return TEXT_CSV_TYPE.equals(mediaType) || MediaType.TEXT_PLAIN_TYPE.equals(mediaType);
  }

  /** Length is not known up front; -1 tells the container to stream. */
  @Override
  public long getSize(Object o,
                      Class<?> type,
                      Type genericType,
                      Annotation[] annotations,
                      MediaType mediaType) {
    return -1L;
  }

  /**
   * Writes {@code o} as UTF-8 CSV: one line per element when the declared type
   * is an {@link Iterable}, otherwise a single line.
   */
  @Override
  public void writeTo(Object o,
                      Class<?> type,
                      Type genericType,
                      Annotation[] annotations,
                      MediaType mediaType,
                      MultivaluedMap<String,Object> httpHeaders,
                      OutputStream entityStream) throws IOException {
    Writer out = new OutputStreamWriter(entityStream, StandardCharsets.UTF_8);
    if (!Iterable.class.isAssignableFrom(type)) {
      writeLine(out, o);
    } else {
      for (Object element : (Iterable<?>) o) {
        writeLine(out, element);
      }
    }
    out.flush(); // flush, but never close: the container owns the entity stream
  }

  /** Appends one newline-terminated CSV line for {@code row}. */
  private static void writeLine(Writer out, Object row) throws IOException {
    out.append(toCSV(row)).append('\n');
  }

  /** Rows that know their own CSV form provide it; all else falls back to toString(). */
  private static String toCSV(Object row) {
    return row instanceof HasCSV ? ((HasCSV) row).toCSV() : row.toString();
  }
}
| apache-2.0 |
nizhikov/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/metastorage/DistributedMetaStorageTest.java | 15460 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.metastorage;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Comparator;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.failure.FailureHandler;
import org.apache.ignite.failure.StopNodeFailureHandler;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.processors.metastorage.persistence.DistributedMetaStorageImpl;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.spi.discovery.DiscoverySpi;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.testframework.junits.WithSystemProperty;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.apache.ignite.IgniteSystemProperties.IGNITE_GLOBAL_METASTORAGE_HISTORY_MAX_BYTES;
/**
* Test for {@link DistributedMetaStorageImpl} with disabled persistence.
*/
public class DistributedMetaStorageTest extends GridCommonAbstractTest {
    /**
     * Used in tests for updatesCount counter of metastorage and corresponds to keys BASELINE_ENABLED and other initial
     * objects that were added but should not be counted along with keys defined in tests.
     */
    private static int initialUpdatesCount = -1;
    /** {@inheritDoc} */
    @Override protected void beforeTestsStarted() throws Exception {
        super.beforeTestsStarted();
        startGrid(0);
        // We have to start the second node and wait when it is started
        // to be sure that all async metastorage updates of the node_0 are completed.
        startGrid(1);
        // Snapshot the number of system-level updates so tests can count only their own keys.
        initialUpdatesCount = (int)metastorage(0).getUpdatesCount();
    }
    /** {@inheritDoc} */
    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
        cfg.setConsistentId(igniteInstanceName);
        cfg.setDataStorageConfiguration(new DataStorageConfiguration()
            .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
                .setPersistenceEnabled(isPersistent())
            )
            .setWalSegments(3)
            .setWalSegmentSize(512 * 1024)
        );
        DiscoverySpi discoSpi = cfg.getDiscoverySpi();
        // Shorten the network timeout so restart-heavy tests fail over quickly.
        if (discoSpi instanceof TcpDiscoverySpi)
            ((TcpDiscoverySpi)discoSpi).setNetworkTimeout(1000);
        return cfg;
    }
    /**
     * @return {@code true} for tests with persistent cluster, {@code false} otherwise.
     */
    protected boolean isPersistent() {
        return false;
    }
    /** {@inheritDoc} */
    @Override protected FailureHandler getFailureHandler(String igniteInstanceName) {
        return new StopNodeFailureHandler();
    }
    /** Stops all nodes so every test starts from an empty topology. */
    @Before
    public void before() throws Exception {
        stopAllGrids();
    }
    /** Stops all nodes started by the test. */
    @After
    public void after() throws Exception {
        stopAllGrids();
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testSingleNode() throws Exception {
        IgniteEx ignite = startGrid(0);
        ignite.cluster().active(true);
        DistributedMetaStorage metastorage = ignite.context().distributedMetastorage();
        assertNull(metastorage.read("key"));
        metastorage.write("key", "value");
        assertEquals("value", metastorage.read("key"));
        metastorage.remove("key");
        assertNull(metastorage.read("key"));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testMultipleNodes() throws Exception {
        int cnt = 4;
        startGrids(cnt);
        grid(0).cluster().active(true);
        // A write issued on any node must be readable from every node.
        for (int i = 0; i < cnt; i++) {
            String key = UUID.randomUUID().toString();
            String val = UUID.randomUUID().toString();
            metastorage(i).write(key, val);
            for (int j = 0; j < cnt; j++)
                assertEquals(i + " " + j, val, metastorage(j).read(key));
        }
        for (int i = 1; i < cnt; i++)
            assertDistributedMetastoragesAreEqual(grid(0), grid(i));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testListenersOnWrite() throws Exception {
        int cnt = 4;
        startGrids(cnt);
        grid(0).cluster().active(true);
        AtomicInteger predCntr = new AtomicInteger();
        for (int i = 0; i < cnt; i++) {
            DistributedMetaStorage metastorage = metastorage(i);
            metastorage.listen(key -> key.startsWith("k"), (key, oldVal, newVal) -> {
                assertNull(oldVal);
                assertEquals("value", newVal);
                predCntr.incrementAndGet();
            });
        }
        metastorage(0).write("key", "value");
        // Listener must fire exactly once on every node.
        assertEquals(cnt, predCntr.get());
        for (int i = 1; i < cnt; i++)
            assertDistributedMetastoragesAreEqual(grid(0), grid(i));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testListenersOnRemove() throws Exception {
        int cnt = 4;
        startGrids(cnt);
        grid(0).cluster().active(true);
        metastorage(0).write("key", "value");
        AtomicInteger predCntr = new AtomicInteger();
        for (int i = 0; i < cnt; i++) {
            DistributedMetaStorage metastorage = metastorage(i);
            metastorage.listen(key -> key.startsWith("k"), (key, oldVal, newVal) -> {
                assertEquals("value", oldVal);
                assertNull(newVal);
                predCntr.incrementAndGet();
            });
        }
        metastorage(0).remove("key");
        // Removal must also be observed exactly once per node.
        assertEquals(cnt, predCntr.get());
        for (int i = 1; i < cnt; i++)
            assertDistributedMetastoragesAreEqual(grid(0), grid(i));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testCas() throws Exception {
        startGrids(2);
        grid(0).cluster().active(true);
        // CAS against a missing key with a non-null expected value must fail and write nothing.
        assertFalse(metastorage(0).compareAndSet("key", "expVal", "newVal"));
        assertNull(metastorage(0).read("key"));
        assertFalse(metastorage(0).compareAndRemove("key", "expVal"));
        // null expected value means "create only if absent".
        assertTrue(metastorage(0).compareAndSet("key", null, "val1"));
        assertEquals("val1", metastorage(0).read("key"));
        assertFalse(metastorage(0).compareAndSet("key", null, "val2"));
        assertEquals("val1", metastorage(0).read("key"));
        assertTrue(metastorage(0).compareAndSet("key", "val1", "val3"));
        assertEquals("val3", metastorage(0).read("key"));
        assertFalse(metastorage(0).compareAndRemove("key", "val1"));
        assertEquals("val3", metastorage(0).read("key"));
        assertTrue(metastorage(0).compareAndRemove("key", "val3"));
        assertNull(metastorage(0).read("key"));
        assertDistributedMetastoragesAreEqual(grid(0), grid(1));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testJoinCleanNode() throws Exception {
        IgniteEx ignite = startGrid(0);
        ignite.cluster().active(true);
        ignite.context().distributedMetastorage().write("key", "value");
        // A node joining after the write must receive the data.
        IgniteEx newNode = startGrid(1);
        assertEquals("value", newNode.context().distributedMetastorage().read("key"));
        assertDistributedMetastoragesAreEqual(ignite, newNode);
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    @WithSystemProperty(key = IGNITE_GLOBAL_METASTORAGE_HISTORY_MAX_BYTES, value = "0")
    public void testJoinCleanNodeFullData() throws Exception {
        // History is disabled (max bytes = 0), forcing full-data transfer on join.
        IgniteEx ignite = startGrid(0);
        ignite.cluster().active(true);
        ignite.context().distributedMetastorage().write("key1", "value1");
        ignite.context().distributedMetastorage().write("key2", "value2");
        startGrid(1);
        assertEquals("value1", metastorage(1).read("key1"));
        assertEquals("value2", metastorage(1).read("key2"));
        assertDistributedMetastoragesAreEqual(ignite, grid(1));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    @WithSystemProperty(key = IGNITE_GLOBAL_METASTORAGE_HISTORY_MAX_BYTES, value = "0")
    public void testDeactivateActivate() throws Exception {
        startGrid(0);
        grid(0).cluster().active(true);
        metastorage(0).write("key1", "value1");
        metastorage(0).write("key2", "value2");
        // Data written before deactivation must survive deactivate -> join -> activate.
        grid(0).cluster().active(false);
        startGrid(1);
        grid(0).cluster().active(true);
        assertEquals("value1", metastorage(0).read("key1"));
        assertEquals("value2", metastorage(0).read("key2"));
        assertDistributedMetastoragesAreEqual(grid(0), grid(1));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testOptimizedWriteTwice() throws Exception {
        IgniteEx igniteEx = startGrid(0);
        igniteEx.cluster().active(true);
        metastorage(0).write("key1", "value1");
        assertEquals(1, metastorage(0).getUpdatesCount() - initialUpdatesCount);
        metastorage(0).write("key2", "value2");
        assertEquals(2, metastorage(0).getUpdatesCount() - initialUpdatesCount);
        // Rewriting an identical value must not bump the updates counter.
        metastorage(0).write("key1", "value1");
        assertEquals(2, metastorage(0).getUpdatesCount() - initialUpdatesCount);
    }
    /** Client node must receive full metastorage data on join and listener notifications afterwards. */
    @Test
    public void testClient() throws Exception {
        IgniteEx igniteEx = startGrid(0);
        igniteEx.cluster().active(true);
        metastorage(0).write("key0", "value0");
        startClientGrid(1);
        AtomicInteger clientLsnrUpdatesCnt = new AtomicInteger();
        assertEquals(1, metastorage(1).getUpdatesCount() - initialUpdatesCount);
        assertEquals("value0", metastorage(1).read("key0"));
        metastorage(1).listen(key -> true, (key, oldVal, newVal) -> clientLsnrUpdatesCnt.incrementAndGet());
        metastorage(1).write("key1", "value1");
        assertEquals(1, clientLsnrUpdatesCnt.get());
        assertEquals("value1", metastorage(1).read("key1"));
        assertEquals("value1", metastorage(0).read("key1"));
    }
    /** Client must catch up with metastorage updates after the server cluster it joined is restarted. */
    @Test
    public void testClientReconnect() throws Exception {
        IgniteEx igniteEx = startGrid(0);
        igniteEx.cluster().active(true);
        startClientGrid(1);
        metastorage(0).write("key0", "value0");
        startGrid(2);
        stopGrid(0);
        stopGrid(2);
        startGrid(2).cluster().active(true);
        metastorage(2).write("key1", "value1");
        metastorage(2).write("key2", "value2");
        // Without persistence "key0" is lost with the restarted servers, hence one update fewer.
        int expUpdatesCnt = isPersistent() ? 3 : 2;
        // Wait enough to cover failover timeout.
        assertTrue(GridTestUtils.waitForCondition(
            () -> metastorage(1).getUpdatesCount() - initialUpdatesCount == expUpdatesCnt, 15_000));
        if (isPersistent())
            assertEquals("value0", metastorage(1).read("key0"));
        assertEquals("value1", metastorage(1).read("key1"));
        assertEquals("value2", metastorage(1).read("key2"));
    }
    /**
     * @throws Exception If failed.
     */
    @Test
    public void testUnstableTopology() throws Exception {
        int cnt = 8;
        startGridsMultiThreaded(cnt);
        grid(0).cluster().active(true);
        stopGrid(0);
        startGrid(0);
        AtomicInteger gridIdxCntr = new AtomicInteger(0);
        AtomicBoolean stop = new AtomicBoolean();
        // Each worker continuously restarts its own node (indexes 1..cnt-1)
        // while node 0 keeps writing metastorage updates.
        IgniteInternalFuture<?> fut = multithreadedAsync(() -> {
            int gridIdx = gridIdxCntr.incrementAndGet();
            try {
                while (!stop.get()) {
                    stopGrid(gridIdx, true);
                    Thread.sleep(50L);
                    startGrid(gridIdx);
                    Thread.sleep(50L);
                }
            }
            catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }, cnt - 1);
        long start = System.currentTimeMillis();
        long duration = GridTestUtils.SF.applyLB(15_000, 5_000);
        try {
            while (System.currentTimeMillis() < start + duration) {
                ThreadLocalRandom rnd = ThreadLocalRandom.current();
                metastorage(0).write(
                    "key" + rnd.nextInt(5000), Integer.toString(rnd.nextInt(1000))
                );
            }
        }
        finally {
            stop.set(true);
            fut.get();
        }
        awaitPartitionMapExchange();
        // After the churn stops, all nodes must converge to the same state.
        for (int i = 1; i < cnt; i++)
            assertDistributedMetastoragesAreEqual(grid(0), grid(i));
    }
    /**
     * @return {@link DistributedMetaStorage} instance for i'th node.
     */
    protected DistributedMetaStorage metastorage(int i) {
        return grid(i).context().distributedMetastorage();
    }
    /**
     * Assert that two nodes have the same internal state in {@link DistributedMetaStorage}.
     */
    protected void assertDistributedMetastoragesAreEqual(IgniteEx ignite1, IgniteEx ignite2) throws Exception {
        DistributedMetaStorage distributedMetastorage1 = ignite1.context().distributedMetastorage();
        DistributedMetaStorage distributedMetastorage2 = ignite2.context().distributedMetastorage();
        // Private internals (version, history cache, full data) are compared via reflection.
        Object ver1 = U.field(distributedMetastorage1, "ver");
        Object ver2 = U.field(distributedMetastorage2, "ver");
        assertEquals(ver1, ver2);
        Object histCache1 = U.field(distributedMetastorage1, "histCache");
        Object histCache2 = U.field(distributedMetastorage2, "histCache");
        assertEquals(histCache1, histCache2);
        Method fullDataMtd = U.findNonPublicMethod(DistributedMetaStorageImpl.class, "localFullData");
        Object[] fullData1 = (Object[])fullDataMtd.invoke(distributedMetastorage1);
        Object[] fullData2 = (Object[])fullDataMtd.invoke(distributedMetastorage2);
        assertEqualsCollections(Arrays.asList(fullData1), Arrays.asList(fullData2));
        // Also check that arrays are sorted.
        Arrays.sort(fullData1, Comparator.comparing(o -> U.field(o, "key")));
        assertEqualsCollections(Arrays.asList(fullData1), Arrays.asList(fullData2));
    }
}
| apache-2.0 |
apache/incubator-htrace | htrace-core4/src/test/java/org/apache/htrace/core/TestHTrace.java | 5265 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.htrace.core;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.htrace.core.TraceGraph.SpansByParent;
import org.junit.Assert;
import org.junit.Test;
public class TestHTrace {
  /** Closing a tracer that never opened a scope must emit no spans. */
  @Test
  public void TestTracerCreateAndClose() throws Exception {
    Tracer tracer = new Tracer.Builder().
        // NOTE(review): tracer name "TestSimpleScope" looks copy-pasted from
        // the test below; presumably it should be "TestTracerCreateAndClose".
        name("TestSimpleScope").
        tracerPool(new TracerPool("TestTracerCreateAndClose")).
        conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler")).
        build();
    POJOSpanReceiver receiver =
        new POJOSpanReceiver(HTraceConfiguration.EMPTY);
    tracer.getTracerPool().addReceiver(receiver);
    tracer.close();
    Assert.assertTrue(receiver.getSpans().isEmpty());
  }
  /** Opening and closing a single scope must produce exactly one span with no parents. */
  @Test
  public void TestSimpleScope() throws Exception {
    Tracer tracer = new Tracer.Builder().
        name("TestSimpleScope").
        tracerPool(new TracerPool("TestSimpleScope")).
        conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler")).
        build();
    POJOSpanReceiver receiver =
        new POJOSpanReceiver(HTraceConfiguration.EMPTY);
    tracer.getTracerPool().addReceiver(receiver);
    TraceScope scope = tracer.newScope("Foo");
    scope.close();
    tracer.close();
    Assert.assertEquals(1, receiver.getSpans().size());
    Span span = receiver.getSpans().iterator().next();
    Assert.assertEquals(0, span.getParents().length);
  }
  /**
   * Builds the three sample traces and verifies the resulting span graph:
   * three roots (one per sample trace) and a three-deep chain under the RPC root.
   */
  @Test
  public void TestCreateSpans() throws Exception {
    Tracer tracer = new Tracer.Builder().
        name("TestCreateSpans").
        tracerPool(new TracerPool("TestCreateSpans")).
        conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler")).
        build();
    POJOSpanReceiver receiver =
        new POJOSpanReceiver(HTraceConfiguration.EMPTY);
    tracer.getTracerPool().addReceiver(receiver);
    TraceCreator traceCreator = new TraceCreator(tracer);
    traceCreator.createSampleRpcTrace();
    traceCreator.createSimpleTrace();
    traceCreator.createThreadedTrace();
    tracer.close();
    TraceGraph traceGraph = new TraceGraph(receiver.getSpans());
    // Roots are the spans whose parent is the INVALID (absent) span id.
    Collection<Span> roots = traceGraph.getSpansByParent().find(SpanId.INVALID);
    Assert.assertTrue("Trace tree must have roots", !roots.isEmpty());
    Assert.assertEquals(3, roots.size());
    Map<String, Span> descriptionToRootSpan = new HashMap<String, Span>();
    for (Span root : roots) {
      descriptionToRootSpan.put(root.getDescription(), root);
    }
    Assert.assertTrue(descriptionToRootSpan.keySet().contains(
        TraceCreator.RPC_TRACE_ROOT));
    Assert.assertTrue(descriptionToRootSpan.keySet().contains(
        TraceCreator.SIMPLE_TRACE_ROOT));
    Assert.assertTrue(descriptionToRootSpan.keySet().contains(
        TraceCreator.THREADED_TRACE_ROOT));
    // Walk the RPC chain: each node has exactly one child until the leaf.
    SpansByParent spansByParentId = traceGraph.getSpansByParent();
    Span rpcTraceRoot = descriptionToRootSpan.get(TraceCreator.RPC_TRACE_ROOT);
    Assert.assertEquals(1, spansByParentId.find(rpcTraceRoot.getSpanId()).size());
    Span rpcTraceChild1 = spansByParentId.find(rpcTraceRoot.getSpanId())
        .iterator().next();
    Assert.assertEquals(1, spansByParentId.find(rpcTraceChild1.getSpanId()).size());
    Span rpcTraceChild2 = spansByParentId.find(rpcTraceChild1.getSpanId())
        .iterator().next();
    Assert.assertEquals(1, spansByParentId.find(rpcTraceChild2.getSpanId()).size());
    Span rpcTraceChild3 = spansByParentId.find(rpcTraceChild2.getSpanId())
        .iterator().next();
    Assert.assertEquals(0, spansByParentId.find(rpcTraceChild3.getSpanId()).size());
  }
  /**
   * A root scope created with an explicit SpanId must keep the given high word
   * and receive a non-zero low word, yielding a valid span id.
   */
  @Test(timeout=60000)
  public void testRootSpansHaveNonZeroSpanId() throws Exception {
    Tracer tracer = new Tracer.Builder().
        name("testRootSpansHaveNonZeroSpanId").
        tracerPool(new TracerPool("testRootSpansHaveNonZeroSpanId")).
        conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler")).build();
    TraceScope scope = tracer.
        newScope("myRootSpan", new SpanId(100L, 200L));
    Assert.assertNotNull(scope);
    Assert.assertEquals("myRootSpan", scope.getSpan().getDescription());
    Assert.assertTrue(scope.getSpan().getSpanId().isValid());
    Assert.assertEquals(100L, scope.getSpan().getSpanId().getHigh());
    Assert.assertNotEquals(0L, scope.getSpan().getSpanId().getLow());
    scope.close();
  }
}
| apache-2.0 |
GBGamer/rust | src/test/run-pass/issues/issue-21361.rs | 752 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// Regression test for issue #21361: `Iterator::max` must be callable through
// both a boxed and a `&mut` iterator trait object. The pre-2018 bare trait
// object syntax is kept deliberately -- that is what the test exercises.
fn main() {
    let nums = vec![1, 2, 3];
    let by_box: Box<Iterator<Item=i32>> = Box::new(nums.into_iter());
    assert_eq!(by_box.max(), Some(3));

    let nums = vec![1, 2, 3];
    let by_ref: &mut Iterator<Item=i32> = &mut nums.into_iter();
    assert_eq!(by_ref.max(), Some(3));
}
| apache-2.0 |
toxeh/presto | presto-main/src/main/java/com/facebook/presto/execution/SqlQueryExecution.java | 20921 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.execution;
import com.facebook.presto.OutputBuffers;
import com.facebook.presto.Session;
import com.facebook.presto.SystemSessionProperties;
import com.facebook.presto.UnpartitionedPagePartitionFunction;
import com.facebook.presto.execution.StateMachine.StateChangeListener;
import com.facebook.presto.execution.scheduler.ExecutionPolicy;
import com.facebook.presto.execution.scheduler.SqlQueryScheduler;
import com.facebook.presto.memory.VersionedMemoryPoolId;
import com.facebook.presto.metadata.Metadata;
import com.facebook.presto.security.AccessControl;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.split.SplitManager;
import com.facebook.presto.sql.analyzer.Analysis;
import com.facebook.presto.sql.analyzer.Analyzer;
import com.facebook.presto.sql.analyzer.FeaturesConfig;
import com.facebook.presto.sql.analyzer.QueryExplainer;
import com.facebook.presto.sql.parser.SqlParser;
import com.facebook.presto.sql.planner.DistributedExecutionPlanner;
import com.facebook.presto.sql.planner.InputExtractor;
import com.facebook.presto.sql.planner.LogicalPlanner;
import com.facebook.presto.sql.planner.Plan;
import com.facebook.presto.sql.planner.PlanFragmenter;
import com.facebook.presto.sql.planner.PlanNodeIdAllocator;
import com.facebook.presto.sql.planner.StageExecutionPlan;
import com.facebook.presto.sql.planner.SubPlan;
import com.facebook.presto.sql.planner.optimizations.PlanOptimizer;
import com.facebook.presto.sql.tree.Statement;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import io.airlift.concurrent.SetThreadName;
import io.airlift.units.Duration;
import javax.annotation.concurrent.ThreadSafe;
import javax.inject.Inject;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;
import static com.facebook.presto.OutputBuffers.INITIAL_EMPTY_OUTPUT_BUFFERS;
import static com.facebook.presto.SystemSessionProperties.getHashPartitionCount;
import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
@ThreadSafe
public final class SqlQueryExecution
implements QueryExecution
{
private static final OutputBuffers ROOT_OUTPUT_BUFFERS = INITIAL_EMPTY_OUTPUT_BUFFERS
.withBuffer(new TaskId("output", "buffer", "id"), new UnpartitionedPagePartitionFunction())
.withNoMoreBufferIds();
private final QueryStateMachine stateMachine;
private final Statement statement;
private final Metadata metadata;
private final AccessControl accessControl;
private final SqlParser sqlParser;
private final SplitManager splitManager;
private final NodeScheduler nodeScheduler;
private final List<PlanOptimizer> planOptimizers;
private final RemoteTaskFactory remoteTaskFactory;
private final LocationFactory locationFactory;
private final int scheduleSplitBatchSize;
private final int initialHashPartitions;
private final boolean experimentalSyntaxEnabled;
private final ExecutorService queryExecutor;
private final QueryExplainer queryExplainer;
private final AtomicReference<SqlQueryScheduler> queryScheduler = new AtomicReference<>();
private final AtomicReference<QueryInfo> finalQueryInfo = new AtomicReference<>();
private final NodeTaskMap nodeTaskMap;
private final Session session;
private final ExecutionPolicy executionPolicy;
    /**
     * Creates an execution for a single SQL query. All analysis, planning and
     * scheduling collaborators are captured and null/range-checked, the
     * {@link QueryStateMachine} is built, and a state-change listener is
     * registered that runs once the query reaches a terminal state: it aborts
     * any still-running scheduler, caches the final {@link QueryInfo}, and
     * drops the scheduler reference. The final info is set <em>before</em> the
     * scheduler reference is cleared; getTotalMemoryReservation() relies on
     * that ordering.
     */
    public SqlQueryExecution(QueryId queryId,
            String query,
            Session session,
            URI self,
            Statement statement,
            Metadata metadata,
            AccessControl accessControl,
            SqlParser sqlParser,
            SplitManager splitManager,
            NodeScheduler nodeScheduler,
            List<PlanOptimizer> planOptimizers,
            RemoteTaskFactory remoteTaskFactory,
            LocationFactory locationFactory,
            int scheduleSplitBatchSize,
            int initialHashPartitions,
            boolean experimentalSyntaxEnabled,
            ExecutorService queryExecutor,
            NodeTaskMap nodeTaskMap,
            QueryExplainer queryExplainer,
            ExecutionPolicy executionPolicy)
    {
        // All work happens under a thread name that identifies the query, for debuggability.
        try (SetThreadName ignored = new SetThreadName("Query-%s", queryId)) {
            this.statement = requireNonNull(statement, "statement is null");
            this.metadata = requireNonNull(metadata, "metadata is null");
            this.accessControl = requireNonNull(accessControl, "accessControl is null");
            this.sqlParser = requireNonNull(sqlParser, "sqlParser is null");
            this.splitManager = requireNonNull(splitManager, "splitManager is null");
            this.nodeScheduler = requireNonNull(nodeScheduler, "nodeScheduler is null");
            this.planOptimizers = requireNonNull(planOptimizers, "planOptimizers is null");
            this.locationFactory = requireNonNull(locationFactory, "locationFactory is null");
            this.queryExecutor = requireNonNull(queryExecutor, "queryExecutor is null");
            this.experimentalSyntaxEnabled = experimentalSyntaxEnabled;
            this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null");
            this.session = requireNonNull(session, "session is null");
            this.executionPolicy = requireNonNull(executionPolicy, "executionPolicy is null");
            this.queryExplainer = requireNonNull(queryExplainer, "queryExplainer is null");
            checkArgument(scheduleSplitBatchSize > 0, "scheduleSplitBatchSize must be greater than 0");
            this.scheduleSplitBatchSize = scheduleSplitBatchSize;
            checkArgument(initialHashPartitions > 0, "initialHashPartitions must be greater than 0");
            this.initialHashPartitions = initialHashPartitions;
            requireNonNull(queryId, "queryId is null");
            requireNonNull(query, "query is null");
            requireNonNull(session, "session is null");
            requireNonNull(self, "self is null");
            this.stateMachine = new QueryStateMachine(queryId, query, session, self, queryExecutor);
            // when the query finishes cache the final query info, and clear the reference to the output stage
            stateMachine.addStateChangeListener(state -> {
                if (!state.isDone()) {
                    return;
                }
                // query is now done, so abort any work that is still running
                SqlQueryScheduler scheduler = queryScheduler.get();
                if (scheduler != null) {
                    scheduler.abort();
                }
                // capture the final query state and drop reference to the scheduler
                finalQueryInfo.compareAndSet(null, buildQueryInfo(scheduler));
                queryScheduler.set(null);
            });
            // Wrap the factory so remote task memory is tracked against this query's state machine.
            this.remoteTaskFactory = new MemoryTrackingRemoteTaskFactory(requireNonNull(remoteTaskFactory, "remoteTaskFactory is null"), stateMachine);
        }
    }
    /** @return the memory pool currently assigned to this query (owned by the state machine) */
    @Override
    public VersionedMemoryPoolId getMemoryPool()
    {
        return stateMachine.getMemoryPool();
    }
    /** Assigns this query to the given memory pool; delegates to the state machine. */
    @Override
    public void setMemoryPool(VersionedMemoryPoolId poolId)
    {
        stateMachine.setMemoryPool(poolId);
    }
    /**
     * Returns the query's total memory reservation in bytes. While the query
     * runs this comes from the live scheduler; once done it comes from the
     * cached final {@link QueryInfo}.
     */
    @Override
    public long getTotalMemoryReservation()
    {
        // acquire reference to outputStage before checking finalQueryInfo, because
        // state change listener sets finalQueryInfo and then clears outputStage when
        // the query finishes.
        SqlQueryScheduler scheduler = queryScheduler.get();
        QueryInfo queryInfo = finalQueryInfo.get();
        if (queryInfo != null) {
            return queryInfo.getQueryStats().getTotalMemoryReservation().toBytes();
        }
        // NOTE(review): if the query has not reached planDistribution() yet,
        // queryScheduler may still be null here and this would NPE -- confirm
        // callers only invoke this after scheduling has started.
        return scheduler.getTotalMemoryReservation();
    }
    /** @return the session this query executes in */
    @Override
    public Session getSession()
    {
        return session;
    }
    /**
     * Starts the query: transitions PLANNING, analyzes and fragments the
     * statement, plans distribution, transitions STARTING, then launches the
     * scheduler. A failed transition means the query already started or
     * finished elsewhere, so the method returns quietly. Any Throwable fails
     * the query via {@code fail}; Errors are additionally rethrown.
     */
    @Override
    public void start()
    {
        try (SetThreadName ignored = new SetThreadName("Query-%s", stateMachine.getQueryId())) {
            try {
                // transition to planning
                if (!stateMachine.transitionToPlanning()) {
                    // query already started or finished
                    return;
                }
                // analyze query
                SubPlan subplan = analyzeQuery();
                // plan distribution of query
                planDistribution(subplan);
                // transition to starting
                if (!stateMachine.transitionToStarting()) {
                    // query already started or finished
                    return;
                }
                // if query is not finished, start the scheduler, otherwise cancel it
                SqlQueryScheduler scheduler = queryScheduler.get();
                if (!stateMachine.isDone()) {
                    scheduler.start();
                }
            }
            catch (Throwable e) {
                fail(e);
                Throwables.propagateIfInstanceOf(e, Error.class);
            }
        }
    }
    /** Registers a listener invoked on every query state transition. */
    @Override
    public void addStateChangeListener(StateChangeListener<QueryState> stateChangeListener)
    {
        try (SetThreadName ignored = new SetThreadName("Query-%s", stateMachine.getQueryId())) {
            stateMachine.addStateChangeListener(stateChangeListener);
        }
    }
    // Wraps analysis so that a StackOverflowError from a pathologically deep statement
    // becomes a user-facing NOT_SUPPORTED error instead of killing the thread.
    private SubPlan analyzeQuery()
    {
        try {
            return doAnalyzeQuery();
        }
        catch (StackOverflowError e) {
            throw new PrestoException(NOT_SUPPORTED, "statement is too large (stack overflow during analysis)", e);
        }
    }
    // Analyzes the statement, produces a logical plan, records plan inputs on the state
    // machine, and fragments the plan into the SubPlan tree used for distribution.
    private SubPlan doAnalyzeQuery()
    {
        // time analysis phase
        long analysisStart = System.nanoTime();
        // analyze query
        Analyzer analyzer = new Analyzer(stateMachine.getSession(), metadata, sqlParser, accessControl, Optional.of(queryExplainer), experimentalSyntaxEnabled);
        Analysis analysis = analyzer.analyze(statement);
        stateMachine.setUpdateType(analysis.getUpdateType());
        // plan query
        PlanNodeIdAllocator idAllocator = new PlanNodeIdAllocator();
        LogicalPlanner logicalPlanner = new LogicalPlanner(stateMachine.getSession(), planOptimizers, idAllocator, metadata);
        Plan plan = logicalPlanner.plan(analysis);
        // extract inputs
        // NOTE(review): this uses the 'session' field while the analyzer/planner above use
        // stateMachine.getSession() -- presumably the same session; confirm for consistency
        List<Input> inputs = new InputExtractor(metadata, session).extract(plan.getRoot());
        stateMachine.setInputs(inputs);
        // fragment the plan
        SubPlan subplan = new PlanFragmenter().createSubPlans(plan);
        // record analysis time
        stateMachine.recordAnalysisTime(analysisStart);
        return subplan;
    }
    // Plans execution over the active nodes and publishes the SqlQueryScheduler into
    // queryScheduler.  Does NOT start the scheduler; start() does that after the state
    // machine transitions to STARTING.
    private void planDistribution(SubPlan subplan)
    {
        // time distribution planning
        long distributedPlanningStart = System.nanoTime();
        // plan the execution on the active nodes
        DistributedExecutionPlanner distributedPlanner = new DistributedExecutionPlanner(splitManager);
        StageExecutionPlan outputStageExecutionPlan = distributedPlanner.plan(subplan, session);
        stateMachine.recordDistributedPlanningTime(distributedPlanningStart);
        // bail out early if the query was cancelled/failed while planning
        if (stateMachine.isDone()) {
            return;
        }
        // record field names
        stateMachine.setOutputFieldNames(outputStageExecutionPlan.getFieldNames());
        // build the stage execution objects (this doesn't schedule execution)
        SqlQueryScheduler scheduler = new SqlQueryScheduler(
                stateMachine,
                locationFactory,
                outputStageExecutionPlan,
                nodeScheduler,
                remoteTaskFactory,
                stateMachine.getSession(),
                scheduleSplitBatchSize,
                initialHashPartitions,
                queryExecutor,
                ROOT_OUTPUT_BUFFERS,
                nodeTaskMap,
                executionPolicy);
        queryScheduler.set(scheduler);
        // if query was canceled during scheduler creation, abort the scheduler
        // directly since the callback may have already fired
        if (stateMachine.isDone()) {
            scheduler.abort();
            queryScheduler.set(null);
        }
    }
@Override
public void cancelStage(StageId stageId)
{
requireNonNull(stageId, "stageId is null");
try (SetThreadName ignored = new SetThreadName("Query-%s", stateMachine.getQueryId())) {
SqlQueryScheduler scheduler = queryScheduler.get();
if (scheduler != null) {
scheduler.cancelStage(stageId);
}
}
}
    // Marks the query as failed with the given cause; the transition is a no-op if the
    // query already reached a terminal state.
    @Override
    public void fail(Throwable cause)
    {
        requireNonNull(cause, "cause is null");
        stateMachine.transitionToFailed(cause);
    }
    // Blocks until the query leaves currentState or maxWait elapses; returns the
    // remaining wait budget (per the state machine's contract).
    @Override
    public Duration waitForStateChange(QueryState currentState, Duration maxWait)
            throws InterruptedException
    {
        try (SetThreadName ignored = new SetThreadName("Query-%s", stateMachine.getQueryId())) {
            return stateMachine.waitForStateChange(currentState, maxWait);
        }
    }
    // Records client liveness on the state machine (used to expire abandoned queries).
    @Override
    public void recordHeartbeat()
    {
        stateMachine.recordHeartbeat();
    }
    /**
     * Shrinks the retained final query info by dropping the heavy parts of the output
     * stage (plan, tasks, substages) while keeping its id/state/stats.  Only applies
     * once the final info has been captured; the CAS at the end makes this safe against
     * a concurrent update of finalQueryInfo.
     */
    @Override
    public void pruneInfo()
    {
        QueryInfo queryInfo = finalQueryInfo.get();
        // nothing to prune until the query has finished and produced an output stage
        if (queryInfo == null || queryInfo.getOutputStage() == null) {
            return;
        }
        StageInfo prunedOutputStage = new StageInfo(
                queryInfo.getOutputStage().getStageId(),
                queryInfo.getOutputStage().getState(),
                queryInfo.getOutputStage().getSelf(),
                null, // Remove the plan
                queryInfo.getOutputStage().getTypes(),
                queryInfo.getOutputStage().getStageStats(),
                ImmutableList.of(), // Remove the tasks
                ImmutableList.of(), // Remove the substages
                queryInfo.getOutputStage().getFailureCause()
        );
        // rebuild the QueryInfo with the pruned stage; all other fields are carried over
        QueryInfo prunedQueryInfo = new QueryInfo(
                queryInfo.getQueryId(),
                queryInfo.getSession(),
                queryInfo.getState(),
                getMemoryPool().getId(),
                queryInfo.isScheduled(),
                queryInfo.getSelf(),
                queryInfo.getFieldNames(),
                queryInfo.getQuery(),
                queryInfo.getQueryStats(),
                queryInfo.getSetSessionProperties(),
                queryInfo.getResetSessionProperties(),
                queryInfo.getUpdateType(),
                prunedOutputStage,
                queryInfo.getFailureInfo(),
                queryInfo.getErrorCode(),
                queryInfo.getInputs()
        );
        // only install the pruned copy if no one replaced the snapshot we read above
        finalQueryInfo.compareAndSet(queryInfo, prunedQueryInfo);
    }
    // Stable identifier of this query (assigned at creation, held by the state machine).
    @Override
    public QueryId getQueryId()
    {
        return stateMachine.getQueryId();
    }
    /**
     * Returns the final query info if the query has finished, otherwise a live snapshot
     * built from the current scheduler state.
     */
    @Override
    public QueryInfo getQueryInfo()
    {
        try (SetThreadName ignored = new SetThreadName("Query-%s", stateMachine.getQueryId())) {
            // acquire reference to scheduler before checking finalQueryInfo, because
            // state change listener sets finalQueryInfo and then clears scheduler when
            // the query finishes.
            SqlQueryScheduler scheduler = queryScheduler.get();
            QueryInfo finalQueryInfo = this.finalQueryInfo.get();
            if (finalQueryInfo != null) {
                return finalQueryInfo;
            }
            return buildQueryInfo(scheduler);
        }
    }
    // Current lifecycle state of the query (PLANNING, RUNNING, FINISHED, ...).
    @Override
    public QueryState getState()
    {
        return stateMachine.getQueryState();
    }
private QueryInfo buildQueryInfo(SqlQueryScheduler scheduler)
{
StageInfo stageInfo = null;
if (scheduler != null) {
stageInfo = scheduler.getStageInfo();
}
return stateMachine.getQueryInfo(stageInfo);
}
public static class SqlQueryExecutionFactory
implements QueryExecutionFactory<SqlQueryExecution>
{
private final int scheduleSplitBatchSize;
private final boolean experimentalSyntaxEnabled;
private final Metadata metadata;
private final AccessControl accessControl;
private final SqlParser sqlParser;
private final SplitManager splitManager;
private final NodeScheduler nodeScheduler;
private final List<PlanOptimizer> planOptimizers;
private final RemoteTaskFactory remoteTaskFactory;
private final QueryExplainer queryExplainer;
private final LocationFactory locationFactory;
private final ExecutorService executor;
private final NodeTaskMap nodeTaskMap;
private final Map<String, ExecutionPolicy> executionPolicies;
@Inject
SqlQueryExecutionFactory(QueryManagerConfig config,
FeaturesConfig featuresConfig,
Metadata metadata,
AccessControl accessControl,
SqlParser sqlParser,
LocationFactory locationFactory,
SplitManager splitManager,
NodeScheduler nodeScheduler,
List<PlanOptimizer> planOptimizers,
RemoteTaskFactory remoteTaskFactory,
@ForQueryExecution ExecutorService executor,
NodeTaskMap nodeTaskMap,
QueryExplainer queryExplainer,
Map<String, ExecutionPolicy> executionPolicies)
{
requireNonNull(config, "config is null");
this.scheduleSplitBatchSize = config.getScheduleSplitBatchSize();
this.metadata = requireNonNull(metadata, "metadata is null");
this.accessControl = requireNonNull(accessControl, "accessControl is null");
this.sqlParser = requireNonNull(sqlParser, "sqlParser is null");
this.locationFactory = requireNonNull(locationFactory, "locationFactory is null");
this.splitManager = requireNonNull(splitManager, "splitManager is null");
this.nodeScheduler = requireNonNull(nodeScheduler, "nodeScheduler is null");
this.planOptimizers = requireNonNull(planOptimizers, "planOptimizers is null");
this.remoteTaskFactory = requireNonNull(remoteTaskFactory, "remoteTaskFactory is null");
requireNonNull(featuresConfig, "featuresConfig is null");
this.experimentalSyntaxEnabled = featuresConfig.isExperimentalSyntaxEnabled();
this.executor = requireNonNull(executor, "executor is null");
this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null");
this.queryExplainer = requireNonNull(queryExplainer, "queryExplainer is null");
this.executionPolicies = requireNonNull(executionPolicies, "schedulerPolicies is null");
}
@Override
public SqlQueryExecution createQueryExecution(QueryId queryId, String query, Session session, Statement statement)
{
String executionPolicyName = SystemSessionProperties.getExecutionPolicy(session);
ExecutionPolicy executionPolicy = executionPolicies.get(executionPolicyName);
checkArgument(executionPolicy != null, "No execution policy %s", executionPolicy);
SqlQueryExecution queryExecution = new SqlQueryExecution(
queryId,
query,
session,
locationFactory.createQueryLocation(queryId),
statement,
metadata,
accessControl,
sqlParser,
splitManager,
nodeScheduler,
planOptimizers,
remoteTaskFactory,
locationFactory,
scheduleSplitBatchSize,
getHashPartitionCount(session),
experimentalSyntaxEnabled,
executor,
nodeTaskMap,
queryExplainer,
executionPolicy);
return queryExecution;
}
}
}
| apache-2.0 |
Esri/geoportal-server | components/desktop/WMCOpener/branches/10.6.1/src/WMCDocument.cs | 11744 | /* See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Esri Inc. licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Text;
using System.Xml;
using System.Xml.Xsl;
using System.Xml.XPath;
using ESRI.ArcGIS.Geometry;
using System.Globalization;
namespace com.esri.gpt.wmc
{
/// <summary>
/// Represents WMCDocument object
/// </summary>
class WMCDocument
{
private List<WMCLayer> layers = new List<WMCLayer>();
private bool hasExtent = false;
private IEnvelope extent = null;
private string sSRS = null;
private string sErr = "";
/// <summary>
/// List of WMC layers
/// </summary>
public List<WMCLayer> Layers
{
set
{
layers = value;
}
get
{
return layers;
}
}
/// <summary>
/// Envelope
/// </summary>
public IEnvelope Extent
{
set
{
extent = value;
}
get
{
return extent;
}
}
/// <summary>
/// Check for extent
/// </summary>
public bool HasExtent
{
set
{
hasExtent = value;
}
get
{
return hasExtent;
}
}
/// <summary>
/// stores SRS
/// </summary>
public string SRS
{
set
{
sSRS = value;
}
get
{
return sSRS;
}
}
public string Err
{
set
{
sErr = value;
}
get
{
return sErr;
}
}
/// <summary>
/// Returns validity of document
/// </summary>
/// <returns></returns>
public bool IsValidWMCDoc()
{
return sErr.Length == 0 ? true : false;
}
private string getAttribute(XmlAttributeCollection theAttributes, string strName)
{
XmlNode anAttribute = theAttributes.GetNamedItem(strName);
if (anAttribute != null)
return anAttribute.Value;
else
return "";
}
private string getAttributeFromNode(XmlNode pNode, string sAttrName)
{
XmlNode nd = pNode.Attributes.GetNamedItem(sAttrName);
if (nd != null)
return nd.Value;
else
return "";
}
static string ReadFromFile(string filename)
{
System.IO.StreamReader SR;
StringBuilder S = new StringBuilder();
SR = System.IO.File.OpenText(filename);
String fileCnt = null;
fileCnt = SR.ReadLine();
while (fileCnt != null)
{
S.Append(fileCnt);
fileCnt = SR.ReadLine();
}
SR.Close();
return S.ToString();
}
/// <summary>
/// Loads a wmc file with file name provided
/// </summary>
/// <param name="strFileName">the file name</param>
public void LoadFromFile(string strFileName)
{
XmlAttributeCollection theAttributes = null;
XmlNode theLayer = null;
XmlNode theNameNode = null;
XmlNode theTitleNode = null;
XmlNode theServerNode = null;
XmlNode theOnlineResourceNode = null;
XmlDocument theWMC = new XmlDocument();
try
{
string xml = ReadFromFile(strFileName);
theWMC.LoadXml(xml);
}
catch (Exception e)
{
OpenWMC.logger.writeLog(e.StackTrace);
}
XmlNamespaceManager xmlnsManager = new XmlNamespaceManager(theWMC.NameTable);
xmlnsManager.AddNamespace("context", "http://www.opengis.net/context");
xmlnsManager.AddNamespace("sld", "http://www.opengis.net/sld");
xmlnsManager.AddNamespace("xlink", "http://www.w3.org/1999/xlink");
xmlnsManager.AddNamespace("xs", "http://www.w3.org/2001/XMLSchema");
XmlNodeList theLayers = theWMC.SelectNodes("/context:ViewContext/context:LayerList/context:Layer",xmlnsManager);
if (theLayers.Count == 0)
{
xmlnsManager = new XmlNamespaceManager(theWMC.NameTable);
xmlnsManager.AddNamespace("sld", "http://www.opengis.net/sld");
xmlnsManager.AddNamespace("xlink", "http://www.w3.org/1999/xlink");
xmlnsManager.AddNamespace("xs", "http://www.w3.org/2001/XMLSchema");
xmlnsManager.AddNamespace("context", "http://www.opengeospatial.net/context");
theLayers = theWMC.SelectNodes("/context:ViewContext/context:LayerList/context:Layer",xmlnsManager);
if (theLayers.Count == 0)
{
sErr = StringResources.InvalidWMCDocument +
StringResources.NoLayers;
return;
}
}
for (int i = theLayers.Count -1; i >= 0 ; i--)
{
theLayer = theLayers.Item(i);
WMCLayer myLayer = new WMCLayer();
XmlNodeList children = theLayer.ChildNodes;
foreach (XmlNode child in children)
{
if (child.Name.ToLower() == "name")
theNameNode = child;
else if (child.Name.ToLower() == "title")
theTitleNode = child;
else if (child.Name.ToLower() == "server")
{
theServerNode = child;
XmlNodeList nds = theServerNode.ChildNodes;
foreach (XmlNode c in nds)
{
if (c.Name.ToLower() == "onlineresource")
theOnlineResourceNode = c;
}
if (theTitleNode == null)
{
myLayer.Title = getAttribute(theServerNode.Attributes, "title");
}
}
}
//1 - get Server info
theAttributes = theServerNode.Attributes;
myLayer.Server.Service = getAttribute(theAttributes, "service");
myLayer.Server.Version = getAttribute(theAttributes, "version");
myLayer.Server.Title = getAttribute(theAttributes, "title");
//2 - get online info
myLayer.Server.OnlineResource.Href = getAttributeFromNode(theOnlineResourceNode, "xlink:href");
myLayer.Server.OnlineResource.ResourceType = getAttributeFromNode(theOnlineResourceNode, "xlink:type");
myLayer.Server.OnlineResource.Role = getAttributeFromNode(theOnlineResourceNode, "xlink:role");
myLayer.Server.OnlineResource.ArcRole = getAttributeFromNode(theOnlineResourceNode, "xlink:arcrole");
myLayer.Server.OnlineResource.Title = getAttributeFromNode(theOnlineResourceNode, "xlink:title");
myLayer.Server.OnlineResource.Show = getAttributeFromNode(theOnlineResourceNode, "xlink:show");
myLayer.Server.OnlineResource.Actuate = getAttributeFromNode(theOnlineResourceNode, "xlink:actuate");
//3 - get layer name, title and info
if (myLayer.Server.Service == "OGC:WMS")
{
myLayer.Name = theNameNode.InnerText;
myLayer.SecretName = theNameNode.InnerText;
}
else if (myLayer.Server.Service == "ESRI:ARCIMS")
{
if (theNameNode.InnerText.Contains(":"))
{
myLayer.Name = theNameNode.InnerText.Substring(0, theNameNode.InnerText.IndexOf(":") - 1);
myLayer.SecretName = theNameNode.InnerText.Substring(theNameNode.InnerText.IndexOf(":") + 1);
}
else
{
myLayer.Name = theNameNode.InnerText;
myLayer.SecretName = theNameNode.InnerText;
}
}
else if (myLayer.Server.Service == "ESRI:ARCIMS:HTTP")
{
myLayer.Name = myLayer.Server.Title;
myLayer.SecretName = theTitleNode.InnerText;
}
else if (myLayer.Server.Service == "ESRI:AGS:MAP:SOAP")
{
if (theNameNode.InnerText.Contains(":"))
{
myLayer.Name = theNameNode.InnerText.Substring(0, theNameNode.InnerText.IndexOf(":") - 1);
myLayer.SecretName = theNameNode.InnerText.Substring(theNameNode.InnerText.IndexOf(":") + 1);
}
else
{
myLayer.Name = theNameNode.InnerText;
myLayer.SecretName = theNameNode.InnerText;
}
}
else
{
myLayer.Name = theNameNode.InnerText;
}
if (theTitleNode != null)
{
myLayer.Title = theTitleNode.InnerText;
}
theAttributes = theLayer.Attributes;
myLayer.IsHidden =theAttributes.GetNamedItem("hidden").Value;
myLayer.IsQueryable = theAttributes.GetNamedItem("queryable").Value;
layers.Add(myLayer);
}
if (theWMC.SelectNodes("/context:ViewContext/context:General/context:BoundingBox",xmlnsManager).Count == 0)
{
HasExtent = false;
}
HasExtent = true;
XmlNode theGeneral = theWMC.SelectNodes("/context:ViewContext/context:General/context:BoundingBox",xmlnsManager).Item(0);
theAttributes = theGeneral.Attributes;
Double xmax, xmin, ymax, ymin;
CultureInfo culture = new CultureInfo("en");
xmax = Double.Parse(getAttribute(theAttributes, "maxx"), culture);
xmin = Double.Parse(getAttribute(theAttributes, "minx"), culture);
ymax = Double.Parse(getAttribute(theAttributes, "maxy"), culture);
ymin = Double.Parse(getAttribute(theAttributes, "miny"), culture);
extent = new EnvelopeClass();
extent.XMax = xmax;
extent.XMin = xmin;
extent.YMax = ymax;
extent.YMin = ymin;
sSRS = getAttribute(theAttributes, "SRS");
}
}
}
| apache-2.0 |
ebyhr/presto | core/trino-main/src/test/java/io/trino/operator/scalar/TestArrayFilterFunction.java | 4913 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.operator.scalar;
import com.google.common.collect.ImmutableList;
import io.trino.spi.type.ArrayType;
import org.testng.annotations.Test;
import static io.trino.spi.type.BooleanType.BOOLEAN;
import static io.trino.spi.type.DoubleType.DOUBLE;
import static io.trino.spi.type.IntegerType.INTEGER;
import static io.trino.spi.type.TimestampType.createTimestampType;
import static io.trino.spi.type.VarcharType.createVarcharType;
import static io.trino.type.UnknownType.UNKNOWN;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
/**
 * Tests for the {@code filter(array, predicate)} scalar function: each case asserts
 * both the result element list and the inferred result array type.
 */
public class TestArrayFilterFunction
        extends AbstractTestFunctions
{
    @Test
    public void testBasic()
    {
        assertFunction("filter(ARRAY [5, 6], x -> x = 5)", new ArrayType(INTEGER), ImmutableList.of(5));
        // RANDOM(1) always yields 0, so the inputs are effectively [5, 6] (forces non-constant folding)
        assertFunction("filter(ARRAY [5 + RANDOM(1), 6 + RANDOM(1)], x -> x = 5)", new ArrayType(INTEGER), ImmutableList.of(5));
        // a NULL predicate result (from nullif) filters the element out, same as false
        assertFunction("filter(ARRAY [true, false, true, false], x -> nullif(x, false))", new ArrayType(BOOLEAN), ImmutableList.of(true, true));
        assertFunction("filter(ARRAY [true, false, null, true, false, null], x -> not x)", new ArrayType(BOOLEAN), ImmutableList.of(false, false));
        assertFunction(
                "filter(ARRAY [TIMESTAMP '2020-05-10 12:34:56.123456789', TIMESTAMP '1111-05-10 12:34:56.123456789'], t -> year(t) = 1111)",
                new ArrayType(createTimestampType(9)),
                ImmutableList.of(timestamp(9, "1111-05-10 12:34:56.123456789")));
    }
    @Test
    public void testEmpty()
    {
        // empty arrays stay empty regardless of the predicate (including a NULL predicate)
        assertFunction("filter(ARRAY [], x -> true)", new ArrayType(UNKNOWN), ImmutableList.of());
        assertFunction("filter(ARRAY [], x -> false)", new ArrayType(UNKNOWN), ImmutableList.of());
        assertFunction("filter(ARRAY [], x -> CAST (null AS BOOLEAN))", new ArrayType(UNKNOWN), ImmutableList.of());
        assertFunction("filter(CAST (ARRAY [] AS ARRAY(INTEGER)), x -> true)", new ArrayType(INTEGER), ImmutableList.of());
    }
    @Test
    public void testNull()
    {
        // NULL elements are passed to the lambda and can be kept or dropped by the predicate
        assertFunction("filter(ARRAY [NULL], x -> x IS NULL)", new ArrayType(UNKNOWN), singletonList(null));
        assertFunction("filter(ARRAY [NULL], x -> x IS NOT NULL)", new ArrayType(UNKNOWN), ImmutableList.of());
        assertFunction("filter(ARRAY [CAST (NULL AS INTEGER)], x -> x IS NULL)", new ArrayType(INTEGER), singletonList(null));
        assertFunction("filter(ARRAY [NULL, NULL, NULL], x -> x IS NULL)", new ArrayType(UNKNOWN), asList(null, null, null));
        assertFunction("filter(ARRAY [NULL, NULL, NULL], x -> x IS NOT NULL)", new ArrayType(UNKNOWN), ImmutableList.of());
        assertFunction("filter(ARRAY [25, 26, NULL], x -> x % 2 = 1 OR x IS NULL)", new ArrayType(INTEGER), asList(25, null));
        assertFunction("filter(ARRAY [25.6E0, 37.3E0, NULL], x -> x < 30.0E0 OR x IS NULL)", new ArrayType(DOUBLE), asList(25.6, null));
        assertFunction("filter(ARRAY [true, false, NULL], x -> not x OR x IS NULL)", new ArrayType(BOOLEAN), asList(false, null));
        assertFunction("filter(ARRAY ['abc', 'def', NULL], x -> substr(x, 1, 1) = 'a' OR x IS NULL)", new ArrayType(createVarcharType(3)), asList("abc", null));
        assertFunction(
                "filter(ARRAY [ARRAY ['abc', null, '123'], NULL], x -> x[2] IS NULL OR x IS NULL)",
                new ArrayType(new ArrayType(createVarcharType(3))),
                asList(asList("abc", null, "123"), null));
    }
    @Test
    public void testTypeCombinations()
    {
        // one representative case per element type: integer, double, boolean, varchar, nested array
        assertFunction("filter(ARRAY [25, 26, 27], x -> x % 2 = 1)", new ArrayType(INTEGER), ImmutableList.of(25, 27));
        assertFunction("filter(ARRAY [25.6E0, 37.3E0, 28.6E0], x -> x < 30.0E0)", new ArrayType(DOUBLE), ImmutableList.of(25.6, 28.6));
        assertFunction("filter(ARRAY [true, false, true], x -> not x)", new ArrayType(BOOLEAN), ImmutableList.of(false));
        assertFunction("filter(ARRAY ['abc', 'def', 'ayz'], x -> substr(x, 1, 1) = 'a')", new ArrayType(createVarcharType(3)), ImmutableList.of("abc", "ayz"));
        assertFunction(
                "filter(ARRAY [ARRAY ['abc', null, '123'], ARRAY ['def', 'x', '456']], x -> x[2] IS NULL)",
                new ArrayType(new ArrayType(createVarcharType(3))),
                ImmutableList.of(asList("abc", null, "123")));
    }
}
| apache-2.0 |
stiez/evelib | EveLib.EveCrest/Models/Resources/KillmailCollection.cs | 1169 | // ***********************************************************************
// Assembly : EveLib.EveCrest
// Author : Lars Kristian
// Created : 12-16-2014
//
// Last Modified By : Lars Kristian
// Last Modified On : 12-17-2014
// ***********************************************************************
// <copyright file="KillmailCollection.cs" company="">
// Copyright (c) . All rights reserved.
// </copyright>
// <summary></summary>
// ***********************************************************************
using System.Runtime.Serialization;
using eZet.EveLib.EveCrestModule.Models.Links;
namespace eZet.EveLib.EveCrestModule.Models.Resources {
    /// <summary>
    ///     Represents a CREST /killmails/ response
    /// </summary>
    [DataContract]
    public sealed class KillmailCollection : CollectionResource<KillmailCollection, LinkedEntity<Killmail>> {
        /// <summary>
        ///     Initializes a new instance of the <see cref="KillmailCollection" /> class.
        /// </summary>
        public KillmailCollection() {
            // versioned CREST media type requested/accepted for this resource
            ContentType = "application/vnd.ccp.eve.WarKillmails-v1+json";
        }
    }
} | apache-2.0 |
Maccimo/intellij-community | java/java-impl/src/com/intellij/codeInspection/deadCode/UnusedParametersInspection.java | 10280 | // Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.codeInspection.deadCode;
import com.intellij.analysis.AnalysisScope;
import com.intellij.codeInsight.FileModificationService;
import com.intellij.codeInsight.daemon.impl.quickfix.RemoveUnusedParameterFix;
import com.intellij.codeInspection.*;
import com.intellij.codeInspection.ex.EntryPointsManager;
import com.intellij.codeInspection.reference.*;
import com.intellij.codeInspection.unusedSymbol.UnusedSymbolLocalInspectionBase;
import com.intellij.java.JavaBundle;
import com.intellij.openapi.project.Project;
import com.intellij.psi.*;
import com.intellij.psi.search.PsiReferenceProcessor;
import com.intellij.psi.search.PsiReferenceProcessorAdapter;
import com.intellij.psi.search.PsiSearchHelper;
import com.intellij.psi.search.searches.OverridingMethodsSearch;
import com.intellij.psi.search.searches.ReferencesSearch;
import com.intellij.psi.util.PsiModificationTracker;
import com.intellij.psi.util.PsiTreeUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.uast.UDeclaration;
import org.jetbrains.uast.UElementKt;
import org.jetbrains.uast.UParameter;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
 * Global batch inspection that reports method parameters which are never read.
 * Works on the reference graph built by the batch analysis: a parameter is reported
 * only when neither the method nor any derived override/functional expression reads it.
 * External (out-of-scope) overrides are checked lazily in
 * {@link #queryExternalUsagesRequests} to avoid false positives.
 */
class UnusedParametersInspection extends GlobalJavaBatchInspectionTool {
  @Override
  public CommonProblemDescriptor @Nullable [] checkElement(@NotNull final RefEntity refEntity,
                                                           @NotNull final AnalysisScope scope,
                                                           @NotNull final InspectionManager manager,
                                                           @NotNull final GlobalInspectionContext globalContext,
                                                           @NotNull final ProblemDescriptionsProcessor processor) {
    if (!(refEntity instanceof RefMethod)) return null;
    RefMethod refMethod = (RefMethod)refEntity;
    // skip methods whose signature is not under the user's control or not analyzable:
    // JSP-synthesized methods, overrides of library methods, inherited virtual methods
    if (refMethod.isSyntheticJSP()) return null;
    if (refMethod.isExternalOverride()) return null;
    if (!(refMethod.isStatic() || refMethod.isConstructor()) && !refMethod.getSuperMethods().isEmpty()) return null;
    RefClass aClass = refMethod.getOwnerClass();
    // an abstract/interface method with no implementations gives no usage information
    if (aClass != null && ((refMethod.isAbstract() || aClass.isInterface()) && refMethod.getDerivedReferences().isEmpty())) {
      return null;
    }
    // main(String[] args) legitimately ignores its parameter
    if (refMethod.isAppMain()) return null;
    List<RefParameter> unusedParameters = getUnusedParameters(refMethod);
    if (unusedParameters.isEmpty()) return null;
    if (refMethod.isEntry()) return null;
    UDeclaration uMethod = refMethod.getUastElement();
    if (uMethod == null) return null;
    PsiElement element = uMethod.getJavaPsi();
    // entry points (frameworks, user-marked) must keep their declared signature
    if (element != null && EntryPointsManager.getInstance(manager.getProject()).isEntryPoint(element)) return null;
    List<ProblemDescriptor> result = new ArrayList<>();
    for (RefParameter refParameter : unusedParameters) {
      UParameter parameter = refParameter.getUastElement();
      PsiElement anchor = UElementKt.getSourcePsiElement(parameter.getUastAnchor());
      if (anchor != null) {
        result.add(manager.createProblemDescriptor(anchor,
                                                   JavaBundle.message(refMethod.isAbstract()
                                                                      ? "inspection.unused.parameter.composer"
                                                                      : "inspection.unused.parameter.composer1"),
                                                   new AcceptSuggested(globalContext.getRefManager(), processor, refParameter.getName()),
                                                   ProblemHighlightType.LIKE_UNUSED_SYMBOL, false));
      }
    }
    return result.toArray(CommonProblemDescriptor.EMPTY_ARRAY);
  }
  @Override
  protected boolean queryExternalUsagesRequests(@NotNull final RefManager manager, @NotNull final GlobalJavaInspectionContext globalContext,
                                                @NotNull final ProblemDescriptionsProcessor processor) {
    // second pass: for overridable methods, search overriders OUTSIDE the analysis scope;
    // a parameter read by any such override must not be reported
    Project project = manager.getProject();
    for (RefElement entryPoint : globalContext.getEntryPointsManager(manager).getEntryPoints(manager)) {
      processor.ignoreElement(entryPoint);
    }
    PsiSearchHelper helper = PsiSearchHelper.getInstance(project);
    AnalysisScope scope = manager.getScope();
    manager.iterate(new RefJavaVisitor() {
      @Override
      public void visitElement(@NotNull RefEntity refEntity) {
        if (!(refEntity instanceof RefMethod)) return;
        RefMethod refMethod = (RefMethod)refEntity;
        // static/constructor/private methods cannot be overridden outside the scope
        if (refMethod.isStatic() || refMethod.isConstructor() ||
            PsiModifier.PRIVATE.equals(refMethod.getAccessModifier())) {
          return;
        }
        UDeclaration uastElement = refMethod.getUastElement();
        if (uastElement == null) return;
        PsiMethod element = (PsiMethod)uastElement.getJavaPsi();
        if (element == null) {
          return;
        }
        List<RefParameter> unusedParameters = getUnusedParameters(refMethod);
        if (unusedParameters.isEmpty()) return;
        PsiMethod[] derived = OverridingMethodsSearch.search(element).toArray(PsiMethod.EMPTY_ARRAY);
        for (RefParameter refParameter : unusedParameters) {
          // abstract with no overrides anywhere: the parameter is definitely unused
          if (refMethod.isAbstract() && derived.length == 0) {
            refParameter.parameterReferenced(false);
            processor.ignoreElement(refParameter);
            continue;
          }
          int idx = refParameter.getIndex();
          boolean[] found = {false};
          for (int i = 0; i < derived.length && !found[0]; i++) {
            // overriders inside the scope were already accounted for by the graph
            if (scope != null && scope.contains(derived[i])) continue;
            PsiParameter[] parameters = derived[i].getParameterList().getParameters();
            if (idx >= parameters.length) continue;
            PsiParameter psiParameter = parameters[idx];
            // any reference to the corresponding parameter in an external override
            // marks this parameter as used; stop searching at the first hit
            ReferencesSearch.search(psiParameter, helper.getUseScope(psiParameter), false)
              .forEach(new PsiReferenceProcessorAdapter(
                new PsiReferenceProcessor() {
                  @Override
                  public boolean execute(PsiReference element) {
                    refParameter.parameterReferenced(false);
                    processor.ignoreElement(refParameter);
                    found[0] = true;
                    return false;
                  }
                }));
          }
        }
      }
    });
    return false;
  }
  @Override
  public String getHint(@NotNull final QuickFix fix) {
    // the hint (parameter name) allows re-creating the quick fix from serialized results
    if (fix instanceof AcceptSuggested) {
      return ((AcceptSuggested)fix).getHint();
    }
    return null;
  }
  @Override
  @Nullable
  public QuickFix getQuickFix(final String hint) {
    return new AcceptSuggested(null, null, hint);
  }
  // Collects the parameters of refMethod (including, for virtual methods, its whole
  // override hierarchy) that are never read and not suppressed via annotations/comments.
  @NotNull
  private static ArrayList<RefParameter> getUnusedParameters(@NotNull RefMethod refMethod) {
    boolean checkDeep = !refMethod.isStatic() && !refMethod.isConstructor();
    ArrayList<RefParameter> res = new ArrayList<>();
    RefParameter[] methodParameters = refMethod.getParameters();
    RefParameter[] result = methodParameters.clone();
    clearUsedParameters(refMethod, result, checkDeep);
    for (RefParameter parameter : result) {
      if (parameter != null &&
          !((RefElementImpl)parameter).isSuppressed(UnusedSymbolLocalInspectionBase.UNUSED_PARAMETERS_SHORT_NAME,
                                                    UnusedSymbolLocalInspectionBase.UNUSED_ID)) {
        res.add(parameter);
      }
    }
    return res;
  }
  // Nulls out, in params, every slot whose parameter is read by refOverridable or
  // (when checkDeep) by any of its derived references; surviving entries are unused.
  private static void clearUsedParameters(@NotNull RefOverridable refOverridable, RefParameter[] params, boolean checkDeep) {
    RefParameter[] methodParams;
    if (refOverridable instanceof RefMethod) {
      methodParams = ((RefMethod)refOverridable).getParameters();
    }
    else if (refOverridable instanceof RefFunctionalExpression) {
      methodParams = ((RefFunctionalExpression)refOverridable).getParameters().toArray(RefParameter[]::new);
    }
    else {
      return;
    }
    // min() guards against overrides/lambdas declaring fewer parameters
    for (int i = 0; i < Math.min(methodParams.length, params.length); i++) {
      if (methodParams[i].isUsedForReading()) params[i] = null;
    }
    if (checkDeep) {
      for (RefOverridable reference : refOverridable.getDerivedReferences()) {
        clearUsedParameters(reference, params, true);
      }
    }
  }
  /**
   * Quick fix that removes the unused parameter (and its arguments at call sites).
   */
  private static class AcceptSuggested implements LocalQuickFix {
    private final RefManager myManager;
    private final String myHint;  // name of the parameter to remove
    private final ProblemDescriptionsProcessor myProcessor;
    AcceptSuggested(@Nullable RefManager manager, @Nullable ProblemDescriptionsProcessor processor, @NotNull String hint) {
      myManager = manager;
      myProcessor = processor;
      myHint = hint;
    }
    @NotNull
    String getHint() {
      return myHint;
    }
    @Override
    @NotNull
    public String getFamilyName() {
      return JavaBundle.message("inspection.unused.parameter.delete.quickfix");
    }
    @Override
    public void applyFix(@NotNull Project project, @NotNull ProblemDescriptor descriptor) {
      final PsiElement psiElement = descriptor.getPsiElement();
      if (!FileModificationService.getInstance().preparePsiElementForWrite(psiElement)) return;
      final PsiParameter psiParameter = PsiTreeUtil.getParentOfType(psiElement, PsiParameter.class);
      final PsiMethod psiMethod = PsiTreeUtil.getParentOfType(psiElement, PsiMethod.class);
      if (psiMethod != null && psiParameter != null) {
        final RefElement refMethod = myManager != null ? myManager.getReference(psiMethod) : null;
        // use the modification count to detect whether the removal actually changed code
        final PsiModificationTracker tracker = psiMethod.getManager().getModificationTracker();
        final long startModificationCount = tracker.getModificationCount();
        RemoveUnusedParameterFix.removeReferences(psiParameter);
        if (refMethod != null && startModificationCount != tracker.getModificationCount()) {
          Objects.requireNonNull(myProcessor).ignoreElement(refMethod);
        }
      }
    }
    @Override
    public boolean startInWriteAction() {
      // the fix manages its own write action via the refactoring it delegates to
      return false;
    }
  }
}
| apache-2.0 |
hmcl/storm-apache | storm-client/src/jvm/org/apache/storm/streams/tuple/Tuple7.java | 3970 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
* 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package org.apache.storm.streams.tuple;
/**
* A tuple of seven elements along the lines of Scala's Tuple.
*
* @param <T1> the type of the first element
* @param <T2> the type of the second element
* @param <T3> the type of the third element
* @param <T4> the type of the fourth element
* @param <T5> the type of the fifth element
* @param <T6> the type of the sixth element
* @param <T7> the type of the seventh element
*/
public class Tuple7<T1, T2, T3, T4, T5, T6, T7> {
public final T1 value1;
public final T2 value2;
public final T3 value3;
public final T4 value4;
public final T5 value5;
public final T6 value6;
public final T7 value7;
/**
* Constructs a new tuple.
*
* @param value1 the first element
* @param value2 the second element
* @param value3 the third element
* @param value4 the fourth element
* @param value5 the fifth element
* @param value6 the sixth element
* @param value7 the seventh element
*/
public Tuple7(T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7) {
this.value1 = value1;
this.value2 = value2;
this.value3 = value3;
this.value4 = value4;
this.value5 = value5;
this.value6 = value6;
this.value7 = value7;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Tuple7<?, ?, ?, ?, ?, ?, ?> tuple7 = (Tuple7<?, ?, ?, ?, ?, ?, ?>) o;
if (value1 != null ? !value1.equals(tuple7.value1) : tuple7.value1 != null) {
return false;
}
if (value2 != null ? !value2.equals(tuple7.value2) : tuple7.value2 != null) {
return false;
}
if (value3 != null ? !value3.equals(tuple7.value3) : tuple7.value3 != null) {
return false;
}
if (value4 != null ? !value4.equals(tuple7.value4) : tuple7.value4 != null) {
return false;
}
if (value5 != null ? !value5.equals(tuple7.value5) : tuple7.value5 != null) {
return false;
}
if (value6 != null ? !value6.equals(tuple7.value6) : tuple7.value6 != null) {
return false;
}
return value7 != null ? value7.equals(tuple7.value7) : tuple7.value7 == null;
}
@Override
public int hashCode() {
int result = value1 != null ? value1.hashCode() : 0;
result = 31 * result + (value2 != null ? value2.hashCode() : 0);
result = 31 * result + (value3 != null ? value3.hashCode() : 0);
result = 31 * result + (value4 != null ? value4.hashCode() : 0);
result = 31 * result + (value5 != null ? value5.hashCode() : 0);
result = 31 * result + (value6 != null ? value6.hashCode() : 0);
result = 31 * result + (value7 != null ? value7.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "(" + value1 + "," + value2 + "," + value3 + "," + value4 + "," + value5 + "," + value6 + "," + value7 + ")";
}
}
| apache-2.0 |
abhishek24509/aribaweb | src/expr/ariba/util/expr/ASTKeyValue.java | 2655 | //--------------------------------------------------------------------------
// Copyright (c) 1998-2004, Drew Davidson and Luke Blanshard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// Neither the name of the Drew Davidson nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//--------------------------------------------------------------------------
package ariba.util.expr;
/**
* @author Luke Blanshard (blanshlu@netscape.net)
* @author Drew Davidson (drew@ognl.org)
*/
class ASTKeyValue extends SimpleNode
{
    public ASTKeyValue(int id) {
        super(id);
    }

    public ASTKeyValue(ExprParser p, int id) {
        super(p, id);
    }

    /** The key expression is always the first child node. */
    protected Node getKey()
    {
        return children[0];
    }

    /** The value expression is the optional second child; null when absent. */
    protected Node getValue()
    {
        if (jjtGetNumChildren() > 1) {
            return children[1];
        }
        return null;
    }

    /**
     Returns null because this is a parser construct and does not evaluate
     */
    protected Object getValueBody( ExprContext context, Object source ) throws ExprException
    {
        return null;
    }

    public String toString()
    {
        StringBuilder buf = new StringBuilder();
        buf.append(getKey()).append(" -> ").append(getValue());
        return buf.toString();
    }

    public void accept (ASTNodeVisitor visitor)
    {
        // Visit children first (post-order), then this node itself.
        acceptChildren(visitor);
        visitor.visit(this);
    }
}
| apache-2.0 |
flofreud/aws-sdk-java | aws-java-sdk-swf-libraries/src/main/java/com/amazonaws/services/simpleworkflow/flow/spring/WorkflowScopeBeanNames.java | 1589 | /*
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.simpleworkflow.flow.spring;
/**
 * Names of the beans that live in the SWF workflow scope. {@link #isWorkflowScopeBeanName}
 * lets callers test whether a given Spring bean name is one of them.
 */
public class WorkflowScopeBeanNames {

    public static final String GENERIC_ACTIVITY_CLIENT = "genericActivityClient";

    public static final String GENERIC_WORKFLOW_CLIENT = "genericWorkflowClient";

    public static final String WORKFLOW_CLOCK = "workflowClock";

    public static final String WORKFLOW_CONTEXT = "workflowContext";

    public static final String DECISION_CONTEXT = "decisionContext";

    /**
     * Returns true if {@code name} is one of the workflow-scope bean names.
     * Null-safe: a null name matches nothing and yields false (constant.equals(null)
     * is false), exactly as the previous if-chain behaved.
     *
     * @param name bean name to test, may be null
     */
    public static boolean isWorkflowScopeBeanName(String name) {
        return GENERIC_ACTIVITY_CLIENT.equals(name)
            || GENERIC_WORKFLOW_CLIENT.equals(name)
            || WORKFLOW_CLOCK.equals(name)
            || WORKFLOW_CONTEXT.equals(name)
            || DECISION_CONTEXT.equals(name);
    }
}
| apache-2.0 |
drewlyall/dasein-cloud-core | src/main/java/org/dasein/cloud/identity/CloudUser.java | 3063 | /**
* Copyright (C) 2009-2015 Dell, Inc.
* See annotations for authorship information
*
* ====================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ====================================================================
*/
package org.dasein.cloud.identity;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.Serializable;
/**
* A user registered in a cloud identity management system.
* @author George Reese (george.reese@imaginary.com)
* @since 2012.02
* @version 2012.02
*/
public class CloudUser implements Serializable {
    private String path;
    private String providerUserId;
    private String providerOwnerId;
    private String userName;

    public CloudUser() { }

    /**
     * Two users are equal when they have the same exact runtime class, the same
     * provider owner ID, and the same provider user ID. Null-safe on both IDs
     * (previously threw NPE when providerUserId was unset on either side).
     */
    @Override
    public boolean equals(@Nullable Object ob) {
        if( ob == null ) {
            return false;
        }
        if( ob == this ) {
            return true;
        }
        // Compare by class name (not ==) — keeps the original cross-classloader semantics.
        if( !getClass().getName().equals(ob.getClass().getName()) ) {
            return false;
        }
        CloudUser user = (CloudUser)ob;
        boolean sameOwner = (providerOwnerId == null)
                ? user.providerOwnerId == null
                : providerOwnerId.equals(user.providerOwnerId);
        if( !sameOwner ) {
            return false;
        }
        return (providerUserId == null)
                ? user.providerUserId == null
                : providerUserId.equals(user.providerUserId);
    }

    /**
     * Consistent with {@link #equals(Object)}: derived from providerOwnerId and
     * providerUserId only. Added because equals was overridden without hashCode,
     * which broke hash-based collections.
     */
    @Override
    public int hashCode() {
        int result = providerOwnerId != null ? providerOwnerId.hashCode() : 0;
        result = 31 * result + (providerUserId != null ? providerUserId.hashCode() : 0);
        return result;
    }

    public @Nullable String getPath() {
        return path;
    }

    public void setPath(@Nullable String path) {
        this.path = path;
    }

    public @Nullable String getProviderUserId() {
        return providerUserId;
    }

    public void setProviderUserId(@Nonnull String providerUserId) {
        this.providerUserId = providerUserId;
    }

    public @Nullable String getProviderOwnerId() {
        return providerOwnerId;
    }

    public void setProviderOwnerId(@Nullable String providerOwnerId) {
        this.providerOwnerId = providerOwnerId;
    }

    public @Nullable String getUserName() {
        return userName;
    }

    public void setUserName(@Nonnull String userName) {
        this.userName = userName;
    }

    @Override
    public @Nonnull String toString() {
        // Render as "[path/]userName [#providerUserId]", avoiding a double slash
        // when path already ends with one.
        if( path == null ) {
            return (userName + " [#" + providerUserId + "]");
        }
        else if( path.endsWith("/") ) {
            return (path + userName + " [#" + providerUserId + "]");
        }
        else {
            return (path + "/" + userName + " [#" + providerUserId + "]");
        }
    }
}
| apache-2.0 |
denzelsN/pinpoint | collector/src/main/java/com/navercorp/pinpoint/collector/util/AddressParser.java | 1216 | /*
* Copyright 2018 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.collector.util;
import com.navercorp.pinpoint.common.util.NetUtils;
import java.util.List;
/**
* @author Woonduk Kang(emeroad)
*/
public class AddressParser {
private static final NetUtils.HostAndPortFactory<Address> addressFactory = new NetUtils.HostAndPortFactory<Address>() {
@Override
public Address newInstance(String host, int port) {
return new DefaultAddress(host, port);
}
};
public static List<Address> parseAddressLIst(List<String> addressList) {
return NetUtils.toHostAndPortLIst(addressList, addressFactory);
}
}
| apache-2.0 |
ron8hu/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala | 60785 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.IOException
import java.lang.reflect.InvocationTargetException
import java.util
import java.util.Locale
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.ql.metadata.HiveException
import org.apache.thrift.TException
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.ColumnStat
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{PartitioningUtils, SourceOptions}
import org.apache.spark.sql.hive.client.HiveClient
import org.apache.spark.sql.internal.HiveSerDe
import org.apache.spark.sql.internal.StaticSQLConf._
import org.apache.spark.sql.types.{DataType, StructType}
/**
* A persistent implementation of the system catalog using Hive.
* All public methods must be synchronized for thread-safety.
*/
private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configuration)
extends ExternalCatalog with Logging {
import CatalogTypes.TablePartitionSpec
import HiveExternalCatalog._
import CatalogTableType._
  /**
   * A Hive client used to interact with the metastore.
   */
  lazy val client: HiveClient = {
    HiveUtils.newClientForMetadata(conf, hadoopConf)
  }

  // Exceptions thrown by the hive client that we would like to wrap
  private val clientExceptions = Set(
    classOf[HiveException].getCanonicalName,
    classOf[TException].getCanonicalName,
    classOf[InvocationTargetException].getCanonicalName)

  /**
   * Whether this is an exception thrown by the hive client that should be wrapped.
   *
   * Due to classloader isolation issues, pattern matching won't work here so we need
   * to compare the canonical names of the exceptions, which we assume to be stable.
   */
  private def isClientException(e: Throwable): Boolean = {
    // Walk up the class hierarchy so subclasses of the wrapped exception types
    // are recognized too; name comparison defeats classloader isolation.
    var temp: Class[_] = e.getClass
    var found = false
    while (temp != null && !found) {
      found = clientExceptions.contains(temp.getCanonicalName)
      temp = temp.getSuperclass
    }
    found
  }
  /**
   * Run some code involving `client` in a [[synchronized]] block and wrap certain
   * exceptions thrown in the process in [[AnalysisException]].
   */
  private def withClient[T](body: => T): T = synchronized {
    try {
      body
    } catch {
      case NonFatal(exception) if isClientException(exception) =>
        val e = exception match {
          // Since we are using shim, the exceptions thrown by the underlying method of
          // Method.invoke() are wrapped by InvocationTargetException
          case i: InvocationTargetException => i.getCause
          case o => o
        }
        // Re-raise as AnalysisException, keeping the original exception as the cause.
        throw new AnalysisException(
          e.getClass.getCanonicalName + ": " + e.getMessage, cause = Some(e))
    }
  }

  /**
   * Get the raw table metadata from hive metastore directly. The raw table metadata may contains
   * special data source properties and should not be exposed outside of `HiveExternalCatalog`. We
   * should interpret these special data source properties and restore the original table metadata
   * before returning it.
   */
  private[hive] def getRawTable(db: String, table: String): CatalogTable = withClient {
    client.getTable(db, table)
  }
  /**
   * If the given table properties contains datasource properties, throw an exception. We will do
   * this check when create or alter a table, i.e. when we try to write table metadata to Hive
   * metastore.
   */
  private def verifyTableProperties(table: CatalogTable): Unit = {
    // Reject any user-supplied property that collides with Spark's reserved prefix.
    val invalidKeys = table.properties.keys.filter(_.startsWith(SPARK_SQL_PREFIX))
    if (invalidKeys.nonEmpty) {
      // NOTE(review): "Cannot persistent" is a long-standing typo in this error
      // message; kept as-is since external tests may match the exact text.
      throw new AnalysisException(s"Cannot persistent ${table.qualifiedName} into hive metastore " +
        s"as table property keys may not start with '$SPARK_SQL_PREFIX': " +
        invalidKeys.mkString("[", ", ", "]"))
    }
    // External users are not allowed to set/switch the table type. In Hive metastore, the table
    // type can be switched by changing the value of a case-sensitive table property `EXTERNAL`.
    if (table.properties.contains("EXTERNAL")) {
      throw new AnalysisException("Cannot set or change the preserved property key: 'EXTERNAL'")
    }
  }

  /**
   * Checks the validity of data column names. Hive metastore disallows the table to use comma in
   * data column names. Partition columns do not have such a restriction. Views do not have such
   * a restriction.
   */
  private def verifyDataSchema(
      tableName: TableIdentifier, tableType: CatalogTableType, dataSchema: StructType): Unit = {
    if (tableType != VIEW) {
      dataSchema.map(_.name).foreach { colName =>
        if (colName.contains(",")) {
          throw new AnalysisException("Cannot create a table having a column whose name contains " +
            s"commas in Hive metastore. Table: $tableName; Column: $colName")
        }
      }
    }
  }
  // --------------------------------------------------------------------------
  // Databases
  // --------------------------------------------------------------------------

  // Delegates to the Hive client; Hive itself performs the already-exists check.
  override protected def doCreateDatabase(
      dbDefinition: CatalogDatabase,
      ignoreIfExists: Boolean): Unit = withClient {
    client.createDatabase(dbDefinition, ignoreIfExists)
  }

  // Delegates to the Hive client; `cascade` drops contained tables as well.
  override protected def doDropDatabase(
      db: String,
      ignoreIfNotExists: Boolean,
      cascade: Boolean): Unit = withClient {
    client.dropDatabase(db, ignoreIfNotExists, cascade)
  }

  /**
   * Alter a database whose name matches the one specified in `dbDefinition`,
   * assuming the database exists.
   *
   * Note: As of now, this only supports altering database properties!
   */
  override def doAlterDatabase(dbDefinition: CatalogDatabase): Unit = withClient {
    val existingDb = getDatabase(dbDefinition.name)
    // Warn (but still call alterDatabase) when nothing observable would change.
    if (existingDb.properties == dbDefinition.properties) {
      logWarning(s"Request to alter database ${dbDefinition.name} is a no-op because " +
        s"the provided database properties are the same as the old ones. Hive does not " +
        s"currently support altering other database fields.")
    }
    client.alterDatabase(dbDefinition)
  }

  // The following are straight delegations; `withClient` wraps client failures
  // in AnalysisException and serializes access.
  override def getDatabase(db: String): CatalogDatabase = withClient {
    client.getDatabase(db)
  }

  override def databaseExists(db: String): Boolean = withClient {
    client.databaseExists(db)
  }

  override def listDatabases(): Seq[String] = withClient {
    client.listDatabases("*")
  }

  override def listDatabases(pattern: String): Seq[String] = withClient {
    client.listDatabases(pattern)
  }

  override def setCurrentDatabase(db: String): Unit = withClient {
    client.setCurrentDatabase(db)
  }
  // --------------------------------------------------------------------------
  // Tables
  // --------------------------------------------------------------------------

  /**
   * Creates a table in the metastore. Validates reserved properties and column
   * names, fills in a default location for managed tables with no explicit one,
   * then routes data source tables and Hive serde tables down separate paths.
   */
  override protected def doCreateTable(
      tableDefinition: CatalogTable,
      ignoreIfExists: Boolean): Unit = withClient {
    assert(tableDefinition.identifier.database.isDefined)
    val db = tableDefinition.identifier.database.get
    val table = tableDefinition.identifier.table
    requireDbExists(db)
    verifyTableProperties(tableDefinition)
    verifyDataSchema(
      tableDefinition.identifier, tableDefinition.tableType, tableDefinition.dataSchema)

    if (tableExists(db, table) && !ignoreIfExists) {
      throw new TableAlreadyExistsException(db = db, table = table)
    }

    // Ideally we should not create a managed table with location, but Hive serde table can
    // specify location for managed table. And in [[CreateDataSourceTableAsSelectCommand]] we have
    // to create the table directory and write out data before we create this table, to avoid
    // exposing a partial written table.
    val needDefaultTableLocation = tableDefinition.tableType == MANAGED &&
      tableDefinition.storage.locationUri.isEmpty

    val tableLocation = if (needDefaultTableLocation) {
      Some(CatalogUtils.stringToURI(defaultTablePath(tableDefinition.identifier)))
    } else {
      tableDefinition.storage.locationUri
    }

    if (DDLUtils.isDatasourceTable(tableDefinition)) {
      createDataSourceTable(
        tableDefinition.withNewStorage(locationUri = tableLocation),
        ignoreIfExists)
    } else {
      val tableWithDataSourceProps = tableDefinition.copy(
        // We can't leave `locationUri` empty and count on Hive metastore to set a default table
        // location, because Hive metastore uses hive.metastore.warehouse.dir to generate default
        // table location for tables in default database, while we expect to use the location of
        // default database.
        storage = tableDefinition.storage.copy(locationUri = tableLocation),
        // Here we follow data source tables and put table metadata like table schema, partition
        // columns etc. in table properties, so that we can work around the Hive metastore issue
        // about not case preserving and make Hive serde table and view support mixed-case column
        // names.
        properties = tableDefinition.properties ++ tableMetaToTableProps(tableDefinition))
      client.createTable(tableWithDataSourceProps, ignoreIfExists)
    }
  }
  /**
   * Persists a data source (non-Hive-serde) table. Spark-specific metadata (schema,
   * partition columns, bucket spec, provider) is serialized into table properties;
   * the table is stored in a Hive compatible layout when possible, otherwise in a
   * Spark-SQL-specific layout with empty schema/location.
   *
   * @param table          table metadata; `table.provider` must be defined
   * @param ignoreIfExists when true, do not fail if the table already exists
   */
  private def createDataSourceTable(table: CatalogTable, ignoreIfExists: Boolean): Unit = {
    // data source table always have a provider, it's guaranteed by `DDLUtils.isDatasourceTable`.
    val provider = table.provider.get
    val options = new SourceOptions(table.storage.properties)

    // To work around some hive metastore issues, e.g. not case-preserving, bad decimal type
    // support, no column nullability, etc., we should do some extra works before saving table
    // metadata into Hive metastore:
    //  1. Put table metadata like table schema, partition columns, etc. in table properties.
    //  2. Check if this table is hive compatible.
    //    2.1  If it's not hive compatible, set location URI, schema, partition columns and bucket
    //         spec to empty and save table metadata to Hive.
    //    2.2  If it's hive compatible, set serde information in table metadata and try to save
    //         it to Hive. If it fails, treat it as not hive compatible and go back to 2.1
    val tableProperties = tableMetaToTableProps(table)

    // put table provider and partition provider in table properties.
    tableProperties.put(DATASOURCE_PROVIDER, provider)
    if (table.tracksPartitionsInCatalog) {
      tableProperties.put(TABLE_PARTITION_PROVIDER, TABLE_PARTITION_PROVIDER_CATALOG)
    }

    // Ideally we should also put `locationUri` in table properties like provider, schema, etc.
    // However, in older version of Spark we already store table location in storage properties
    // with key "path". Here we keep this behaviour for backward compatibility.
    val storagePropsWithLocation = table.storage.properties ++
      table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))

    // converts the table metadata to Spark SQL specific format, i.e. set data schema, names and
    // bucket specification to empty. Note that partition columns are retained, so that we can
    // call partition-related Hive API later.
    def newSparkSQLSpecificMetastoreTable(): CatalogTable = {
      table.copy(
        // Hive only allows directory paths as location URIs while Spark SQL data source tables
        // also allow file paths. For non-hive-compatible format, we should not set location URI
        // to avoid hive metastore to throw exception.
        storage = table.storage.copy(
          locationUri = None,
          properties = storagePropsWithLocation),
        schema = StructType(EMPTY_DATA_SCHEMA ++ table.partitionSchema),
        bucketSpec = None,
        properties = table.properties ++ tableProperties)
    }

    // converts the table metadata to Hive compatible format, i.e. set the serde information.
    def newHiveCompatibleMetastoreTable(serde: HiveSerDe): CatalogTable = {
      val location = if (table.tableType == EXTERNAL) {
        // When we hit this branch, we are saving an external data source table with hive
        // compatible format, which means the data source is file-based and must have a `path`.
        require(table.storage.locationUri.isDefined,
          "External file-based data source table must have a `path` entry in storage properties.")
        Some(table.location)
      } else {
        None
      }

      table.copy(
        storage = table.storage.copy(
          locationUri = location,
          inputFormat = serde.inputFormat,
          outputFormat = serde.outputFormat,
          serde = serde.serde,
          properties = storagePropsWithLocation
        ),
        properties = table.properties ++ tableProperties)
    }

    val qualifiedTableName = table.identifier.quotedString
    val maybeSerde = HiveSerDe.sourceToSerDe(provider)

    // Decide between the Hive-compatible and the Spark-specific representation.
    val (hiveCompatibleTable, logMessage) = maybeSerde match {
      case _ if options.skipHiveMetadata =>
        val message =
          s"Persisting data source table $qualifiedTableName into Hive metastore in" +
            "Spark SQL specific format, which is NOT compatible with Hive."
        (None, message)

      // our bucketing is un-compatible with hive(different hash function)
      case _ if table.bucketSpec.nonEmpty =>
        val message =
          s"Persisting bucketed data source table $qualifiedTableName into " +
            "Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. "
        (None, message)

      case Some(serde) =>
        val message =
          s"Persisting file based data source table $qualifiedTableName into " +
            s"Hive metastore in Hive compatible format."
        (Some(newHiveCompatibleMetastoreTable(serde)), message)

      case _ =>
        val message =
          s"Couldn't find corresponding Hive SerDe for data source provider $provider. " +
            s"Persisting data source table $qualifiedTableName into Hive metastore in " +
            s"Spark SQL specific format, which is NOT compatible with Hive."
        (None, message)
    }

    (hiveCompatibleTable, logMessage) match {
      case (Some(table), message) =>
        // We first try to save the metadata of the table in a Hive compatible way.
        // If Hive throws an error, we fall back to save its metadata in the Spark SQL
        // specific way.
        try {
          logInfo(message)
          saveTableIntoHive(table, ignoreIfExists)
        } catch {
          case NonFatal(e) =>
            val warningMessage =
              s"Could not persist ${table.identifier.quotedString} in a Hive " +
                "compatible way. Persisting it into Hive metastore in Spark SQL specific format."
            logWarning(warningMessage, e)
            saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists)
        }

      case (None, message) =>
        logWarning(message)
        saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists)
    }
  }
  /**
   * Data source tables may be non Hive compatible and we need to store table metadata in table
   * properties to workaround some Hive metastore limitations.
   * This method puts table schema, partition column names, bucket specification into a map, which
   * can be used as table properties later.
   */
  private def tableMetaToTableProps(table: CatalogTable): mutable.Map[String, String] = {
    tableMetaToTableProps(table, table.schema)
  }

  /**
   * Serializes `schema` plus the table's partition columns and bucket spec into
   * string-valued table properties, keyed by the DATASOURCE_* constants.
   *
   * @param table  source of partition columns, bucket spec and create version
   * @param schema the schema to serialize (may differ from `table.schema`)
   */
  private def tableMetaToTableProps(
      table: CatalogTable,
      schema: StructType): mutable.Map[String, String] = {
    val partitionColumns = table.partitionColumnNames
    val bucketSpec = table.bucketSpec

    val properties = new mutable.HashMap[String, String]
    properties.put(CREATED_SPARK_VERSION, table.createVersion)

    // Serialized JSON schema string may be too long to be stored into a single metastore table
    // property. In this case, we split the JSON string and store each part as a separate table
    // property.
    val threshold = conf.get(SCHEMA_STRING_LENGTH_THRESHOLD)
    val schemaJsonString = schema.json
    // Split the JSON string.
    val parts = schemaJsonString.grouped(threshold).toSeq
    properties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString)
    parts.zipWithIndex.foreach { case (part, index) =>
      properties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part)
    }

    if (partitionColumns.nonEmpty) {
      properties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString)
      partitionColumns.zipWithIndex.foreach { case (partCol, index) =>
        properties.put(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index", partCol)
      }
    }

    if (bucketSpec.isDefined) {
      val BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) = bucketSpec.get

      properties.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString)
      properties.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString)
      bucketColumnNames.zipWithIndex.foreach { case (bucketCol, index) =>
        properties.put(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index", bucketCol)
      }

      if (sortColumnNames.nonEmpty) {
        properties.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString)
        sortColumnNames.zipWithIndex.foreach { case (sortCol, index) =>
          properties.put(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index", sortCol)
        }
      }
    }

    properties
  }
  // Default location for a table: <database location>/<table name>.
  private def defaultTablePath(tableIdent: TableIdentifier): String = {
    val dbLocation = getDatabase(tableIdent.database.get).locationUri
    new Path(new Path(dbLocation), tableIdent.table).toString
  }

  /**
   * Writes an already-converted data source table to the metastore, applying the
   * SPARK-15269 placeholder-location workaround for external tables without a
   * location URI.
   */
  private def saveTableIntoHive(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = {
    assert(DDLUtils.isDatasourceTable(tableDefinition),
      "saveTableIntoHive only takes data source table.")
    // If this is an external data source table...
    if (tableDefinition.tableType == EXTERNAL &&
        // ... that is not persisted as Hive compatible format (external tables in Hive compatible
        // format always set `locationUri` to the actual data location and should NOT be hacked as
        // following.)
        tableDefinition.storage.locationUri.isEmpty) {
      // !! HACK ALERT !!
      //
      // Due to a restriction of Hive metastore, here we have to set `locationUri` to a temporary
      // directory that doesn't exist yet but can definitely be successfully created, and then
      // delete it right after creating the external data source table. This location will be
      // persisted to Hive metastore as standard Hive table location URI, but Spark SQL doesn't
      // really use it. Also, since we only do this workaround for external tables, deleting the
      // directory after the fact doesn't do any harm.
      //
      // Please refer to https://issues.apache.org/jira/browse/SPARK-15269 for more details.
      val tempPath = {
        val dbLocation = new Path(getDatabase(tableDefinition.database).locationUri)
        new Path(dbLocation, tableDefinition.identifier.table + "-__PLACEHOLDER__")
      }

      try {
        client.createTable(
          tableDefinition.withNewStorage(locationUri = Some(tempPath.toUri)),
          ignoreIfExists)
      } finally {
        // Always clean up the placeholder directory, even if createTable failed.
        FileSystem.get(tempPath.toUri, hadoopConf).delete(tempPath, true)
      }
    } else {
      client.createTable(tableDefinition, ignoreIfExists)
    }
  }
  // Drops the table via the Hive client; `purge` skips the trash if supported.
  override protected def doDropTable(
      db: String,
      table: String,
      ignoreIfNotExists: Boolean,
      purge: Boolean): Unit = withClient {
    requireDbExists(db)
    client.dropTable(db, table, ignoreIfNotExists, purge)
  }

  /**
   * Renames `db`.`oldName` to `db`.`newName`, keeping the "path" storage property
   * in sync with the new default location for managed tables.
   */
  override protected def doRenameTable(
      db: String,
      oldName: String,
      newName: String): Unit = withClient {
    val rawTable = getRawTable(db, oldName)

    // Note that Hive serde tables don't use path option in storage properties to store the value
    // of table location, but use `locationUri` field to store it directly. And `locationUri` field
    // will be updated automatically in Hive metastore by the `alterTable` call at the end of this
    // method. Here we only update the path option if the path option already exists in storage
    // properties, to avoid adding a unnecessary path option for Hive serde tables.
    val hasPathOption = CaseInsensitiveMap(rawTable.storage.properties).contains("path")
    val storageWithNewPath = if (rawTable.tableType == MANAGED && hasPathOption) {
      // If it's a managed table with path option and we are renaming it, then the path option
      // becomes inaccurate and we need to update it according to the new table name.
      val newTablePath = defaultTablePath(TableIdentifier(newName, Some(db)))
      updateLocationInStorageProps(rawTable, Some(newTablePath))
    } else {
      rawTable.storage
    }

    val newTable = rawTable.copy(
      identifier = TableIdentifier(newName, Some(db)),
      storage = storageWithNewPath)
    client.alterTable(db, oldName, newTable)
  }

  // Reads the table location from the case-insensitive "path" storage property, if present.
  private def getLocationFromStorageProps(table: CatalogTable): Option[String] = {
    CaseInsensitiveMap(table.storage.properties).get("path")
  }

  // Replaces (or removes, when newPath is None) the "path" entry in storage properties.
  private def updateLocationInStorageProps(
      table: CatalogTable,
      newPath: Option[String]): CatalogStorageFormat = {
    // We can't use `filterKeys` here, as the map returned by `filterKeys` is not serializable,
    // while `CatalogTable` should be serializable.
    val propsWithoutPath = table.storage.properties.filter {
      case (k, v) => k.toLowerCase(Locale.ROOT) != "path"
    }
    table.storage.copy(properties = propsWithoutPath ++ newPath.map("path" -> _))
  }
/**
 * Alter a table whose name that matches the one specified in `tableDefinition`,
 * assuming the table exists. This method does not change the properties for data source and
 * statistics.
 *
 * Note: As of now, this doesn't support altering table schema, partition column names and bucket
 * specification. We will ignore them even if users do specify different values for these fields.
 */
override def doAlterTable(tableDefinition: CatalogTable): Unit = withClient {
  assert(tableDefinition.identifier.database.isDefined)
  val db = tableDefinition.identifier.database.get
  requireTableExists(db, tableDefinition.identifier.table)
  verifyTableProperties(tableDefinition)
  if (tableDefinition.tableType == VIEW) {
    // Views carry no data-source storage bookkeeping, so they can be altered as-is.
    client.alterTable(tableDefinition)
  } else {
    val oldTableDef = getRawTable(db, tableDefinition.identifier.table)
    val newStorage = if (DDLUtils.isHiveTable(tableDefinition)) {
      tableDefinition.storage
    } else {
      // We can't alter the table storage of data source table directly for 2 reasons:
      // 1. internally we use path option in storage properties to store the value of table
      // location, but the given `tableDefinition` is from outside and doesn't have the path
      // option, we need to add it manually.
      // 2. this data source table may be created on a file, not a directory, then we can't set
      // the `locationUri` field and save it to Hive metastore, because Hive only allows
      // directory as table location.
      //
      // For example, an external data source table is created with a single file '/path/to/file'.
      // Internally, we will add a path option with value '/path/to/file' to storage properties,
      // and set the `locationUri` to a special value due to SPARK-15269(please see
      // `saveTableIntoHive` for more details). When users try to get the table metadata back, we
      // will restore the `locationUri` field from the path option and remove the path option from
      // storage properties. When users try to alter the table storage, the given
      // `tableDefinition` will have `locationUri` field with value `/path/to/file` and the path
      // option is not set.
      //
      // Here we need 2 extra steps:
      // 1. add path option to storage properties, to match the internal format, i.e. using path
      // option to store the value of table location.
      // 2. set the `locationUri` field back to the old one from the existing table metadata,
      // if users don't want to alter the table location. This step is necessary as the
      // `locationUri` is not always same with the path option, e.g. in the above example
      // `locationUri` is a special value and we should respect it. Note that, if users
      // want to alter the table location to a file path, we will fail. This should be fixed
      // in the future.
      val newLocation = tableDefinition.storage.locationUri.map(CatalogUtils.URIToString(_))
      val storageWithPathOption = tableDefinition.storage.copy(
        properties = tableDefinition.storage.properties ++ newLocation.map("path" -> _))
      val oldLocation = getLocationFromStorageProps(oldTableDef)
      if (oldLocation == newLocation) {
        // Location unchanged: keep the (possibly special) locationUri already in the metastore.
        storageWithPathOption.copy(locationUri = oldTableDef.storage.locationUri)
      } else {
        storageWithPathOption
      }
    }
    val partitionProviderProp = if (tableDefinition.tracksPartitionsInCatalog) {
      TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_CATALOG
    } else {
      TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_FILESYSTEM
    }
    // Add old data source properties to table properties, to retain the data source table format.
    // Add old stats properties to table properties, to retain spark's stats.
    // Set the `schema`, `partitionColumnNames` and `bucketSpec` from the old table definition,
    // to retain the spark specific format if it is.
    val propsFromOldTable = oldTableDef.properties.filter { case (k, v) =>
      k.startsWith(DATASOURCE_PREFIX) || k.startsWith(STATISTICS_PREFIX) ||
        k.startsWith(CREATED_SPARK_VERSION)
    }
    val newTableProps = propsFromOldTable ++ tableDefinition.properties + partitionProviderProp
    val newDef = tableDefinition.copy(
      storage = newStorage,
      schema = oldTableDef.schema,
      partitionColumnNames = oldTableDef.partitionColumnNames,
      bucketSpec = oldTableDef.bucketSpec,
      properties = newTableProps)
    client.alterTable(newDef)
  }
}
/**
 * Alter the data schema of a table identified by the provided database and table name. The new
 * data schema should not have conflict column names with the existing partition columns, and
 * should still contain all the existing data columns.
 */
override def doAlterTableDataSchema(
    db: String,
    table: String,
    newDataSchema: StructType): Unit = withClient {
  requireTableExists(db, table)
  val oldTable = getTable(db, table)
  verifyDataSchema(oldTable.identifier, oldTable.tableType, newDataSchema)
  // Spark persists the full schema (data columns followed by partition columns) in table
  // properties; rebuild those properties from the new data schema plus the old partition schema.
  val schemaProps =
    tableMetaToTableProps(oldTable, StructType(newDataSchema ++ oldTable.partitionSchema)).toMap
  if (isDatasourceTable(oldTable)) {
    // For data source tables, first try to write it with the schema set; if that does not work,
    // try again with updated properties and the partition schema. This is a simplified version of
    // what createDataSourceTable() does, and may leave the table in a state unreadable by Hive
    // (for example, the schema does not match the data source schema, or does not match the
    // storage descriptor).
    try {
      client.alterTableDataSchema(db, table, newDataSchema, schemaProps)
    } catch {
      case NonFatal(e) =>
        val warningMessage =
          s"Could not alter schema of table ${oldTable.identifier.quotedString} in a Hive " +
            "compatible way. Updating Hive metastore in Spark SQL specific format."
        logWarning(warningMessage, e)
        // Fall back to the placeholder schema; the real schema still lives in schemaProps.
        client.alterTableDataSchema(db, table, EMPTY_DATA_SCHEMA, schemaProps)
    }
  } else {
    client.alterTableDataSchema(db, table, newDataSchema, schemaProps)
  }
}
/** Alter the statistics of a table. If `stats` is None, then remove all existing statistics. */
override def doAlterTableStats(
    db: String,
    table: String,
    stats: Option[CatalogStatistics]): Unit = withClient {
  requireTableExists(db, table)
  val rawTable = getRawTable(db, table)
  // For datasource tables and hive serde tables created by spark 2.1 or higher,
  // the data schema is stored in the table properties.
  val schema = restoreTableMetadata(rawTable).schema
  // Convert table statistics to properties so that we can persist them through the hive client.
  // `None` maps to an empty property set, which (combined with the filterNot below) removes
  // every existing stats entry. Using a single immutable `val` here instead of a reassignable
  // `var` mixing immutable Map / mutable.HashMap branch types.
  val statsProperties: Map[String, String] =
    stats.map(statsToProperties(_, schema)).getOrElse(Map.empty)
  val oldTableNonStatsProps = rawTable.properties.filterNot(_._1.startsWith(STATISTICS_PREFIX))
  val updatedTable = rawTable.copy(properties = oldTableNonStatsProps ++ statsProperties)
  client.alterTable(updatedTable)
}
/** Fetches the raw Hive-side table and rebuilds the Spark-facing metadata from its properties. */
override def getTable(db: String, table: String): CatalogTable = withClient {
  val rawTable = getRawTable(db, table)
  restoreTableMetadata(rawTable)
}
/**
 * Restores table metadata from the table properties. This method is kind of a opposite version
 * of [[createTable]].
 *
 * It reads table schema, provider, partition column names and bucket specification from table
 * properties, and filter out these special entries from table properties.
 */
private def restoreTableMetadata(inputTable: CatalogTable): CatalogTable = {
  if (conf.get(DEBUG_MODE)) {
    // In debug mode, expose the raw metastore entry untouched for inspection.
    return inputTable
  }
  var table = inputTable
  table.properties.get(DATASOURCE_PROVIDER) match {
    case None if table.tableType == VIEW =>
      // If this is a view created by Spark 2.2 or higher versions, we should restore its schema
      // from table properties.
      if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) {
        table = table.copy(schema = getSchemaFromTableProperties(table))
      }
    // No provider in table properties, which means this is a Hive serde table.
    case None =>
      table = restoreHiveSerdeTable(table)
    // This is a regular data source table.
    case Some(provider) =>
      table = restoreDataSourceTable(table, provider)
  }
  // Restore version info
  val version: String = table.properties.getOrElse(CREATED_SPARK_VERSION, "2.2 or prior")
  // Restore Spark's statistics from information in Metastore.
  val restoredStats =
    statsFromProperties(table.properties, table.identifier.table, table.schema)
  if (restoredStats.isDefined) {
    table = table.copy(stats = restoredStats)
  }
  // Get the original table properties as defined by the user: strip every internal
  // "spark.sql." bookkeeping entry before handing the table back.
  table.copy(
    createVersion = version,
    properties = table.properties.filterNot { case (key, _) => key.startsWith(SPARK_SQL_PREFIX) })
}
// Reorder table schema to put partition columns at the end. Before Spark 2.2, the partition
// columns are not put at the end of schema. We need to reorder it when reading the schema
// from the table properties.
private def reorderSchema(schema: StructType, partColumnNames: Seq[String]): StructType = {
  // Resolve every declared partition column against the stored schema (exact-name match);
  // a miss means the persisted metadata is internally inconsistent.
  val partitionFields = partColumnNames.map { partCol =>
    schema.find(_.name == partCol) match {
      case Some(field) => field
      case None =>
        throw new AnalysisException("The metadata is corrupted. Unable to find the " +
          s"partition column names from the schema. schema: ${schema.catalogString}. " +
          s"Partition columns: ${partColumnNames.mkString("[", ", ", "]")}")
    }
  }
  val dataFields = schema.filterNot(partitionFields.contains)
  StructType(dataFields ++ partitionFields)
}
/**
 * Restores a Hive serde table's Spark-facing metadata. If Spark-written schema properties exist
 * and still agree with what Hive returned, the case-preserving schema from the properties wins;
 * otherwise we fall back to Hive's (lower-cased) schema and mark `schemaPreservesCase = false`.
 */
private def restoreHiveSerdeTable(table: CatalogTable): CatalogTable = {
  val options = new SourceOptions(table.storage.properties)
  val hiveTable = table.copy(
    provider = Some(DDLUtils.HIVE_PROVIDER),
    tracksPartitionsInCatalog = true)
  // If this is a Hive serde table created by Spark 2.1 or higher versions, we should restore its
  // schema from table properties.
  if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) {
    val schemaFromTableProps = getSchemaFromTableProperties(table)
    val partColumnNames = getPartitionColumnsFromTableProperties(table)
    val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames)
    if (DataType.equalsIgnoreCaseAndNullability(reorderedSchema, table.schema) ||
        options.respectSparkSchema) {
      hiveTable.copy(
        schema = reorderedSchema,
        partitionColumnNames = partColumnNames,
        bucketSpec = getBucketSpecFromTableProperties(table))
    } else {
      // Hive metastore may change the table schema, e.g. schema inference. If the table
      // schema we read back is different(ignore case and nullability) from the one in table
      // properties which was written when creating table, we should respect the table schema
      // from hive.
      logWarning(s"The table schema given by Hive metastore(${table.schema.simpleString}) is " +
        "different from the schema when this table was created by Spark SQL" +
        s"(${schemaFromTableProps.simpleString}). We have to fall back to the table schema " +
        "from Hive metastore which is not case preserving.")
      hiveTable.copy(schemaPreservesCase = false)
    }
  } else {
    // Pre-2.1 table: no Spark schema properties, so only Hive's lower-cased schema is available.
    hiveTable.copy(schemaPreservesCase = false)
  }
}
/**
 * Restores a data source table's Spark-facing metadata: moves the internal "path" storage
 * property back into `locationUri`, and rebuilds schema/partitioning/bucketing from properties.
 */
private def restoreDataSourceTable(table: CatalogTable, provider: String): CatalogTable = {
  // Internally we store the table location in storage properties with key "path" for data
  // source tables. Here we set the table location to `locationUri` field and filter out the
  // path option in storage properties, to avoid exposing this concept externally.
  val storageWithLocation = {
    val tableLocation = getLocationFromStorageProps(table)
    // We pass None as `newPath` here, to remove the path option in storage properties.
    updateLocationInStorageProps(table, newPath = None).copy(
      locationUri = tableLocation.map(CatalogUtils.stringToURI(_)))
  }
  val partitionProvider = table.properties.get(TABLE_PARTITION_PROVIDER)
  val schemaFromTableProps = getSchemaFromTableProperties(table)
  val partColumnNames = getPartitionColumnsFromTableProperties(table)
  val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames)
  table.copy(
    provider = Some(provider),
    storage = storageWithLocation,
    schema = reorderedSchema,
    partitionColumnNames = partColumnNames,
    bucketSpec = getBucketSpecFromTableProperties(table),
    // Only catalog-tracked tables (vs. filesystem-discovered partitions) set this flag.
    tracksPartitionsInCatalog = partitionProvider == Some(TABLE_PARTITION_PROVIDER_CATALOG))
}
/** Checks table existence directly against the Hive client; does not require the db to exist. */
override def tableExists(db: String, table: String): Boolean = withClient {
  client.tableExists(db, table)
}
/** Lists all table names in `db`; fails if the database does not exist. */
override def listTables(db: String): Seq[String] = withClient {
  requireDbExists(db)
  client.listTables(db)
}
/** Lists table names in `db` matching `pattern` (Hive-style glob, interpreted by the client). */
override def listTables(db: String, pattern: String): Seq[String] = withClient {
  requireDbExists(db)
  client.listTables(db, pattern)
}
/**
 * Loads data from `loadPath` into the given table via Hive's LOAD semantics.
 * `isOverwrite` replaces existing data; `isSrcLocal` marks the source as a local filesystem path.
 */
override def loadTable(
    db: String,
    table: String,
    loadPath: String,
    isOverwrite: Boolean,
    isSrcLocal: Boolean): Unit = withClient {
  requireTableExists(db, table)
  client.loadTable(
    loadPath,
    s"$db.$table",
    isOverwrite,
    isSrcLocal)
}
/**
 * Loads data from `loadPath` into a single partition of the table. The partition spec is
 * re-keyed in table partition-column order and lower-cased before being handed to Hive.
 */
override def loadPartition(
    db: String,
    table: String,
    loadPath: String,
    partition: TablePartitionSpec,
    isOverwrite: Boolean,
    inheritTableSpecs: Boolean,
    isSrcLocal: Boolean): Unit = withClient {
  requireTableExists(db, table)
  // LinkedHashMap preserves insertion order, so the spec follows the table's column order.
  val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
  getTable(db, table).partitionColumnNames.foreach { colName =>
    // Hive metastore is not case preserving and keeps partition columns with lower cased names,
    // and Hive will validate the column names in partition spec to make sure they are partition
    // columns. Here we Lowercase the column names before passing the partition spec to Hive
    // client, to satisfy Hive.
    orderedPartitionSpec.put(colName.toLowerCase, partition(colName))
  }
  client.loadPartition(
    loadPath,
    db,
    table,
    orderedPartitionSpec,
    isOverwrite,
    inheritTableSpecs,
    isSrcLocal)
}
/**
 * Loads data into dynamically-determined partitions (up to `numDP` dynamic partition columns).
 * As in [[loadPartition]], the spec is ordered by table column order and lower-cased for Hive.
 */
override def loadDynamicPartitions(
    db: String,
    table: String,
    loadPath: String,
    partition: TablePartitionSpec,
    replace: Boolean,
    numDP: Int): Unit = withClient {
  requireTableExists(db, table)
  val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
  getTable(db, table).partitionColumnNames.foreach { colName =>
    // Hive metastore is not case preserving and keeps partition columns with lower cased names,
    // and Hive will validate the column names in partition spec to make sure they are partition
    // columns. Here we Lowercase the column names before passing the partition spec to Hive
    // client, to satisfy Hive.
    orderedPartitionSpec.put(colName.toLowerCase, partition(colName))
  }
  client.loadDynamicPartitions(
    loadPath,
    db,
    table,
    orderedPartitionSpec,
    replace,
    numDP)
}
// --------------------------------------------------------------------------
// Partitions
// --------------------------------------------------------------------------
// Hive metastore is not case preserving and the partition columns are always lower cased. We need
// to lower case the column names in partition specification before calling partition related Hive
// APIs, to match this behaviour.
private def lowerCasePartitionSpec(spec: TablePartitionSpec): TablePartitionSpec = {
  // Use Locale.ROOT for locale-independent lowering, consistent with the rest of this file
  // (e.g. the "path" filter and function-name normalization). A bare toLowerCase uses the JVM
  // default locale and can corrupt identifiers such as "ID" under tr-TR ("ID" -> "ıd").
  spec.map { case (k, v) => k.toLowerCase(Locale.ROOT) -> v }
}
// Build a map from lower-cased partition column names to exact column names for a given table
private def buildLowerCasePartColNameMap(table: CatalogTable): Map[String, String] = {
  table.partitionColumnNames.map { exactName =>
    exactName.toLowerCase -> exactName
  }.toMap
}
// Hive metastore is not case preserving and the column names of the partition specification we
// get from the metastore are always lower cased. We should restore them w.r.t. the actual table
// partition columns.
private def restorePartitionSpec(
    spec: TablePartitionSpec,
    partColMap: Map[String, String]): TablePartitionSpec = {
  spec.map { case (lowerCasedName, value) =>
    // partColMap is keyed by lower-cased names; map back to the exact catalog name.
    partColMap(lowerCasedName.toLowerCase) -> value
  }
}
// Overload resolving each spec key against the actual partition column list, ignoring case.
private def restorePartitionSpec(
    spec: TablePartitionSpec,
    partCols: Seq[String]): TablePartitionSpec = {
  spec.map { case (name, value) =>
    val exactName = partCols.find(_.equalsIgnoreCase(name)).get
    exactName -> value
  }
}
/**
 * Creates the given partitions, filling in a default location for any partition that has none,
 * and lower-casing the specs before calling Hive.
 */
override def createPartitions(
    db: String,
    table: String,
    parts: Seq[CatalogTablePartition],
    ignoreIfExists: Boolean): Unit = withClient {
  requireTableExists(db, table)
  val tableMeta = getTable(db, table)
  val partitionColumnNames = tableMeta.partitionColumnNames
  val tablePath = new Path(tableMeta.location)
  val partsWithLocation = parts.map { p =>
    // Ideally we can leave the partition location empty and let Hive metastore to set it.
    // However, Hive metastore is not case preserving and will generate wrong partition location
    // with lower cased partition column names. Here we set the default partition location
    // manually to avoid this problem.
    val partitionPath = p.storage.locationUri.map(uri => new Path(uri)).getOrElse {
      ExternalCatalogUtils.generatePartitionPath(p.spec, partitionColumnNames, tablePath)
    }
    p.copy(storage = p.storage.copy(locationUri = Some(partitionPath.toUri)))
  }
  val lowerCasedParts = partsWithLocation.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec)))
  client.createPartitions(db, table, lowerCasedParts, ignoreIfExists)
}
/**
 * Drops the given partitions. `purge` skips the trash; `retainData` keeps the partition files.
 * Specs are lower-cased to match Hive's stored column names.
 */
override def dropPartitions(
    db: String,
    table: String,
    parts: Seq[TablePartitionSpec],
    ignoreIfNotExists: Boolean,
    purge: Boolean,
    retainData: Boolean): Unit = withClient {
  requireTableExists(db, table)
  client.dropPartitions(
    db, table, parts.map(lowerCasePartitionSpec), ignoreIfNotExists, purge, retainData)
}
/**
 * Renames partitions from `specs` to `newSpecs`. For managed tables with mixed-case partition
 * columns, additionally renames the on-disk directories that Hive created with lower-cased
 * names and points the partition metadata at the corrected locations.
 */
override def renamePartitions(
    db: String,
    table: String,
    specs: Seq[TablePartitionSpec],
    newSpecs: Seq[TablePartitionSpec]): Unit = withClient {
  client.renamePartitions(
    db, table, specs.map(lowerCasePartitionSpec), newSpecs.map(lowerCasePartitionSpec))
  val tableMeta = getTable(db, table)
  val partitionColumnNames = tableMeta.partitionColumnNames
  // Hive metastore is not case preserving and keeps partition columns with lower cased names.
  // When Hive rename partition for managed tables, it will create the partition location with
  // a default path generate by the new spec with lower cased partition column names. This is
  // unexpected and we need to rename them manually and alter the partition location.
  val hasUpperCasePartitionColumn = partitionColumnNames.exists(col => col.toLowerCase != col)
  if (tableMeta.tableType == MANAGED && hasUpperCasePartitionColumn) {
    val tablePath = new Path(tableMeta.location)
    val fs = tablePath.getFileSystem(hadoopConf)
    val newParts = newSpecs.map { spec =>
      // Fix the directory on disk first, then point the partition metadata at it.
      val rightPath = renamePartitionDirectory(fs, tablePath, partitionColumnNames, spec)
      val partition = client.getPartition(db, table, lowerCasePartitionSpec(spec))
      partition.copy(storage = partition.storage.copy(locationUri = Some(rightPath.toUri)))
    }
    alterPartitions(db, table, newParts)
  }
}
/**
 * Rename the partition directory w.r.t. the actual partition columns.
 *
 * It will recursively rename the partition directory from the first partition column, to be most
 * compatible with different file systems. e.g. in some file systems, renaming `a=1/b=2` to
 * `A=1/B=2` will result to `a=1/B=2`, while in some other file systems, the renaming works, but
 * will leave an empty directory `a=1`.
 *
 * @return the fully corrected partition path (table path + exact-cased partition segments).
 */
private def renamePartitionDirectory(
    fs: FileSystem,
    tablePath: Path,
    partCols: Seq[String],
    newSpec: TablePartitionSpec): Path = {
  import ExternalCatalogUtils.getPartitionPathString
  // Walk one partition-column level at a time, renaming the lower-cased directory segment to
  // the exact-cased one when needed.
  var currentFullPath = tablePath
  partCols.foreach { col =>
    val partValue = newSpec(col)
    val expectedPartitionString = getPartitionPathString(col, partValue)
    val expectedPartitionPath = new Path(currentFullPath, expectedPartitionString)
    if (fs.exists(expectedPartitionPath)) {
      // It is possible that some parental partition directories already exist or doesn't need to
      // be renamed. e.g. the partition columns are `a` and `B`, then we don't need to rename
      // `/table_path/a=1`. Or we already have a partition directory `A=1/B=2`, and we rename
      // another partition to `A=1/B=3`, then we will have `A=1/B=2` and `a=1/b=3`, and we should
      // just move `a=1/b=3` into `A=1` with new name `B=3`.
    } else {
      val actualPartitionString = getPartitionPathString(col.toLowerCase, partValue)
      val actualPartitionPath = new Path(currentFullPath, actualPartitionString)
      try {
        fs.rename(actualPartitionPath, expectedPartitionPath)
      } catch {
        case e: IOException =>
          throw new SparkException("Unable to rename partition path from " +
            s"$actualPartitionPath to $expectedPartitionPath", e)
      }
    }
    currentFullPath = expectedPartitionPath
  }
  currentFullPath
}
/**
 * Serializes catalog statistics into the flat string properties under the
 * "spark.sql.statistics." prefix: total size, optional row count, and per-column stats keyed
 * via [[columnStatKeyPropName]].
 */
private def statsToProperties(
    stats: CatalogStatistics,
    schema: StructType): Map[String, String] = {
  val colNameTypeMap: Map[String, DataType] =
    schema.fields.map(f => (f.name, f.dataType)).toMap
  val base = Map(STATISTICS_TOTAL_SIZE -> stats.sizeInBytes.toString) ++
    stats.rowCount.map(rc => STATISTICS_NUM_ROWS -> rc.toString)
  val colStatProps = for {
    (colName, colStat) <- stats.colStats
    (k, v) <- colStat.toMap(colName, colNameTypeMap(colName))
  } yield columnStatKeyPropName(colName, k) -> v
  base ++ colStatProps
}
/**
 * Deserializes catalog statistics previously written by [[statsToProperties]] from the given
 * property map. Returns None when no "spark.sql.statistics." entries are present.
 */
private def statsFromProperties(
    properties: Map[String, String],
    table: String,
    schema: StructType): Option[CatalogStatistics] = {
  val statsProps = properties.filterKeys(_.startsWith(STATISTICS_PREFIX))
  if (statsProps.isEmpty) {
    None
  } else {
    val colStats = new mutable.HashMap[String, ColumnStat]
    // For each column, recover its column stats. Note that this is currently a O(n^2) operation,
    // but given the number of columns it usually not enormous, this is probably OK as a start.
    // If we want to map this a linear operation, we'd need a stronger contract between the
    // naming convention used for serialization.
    schema.foreach { field =>
      if (statsProps.contains(columnStatKeyPropName(field.name, ColumnStat.KEY_VERSION))) {
        // If "version" field is defined, then the column stat is defined.
        val keyPrefix = columnStatKeyPropName(field.name, "")
        val colStatMap = statsProps.filterKeys(_.startsWith(keyPrefix)).map { case (k, v) =>
          (k.drop(keyPrefix.length), v)
        }
        ColumnStat.fromMap(table, field, colStatMap).foreach { cs =>
          colStats += field.name -> cs
        }
      }
    }
    Some(CatalogStatistics(
      sizeInBytes = BigInt(statsProps(STATISTICS_TOTAL_SIZE)),
      rowCount = statsProps.get(STATISTICS_NUM_ROWS).map(BigInt(_)),
      colStats = colStats.toMap))
  }
}
/**
 * Alters the given partitions, folding any partition-level statistics into the partition
 * parameters so they survive the round trip through the Hive client.
 */
override def alterPartitions(
    db: String,
    table: String,
    newParts: Seq[CatalogTablePartition]): Unit = withClient {
  val lowerCasedParts = newParts.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec)))
  val rawTable = getRawTable(db, table)
  // For datasource tables and hive serde tables created by spark 2.1 or higher,
  // the data schema is stored in the table properties.
  val schema = restoreTableMetadata(rawTable).schema
  // convert partition statistics to properties so that we can persist them through hive api
  val withStatsProps = lowerCasedParts.map(p => {
    if (p.stats.isDefined) {
      val statsProperties = statsToProperties(p.stats.get, schema)
      p.copy(parameters = p.parameters ++ statsProperties)
    } else {
      p
    }
  })
  // Note: Before altering table partitions in Hive, you *must* set the current database
  // to the one that contains the table of interest. Otherwise you will end up with the
  // most helpful error message ever: "Unable to alter partition. alter is not possible."
  // See HIVE-2742 for more detail.
  client.setCurrentDatabase(db)
  client.alterPartitions(db, table, withStatsProps)
}
/**
 * Fetches one partition (spec lower-cased for Hive) and restores its Spark-facing metadata:
 * exact-cased spec and any Spark statistics stored in its parameters.
 */
override def getPartition(
    db: String,
    table: String,
    spec: TablePartitionSpec): CatalogTablePartition = withClient {
  val part = client.getPartition(db, table, lowerCasePartitionSpec(spec))
  restorePartitionMetadata(part, getTable(db, table))
}
/**
 * Restores partition metadata from the partition properties.
 *
 * Reads partition-level statistics from partition properties, puts these
 * into [[CatalogTablePartition#stats]] and removes these special entries
 * from the partition properties.
 */
private def restorePartitionMetadata(
    partition: CatalogTablePartition,
    table: CatalogTable): CatalogTablePartition = {
  // Map lower-cased spec keys back to the table's exact-cased partition column names.
  val restoredSpec = restorePartitionSpec(partition.spec, table.partitionColumnNames)
  // Restore Spark's statistics from information in Metastore.
  // Note: partition-level statistics were introduced in 2.3.
  val restoredStats =
    statsFromProperties(partition.parameters, table.identifier.table, table.schema)
  if (restoredStats.isDefined) {
    partition.copy(
      spec = restoredSpec,
      stats = restoredStats,
      // Strip internal "spark.sql." bookkeeping entries from user-visible parameters.
      parameters = partition.parameters.filterNot {
        case (key, _) => key.startsWith(SPARK_SQL_PREFIX) })
  } else {
    partition.copy(spec = restoredSpec)
  }
}
/**
 * Returns the specified partition or None if it does not exist.
 */
override def getPartitionOption(
    db: String,
    table: String,
    spec: TablePartitionSpec): Option[CatalogTablePartition] = withClient {
  client.getPartitionOption(db, table, lowerCasePartitionSpec(spec)).map { part =>
    // Same restoration as getPartition: exact-cased spec plus any stored Spark statistics.
    restorePartitionMetadata(part, getTable(db, table))
  }
}
/**
 * Returns the partition names from hive metastore for a given table in a database.
 * Names come back from Hive with lower-cased, unescaped column names; each is rebuilt as
 * "ExactCol=escapedValue" segments joined with "/".
 */
override def listPartitionNames(
    db: String,
    table: String,
    partialSpec: Option[TablePartitionSpec] = None): Seq[String] = withClient {
  val catalogTable = getTable(db, table)
  // Pre-escape the exact column names so each segment below is already path-safe.
  val partColNameMap = buildLowerCasePartColNameMap(catalogTable).mapValues(escapePathName)
  val clientPartitionNames =
    client.getPartitionNames(catalogTable, partialSpec.map(lowerCasePartitionSpec))
  clientPartitionNames.map { partitionPath =>
    val partSpec = PartitioningUtils.parsePathFragmentAsSeq(partitionPath)
    partSpec.map { case (partName, partValue) =>
      partColNameMap(partName.toLowerCase) + "=" + escapePathName(partValue)
    }.mkString("/")
  }
}
/**
 * Returns the partitions from hive metastore for a given table in a database.
 * Optionally filtered by a partial partition spec; specs are restored to exact-cased names.
 */
override def listPartitions(
    db: String,
    table: String,
    partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = withClient {
  val partColNameMap = buildLowerCasePartColNameMap(getTable(db, table))
  val res = client.getPartitions(db, table, partialSpec.map(lowerCasePartitionSpec)).map { part =>
    part.copy(spec = restorePartitionSpec(part.spec, partColNameMap))
  }
  partialSpec match {
    // This might be a bug of Hive: When the partition value inside the partial partition spec
    // contains dot, and we ask Hive to list partitions w.r.t. the partial partition spec, Hive
    // treats dot as matching any single character and may return more partitions than we
    // expected. Here we do an extra filter to drop unexpected partitions.
    case Some(spec) if spec.exists(_._2.contains(".")) =>
      res.filter(p => isPartialPartitionSpec(spec, p.spec))
    case _ => res
  }
}
/**
 * Returns partitions matching the given predicate expressions. Hive-side pruning via
 * `getPartitionsByFilter` is best-effort, so a final Spark-side prune is applied.
 */
override def listPartitionsByFilter(
    db: String,
    table: String,
    predicates: Seq[Expression],
    defaultTimeZoneId: String): Seq[CatalogTablePartition] = withClient {
  val rawTable = getRawTable(db, table)
  val catalogTable = restoreTableMetadata(rawTable)
  val partColNameMap = buildLowerCasePartColNameMap(catalogTable)
  val clientPrunedPartitions =
    client.getPartitionsByFilter(rawTable, predicates).map { part =>
      part.copy(spec = restorePartitionSpec(part.spec, partColNameMap))
    }
  prunePartitionsByFilter(catalogTable, clientPrunedPartitions, predicates, defaultTimeZoneId)
}
// --------------------------------------------------------------------------
// Functions
// --------------------------------------------------------------------------
/** Creates a function after normalizing its name to lower case (Hive is case-insensitive). */
override protected def doCreateFunction(
    db: String,
    funcDefinition: CatalogFunction): Unit = withClient {
  requireDbExists(db)
  // Hive's metastore is case insensitive. However, Hive's createFunction does
  // not normalize the function name (unlike the getFunction part). So,
  // we are normalizing the function name.
  val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT)
  requireFunctionNotExists(db, functionName)
  val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName)
  client.createFunction(db, funcDefinition.copy(identifier = functionIdentifier))
}
/** Drops a function; fails if it does not exist. */
override protected def doDropFunction(db: String, name: String): Unit = withClient {
  requireFunctionExists(db, name)
  client.dropFunction(db, name)
}
/** Alters a function, normalizing its name to lower case as in [[doCreateFunction]]. */
override protected def doAlterFunction(
    db: String, funcDefinition: CatalogFunction): Unit = withClient {
  requireDbExists(db)
  val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT)
  requireFunctionExists(db, functionName)
  val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName)
  client.alterFunction(db, funcDefinition.copy(identifier = functionIdentifier))
}
/** Renames a function; the source must exist and the target must not. */
override protected def doRenameFunction(
    db: String,
    oldName: String,
    newName: String): Unit = withClient {
  requireFunctionExists(db, oldName)
  requireFunctionNotExists(db, newName)
  client.renameFunction(db, oldName, newName)
}
/** Returns the function's metadata; fails if it does not exist. */
override def getFunction(db: String, funcName: String): CatalogFunction = withClient {
  requireFunctionExists(db, funcName)
  client.getFunction(db, funcName)
}
/** Checks function existence; the database itself must exist. */
override def functionExists(db: String, funcName: String): Boolean = withClient {
  requireDbExists(db)
  client.functionExists(db, funcName)
}
/** Lists function names in `db` matching `pattern` (interpreted by the Hive client). */
override def listFunctions(db: String, pattern: String): Seq[String] = withClient {
  requireDbExists(db)
  client.listFunctions(db, pattern)
}
}
object HiveExternalCatalog {
val SPARK_SQL_PREFIX = "spark.sql."
val DATASOURCE_PREFIX = SPARK_SQL_PREFIX + "sources."
val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider"
val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema"
val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "."
val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts"
val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols"
val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols"
val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets"
val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols"
val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part."
val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol."
val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol."
val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol."
val STATISTICS_PREFIX = SPARK_SQL_PREFIX + "statistics."
val STATISTICS_TOTAL_SIZE = STATISTICS_PREFIX + "totalSize"
val STATISTICS_NUM_ROWS = STATISTICS_PREFIX + "numRows"
val STATISTICS_COL_STATS_PREFIX = STATISTICS_PREFIX + "colStats."
val TABLE_PARTITION_PROVIDER = SPARK_SQL_PREFIX + "partitionProvider"
val TABLE_PARTITION_PROVIDER_CATALOG = "catalog"
val TABLE_PARTITION_PROVIDER_FILESYSTEM = "filesystem"
val CREATED_SPARK_VERSION = SPARK_SQL_PREFIX + "create.version"
// When storing data source tables in hive metastore, we need to set data schema to empty if the
// schema is hive-incompatible. However we need a hack to preserve existing behavior. Before
// Spark 2.0, we do not set a default serde here (this was done in Hive), and so if the user
// provides an empty schema Hive would automatically populate the schema with a single field
// "col". However, after SPARK-14388, we set the default serde to LazySimpleSerde so this
// implicit behavior no longer happens. Therefore, we need to do it in Spark ourselves.
val EMPTY_DATA_SCHEMA = new StructType()
.add("col", "array<string>", nullable = true, comment = "from deserializer")
/**
* Returns the fully qualified name used in table properties for a particular column stat.
* For example, for column "mycol", and "min" stat, this should return
* "spark.sql.statistics.colStats.mycol.min".
*/
private def columnStatKeyPropName(columnName: String, statKey: String): String = {
STATISTICS_COL_STATS_PREFIX + columnName + "." + statKey
}
// A persisted data source table always store its schema in the catalog.
private def getSchemaFromTableProperties(metadata: CatalogTable): StructType = {
val errorMessage = "Could not read schema from the hive metastore because it is corrupted."
val props = metadata.properties
val schema = props.get(DATASOURCE_SCHEMA)
if (schema.isDefined) {
// Originally, we used `spark.sql.sources.schema` to store the schema of a data source table.
// After SPARK-6024, we removed this flag.
// Although we are not using `spark.sql.sources.schema` any more, we need to still support.
DataType.fromJson(schema.get).asInstanceOf[StructType]
} else if (props.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)).isEmpty) {
// If there is no schema information in table properties, it means the schema of this table
// was empty when saving into metastore, which is possible in older version(prior to 2.1) of
// Spark. We should respect it.
new StructType()
} else {
val numSchemaParts = props.get(DATASOURCE_SCHEMA_NUMPARTS)
if (numSchemaParts.isDefined) {
val parts = (0 until numSchemaParts.get.toInt).map { index =>
val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull
if (part == null) {
throw new AnalysisException(errorMessage +
s" (missing part $index of the schema, ${numSchemaParts.get} parts are expected).")
}
part
}
// Stick all parts back to a single schema string.
DataType.fromJson(parts.mkString).asInstanceOf[StructType]
} else {
throw new AnalysisException(errorMessage)
}
}
}
private def getColumnNamesByType(
props: Map[String, String],
colType: String,
typeName: String): Seq[String] = {
for {
numCols <- props.get(s"spark.sql.sources.schema.num${colType.capitalize}Cols").toSeq
index <- 0 until numCols.toInt
} yield props.getOrElse(
s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index",
throw new AnalysisException(
s"Corrupted $typeName in catalog: $numCols parts expected, but part $index is missing."
)
)
}
/** Restores the partitioning column names persisted in the table properties. */
private def getPartitionColumnsFromTableProperties(metadata: CatalogTable): Seq[String] = {
  val props = metadata.properties
  getColumnNamesByType(props, colType = "part", typeName = "partitioning columns")
}
/**
 * Restores the bucketing spec from the table properties. A spec exists exactly
 * when the number-of-buckets property was written; bucketing and sorting column
 * lists are then read from their numbered property entries.
 */
private def getBucketSpecFromTableProperties(metadata: CatalogTable): Option[BucketSpec] = {
  val props = metadata.properties
  props.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { numBuckets =>
    val bucketColumns = getColumnNamesByType(props, "bucket", "bucketing columns")
    val sortColumns = getColumnNamesByType(props, "sort", "sorting columns")
    BucketSpec(numBuckets.toInt, bucketColumns, sortColumns)
  }
}
/**
 * Detects a data source table. This checks both the table provider and the table properties,
 * unlike DDLUtils which just checks the former.
 */
private[spark] def isDatasourceTable(table: CatalogTable): Boolean = {
  // A data source table is any table whose provider is known and is not Hive itself.
  table.provider
    .orElse(table.properties.get(DATASOURCE_PROVIDER))
    .exists(_ != DDLUtils.HIVE_PROVIDER)
}
}
| apache-2.0 |
mengxn/tensorflow | tensorflow/contrib/image/__init__.py | 1497 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""##Ops for image manipulation.
### API
This module provides functions for image manipulation; currently, only
projective transforms (including rotation) are supported.
## Image `Ops`
@@angles_to_projective_transforms
@@compose_transforms
@@rotate
@@transform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=line-too-long
from tensorflow.contrib.image.python.ops.image_ops import angles_to_projective_transforms
from tensorflow.contrib.image.python.ops.image_ops import compose_transforms
from tensorflow.contrib.image.python.ops.image_ops import rotate
from tensorflow.contrib.image.python.ops.image_ops import transform
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| apache-2.0 |
mono/roslyn | src/VisualStudio/CSharp/Test/Debugging/DataTipInfoGetterTests.cs | 11574 | // Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Linq;
using System.Threading;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.Editor.UnitTests.Workspaces;
using Microsoft.CodeAnalysis.Text;
using Microsoft.VisualStudio.LanguageServices.CSharp.Debugging;
using Roslyn.Test.Utilities;
using Roslyn.Utilities;
using Xunit;
namespace Microsoft.CodeAnalysis.Editor.CSharp.UnitTests.Debugging
{
public class DataTipInfoGetterTests
{
// Runs DataTipInfoGetter at the $$ caret position embedded in the markup and
// asserts that the returned data-tip span equals the [|...|] selection and
// that the tip text (null unless the test expects replacement text, e.g. for
// 'var') equals expectedText.
private void Test(string markup, string expectedText = null)
{
TestSpanGetter(markup, (document, position, expectedSpan) =>
{
var result = DataTipInfoGetter.GetInfoAsync(document, position, CancellationToken.None).WaitAndGetResult(CancellationToken.None);
Assert.Equal(expectedSpan, result.Span);
Assert.Equal(expectedText, result.Text);
});
}
// Runs DataTipInfoGetter at the $$ caret position in the markup and asserts
// that no data tip is produced (the result is the default/empty value).
private void TestNoDataTip(string markup)
{
TestSpanGetter(markup, (document, position, expectedSpan) =>
{
var result = DataTipInfoGetter.GetInfoAsync(document, position, CancellationToken.None).WaitAndGetResult(CancellationToken.None);
Assert.True(result.IsDefault);
});
}
// Shared driver for the tests above: builds a single-document workspace from
// the markup, extracts the $$ caret position and the optional [|...|]
// selection (the expected data-tip span), and invokes the continuation with
// the document, position, and expected span.
private void TestSpanGetter(string markup, Action<Document, int, TextSpan?> continuation)
{
using (var workspace = CSharpWorkspaceFactory.CreateWorkspaceFromLines(markup))
{
var testHostDocument = workspace.Documents.Single();
var position = testHostDocument.CursorPosition.Value;
// No [|...|] selection in the markup means "no span expected" (null).
var expectedSpan = testHostDocument.SelectedSpans.Any()
? testHostDocument.SelectedSpans.Single()
: (TextSpan?)null;
continuation(
workspace.CurrentSolution.Projects.First().Documents.First(),
position,
expectedSpan);
}
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestCSharpLanguageDebugInfoGetDataTipSpanAndText()
{
Test("class [|C$$|] { }");
Test("struct [|C$$|] { }");
Test("interface [|C$$|] { }");
Test("enum [|C$$|] { }");
Test("delegate void [|C$$|] ();"); // Without the space, that position is actually on the open paren.
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test1()
{
Test(
@"class C
{
void Foo()
{
[|Sys$$tem|].Console.WriteLine(args);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test2()
{
Test(
@"class C
{
void Foo()
{
[|System$$.Console|].WriteLine(args);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test3()
{
Test(
@"class C
{
void Foo()
{
[|System.$$Console|].WriteLine(args);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test4()
{
Test(
@"class C
{
void Foo()
{
[|System.Con$$sole|].WriteLine(args);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test5()
{
Test(
@"class C
{
void Foo()
{
[|System.Console.Wri$$teLine|](args);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test6()
{
TestNoDataTip(
@"class C
{
void Foo()
{
[|System.Console.WriteLine|]$$(args);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test7()
{
Test(
@"class C
{
void Foo()
{
System.Console.WriteLine($$[|args|]);
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void Test8()
{
TestNoDataTip(
@"class C
{
void Foo()
{
[|System.Console.WriteLine|](args$$);
}
}");
}
[Fact]
public void TestVar()
{
Test(
@"class C
{
void Foo()
{
[|va$$r|] v = 0;
}
}", "int");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestVariableType()
{
Test(
@"class C
{
void Foo()
{
[|in$$t|] i = 0;
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestVariableIdentifier()
{
Test(
@"class C
{
void Foo()
{
int [|$$i|] = 0;
}
}");
}
[WorkItem(539910)]
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestLiterals()
{
Test(
@"class C
{
void Foo()
{
int i = [|4$$2|];
}
}", "int");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestNonExpressions()
{
TestNoDataTip(
@"class C
{
void Foo()
{
int i = 42;
}$$
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestParameterIdentifier()
{
Test(
@"class C
{
void Foo(int [|$$i|])
{
}
}");
}
[WorkItem(942699)]
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestCatchIdentifier()
{
Test(
@"class C
{
void Foo()
{
try
{
}
catch (System.Exception [|$$e|])
{
}
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestEvent()
{
Test(
@"class C
{
event System.Action [|$$E|];
}");
Test(
@"class C
{
event System.Action [|$$E|]
{
add { }
remove { }
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestMethod()
{
Test(
@"class C
{
int [|$$M|]() { }
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestTypeParameter()
{
Test("class C<T, [|$$U|], V> { }");
Test(
@"class C
{
void M<T, [|$$U|]>() { }
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void UsingAlias()
{
Test(
@"using [|$$S|] = Static;
static class Static
{
}");
}
[WorkItem(540921)]
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestForEachIdentifier()
{
Test(
@"class C
{
void Foo(string[] args)
{
foreach (string [|$$s|] in args)
{
}
}
}");
}
[WorkItem(546328)]
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestProperty()
{
Test(
@"namespace ConsoleApplication16
{
class C
{
public int [|$$foo|] { get; private set; } // hover over me
public C()
{
this.foo = 1;
}
public int Foo()
{
return 2; // breakpoint here
}
}
class Program
{
static void Main(string[] args)
{
new C().Foo();
}
}
}
");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips)]
public void TestQueryIdentifier()
{
Test( // From
@"class C
{
object Foo(string[] args)
{
return from [|$$a|] in args select a;
}
}");
Test( // Let
@"class C
{
object Foo(string[] args)
{
return from a in args let [|$$b|] = ""END"" select a + b;
}
}");
Test( // Join
@"class C
{
object Foo(string[] args)
{
return from a in args join [|$$b|] in args on a equals b;
}
}");
Test( // Join Into
@"class C
{
object Foo(string[] args)
{
return from a in args join b in args on a equals b into [|$$c|];
}
}");
Test( // Continuation
@"class C
{
object Foo(string[] args)
{
return from a in args select a into [|$$b|] from c in b select c;
}
}");
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips), WorkItem(1077843)]
public void TestConditionalAccessExpression()
{
var sourceTemplate = @"
class A
{{
B B;
object M()
{{
return {0};
}}
}}
class B
{{
C C;
}}
class C
{{
D D;
}}
class D
{{
}}
";
// One level.
Test(string.Format(sourceTemplate, "[|Me?.$$B|]"));
// Two levels.
Test(string.Format(sourceTemplate, "[|Me?.$$B|].C"));
Test(string.Format(sourceTemplate, "[|Me?.B.$$C|]"));
Test(string.Format(sourceTemplate, "[|Me.$$B|]?.C"));
Test(string.Format(sourceTemplate, "[|Me.B?.$$C|]"));
Test(string.Format(sourceTemplate, "[|Me?.$$B|]?.C"));
Test(string.Format(sourceTemplate, "[|Me?.B?.$$C|]"));
// Three levels.
Test(string.Format(sourceTemplate, "[|Me?.$$B|].C.D"));
Test(string.Format(sourceTemplate, "[|Me?.B.$$C|].D"));
Test(string.Format(sourceTemplate, "[|Me?.B.C.$$D|]"));
Test(string.Format(sourceTemplate, "[|Me.$$B|]?.C.D"));
Test(string.Format(sourceTemplate, "[|Me.B?.$$C|].D"));
Test(string.Format(sourceTemplate, "[|Me.B?.C.$$D|]"));
Test(string.Format(sourceTemplate, "[|Me.$$B|].C?.D"));
Test(string.Format(sourceTemplate, "[|Me.B.$$C|]?.D"));
Test(string.Format(sourceTemplate, "[|Me.B.C?.$$D|]"));
Test(string.Format(sourceTemplate, "[|Me?.$$B|]?.C.D"));
Test(string.Format(sourceTemplate, "[|Me?.B?.$$C|].D"));
Test(string.Format(sourceTemplate, "[|Me?.B?.C.$$D|]"));
Test(string.Format(sourceTemplate, "[|Me?.$$B|].C?.D"));
Test(string.Format(sourceTemplate, "[|Me?.B.$$C|]?.D"));
Test(string.Format(sourceTemplate, "[|Me?.B.C?.$$D|]"));
Test(string.Format(sourceTemplate, "[|Me.$$B|]?.C?.D"));
Test(string.Format(sourceTemplate, "[|Me.B?.$$C|]?.D"));
Test(string.Format(sourceTemplate, "[|Me.B?.C?.$$D|]"));
Test(string.Format(sourceTemplate, "[|Me?.$$B|]?.C?.D"));
Test(string.Format(sourceTemplate, "[|Me?.B?.$$C|]?.D"));
Test(string.Format(sourceTemplate, "[|Me?.B?.C?.$$D|]"));
}
[Fact, Trait(Traits.Feature, Traits.Features.DebuggingDataTips), WorkItem(1077843)]
public void TestConditionalAccessExpression_Trivia()
{
var sourceTemplate = @"
class A
{{
B B;
object M()
{{
return {0};
}}
}}
class B
{{
C C;
}}
class C
{{
}}
";
Test(string.Format(sourceTemplate, "/*1*/[|$$Me|]/*2*/?./*3*/B/*4*/?./*5*/C/*6*/"));
Test(string.Format(sourceTemplate, "/*1*/[|Me/*2*/?./*3*/$$B|]/*4*/?./*5*/C/*6*/"));
Test(string.Format(sourceTemplate, "/*1*/[|Me/*2*/?./*3*/B/*4*/?./*5*/$$C|]/*6*/"));
}
}
}
| apache-2.0 |
ebyhr/presto | core/trino-main/src/main/java/io/trino/execution/scheduler/NodeSchedulerExporter.java | 2134 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.execution.scheduler;
import com.google.common.collect.ImmutableMap;
import io.airlift.stats.CounterStat;
import org.weakref.jmx.JmxException;
import org.weakref.jmx.MBeanExport;
import org.weakref.jmx.MBeanExporter;
import javax.annotation.PreDestroy;
import javax.annotation.concurrent.GuardedBy;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static java.util.Objects.requireNonNull;
/**
 * Exposes the topology-aware node scheduler's per-segment placement counters
 * as JMX MBeans for the lifetime of the server.
 *
 * <p>MBeans are registered eagerly in the constructor and unregistered in
 * {@link #destroy()}. Export and unexport failures are deliberately ignored:
 * JMX here is diagnostics only and must never fail startup or shutdown.
 */
public final class NodeSchedulerExporter
{
    @GuardedBy("this")
    private final List<MBeanExport> mbeanExports = new ArrayList<>();

    @Inject
    public NodeSchedulerExporter(TopologyAwareNodeSelectorFactory nodeSelectorFactory, MBeanExporter exporter)
    {
        requireNonNull(nodeSelectorFactory, "nodeSelectorFactory is null");
        requireNonNull(exporter, "exporter is null");
        for (Map.Entry<String, CounterStat> counterByName : nodeSelectorFactory.getPlacementCountersByName().entrySet()) {
            Map<String, String> objectNameProperties = ImmutableMap.of("segment", counterByName.getKey());
            try {
                MBeanExport export = exporter.exportWithGeneratedName(counterByName.getValue(), NodeScheduler.class, objectNameProperties);
                mbeanExports.add(export);
            }
            catch (JmxException ignored) {
                // A single counter failing to export must not prevent the
                // remaining counters from registering (or block startup).
            }
        }
    }

    /**
     * Unregisters every successfully exported MBean. Individual unexport
     * failures are ignored so that all entries are attempted.
     */
    @PreDestroy
    public synchronized void destroy()
    {
        for (MBeanExport export : mbeanExports) {
            try {
                export.unexport();
            }
            catch (JmxException ignored) {
                // Best-effort cleanup: keep unexporting the rest.
            }
        }
        mbeanExports.clear();
    }
}
| apache-2.0 |
cloudfoundry-incubator/cf-networking-release | src/code.cloudfoundry.org/vendor/github.com/pivotal-cf-experimental/rainmaker/internal/network/transport.go | 993 | package network
import (
"crypto/tls"
"net"
"net/http"
"time"
)
/*
The purpose of the transports defined herein is to stop a program that
consumes this package from using up all of the file descriptors provided
by the operating system. The implementation here ensures that the HTTP
client for this library will consume, at most, 2 file descriptors, one
for each transport.
*/
var _transports map[bool]http.RoundTripper

// init eagerly builds one shared transport per TLS-verification mode, so the
// package hands out at most two transports process-wide. This bounds the
// number of file descriptors HTTP clients built on this package can consume.
func init() {
	shared := make(map[bool]http.RoundTripper, 2)
	for _, skipVerify := range []bool{false, true} {
		shared[skipVerify] = _buildTransport(skipVerify)
	}
	_transports = shared
}
// buildTransport returns the process-wide shared transport matching the
// requested TLS-verification behavior.
func buildTransport(skipVerifySSL bool) http.RoundTripper {
	if transport, found := _transports[skipVerifySSL]; found {
		return transport
	}
	return nil
}
func _buildTransport(skipVerifySSL bool) http.RoundTripper {
return &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: skipVerifySSL,
},
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
}
| apache-2.0 |
11xor6/presto | core/trino-main/src/main/java/io/trino/sql/planner/PlanFragment.java | 10934 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.sql.planner;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import io.trino.cost.StatsAndCosts;
import io.trino.operator.StageExecutionDescriptor;
import io.trino.spi.type.Type;
import io.trino.sql.planner.plan.PlanFragmentId;
import io.trino.sql.planner.plan.PlanNode;
import io.trino.sql.planner.plan.PlanNodeId;
import io.trino.sql.planner.plan.RemoteSourceNode;
import javax.annotation.concurrent.Immutable;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.util.Objects.requireNonNull;
/**
 * A fragment of a distributed query plan: the subtree of the plan that a single
 * stage executes, together with how the fragment's input is partitioned across
 * tasks and how its output rows are laid out for the downstream stage.
 *
 * <p>Instances are immutable; the {@code with*} methods return modified copies.
 *
 * <p>Fixes relative to the previous revision: the {@code @JsonCreator}
 * constructor now validates {@code partitioningScheme} before first use (a null
 * argument previously failed with a bare NPE instead of the intended message),
 * and {@code findSources} recurses through a single builder instead of building
 * an intermediate set at every level.
 */
@Immutable
public class PlanFragment
{
    private final PlanFragmentId id;
    private final PlanNode root;
    private final Map<Symbol, Type> symbols;
    private final PartitioningHandle partitioning;
    private final List<PlanNodeId> partitionedSources;
    // Same ids as partitionedSources, kept as a set for O(1) membership checks.
    private final Set<PlanNodeId> partitionedSourcesSet;
    // Output types, in partitioningScheme output-layout order.
    private final List<Type> types;
    private final Set<PlanNode> partitionedSourceNodes;
    private final List<RemoteSourceNode> remoteSourceNodes;
    private final PartitioningScheme partitioningScheme;
    private final StageExecutionDescriptor stageExecutionDescriptor;
    private final StatsAndCosts statsAndCosts;
    // Optional textual representation; dropped by withoutEmbeddedJsonRepresentation().
    private final Optional<String> jsonRepresentation;

    // Only for creating instances without the JSON representation embedded
    private PlanFragment(
            PlanFragmentId id,
            PlanNode root,
            Map<Symbol, Type> symbols,
            PartitioningHandle partitioning,
            List<PlanNodeId> partitionedSources,
            Set<PlanNodeId> partitionedSourcesSet,
            List<Type> types,
            Set<PlanNode> partitionedSourceNodes,
            List<RemoteSourceNode> remoteSourceNodes,
            PartitioningScheme partitioningScheme,
            StageExecutionDescriptor stageExecutionDescriptor,
            StatsAndCosts statsAndCosts)
    {
        this.id = requireNonNull(id, "id is null");
        this.root = requireNonNull(root, "root is null");
        this.symbols = requireNonNull(symbols, "symbols is null");
        this.partitioning = requireNonNull(partitioning, "partitioning is null");
        this.partitionedSources = requireNonNull(partitionedSources, "partitionedSources is null");
        this.partitionedSourcesSet = requireNonNull(partitionedSourcesSet, "partitionedSourcesSet is null");
        this.types = requireNonNull(types, "types is null");
        this.partitionedSourceNodes = requireNonNull(partitionedSourceNodes, "partitionedSourceNodes is null");
        this.remoteSourceNodes = requireNonNull(remoteSourceNodes, "remoteSourceNodes is null");
        this.partitioningScheme = requireNonNull(partitioningScheme, "partitioningScheme is null");
        this.stageExecutionDescriptor = requireNonNull(stageExecutionDescriptor, "stageExecutionDescriptor is null");
        this.statsAndCosts = requireNonNull(statsAndCosts, "statsAndCosts is null");
        this.jsonRepresentation = Optional.empty();
    }

    @JsonCreator
    public PlanFragment(
            @JsonProperty("id") PlanFragmentId id,
            @JsonProperty("root") PlanNode root,
            @JsonProperty("symbols") Map<Symbol, Type> symbols,
            @JsonProperty("partitioning") PartitioningHandle partitioning,
            @JsonProperty("partitionedSources") List<PlanNodeId> partitionedSources,
            @JsonProperty("partitioningScheme") PartitioningScheme partitioningScheme,
            @JsonProperty("stageExecutionDescriptor") StageExecutionDescriptor stageExecutionDescriptor,
            @JsonProperty("statsAndCosts") StatsAndCosts statsAndCosts,
            @JsonProperty("jsonRepresentation") Optional<String> jsonRepresentation)
    {
        this.id = requireNonNull(id, "id is null");
        this.root = requireNonNull(root, "root is null");
        this.symbols = requireNonNull(symbols, "symbols is null");
        this.partitioning = requireNonNull(partitioning, "partitioning is null");
        this.partitionedSources = ImmutableList.copyOf(requireNonNull(partitionedSources, "partitionedSources is null"));
        this.partitionedSourcesSet = ImmutableSet.copyOf(partitionedSources);
        // Validate before first use below so a null argument fails with a clear message.
        this.partitioningScheme = requireNonNull(partitioningScheme, "partitioningScheme is null");
        this.stageExecutionDescriptor = requireNonNull(stageExecutionDescriptor, "stageExecutionDescriptor is null");
        this.statsAndCosts = requireNonNull(statsAndCosts, "statsAndCosts is null");
        this.jsonRepresentation = requireNonNull(jsonRepresentation, "jsonRepresentation is null");

        checkArgument(partitionedSourcesSet.size() == partitionedSources.size(), "partitionedSources contains duplicates");
        checkArgument(ImmutableSet.copyOf(root.getOutputSymbols()).containsAll(partitioningScheme.getOutputLayout()),
                "Root node outputs (%s) does not include all fragment outputs (%s)", root.getOutputSymbols(), partitioningScheme.getOutputLayout());

        types = partitioningScheme.getOutputLayout().stream()
                .map(symbols::get)
                .collect(toImmutableList());

        this.partitionedSourceNodes = findSources(root, partitionedSources);

        ImmutableList.Builder<RemoteSourceNode> remoteSourceNodes = ImmutableList.builder();
        findRemoteSourceNodes(root, remoteSourceNodes);
        this.remoteSourceNodes = remoteSourceNodes.build();
    }

    @JsonProperty
    public PlanFragmentId getId()
    {
        return id;
    }

    @JsonProperty
    public PlanNode getRoot()
    {
        return root;
    }

    @JsonProperty
    public Map<Symbol, Type> getSymbols()
    {
        return symbols;
    }

    @JsonProperty
    public PartitioningHandle getPartitioning()
    {
        return partitioning;
    }

    @JsonProperty
    public List<PlanNodeId> getPartitionedSources()
    {
        return partitionedSources;
    }

    public boolean isPartitionedSources(PlanNodeId nodeId)
    {
        return partitionedSourcesSet.contains(nodeId);
    }

    @JsonProperty
    public PartitioningScheme getPartitioningScheme()
    {
        return partitioningScheme;
    }

    @JsonProperty
    public StageExecutionDescriptor getStageExecutionDescriptor()
    {
        return stageExecutionDescriptor;
    }

    @JsonProperty
    public StatsAndCosts getStatsAndCosts()
    {
        return statsAndCosts;
    }

    @JsonProperty
    public Optional<String> getJsonRepresentation()
    {
        // NOTE(review): this should arguably be a JSON raw value, but that would give
        // this class a different deserialization constructor. Workers don't need the
        // representation (see withoutEmbeddedJsonRepresentation), so it would be OK,
        // but it needs thought before changing.
        return jsonRepresentation;
    }

    /**
     * Returns a copy of this fragment without the embedded textual plan, used
     * when shipping the fragment to workers that don't need it.
     */
    public PlanFragment withoutEmbeddedJsonRepresentation()
    {
        if (jsonRepresentation.isEmpty()) {
            return this;
        }
        return new PlanFragment(
                this.id,
                this.root,
                this.symbols,
                this.partitioning,
                this.partitionedSources,
                this.partitionedSourcesSet,
                this.types,
                this.partitionedSourceNodes,
                this.remoteSourceNodes,
                this.partitioningScheme,
                this.stageExecutionDescriptor,
                this.statsAndCosts);
    }

    public List<Type> getTypes()
    {
        return types;
    }

    public Set<PlanNode> getPartitionedSourceNodes()
    {
        return partitionedSourceNodes;
    }

    /** A fragment is a leaf when it reads no data from other fragments. */
    public boolean isLeaf()
    {
        return remoteSourceNodes.isEmpty();
    }

    public List<RemoteSourceNode> getRemoteSourceNodes()
    {
        return remoteSourceNodes;
    }

    // Collects the plan nodes in the subtree rooted at 'node' whose ids appear in 'nodeIds'.
    private static Set<PlanNode> findSources(PlanNode node, Iterable<PlanNodeId> nodeIds)
    {
        ImmutableSet.Builder<PlanNode> nodes = ImmutableSet.builder();
        findSources(node, ImmutableSet.copyOf(nodeIds), nodes);
        return nodes.build();
    }

    private static void findSources(PlanNode node, Set<PlanNodeId> nodeIds, ImmutableSet.Builder<PlanNode> nodes)
    {
        if (nodeIds.contains(node.getId())) {
            nodes.add(node);
        }
        // Recurse through the shared builder instead of materializing a set per level.
        for (PlanNode source : node.getSources()) {
            findSources(source, nodeIds, nodes);
        }
    }

    // Post-order collection of all RemoteSourceNodes in the subtree rooted at 'node'.
    private static void findRemoteSourceNodes(PlanNode node, ImmutableList.Builder<RemoteSourceNode> builder)
    {
        for (PlanNode source : node.getSources()) {
            findRemoteSourceNodes(source, builder);
        }
        if (node instanceof RemoteSourceNode) {
            builder.add((RemoteSourceNode) node);
        }
    }

    public PlanFragment withBucketToPartition(Optional<int[]> bucketToPartition)
    {
        return new PlanFragment(id, root, symbols, partitioning, partitionedSources, partitioningScheme.withBucketToPartition(bucketToPartition), stageExecutionDescriptor, statsAndCosts, jsonRepresentation);
    }

    public PlanFragment withFixedLifespanScheduleGroupedExecution(List<PlanNodeId> capableTableScanNodes)
    {
        return new PlanFragment(id, root, symbols, partitioning, partitionedSources, partitioningScheme, StageExecutionDescriptor.fixedLifespanScheduleGroupedExecution(capableTableScanNodes), statsAndCosts, jsonRepresentation);
    }

    public PlanFragment withDynamicLifespanScheduleGroupedExecution(List<PlanNodeId> capableTableScanNodes)
    {
        return new PlanFragment(id, root, symbols, partitioning, partitionedSources, partitioningScheme, StageExecutionDescriptor.dynamicLifespanScheduleGroupedExecution(capableTableScanNodes), statsAndCosts, jsonRepresentation);
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("id", id)
                .add("partitioning", partitioning)
                .add("partitionedSource", partitionedSources)
                .add("partitionFunction", partitioningScheme)
                .toString();
    }
}
| apache-2.0 |
huonw/swift | tools/SourceKit/tools/complete-test/complete-test.cpp | 28790 | //===--- complete-test.cpp ------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "sourcekitd/sourcekitd.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/FileSystem.h"
#include <fstream>
#include <regex>
#include <unistd.h>
#include <sys/param.h>
// FIXME: Platform compatibility.
#include <dispatch/dispatch.h>
using namespace llvm;
namespace {
// Command-line configuration for one complete-test invocation (filled in by
// parseOptions). `Optional` members distinguish "not given on the command
// line" from an explicit value, so only explicitly-set options are forwarded
// in the sourcekitd request and the service defaults apply otherwise.
struct TestOptions {
// Swift source file to run code completion in (positional argument or "-").
StringRef sourceFile;
// Name of the placeholder token (-tok=) marking the completion position.
StringRef completionToken;
// Values of -popular= / -unpopular= — presumably paths to API popularity
// lists fed to the service; TODO(review): confirm the expected file format.
StringRef popularAPI;
StringRef unpopularAPI;
// Result ordering/grouping knobs (-sort=, -depth, -group=, ...).
Optional<bool> sortByName;
Optional<bool> useImportDepth;
Optional<bool> groupOverloads;
Optional<bool> groupStems;
Optional<bool> includeExactMatch;
Optional<bool> addInnerResults;
Optional<bool> addInnerOperators;
Optional<bool> addInitsToTopLevel;
// Paging of the result list (-start= / -limit=).
Optional<unsigned> requestStart;
Optional<unsigned> requestLimit;
// Result hiding/filtering knobs (-hide-*= / -top=).
Optional<unsigned> hideUnderscores;
Optional<bool> hideByName;
Optional<bool> hideLowPriority;
Optional<unsigned> showTopNonLiteral;
// Fuzzy-matching behavior and scoring weights.
Optional<bool> fuzzyMatching;
Optional<unsigned> fuzzyWeight;
Optional<unsigned> popularityBonus;
// JSON payload given via -filter-rules=.
StringRef filterRulesJSON;
// Output modes: dump the raw sourcekitd response / the document structure.
bool rawOutput = false;
bool structureOutput = false;
// Everything after "--", forwarded to the compiler invocation.
ArrayRef<const char *> compilerArgs;
};
} // end anonymous namespace
static int handleTestInvocation(TestOptions &options);
static sourcekitd_uid_t KeyRequest;
static sourcekitd_uid_t KeyCompilerArgs;
static sourcekitd_uid_t KeyOffset;
static sourcekitd_uid_t KeyLength;
static sourcekitd_uid_t KeySourceFile;
static sourcekitd_uid_t KeySourceText;
static sourcekitd_uid_t KeyName;
static sourcekitd_uid_t KeyNameOffset;
static sourcekitd_uid_t KeyNameLength;
static sourcekitd_uid_t KeyBodyOffset;
static sourcekitd_uid_t KeyBodyLength;
static sourcekitd_uid_t KeyThrowOffset;
static sourcekitd_uid_t KeyThrowLength;
static sourcekitd_uid_t KeyIsLocal;
static sourcekitd_uid_t KeyDescription;
static sourcekitd_uid_t KeyCodeCompleteOptions;
static sourcekitd_uid_t KeySortByName;
static sourcekitd_uid_t KeyUseImportDepth;
static sourcekitd_uid_t KeyGroupOverloads;
static sourcekitd_uid_t KeyGroupStems;
static sourcekitd_uid_t KeyFilterText;
static sourcekitd_uid_t KeyFilterRules;
static sourcekitd_uid_t KeyRequestStart;
static sourcekitd_uid_t KeyRequestLimit;
static sourcekitd_uid_t KeyHideUnderscores;
static sourcekitd_uid_t KeyHideLowPriority;
static sourcekitd_uid_t KeyHideByName;
static sourcekitd_uid_t KeyIncludeExactMatch;
static sourcekitd_uid_t KeyAddInnerResults;
static sourcekitd_uid_t KeyAddInnerOperators;
static sourcekitd_uid_t KeyAddInitsToTopLevel;
static sourcekitd_uid_t KeyFuzzyMatching;
static sourcekitd_uid_t KeyFuzzyWeight;
static sourcekitd_uid_t KeyPopularityBonus;
static sourcekitd_uid_t KeyTopNonLiteral;
static sourcekitd_uid_t KeyKind;
static sourcekitd_uid_t KeyResults;
static sourcekitd_uid_t KeyPopular;
static sourcekitd_uid_t KeyUnpopular;
static sourcekitd_uid_t KeySubStructure;
// Parses the tool's command-line arguments into `options`.
//
// Accepted argument forms:
//   --             everything after it is forwarded as compiler arguments
//   <path> or -    the positional source file
//   -name[=value]  a tool option (exactly one leading dash)
//   --name         rejected with an "unrecognized option" error
// Unknown single-dash options are deliberately ignored (historical behavior).
//
// Returns false and sets 'error' on failure.
static bool parseOptions(ArrayRef<const char *> args, TestOptions &options,
                         std::string &error) {
  for (unsigned i = 0; i < args.size(); ++i) {
    StringRef opt, value;
    std::tie(opt, value) = StringRef(args[i]).split('=');

    // Parses `value` as a base-10 unsigned integer. On failure, sets `error`
    // (naming the offending option) and returns None. This also fixes the
    // previously misspelled "-tope=" in the -top= error message.
    auto parseUnsigned = [&](StringRef name) -> Optional<unsigned> {
      unsigned uval;
      if (value.getAsInteger(10, uval)) {
        error = "unrecognized integer value for -" + name.str() + "=";
        return None;
      }
      return uval;
    };

    if (opt == "--") {
      options.compilerArgs = args.slice(i + 1);
      break;
    }

    if (opt == "-" || !opt.startswith("-")) {
      options.sourceFile = args[i];
      continue;
    }

    if (opt.startswith("--")) {
      error = std::string("unrecognized option '") + args[i] + "'";
      return false;
    }

    opt = opt.ltrim("-");
    if (opt == "tok") {
      options.completionToken = value;
    } else if (opt == "sort") {
      if (value == "context") {
        options.sortByName = false;
      } else if (value == "name") {
        options.sortByName = true;
      } else {
        error = "unrecognized argument for -sort=";
        return false;
      }
    } else if (opt == "add-inits-to-top-level") {
      options.addInitsToTopLevel = true;
    } else if (opt == "include-exact-match") {
      options.includeExactMatch = true;
    } else if (opt == "no-include-exact-match") {
      options.includeExactMatch = false;
    } else if (opt == "add-inner-results") {
      options.addInnerResults = true;
    } else if (opt == "no-inner-results") {
      options.addInnerResults = false;
    } else if (opt == "inner-operators") {
      options.addInnerOperators = true;
    } else if (opt == "no-inner-operators") {
      options.addInnerOperators = false;
    } else if (opt == "depth") {
      options.useImportDepth = true;
    } else if (opt == "no-depth") {
      options.useImportDepth = false;
    } else if (opt == "fuzz") {
      options.fuzzyMatching = true;
    } else if (opt == "no-fuzz") {
      options.fuzzyMatching = false;
    } else if (opt == "fuzzy-weight") {
      auto uval = parseUnsigned("fuzzy-weight");
      if (!uval)
        return false;
      options.fuzzyWeight = *uval;
    } else if (opt == "popularity-bonus") {
      auto uval = parseUnsigned("popularity-bonus");
      if (!uval)
        return false;
      options.popularityBonus = *uval;
    } else if (opt == "group") {
      if (value == "overloads") {
        options.groupOverloads = true;
        // -group=overloads turns stem-grouping off unless it was already set
        // explicitly by an earlier option.
        if (!options.groupStems)
          options.groupStems = false;
      } else if (value == "stems") {
        options.groupStems = true;
      } else if (value == "none") {
        options.groupStems = false;
        options.groupOverloads = false;
      } else {
        error = "unrecognized argument for -group=";
        return false;
      }
    } else if (opt == "start") {
      auto uval = parseUnsigned("start");
      if (!uval)
        return false;
      options.requestStart = *uval;
    } else if (opt == "limit") {
      auto uval = parseUnsigned("limit");
      if (!uval)
        return false;
      options.requestLimit = *uval;
    } else if (opt == "raw") {
      options.rawOutput = true;
    } else if (opt == "structure") {
      options.structureOutput = true;
    } else if (opt == "hide-underscores") {
      auto uval = parseUnsigned("hide-underscores");
      if (!uval)
        return false;
      options.hideUnderscores = *uval;
    } else if (opt == "hide-low-priority") {
      auto uval = parseUnsigned("hide-low-priority");
      if (!uval)
        return false;
      options.hideLowPriority = *uval;
    } else if (opt == "hide-by-name") {
      auto uval = parseUnsigned("hide-by-name");
      if (!uval)
        return false;
      options.hideByName = *uval;
    } else if (opt == "hide-none") {
      options.hideUnderscores = 0;
      options.hideLowPriority = false;
      options.hideByName = false;
    } else if (opt == "popular") {
      options.popularAPI = value;
    } else if (opt == "unpopular") {
      options.unpopularAPI = value;
    } else if (opt == "filter-rules") {
      options.filterRulesJSON = value;
    } else if (opt == "top") {
      auto uval = parseUnsigned("top");
      if (!uval)
        return false;
      options.showTopNonLiteral = *uval;
    }
  }

  if (options.sourceFile.empty()) {
    error = "missing <source-file>";
    return false;
  }

  if (options.completionToken.empty()) {
    error = "missing -tok=<completion-token>";
    return false;
  }

  return true;
}
static int skt_main(int argc, const char **argv);
// Entry point: runs the real driver (skt_main) on a high-priority GCD global
// queue and parks the process in dispatch_main(), which never returns; the
// process exits with skt_main's return code from inside the block. This keeps
// the main thread free to service libdispatch, which sourcekitd's
// notification handler relies on (see sourcekitd_set_notification_handler in
// skt_main).
int main(int argc, const char **argv) {
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0), ^{
int ret = skt_main(argc, argv);
exit(ret);
});
dispatch_main();
}
// Handles asynchronous sourcekitd notifications. Any error response is fatal
// for this test tool: the response description is dumped to stdout and the
// process exits with a non-zero status.
static void notification_receiver(sourcekitd_response_t resp) {
if (sourcekitd_response_is_error(resp)) {
sourcekitd_response_description_dump(resp);
exit(1);
}
}
/// Real entry point (run off the main thread by main()): initializes
/// sourcekitd, interns every key UID the tool uses, parses the command line,
/// runs the test invocation, and shuts sourcekitd down again.
static int skt_main(int argc, const char **argv) {
  llvm::sys::PrintStackTraceOnErrorSignal(argv[0]);
  sourcekitd_initialize();
  sourcekitd_set_notification_handler(^(sourcekitd_response_t resp) {
    notification_receiver(resp);
  });
  // Intern all request/response key UIDs once, up front. These are the
  // globals used throughout the rest of the file.
  KeyRequest = sourcekitd_uid_get_from_cstr("key.request");
  KeyCompilerArgs = sourcekitd_uid_get_from_cstr("key.compilerargs");
  KeyOffset = sourcekitd_uid_get_from_cstr("key.offset");
  KeyLength = sourcekitd_uid_get_from_cstr("key.length");
  KeyKind = sourcekitd_uid_get_from_cstr("key.kind");
  KeyCodeCompleteOptions =
      sourcekitd_uid_get_from_cstr("key.codecomplete.options");
  KeySortByName = sourcekitd_uid_get_from_cstr("key.codecomplete.sort.byname");
  KeyUseImportDepth =
      sourcekitd_uid_get_from_cstr("key.codecomplete.sort.useimportdepth");
  KeyGroupOverloads =
      sourcekitd_uid_get_from_cstr("key.codecomplete.group.overloads");
  KeyGroupStems = sourcekitd_uid_get_from_cstr("key.codecomplete.group.stems");
  KeyFilterText = sourcekitd_uid_get_from_cstr("key.codecomplete.filtertext");
  KeyFilterRules = sourcekitd_uid_get_from_cstr("key.codecomplete.filterrules");
  KeyRequestLimit =
      sourcekitd_uid_get_from_cstr("key.codecomplete.requestlimit");
  KeyRequestStart =
      sourcekitd_uid_get_from_cstr("key.codecomplete.requeststart");
  KeyHideUnderscores =
      sourcekitd_uid_get_from_cstr("key.codecomplete.hideunderscores");
  KeyHideLowPriority =
      sourcekitd_uid_get_from_cstr("key.codecomplete.hidelowpriority");
  KeyHideByName = sourcekitd_uid_get_from_cstr("key.codecomplete.hidebyname");
  KeyIncludeExactMatch =
      sourcekitd_uid_get_from_cstr("key.codecomplete.includeexactmatch");
  KeyAddInnerResults =
      sourcekitd_uid_get_from_cstr("key.codecomplete.addinnerresults");
  KeyAddInnerOperators =
      sourcekitd_uid_get_from_cstr("key.codecomplete.addinneroperators");
  KeyAddInitsToTopLevel =
      sourcekitd_uid_get_from_cstr("key.codecomplete.addinitstotoplevel");
  KeyFuzzyMatching =
      sourcekitd_uid_get_from_cstr("key.codecomplete.fuzzymatching");
  KeyFuzzyWeight =
      sourcekitd_uid_get_from_cstr("key.codecomplete.sort.fuzzyweight");
  KeyPopularityBonus =
      sourcekitd_uid_get_from_cstr("key.codecomplete.sort.popularitybonus");
  KeyTopNonLiteral =
      sourcekitd_uid_get_from_cstr("key.codecomplete.showtopnonliteralresults");
  KeySourceFile = sourcekitd_uid_get_from_cstr("key.sourcefile");
  KeySourceText = sourcekitd_uid_get_from_cstr("key.sourcetext");
  KeyName = sourcekitd_uid_get_from_cstr("key.name");
  KeyNameOffset = sourcekitd_uid_get_from_cstr("key.nameoffset");
  KeyNameLength = sourcekitd_uid_get_from_cstr("key.namelength");
  KeyBodyOffset = sourcekitd_uid_get_from_cstr("key.bodyoffset");
  KeyBodyLength = sourcekitd_uid_get_from_cstr("key.bodylength");
  KeyThrowOffset = sourcekitd_uid_get_from_cstr("key.throwoffset");
  KeyThrowLength = sourcekitd_uid_get_from_cstr("key.throwlength");
  KeyIsLocal = sourcekitd_uid_get_from_cstr("key.is_local");
  KeyDescription = sourcekitd_uid_get_from_cstr("key.description");
  KeyResults = sourcekitd_uid_get_from_cstr("key.results");
  KeyPopular = sourcekitd_uid_get_from_cstr("key.popular");
  KeyUnpopular = sourcekitd_uid_get_from_cstr("key.unpopular");
  KeySubStructure = sourcekitd_uid_get_from_cstr("key.substructure");
  // Skip argv[0]; everything else is parsed by parseOptions.
  auto Args = llvm::makeArrayRef(argv + 1, argc - 1);
  TestOptions options;
  std::string error;
  if (!parseOptions(Args, options, error)) {
    llvm::errs() << "usage: complete-test -tok=A file\n" << error << "\n";
    return 1;
  }
  int ret = handleTestInvocation(options);
  sourcekitd_shutdown();
  return ret;
}
/// Strips code-completion tokens of the form "#^NAME^#" or
/// "#^NAME,prefix1,prefix2^#" from \p Input and returns the cleaned text.
///
/// For the token whose name matches \p TokenName, a single '\0' placeholder is
/// inserted at its position, *CompletionOffset is set to that offset within
/// the cleaned buffer, and any comma-separated entries after the name are
/// appended to \p prefixes (used later as filter texts). If no matching token
/// is found, *CompletionOffset is left as ~0U.
static std::string
removeCodeCompletionTokens(StringRef Input, StringRef TokenName,
                           SmallVectorImpl<std::string> &prefixes,
                           unsigned *CompletionOffset) {
  assert(TokenName.size() >= 1);
  *CompletionOffset = ~0U;
  std::string CleanFile;
  CleanFile.reserve(Input.size());
  // Group 1: the token name; optional group 2: ",prefix1,prefix2,...".
  std::regex tokenRegex(R"(#\^([^^,]+)(,[^^]*)?\^#)");
  auto pos = Input.begin();
  while (pos != Input.end()) {
    std::match_results<StringRef::iterator> match;
    std::regex_search(pos, Input.end(), match, tokenRegex);
    if (match.empty()) {
      CleanFile.append(pos, Input.end()); // remaining text
      pos = Input.end();
      break;
    }
    // Update the buffer: copy the text before the token, then skip past it.
    CleanFile.append(pos, match.prefix().second);
    pos = match.suffix().first;
    // Check the token. Tokens with other names are simply removed.
    assert(match.size() == 2 || match.size() == 3);
    if (match[1].str() != TokenName)
      continue;
    *CompletionOffset = CleanFile.size();
    CleanFile.push_back('\0');
    if (match.size() == 2 || !match[2].matched)
      continue;
    // Split the ",a,b,c" suffix into individual prefixes.
    std::string fullMatch = match[2].str();
    assert(fullMatch[0] == ',');
    StringRef next = StringRef(fullMatch).split(',').second;
    while (next != "") {
      auto split = next.split(',');
      prefixes.push_back(split.first);
      next = split.second;
    }
  }
  return CleanFile;
}
namespace {
/// Pretty-prints a code-completion response.
///
/// In the default mode each result's description is printed one per line,
/// indented by group nesting depth. In "structure" mode the description text
/// is annotated inline with {name:...}, {params:...}, {l:/n:...}, {t:...}
/// and {throws:...} markers reconstructed from the result's key.substructure
/// offset/length fields.
class ResponsePrinter {
  llvm::raw_ostream &OS;
  unsigned indentWidth;        // spaces added per nesting level
  unsigned currentIndentation; // current leading-space count
  bool structuredOutput = false; // emit substructure markers?
public:
  ResponsePrinter(llvm::raw_ostream &OS, unsigned indentWidth,
                  unsigned startingIndent, bool structure)
      : OS(OS), indentWidth(indentWidth), currentIndentation(startingIndent),
        structuredOutput(structure) {}
  /// Entry point: prints the response's top-level dictionary as a group.
  void printResponse(sourcekitd_response_t resp) {
    auto dict = sourcekitd_response_get_value(resp);
    printGroup(dict);
  }
  /// Prints each element of a results array (groups and/or completions).
  void printArray(sourcekitd_variant_t array) {
    sourcekitd_variant_array_apply(
        array, ^bool(size_t index, sourcekitd_variant_t value) {
          printGroupOrCompletion(value);
          return true; // keep iterating
        });
  }
  /// Dispatches on key.kind: nested completion group vs. single result.
  void printGroupOrCompletion(sourcekitd_variant_t value) {
    static sourcekitd_uid_t GroupUID =
        sourcekitd_uid_get_from_cstr("source.lang.swift.codecomplete.group");
    if (GroupUID == sourcekitd_variant_dictionary_get_uid(value, KeyKind)) {
      printGroup(value);
    } else {
      printCompletion(value);
    }
  }
  /// Prints one completion result, annotating it with structure markers when
  /// structured output is enabled.
  void printCompletion(sourcekitd_variant_t completion) {
    assert(sourcekitd_variant_get_type(completion) ==
           SOURCEKITD_VARIANT_TYPE_DICTIONARY);
    // FIXME: kind, semantic context?
    StringRef desc = dictGetString(completion, KeyDescription);
    if (!structuredOutput) {
      indent() << desc << "\n";
      return;
    }
    auto structure =
        sourcekitd_variant_dictionary_get_value(completion, KeySubStructure);
    if (sourcekitd_variant_get_type(structure) ==
        SOURCEKITD_VARIANT_TYPE_NULL) {
      indent() << "(no structure)\n";
      return;
    }
    // 'index' walks 'desc'; printUntil copies characters up to (exclusive)
    // 'end', letting the {...} markers be interleaved with description text.
    unsigned index = 0;
    auto printUntil =
        [desc, &index, this](unsigned end) -> llvm::raw_ostream & {
      for (; index != end; ++index)
        OS.write(desc[index]);
      return OS;
    };
    // Reads an offset/length field; asserts it fits in an unsigned.
    auto getUInt = [](sourcekitd_variant_t dict, sourcekitd_uid_t key) {
      auto value = sourcekitd_variant_dictionary_get_int64(dict, key);
      assert(0 <= value && value <= UINT_MAX);
      return value;
    };
    auto baseStart = getUInt(structure, KeyNameOffset);
    auto baseLen = getUInt(structure, KeyNameLength);
    // Example output: {name:basename}({params:{l:x}: {t:Int}, {n:y}: {t:String}})
    indent();
    if (baseLen) {
      printUntil(baseStart) << "{name:";
      printUntil(baseStart + baseLen) << "}";
    }
    auto paramStart = getUInt(structure, KeyBodyOffset);
    auto paramLen = getUInt(structure, KeyBodyLength);
    auto params =
        sourcekitd_variant_dictionary_get_value(structure, KeySubStructure);
    if (sourcekitd_variant_get_type(params) != SOURCEKITD_VARIANT_TYPE_NULL) {
      assert(paramStart >= baseStart + baseLen);
      printUntil(paramStart) << "{params:";
      // Each parameter contributes an optional name marker ('l' for local
      // names, 'n' otherwise) and an optional type marker.
      sourcekitd_variant_array_apply(
          params, ^bool(size_t index, sourcekitd_variant_t param) {
            auto start = getUInt(param, KeyNameOffset);
            auto len = getUInt(param, KeyNameLength);
            auto tStart = getUInt(param, KeyBodyOffset);
            auto tLen = getUInt(param, KeyBodyLength);
            bool isLocalName = getUInt(param, KeyIsLocal);
            if (len) {
              assert(start >= paramStart);
              assert(start + len <= paramStart + paramLen);
              printUntil(start) << "{" << (isLocalName ? "l" : "n") << ":";
              printUntil(start + len) << "}";
            }
            if (tLen != 0) {
              assert(tStart >= start);
              assert(tStart + tLen <= paramStart + paramLen);
              printUntil(tStart) << "{t:";
              printUntil(tStart + tLen) << "}";
            }
            return true;
          });
      printUntil(paramStart + paramLen) << "}";
    } else if (paramLen != 0) {
      // Parameter range exists but has no per-parameter substructure.
      printUntil(paramStart) << "{params:";
      printUntil(paramStart + paramLen) << "}";
    }
    auto throwStart = getUInt(structure, KeyThrowOffset);
    auto throwLength = getUInt(structure, KeyThrowLength);
    if (throwLength != 0) {
      printUntil(throwStart) << "{throws:";
      printUntil(throwStart + throwLength) << "}";
    }
    // Flush the rest of the description.
    printUntil(desc.size()) << "\n";
  }
  /// Prints a (possibly unnamed) group header, then its results one level
  /// deeper. The RAII guard restores the indentation afterwards.
  void printGroup(sourcekitd_variant_t dict) {
    struct RestoreInt {
      unsigned old;
      unsigned &value;
      RestoreInt(unsigned &value) : old(value), value(value) {}
      ~RestoreInt() { value = old; }
    } restoreIndent(currentIndentation);
    StringRef name = dictGetString(dict, KeyName);
    if (!name.empty()) {
      indent() << name << ":\n";
      currentIndentation += indentWidth;
    }
    auto results = sourcekitd_variant_dictionary_get_value(dict, KeyResults);
    printArray(results);
  }
  /// Reads a required string field; asserts it is present and of string type.
  StringRef dictGetString(sourcekitd_variant_t dict, sourcekitd_uid_t key) {
    auto val = sourcekitd_variant_dictionary_get_value(dict, key);
    assert(sourcekitd_variant_get_type(val) == SOURCEKITD_VARIANT_TYPE_STRING);
    StringRef str(sourcekitd_variant_string_get_ptr(val),
                  sourcekitd_variant_string_get_length(val));
    return str;
  }
  /// Writes the current indentation and returns the stream for chaining.
  llvm::raw_ostream &indent() {
    for (unsigned i = 0; i < currentIndentation; ++i)
      OS.write(' ');
    return OS;
  }
};
} // end anonymous namespace
/// Prints \p resp to stdout: raw sourcekitd dump when \p raw is set,
/// otherwise formatted by ResponsePrinter (optionally with structure
/// markers) starting at \p indentation leading spaces.
static void printResponse(sourcekitd_response_t resp, bool raw, bool structure,
                          unsigned indentation) {
  if (raw) {
    sourcekitd_response_description_dump_filedesc(resp, STDOUT_FILENO);
  } else {
    ResponsePrinter printer(llvm::outs(), /*indentWidth=*/4, indentation,
                            structure);
    printer.printResponse(resp);
    llvm::outs().flush();
  }
}
/// Reads \p name into a memory buffer; on failure prints an error to stderr
/// and returns nullptr.
static std::unique_ptr<llvm::MemoryBuffer>
getBufferForFilename(StringRef name) {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> bufferOrErr =
      llvm::MemoryBuffer::getFile(name);
  if (bufferOrErr)
    return std::move(*bufferOrErr);
  llvm::errs() << "error reading '" << name
               << "': " << bufferOrErr.getError().message() << "\n";
  return nullptr;
}
/// Creates a request dictionary pre-populated with the fields shared by all
/// code-completion session requests: the request kind, the completion offset,
/// and the session name. Caller owns (and must release) the result.
static sourcekitd_object_t createBaseRequest(sourcekitd_uid_t requestUID,
                                             const char *name,
                                             unsigned offset) {
  sourcekitd_object_t req =
      sourcekitd_request_dictionary_create(nullptr, nullptr, 0);
  sourcekitd_request_dictionary_set_uid(req, KeyRequest, requestUID);
  sourcekitd_request_dictionary_set_int64(req, KeyOffset, offset);
  sourcekitd_request_dictionary_set_string(req, KeyName, name);
  return req;
}
using HandlerFunc = std::function<bool(sourcekitd_response_t)>;

/// Sends \p request synchronously, hands the response to \p func, disposes of
/// the response, and returns func's verdict (true means error here).
static bool sendRequestSync(sourcekitd_object_t request, HandlerFunc func) {
  sourcekitd_response_t resp = sourcekitd_send_request_sync(request);
  const bool isError = func(resp);
  sourcekitd_response_dispose(resp);
  return isError;
}
/// Builds and synchronously sends a code-completion request (open/update) for
/// \p name at \p offset, forwarding the response to \p func.
///
/// Returns true on error: either the filter-rules file could not be
/// read/parsed, or \p func reported an error on the response.
static bool codeCompleteRequest(sourcekitd_uid_t requestUID, const char *name,
                                unsigned offset, const char *sourceText,
                                const char *filterText, TestOptions &options,
                                HandlerFunc func) {
  auto request = createBaseRequest(requestUID, name, offset);
  sourcekitd_request_dictionary_set_string(request, KeySourceFile, name);
  sourcekitd_request_dictionary_set_string(request, KeySourceText, sourceText);
  auto opts = sourcekitd_request_dictionary_create(nullptr, nullptr, 0);
  bool optionError = false;
  {
    // Only forward options the user set explicitly; absent options keep the
    // service-side defaults.
    auto addBoolOption = [&](sourcekitd_uid_t key, Optional<bool> option) {
      if (option)
        sourcekitd_request_dictionary_set_int64(opts, key, *option);
    };
    addBoolOption(KeySortByName, options.sortByName);
    addBoolOption(KeyUseImportDepth, options.useImportDepth);
    addBoolOption(KeyGroupOverloads, options.groupOverloads);
    addBoolOption(KeyGroupStems, options.groupStems);
    addBoolOption(KeyIncludeExactMatch, options.includeExactMatch);
    addBoolOption(KeyAddInnerResults, options.addInnerResults);
    addBoolOption(KeyAddInnerOperators, options.addInnerOperators);
    addBoolOption(KeyAddInitsToTopLevel, options.addInitsToTopLevel);
    addBoolOption(KeyFuzzyMatching, options.fuzzyMatching);
    addBoolOption(KeyHideLowPriority, options.hideLowPriority);
    addBoolOption(KeyHideByName, options.hideByName);
    auto addIntOption = [&](sourcekitd_uid_t key, Optional<unsigned> option) {
      if (option)
        sourcekitd_request_dictionary_set_int64(opts, key, *option);
    };
    addIntOption(KeyRequestStart, options.requestStart);
    addIntOption(KeyRequestLimit, options.requestLimit);
    addIntOption(KeyHideUnderscores, options.hideUnderscores);
    addIntOption(KeyFuzzyWeight, options.fuzzyWeight);
    addIntOption(KeyPopularityBonus, options.popularityBonus);
    addIntOption(KeyTopNonLiteral, options.showTopNonLiteral);
    if (filterText)
      sourcekitd_request_dictionary_set_string(opts, KeyFilterText, filterText);
    if (!options.filterRulesJSON.empty()) {
      // Filter rules come from a YAML/JSON file parsed by sourcekitd itself.
      auto buffer = getBufferForFilename(options.filterRulesJSON);
      if (!buffer) {
        optionError = true;
      } else {
        char *err = nullptr;
        auto dict = sourcekitd_request_create_from_yaml(
            buffer->getBuffer().data(), &err);
        if (!dict) {
          assert(err);
          llvm::errs() << err;
          free(err);
          optionError = true;
        } else {
          sourcekitd_request_dictionary_set_value(opts, KeyFilterRules, dict);
          // Release our reference; the dictionary retains the value (matching
          // how 'opts' and 'args' are handled below).
          sourcekitd_request_release(dict);
        }
      }
    }
  }
  if (optionError) {
    // Fixed: the original returned directly from the error paths above,
    // leaking both 'opts' and 'request' (and returned the int literal 1 from
    // a bool function).
    sourcekitd_request_release(opts);
    sourcekitd_request_release(request);
    return true;
  }
  sourcekitd_request_dictionary_set_value(request, KeyCodeCompleteOptions,
                                          opts);
  sourcekitd_request_release(opts);
  auto args = sourcekitd_request_array_create(nullptr, 0);
  {
    sourcekitd_request_array_set_string(args, SOURCEKITD_ARRAY_APPEND, name);
    if (const char *sdk = getenv("SDKROOT")) {
      sourcekitd_request_array_set_string(args, SOURCEKITD_ARRAY_APPEND,
                                          "-sdk");
      sourcekitd_request_array_set_string(args, SOURCEKITD_ARRAY_APPEND, sdk);
    }
    // Add -- options.
    for (const char *arg : options.compilerArgs)
      sourcekitd_request_array_set_string(args, SOURCEKITD_ARRAY_APPEND, arg);
  }
  sourcekitd_request_dictionary_set_value(request, KeyCompilerArgs, args);
  sourcekitd_request_release(args);
  // Send the request!
  bool result = sendRequestSync(request, func);
  sourcekitd_request_release(request);
  return result;
}
/// Appends every line of \p filename to \p result.
/// Returns true on error (file could not be opened), false on success.
static bool readPopularAPIList(StringRef filename,
                               std::vector<std::string> &result) {
  std::ifstream in(filename);
  if (!in.is_open()) {
    llvm::errs() << "error opening '" << filename << "'\n";
    return true;
  }
  for (std::string line; std::getline(in, line);)
    result.push_back(std::move(line));
  return false;
}
/// Sends a setpopularapi request populated from the popular/unpopular API
/// list files in \p options, if either was provided.
/// Returns true on error (file read failure or service error).
static bool setupPopularAPI(const TestOptions &options) {
  if (options.popularAPI.empty() && options.unpopularAPI.empty())
    return false;
  sourcekitd_uid_t RequestCodeCompleteSetPopularAPI =
      sourcekitd_uid_get_from_cstr("source.request.codecomplete.setpopularapi");
  auto req = sourcekitd_request_dictionary_create(nullptr, nullptr, 0);
  sourcekitd_request_dictionary_set_uid(req, KeyRequest,
                                        RequestCodeCompleteSetPopularAPI);
  // Reads a name-per-line file and attaches it to the request under \p key.
  // Returns true on read failure.
  auto addPopularList = [&req](StringRef filename, sourcekitd_uid_t key) {
    std::vector<std::string> names;
    if (readPopularAPIList(filename, names))
      return true;
    sourcekitd_object_t popular = sourcekitd_request_array_create(nullptr, 0);
    for (const auto &name : names)
      sourcekitd_request_array_set_string(popular, SOURCEKITD_ARRAY_APPEND,
                                          name.c_str());
    sourcekitd_request_dictionary_set_value(req, key, popular);
    // Release our reference; the dictionary retains the array (matching the
    // opts/args handling in codeCompleteRequest).
    sourcekitd_request_release(popular);
    return false;
  };
  if (!options.popularAPI.empty() &&
      addPopularList(options.popularAPI, KeyPopular)) {
    // Fixed: the original leaked 'req' on this early return.
    sourcekitd_request_release(req);
    return true;
  }
  if (!options.unpopularAPI.empty() &&
      addPopularList(options.unpopularAPI, KeyUnpopular)) {
    // Fixed: the original leaked 'req' on this early return.
    sourcekitd_request_release(req);
    return true;
  }
  auto resp = sourcekitd_send_request_sync(req);
  bool fail = false;
  if (sourcekitd_response_is_error(resp)) {
    fail = true;
    sourcekitd_response_description_dump(resp);
  }
  sourcekitd_response_dispose(resp);
  sourcekitd_request_release(req);
  return fail;
}
/// Runs one complete-test invocation: strips the completion token from the
/// source file, opens a completion session at its offset, optionally re-runs
/// the session once per filter-text prefix embedded in the token, and closes
/// the session. Returns nonzero on error.
static int handleTestInvocation(TestOptions &options) {
  StringRef SourceFilename = options.sourceFile;
  auto SourceBuf = getBufferForFilename(SourceFilename);
  if (!SourceBuf)
    return 1;
  unsigned CodeCompletionOffset;
  SmallVector<std::string, 4> prefixes;
  std::string CleanFile = removeCodeCompletionTokens(
      SourceBuf->getBuffer(), options.completionToken, prefixes,
      &CodeCompletionOffset);
  // removeCodeCompletionTokens leaves the offset at ~0U when no token with
  // the requested name exists.
  if (CodeCompletionOffset == ~0U) {
    llvm::errs() << "cannot find code completion token in source file\n";
    return 1;
  }
  sourcekitd_uid_t RequestCodeCompleteOpen =
      sourcekitd_uid_get_from_cstr("source.request.codecomplete.open");
  sourcekitd_uid_t RequestCodeCompleteClose =
      sourcekitd_uid_get_from_cstr("source.request.codecomplete.close");
  sourcekitd_uid_t RequestCodeCompleteUpdate =
      sourcekitd_uid_get_from_cstr("source.request.codecomplete.update");
  if (setupPopularAPI(options))
    return 1;
  // Open the connection and get the first set of results.
  // NOTE(review): the handler lambdas below take sourcekitd_object_t while
  // HandlerFunc is declared over sourcekitd_response_t — both are opaque
  // pointers here, but the response type would be more accurate; confirm
  // before changing.
  bool isError = codeCompleteRequest(
      RequestCodeCompleteOpen, SourceFilename.data(), CodeCompletionOffset,
      CleanFile.c_str(), /*filterText*/ nullptr, options,
      [&](sourcekitd_object_t response) -> bool {
        if (sourcekitd_response_is_error(response)) {
          sourcekitd_response_description_dump(response);
          return true;
        }
        // If there are no prefixes, just dump all the results.
        if (prefixes.empty())
          printResponse(response, options.rawOutput, options.structureOutput,
                        /*indentation*/ 0);
        return false;
      });
  if (isError)
    return isError;
  // One update request per filter-text prefix from the token.
  for (auto &prefix : prefixes) {
    isError |= codeCompleteRequest(
        RequestCodeCompleteUpdate, SourceFilename.data(), CodeCompletionOffset,
        CleanFile.c_str(), prefix.c_str(), options,
        [&](sourcekitd_object_t response) -> bool {
          if (sourcekitd_response_is_error(response)) {
            sourcekitd_response_description_dump(response);
            return true;
          }
          llvm::outs() << "Results for filterText: " << prefix << " [\n";
          llvm::outs().flush();
          printResponse(response, options.rawOutput, options.structureOutput,
                        /*indentation*/ 4);
          llvm::outs() << "]\n";
          llvm::outs().flush();
          return false;
        });
    if (isError)
      break;
  }
  // Close the code completion connection.
  auto request = createBaseRequest(RequestCodeCompleteClose,
                                   SourceFilename.data(), CodeCompletionOffset);
  isError |= sendRequestSync(request, [&](sourcekitd_object_t response) {
    if (sourcekitd_response_is_error(response)) {
      sourcekitd_response_description_dump(response);
      return true;
    }
    return false;
  });
  sourcekitd_request_release(request);
  return isError;
}
| apache-2.0 |
Java8-CNAPI-Team/Java8CN | org/omg/CORBA/PrincipalHolder.java | 2591 | /*
* Copyright (c) 1996, 2004, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
package org.omg.CORBA;
import org.omg.CORBA.portable.Streamable;
import org.omg.CORBA.portable.InputStream;
import org.omg.CORBA.portable.OutputStream;
/**
* The Holder for <tt>Principal</tt>. For more information on
* Holder files, see <a href="doc-files/generatedfiles.html#holder">
* "Generated Files: Holder Files"</a>.<P>
* A container class for values of type <code>Principal</code>
* that is used to store "out" and "inout" parameters in IDL methods.
* If an IDL method signature has an IDL <code>Principal</code> as an "out"
* or "inout" parameter, the programmer must pass an instance of
* <code>PrincipalHolder</code> as the corresponding
* parameter in the method invocation; for "inout" parameters, the programmer
* must also fill the "in" value to be sent to the server.
* Before the method invocation returns, the ORB will fill in the
* value corresponding to the "out" value returned from the server.
* <P>
* If <code>myPrincipalHolder</code> is an instance of <code>PrincipalHolder</code>,
* the value stored in its <code>value</code> field can be accessed with
* <code>myPrincipalHolder.value</code>.
*
* @since JDK1.2
* @deprecated Deprecated by CORBA 2.2.
*/
@Deprecated
public final class PrincipalHolder implements Streamable {
/**
* The <code>Principal</code> value held by this <code>PrincipalHolder</code>
* object.
*/
public Principal value;
/**
* Constructs a new <code>PrincipalHolder</code> object with its
* <code>value</code> field initialized to <code>null</code>.
*/
public PrincipalHolder() {
}
/**
* Constructs a new <code>PrincipalHolder</code> object with its
* <code>value</code> field initialized to the given
* <code>Principal</code> object.
* @param initial the <code>Principal</code> with which to initialize
* the <code>value</code> field of the newly-created
* <code>PrincipalHolder</code> object
*/
public PrincipalHolder(Principal initial) {
value = initial;
}
public void _read(InputStream input) {
value = input.read_Principal();
}
public void _write(OutputStream output) {
output.write_Principal(value);
}
public org.omg.CORBA.TypeCode _type() {
return ORB.init().get_primitive_tc(TCKind.tk_Principal);
}
}
| apache-2.0 |
Addepar/buck | src/com/facebook/buck/worker/WorkerJobResult.java | 1048 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.worker;
import com.facebook.buck.core.util.immutables.BuckStyleValue;
import java.util.Optional;
/**
 * Result of a single job run by an external worker process: the exit code
 * plus whatever the worker captured from its standard output/error streams.
 */
@BuckStyleValue
public interface WorkerJobResult {

  /** Exit code the worker reported for this job. */
  int getExitCode();

  /** Captured standard output, if any was recorded. */
  Optional<String> getStdout();

  /** Captured standard error, if any was recorded. */
  Optional<String> getStderr();

  /** Static factory delegating to the generated immutable implementation. */
  static WorkerJobResult of(int exitCode, Optional<String> stdout, Optional<String> stderr) {
    return ImmutableWorkerJobResult.of(exitCode, stdout, stderr);
  }
}
| apache-2.0 |
saurav4342/Hygieia-Devops-Dashboard | collectors/scm/bitbucket/src/main/java/com/capitalone/dashboard/model/GitRepo.java | 2841 | package com.capitalone.dashboard.model;
import java.util.Date;
import java.util.Objects;
/**
 * CollectorItem extension to store the git repo url and branch.
 *
 * <p>All values are kept in the inherited options map under well-known keys;
 * the typed accessors below simply read/write that map. Equality is defined
 * by (branch, repo URL) only.
 */
public class GitRepo extends CollectorItem {
    private static final String REPO_URL = "url"; // http://git.company.com/jack/somejavacode
    private static final String BRANCH = "branch"; // master, development etc.
    private static final String USER_ID = "userID";
    private static final String PASSWORD = "password";
    private static final String LAST_UPDATE_TIME = "lastUpdate";
    private static final String LAST_UPDATE_COMMIT = "lastUpdateCommit"; // Bitbucket Server api uses last update commit instead of time

    public String getUserId() {
        return (String) getOptions().get(USER_ID);
    }

    public void setUserId(String userId) {
        getOptions().put(USER_ID, userId);
    }

    public String getPassword() {
        return (String) getOptions().get(PASSWORD);
    }

    public void setPassword(String password) {
        getOptions().put(PASSWORD, password);
    }

    public String getRepoUrl() {
        return (String) getOptions().get(REPO_URL);
    }

    public void setRepoUrl(String instanceUrl) {
        getOptions().put(REPO_URL, instanceUrl);
    }

    public String getBranch() {
        return (String) getOptions().get(BRANCH);
    }

    public void setBranch(String branch) {
        getOptions().put(BRANCH, branch);
    }

    public Date getLastUpdateTime() {
        Object latest = getOptions().get(LAST_UPDATE_TIME);
        return (Date) latest;
    }

    public void setLastUpdateTime(Date date) {
        getOptions().put(LAST_UPDATE_TIME, date);
    }

    public String getLastUpdateCommit() {
        return (String) getOptions().get(LAST_UPDATE_COMMIT);
    }

    public void setLastUpdateCommit(String sha) {
        getOptions().put(LAST_UPDATE_COMMIT, sha);
    }

    /**
     * Two repos are equal iff they have the same branch and repo URL
     * (null-safe). Rewritten with {@link Objects#equals} — behaviorally
     * identical to the previous manual null-check chains.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        GitRepo other = (GitRepo) obj;
        return Objects.equals(getBranch(), other.getBranch())
                && Objects.equals(getRepoUrl(), other.getRepoUrl());
    }

    /**
     * Hash over (branch, repo URL). {@link Objects#hash} performs the same
     * 31-based accumulation as the previous hand-written version, so hash
     * values are unchanged.
     */
    @Override
    public int hashCode() {
        return Objects.hash(getBranch(), getRepoUrl());
    }
}
| apache-2.0 |
leafclick/intellij-community | java/debugger/impl/src/com/intellij/debugger/memory/action/ShowInstancesAction.java | 1425 | /*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.debugger.memory.action;
import com.intellij.xdebugger.memory.ui.TypeInfo;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.Presentation;
import org.jetbrains.annotations.NotNull;
/**
 * Base class for actions that show instances of a selected class; the action
 * is enabled only when instance information can be retrieved, and its label
 * then includes the instance count.
 */
abstract class ShowInstancesAction extends ClassesActionBase {
  @Override
  public void update(@NotNull AnActionEvent e) {
    final Presentation presentation = e.getPresentation();
    final TypeInfo selectedClass = getSelectedClass(e);
    final boolean available =
      isEnabled(e) && selectedClass != null && selectedClass.canGetInstanceInfo();

    presentation.setEnabled(available);
    if (available) {
      presentation.setText(String.format("%s (%d)", getLabel(), getInstancesCount(e)));
    }
  }

  /** Base label for the action text, e.g. "Show Instances". */
  protected abstract String getLabel();

  /** Number of instances to display next to the label. */
  protected abstract int getInstancesCount(AnActionEvent e);
}
| apache-2.0 |
shankarh/geode | geode-core/src/main/java/org/apache/geode/redis/internal/ExecutionHandlerContext.java | 13360 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.redis.internal;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.DecoderException;
import io.netty.util.concurrent.EventExecutor;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.geode.LogWriter;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.TransactionException;
import org.apache.geode.cache.TransactionId;
import org.apache.geode.cache.UnsupportedOperationInTransactionException;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.cache.query.RegionNotFoundException;
import org.apache.geode.redis.internal.executor.transactions.TransactionExecutor;
import org.apache.geode.redis.GeodeRedisServer;
/**
* This class extends {@link ChannelInboundHandlerAdapter} from Netty and it is the last part of the
* channel pipeline. The {@link ByteToCommandDecoder} forwards a {@link Command} to this class which
* executes it and sends the result back to the client. Additionally, all exception handling is done
* by this class.
* <p>
* Besides being part of Netty's pipeline, this class also serves as a context to the execution of a
* command. It abstracts transactions, provides access to the {@link RegionProvider} and anything
* else an executing {@link Command} may need.
*
*
*/
public class ExecutionHandlerContext extends ChannelInboundHandlerAdapter {
  // Retry pacing for transient region-destroyed failures: 100ms between
  // attempts, capped at ~60 seconds total.
  private static final int WAIT_REGION_DSTRYD_MILLIS = 100;
  private static final int MAXIMUM_NUM_RETRIES = (1000 * 60) / WAIT_REGION_DSTRYD_MILLIS; // 60 seconds total

  private final Cache cache;
  private final GeodeRedisServer server;
  private final LogWriter logger;
  // The single client channel this context serves (one context per channel).
  private final Channel channel;
  // True while a flush task is pending; avoids scheduling redundant flushes.
  private final AtomicBoolean needChannelFlush;
  private final Runnable flusher;
  // Executor of the last pipeline handler; all flushes run here.
  private final EventExecutor lastExecutor;
  private final ByteBufAllocator byteBufAllocator;
  /**
   * TransactionId for any transactions started by this client
   */
  private TransactionId transactionID;
  /**
   * Queue of commands for a given transaction
   */
  private Queue<Command> transactionQueue;
  private final RegionProvider regionProvider;
  // Server-wide auth password; null means authentication is disabled.
  private final byte[] authPwd;
  private boolean isAuthenticated;
/**
* Default constructor for execution contexts.
*
* @param ch Channel used by this context, should be one to one
* @param cache The Geode cache instance of this vm
* @param regionProvider The region provider of this context
* @param server Instance of the server it is attached to, only used so that any execution can
* initiate a shutdwon
* @param pwd Authentication password for each context, can be null
*/
public ExecutionHandlerContext(Channel ch, Cache cache, RegionProvider regionProvider,
GeodeRedisServer server, byte[] pwd) {
if (ch == null || cache == null || regionProvider == null || server == null)
throw new IllegalArgumentException("Only the authentication password may be null");
this.cache = cache;
this.server = server;
this.logger = cache.getLogger();
this.channel = ch;
this.needChannelFlush = new AtomicBoolean(false);
this.flusher = new Runnable() {
@Override
public void run() {
flushChannel();
}
};
this.lastExecutor = channel.pipeline().lastContext().executor();
this.byteBufAllocator = channel.alloc();
this.transactionID = null;
this.transactionQueue = null; // Lazy
this.regionProvider = regionProvider;
this.authPwd = pwd;
this.isAuthenticated = pwd != null ? false : true;
}
private void flushChannel() {
while (needChannelFlush.getAndSet(false)) {
channel.flush();
}
}
  // Writes without flushing; flushes are batched by scheduling a single
  // flusher task on the last handler's executor, so bursts of small writes
  // produce few flushes. The getAndSet ensures only one flusher is pending.
  private void writeToChannel(ByteBuf message) {
    channel.write(message, channel.voidPromise());
    if (!needChannelFlush.getAndSet(true)) {
      this.lastExecutor.execute(flusher);
    }
  }
/**
* This will handle the execution of received commands
*/
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
Command command = (Command) msg;
executeCommand(ctx, command);
}
/**
* Exception handler for the entire pipeline
*/
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
if (cause instanceof IOException) {
channelInactive(ctx);
return;
}
ByteBuf response = getExceptionResponse(ctx, cause);
writeToChannel(response);
}
  // Maps a pipeline exception to the Redis error reply sent to the client.
  private ByteBuf getExceptionResponse(ChannelHandlerContext ctx, Throwable cause) {
    ByteBuf response;
    if (cause instanceof RedisDataTypeMismatchException)
      // Operation applied to a key holding the wrong data type (WRONGTYPE).
      response = Coder.getWrongTypeResponse(this.byteBufAllocator, cause.getMessage());
    else if (cause instanceof DecoderException
        && cause.getCause() instanceof RedisCommandParserException)
      // The decoder could not parse the incoming command bytes.
      response =
          Coder.getErrorResponse(this.byteBufAllocator, RedisConstants.PARSING_EXCEPTION_MESSAGE);
    else if (cause instanceof RegionCreationException) {
      this.logger.error(cause);
      response =
          Coder.getErrorResponse(this.byteBufAllocator, RedisConstants.ERROR_REGION_CREATION);
    } else if (cause instanceof InterruptedException || cause instanceof CacheClosedException)
      // Server is going down; tell the client rather than failing silently.
      response =
          Coder.getErrorResponse(this.byteBufAllocator, RedisConstants.SERVER_ERROR_SHUTDOWN);
    else if (cause instanceof IllegalStateException) {
      response = Coder.getErrorResponse(this.byteBufAllocator, cause.getMessage());
    } else {
      // Unknown failure: log it and return a generic server error.
      if (this.logger.errorEnabled())
        this.logger.error("GeodeRedisServer-Unexpected error handler for " + ctx.channel(), cause);
      response = Coder.getErrorResponse(this.byteBufAllocator, RedisConstants.SERVER_ERROR_MESSAGE);
    }
    return response;
  }
@Override
public void channelInactive(ChannelHandlerContext ctx) {
if (logger.fineEnabled())
logger.fine("GeodeRedisServer-Connection closing with " + ctx.channel().remoteAddress());
ctx.channel().close();
ctx.close();
}
/**
 * Dispatches a decoded command to its executor, enforcing authentication and
 * transaction semantics.
 *
 * <p>Authenticated clients: SHUTDOWN stops the server; while a transaction is
 * open, non-transaction commands are queued (executed via
 * {@code executeWithTransaction}) and answered with +QUEUED; otherwise the
 * command runs immediately and its response is written back. Unauthenticated
 * clients may only QUIT or AUTH; anything else gets a NOAUTH error.
 *
 * @param ctx channel context for this client
 * @param command the decoded command to run
 * @throws Exception if execution fails after retries (handled by exceptionCaught)
 */
private void executeCommand(ChannelHandlerContext ctx, Command command) throws Exception {
  RedisCommandType type = command.getCommandType();
  Executor exec = type.getExecutor();
  if (isAuthenticated) {
    if (type == RedisCommandType.SHUTDOWN) {
      this.server.shutdown();
      return;
    }
    // Inside a transaction, ordinary commands are buffered; transaction-control
    // commands (TransactionExecutor) run directly.
    if (hasTransaction() && !(exec instanceof TransactionExecutor))
      executeWithTransaction(ctx, exec, command);
    else
      executeWithoutTransaction(exec, command);
    if (hasTransaction() && command.getCommandType() != RedisCommandType.MULTI) {
      // The real response is deferred until EXEC; acknowledge queuing now.
      writeToChannel(
          Coder.getSimpleStringResponse(this.byteBufAllocator, RedisConstants.COMMAND_QUEUED));
    } else {
      ByteBuf response = command.getResponse();
      writeToChannel(response);
    }
  } else if (type == RedisCommandType.QUIT) {
    exec.executeCommand(command, this);
    ByteBuf response = command.getResponse();
    writeToChannel(response);
    channelInactive(ctx);
  } else if (type == RedisCommandType.AUTH) {
    exec.executeCommand(command, this);
    ByteBuf response = command.getResponse();
    writeToChannel(response);
  } else {
    // Not authenticated and not QUIT/AUTH: reject.
    ByteBuf r = Coder.getNoAuthResponse(this.byteBufAllocator, RedisConstants.ERROR_NOT_AUTH);
    writeToChannel(r);
  }
}
/**
 * Private helper method to execute a command without a transaction, done for special exception
 * handling neatness.
 *
 * <p>Retries up to {@code MAXIMUM_NUM_RETRIES} times; failures that look like a
 * region lifecycle race (region destroyed / not yet found / query target gone)
 * are retried after a short sleep, on the assumption they are transient.
 *
 * @param exec Executor to use
 * @param command Command to execute
 * @throws Exception Throws exception if exception is from within execution and not to be handled
 */
private void executeWithoutTransaction(final Executor exec, Command command) throws Exception {
  Exception cause = null;
  for (int i = 0; i < MAXIMUM_NUM_RETRIES; i++) {
    try {
      exec.executeCommand(command, this);
      return;
    } catch (Exception e) {
      // Remember the most recent failure; only region races get a backoff sleep.
      cause = e;
      if (e instanceof RegionDestroyedException || e instanceof RegionNotFoundException
          || e.getCause() instanceof QueryInvocationTargetException)
        Thread.sleep(WAIT_REGION_DSTRYD_MILLIS);
    }
  }
  // All retries exhausted: surface the last failure to the caller.
  throw cause;
}
/**
 * Executes a command inside the client's suspended transaction: resumes the
 * transaction, runs the command (converting transaction-specific failures into
 * error responses stored on the command), queues the command for EXEC, then
 * suspends the transaction again and remembers the new transaction id.
 *
 * @param ctx channel context, used to build generic error responses
 * @param exec executor for the command
 * @param command the command being queued/executed
 * @throws Exception propagated from the transaction manager itself
 */
private void executeWithTransaction(ChannelHandlerContext ctx, final Executor exec,
    Command command) throws Exception {
  CacheTransactionManager txm = cache.getCacheTransactionManager();
  TransactionId transactionId = getTransactionID();
  txm.resume(transactionId);
  try {
    exec.executeCommand(command, this);
  } catch (UnsupportedOperationInTransactionException e) {
    // Command cannot run transactionally; record the error as its response.
    command.setResponse(Coder.getErrorResponse(this.byteBufAllocator,
        RedisConstants.ERROR_UNSUPPORTED_OPERATION_IN_TRANSACTION));
  } catch (TransactionException e) {
    command.setResponse(Coder.getErrorResponse(this.byteBufAllocator,
        RedisConstants.ERROR_TRANSACTION_EXCEPTION));
  } catch (Exception e) {
    ByteBuf response = getExceptionResponse(ctx, e);
    command.setResponse(response);
  }
  // Buffer the command (and any recorded error) until EXEC/DISCARD.
  getTransactionQueue().add(command);
  transactionId = txm.suspend();
  setTransactionID(transactionId);
}
/**
 * Returns the id of the client's current transaction, or {@code null} when no
 * transaction is open.
 */
public TransactionId getTransactionID() {
  return transactionID;
}
/**
 * Tells whether this client currently has an open transaction.
 *
 * @return {@code true} when a transaction id is recorded for the client
 */
public boolean hasTransaction() {
  return this.transactionID != null;
}
/**
 * Records {@code id} as the client's current transaction.
 *
 * @param id transaction id to remember (may be {@code null})
 */
public void setTransactionID(TransactionId id) {
  transactionID = id;
}
/**
 * Discards the client's transaction state: forgets the transaction id and, if
 * any commands were queued, releases their response buffers and empties the
 * queue so no pooled memory is leaked.
 */
public void clearTransaction() {
  transactionID = null;
  if (transactionQueue == null) {
    return;
  }
  for (Command queued : transactionQueue) {
    ByteBuf response = queued.getResponse();
    if (response != null) {
      response.release();
    }
  }
  transactionQueue.clear();
}
/**
 * Lazily creates and returns the queue holding commands buffered for the
 * client's open transaction.
 *
 * @return the (never {@code null}) transaction command queue
 */
public Queue<Command> getTransactionQueue() {
  if (transactionQueue == null) {
    transactionQueue = new ConcurrentLinkedQueue<>();
  }
  return transactionQueue;
}
/**
 * {@link ByteBuf} allocator for this context. All executors must write through
 * this pooled allocator rather than creating unpooled buffers, for maximum
 * performance.
 *
 * @return allocator instance
 */
public ByteBufAllocator getByteBufAllocator() {
  return byteBufAllocator;
}
/**
 * Accessor for the provider of Regions backing the Redis data.
 *
 * @return the region provider instance
 */
public RegionProvider getRegionProvider() {
  return regionProvider;
}
/**
 * Accessor for the cache's transaction manager, used to pause and resume
 * transactions.
 *
 * @return the transaction manager of the underlying cache
 */
public CacheTransactionManager getCacheTransactionManager() {
  return cache.getCacheTransactionManager();
}
/**
 * Accessor for the cache's log writer.
 *
 * @return the logger instance
 */
public LogWriter getLogger() {
  return cache.getLogger();
}
/**
* Get the channel for this context
*
* @return instance
*
* public Channel getChannel() { return this.channel; }
*/
/**
 * The authentication password, identical server wide. Exposed here as opposed
 * to {@link GeodeRedisServer}.
 *
 * @return password bytes
 */
public byte[] getAuthPwd() {
  return authPwd;
}
/**
 * Reports whether the client may issue commands.
 *
 * @return {@code true} when no authentication is required or the client has
 *         already authenticated; {@code false} otherwise
 */
public boolean isAuthenticated() {
  return this.isAuthenticated;
}
/**
 * Marks this connection as successfully authenticated.
 */
public void setAuthenticationVerified() {
  isAuthenticated = true;
}
}
| apache-2.0 |
afabris86/mykuapp | inst/www/bower_components/opencpu/opencpu-0.4.js | 12341 | /**
* Javascript client library for OpenCPU
* Version 0.4.4
* Depends: jQuery
* Requires HTML5 FormData support for file uploads
* http://github.com/jeroenooms/opencpu.js
*
* Include this file in your apps and packages.
* You only need to use ocpu.seturl if this page is hosted outside of the OpenCPU package. For example:
*
* ocpu.seturl("../R") //default, use for apps
* ocpu.seturl("//public.opencpu.org/ocpu/library/mypackage/R") //CORS
* ocpu.seturl("/ocpu/library/mypackage/R") //hardcode path
*/
// Fail loudly when jQuery is absent: everything below depends on it.
if (!window.jQuery) {
  alert("Could not find jQuery! The HTML must include jquery.js before opencpu.js!");
}
(function ( $ ) {
//global variable
var r_cors = false;
var r_path = document.createElement('a');
r_path.href = "../R";
// Wraps one OpenCPU session created by a POST to /R/<fun>.
// loc: absolute URL of the session, key: session id (X-ocpu-session header),
// txt: raw response body listing the session's output paths.
function Session(loc, key, txt){
  this.loc = loc;
  this.key = key;
  this.txt = txt;
  // One entry per line of the response (handles \r\n, \r and \n endings).
  this.output = txt.split(/\r\n|\r|\n/g);

  this.getKey = function(){
    return key;
  };

  this.getLoc = function(){
    return loc;
  };

  // URL of a file stored in the session's working directory.
  this.getFileURL = function(path){
    return this.getLoc() + "files/" + path;
  };

  // GET a session file; returns the jqXHR for chaining.
  this.getFile = function(path, success){
    var url = this.getFileURL(path);
    return $.get(url, success);
  };

  // GET an R object from the session as JSON. All arguments optional:
  // name defaults to ".val" (the function's return value); calling
  // getObject(fn) treats the single function argument as the callback.
  this.getObject = function(name, data, success){
    //in case of no arguments
    name = name || ".val";

    //first arg is a function
    if(name instanceof Function){
      //pass on to second arg
      success = name;
      name = ".val";
    }
    var url = this.getLoc() + "R/" + name + "/json";
    return $.get(url, data, success);
  };

  // GET the captured stdout of the session as plain text.
  this.getStdout = function(success){
    var url = this.getLoc() + "stdout/text";
    return $.get(url, success);
  };

  // GET the full console transcript (input + output) as plain text.
  this.getConsole = function(success){
    var url = this.getLoc() + "console/text";
    return $.get(url, success);
  };
}
//for POSTing raw code snippets
//new Snippet("rnorm(100)")
function Snippet(code){
  // Normalize once so the field and the accessor always agree. Previously
  // getCode() returned the raw constructor argument, so `new Snippet()` had
  // this.code === "NULL" while getCode() returned undefined.
  var snippetCode = code || "NULL";
  this.code = snippetCode;
  this.getCode = function(){
    return snippetCode;
  };
}
//for POSTing files
//new Upload($('#file')[0].files)
function Upload(file){
  // Accept a File, a FileList, a file <input> element, or an array-like
  // selection of one, and normalize to a single File stored in this.file.
  if(file instanceof File){
    this.file = file;
  } else if(file instanceof FileList){
    this.file = file[0];
  } else if (file.files instanceof FileList){
    this.file = file.files[0];
  } else if (file.length > 0 && file[0].files instanceof FileList){
    this.file = file[0].files[0];
  } else {
    throw 'invalid new Upload(file). Argument file must be a HTML <input type="file"></input>';
  }
  var selected = this.file;
  this.getFile = function(){
    // Bug fix: return the normalized File. The original returned the raw
    // constructor argument, so an Upload built from a FileList or <input>
    // handed the wrong object to FormData.append() in multipart calls.
    return selected;
  };
}
// Serializes one call argument for transmission: OpenCPU wrapper objects are
// unwrapped (Session -> key, Snippet -> code, Upload -> File), browser file
// objects are reduced to a single File, and anything else is JSON-encoded.
function stringify(x){
  if(x instanceof Session){
    return x.getKey();
  } else if(x instanceof Snippet){
    return x.getCode();
  } else if(x instanceof Upload){
    return x.getFile();
  } else if(x instanceof File){
    return x;
  } else if(x instanceof FileList){
    // Only single-file uploads are supported: take the first entry.
    return x[0];
  } else if(x && x.files instanceof FileList){
    // A file <input> element.
    return x.files[0];
  } else if(x && x.length && x[0].files instanceof FileList){
    // A jQuery selection wrapping a file <input>.
    return x[0].files[0];
  } else {
    return JSON.stringify(x);
  }
}
// Low-level call: POSTs to /R/<fun> (or settings.url), then wraps the reply in
// a Session and passes it to handler. Returns the jqXHR for chaining.
function r_fun_ajax(fun, settings, handler){
  //validate input
  if(!fun) throw "r_fun_call called without fun";
  settings = settings || {};
  handler = handler || function(){};

  //set global settings
  settings.url = settings.url || (r_path.href + "/" + fun);
  settings.type = settings.type || "POST";
  settings.data = settings.data || {};
  settings.dataType = settings.dataType || "text";

  //ajax call
  var jqxhr = $.ajax(settings).done(function(){
    var loc = jqxhr.getResponseHeader('Location') || console.log("Location response header missing.");
    var key = jqxhr.getResponseHeader('X-ocpu-session') || console.log("X-ocpu-session response header missing.");
    var txt = jqxhr.responseText;
    // Bug fix: when the Location header is missing, `loc` is undefined
    // (console.log returns undefined) and calling loc.match() threw a
    // TypeError, so the handler was never invoked. Guard on loc first.
    //in case of cors we translate relative paths to the target domain
    if(r_cors && loc && loc.match("^/[^/]")){
      loc = r_path.protocol + "//" + r_path.host + loc;
    }
    handler(new Session(loc, key, txt));
  }).fail(function(){
    console.log("OpenCPU error HTTP " + jqxhr.status + "\n" + jqxhr.responseText);
  });

  //function chaining
  return jqxhr;
}
// Call an R function, POSTing the arguments as a single JSON document.
function r_fun_call_json(fun, args, handler){
  var ajaxSettings = {
    data: JSON.stringify(args || {}),
    contentType: 'application/json'
  };
  return r_fun_ajax(fun, ajaxSettings, handler);
}
// Call an R function with url-encoded form arguments; every value is first
// passed through stringify() so sessions/snippets are serialized correctly.
function r_fun_call_urlencoded(fun, args, handler){
  var encoded = {};
  $.each(args, function(name, value){
    encoded[name] = stringify(value);
  });
  var ajaxSettings = {
    data: $.param(encoded)
  };
  return r_fun_ajax(fun, ajaxSettings, handler);
}
// Call an R function using multipart/form-data.
// Used for file uploads; requires HTML5 FormData (checked by testhtml5).
function r_fun_call_multipart(fun, args, handler){
  testhtml5();
  var formdata = new FormData();
  $.each(args, function(key, value) {
    formdata.append(key, stringify(value));
  });

  // cache/contentType/processData must be disabled so jQuery sends the
  // FormData object untouched with its own multipart boundary.
  return r_fun_ajax(fun, {
    data: formdata,
    cache: false,
    contentType: false,
    processData: false
  }, handler);
}
// Automatically determines the transport encoding based on argument classes:
// any file-ish argument forces multipart, any code/session argument forces
// url-encoding, otherwise plain JSON is used.
function r_fun_call(fun, args, handler){
  args = args || {};
  var hasfiles = false;
  var hascode = false;

  //find argument types
  $.each(args, function(key, value){
    if(value instanceof File || value instanceof Upload || value instanceof FileList){
      hasfiles = true;
    } else if (value instanceof Snippet || value instanceof Session){
      hascode = true;
    }
  });

  //determine type (multipart wins over url-encoding)
  if(hasfiles){
    return r_fun_call_multipart(fun, args, handler);
  } else if(hascode){
    return r_fun_call_urlencoded(fun, args, handler);
  } else {
    return r_fun_call_json(fun, args, handler);
  }
}
// Convenience wrapper: call an R function and hand its return value to the
// handler as parsed JSON (skipping the intermediate Session object).
function rpc(fun, args, handler){
  return r_fun_call(fun, args, function(session){
    session.getObject(function(data){
      if(handler) handler(data);
    }).fail(function(){
      console.log("Failed to get JSON response for " + session.getLoc());
    });
  });
}
//plotting widget
//to be called on an (empty) div.
// Runs the R function and renders its last plot as the div's background image,
// with pdf/svg/png download links; the widget state is cached on the div.
$.fn.rplot = function(fun, args, cb) {
  var targetdiv = this;
  var myplot = initplot(targetdiv);

  //reset state (clear previous plot, show the spinner)
  myplot.setlocation();
  myplot.spinner.show();

  // call the function
  return r_fun_call(fun, args, function(tmp) {
    myplot.setlocation(tmp.getLoc());
    //call success handler as well
    if(cb) cb(tmp);
  }).always(function(){
    myplot.spinner.hide();
  });
};
// Builds (or returns the cached) plot widget for a target div: an inner div
// whose CSS background shows the session's last plot, pdf/svg/png links, a
// loading spinner, and a debounced resize handler that re-requests the png at
// the new size. Returns {setlocation, spinner}.
function initplot(targetdiv){
  // Widget already initialized for this div: reuse it.
  if(targetdiv.data("ocpuplot")){
    return targetdiv.data("ocpuplot");
  }
  var ocpuplot = function(){
    //local variables (closure state shared by the helpers below)
    var Location;   // session URL currently displayed, undefined when cleared
    var pngwidth;   // size at which the png background was last requested
    var pngheight;

    var plotdiv = $('<div />').attr({
      style: "width: 100%; height:100%; min-width: 100px; min-height: 100px; position:relative; background-repeat:no-repeat; background-size: 100% 100%;"
    }).appendTo(targetdiv).css("background-image", "none");

    var spinner = $('<span />').attr({
      style : "position: absolute; top: 20px; left: 20px; z-index:1000; font-family: monospace;"
    }).text("loading...").appendTo(plotdiv);

    // Download links for the three export formats, shown once a plot exists.
    var pdf = $('<a />').attr({
      target: "_blank",
      style: "position: absolute; top: 10px; right: 10px; z-index:1000; text-decoration:underline; font-family: monospace;"
    }).text("pdf").appendTo(plotdiv);

    var svg = $('<a />').attr({
      target: "_blank",
      style: "position: absolute; top: 30px; right: 10px; z-index:1000; text-decoration:underline; font-family: monospace;"
    }).text("svg").appendTo(plotdiv);

    var png = $('<a />').attr({
      target: "_blank",
      style: "position: absolute; top: 50px; right: 10px; z-index:1000; text-decoration:underline; font-family: monospace;"
    }).text("png").appendTo(plotdiv);

    // Re-request the png rendered at the div's current size.
    function updatepng(){
      if(!Location) return;
      pngwidth = plotdiv.width();
      pngheight = plotdiv.height();
      plotdiv.css("background-image", "url(" + Location + "graphics/last/png?width=" + pngwidth + "&height=" + pngheight + ")");
    }

    // Point the widget at a new session (or clear it when called without args).
    function setlocation(newloc){
      Location = newloc;
      if(!Location){
        pdf.hide();
        svg.hide();
        png.hide();
        plotdiv.css("background-image", "");
      } else {
        pdf.attr("href", Location + "graphics/last/pdf?width=11.69&height=8.27&paper=a4r").show();
        svg.attr("href", Location + "graphics/last/svg?width=11&height=6").show();
        png.attr("href", Location + "graphics/last/png?width=800&height=600").show();
        updatepng();
      }
    }

    // function to update the png image, debounced so window-drag resizing
    // doesn't fire a request per pixel.
    var onresize = debounce(function(e) {
      if(pngwidth == plotdiv.width() && pngheight == plotdiv.height()){
        return;
      }
      if(plotdiv.is(":visible")){
        updatepng();
      }
    }, 500);

    // register update handlers
    plotdiv.on("resize", onresize);
    $(window).on("resize", onresize);

    //return objects
    return {
      setlocation: setlocation,
      spinner : spinner
    };
  }();
  // Cache the widget on the div so repeated rplot() calls reuse it.
  targetdiv.data("ocpuplot", ocpuplot);
  return ocpuplot;
}
// adapted from underscore.js: returns a wrapper that postpones calls to func
// until `wait` ms have elapsed since the last invocation; with immediate=true
// it fires on the leading edge instead of the trailing edge.
function debounce(func, wait, immediate) {
  var result;
  var timeout = null;
  return function() {
    var context = this, args = arguments;
    var later = function() {
      timeout = null;
      if (!immediate)
        result = func.apply(context, args);
    };
    // Leading-edge call only when no timer is currently pending.
    var callNow = immediate && !timeout;
    clearTimeout(timeout);
    timeout = setTimeout(later, wait);
    if (callNow)
      result = func.apply(context, args);
    // Note: returns the result of the most recent actual invocation.
    return result;
  };
}
// Guard used before file uploads: FormData (HTML5) is required for multipart.
function testhtml5(){
  if(window.FormData !== undefined){
    return;
  }
  alert("Uploading of files requires HTML5. It looks like you are using an outdated browser that does not support this. Please install Firefox, Chrome or Internet Explorer 10+");
  throw "HTML5 required.";
}
//export: attach (or reuse) the global `ocpu` namespace on window.
window.ocpu = window.ocpu || {};
var ocpu = window.ocpu;
//global settings: point the library at an OpenCPU package's /R endpoint.
// Detects cross-origin targets (enables CORS mode), warns about unsupported
// browsers and mixed https/http setups, then probes the URL with a GET.
function seturl(newpath){
  if(!newpath.match("/R$")){
    alert("ERROR! Trying to set R url to: " + newpath +". Path to an OpenCPU R package must end with '/R'");
  } else {
    // An <a> element resolves the (possibly relative) path to an absolute URL.
    r_path = document.createElement('a');
    r_path.href = newpath;
    r_path.href = r_path.href; //IE needs this
    if(location.protocol != r_path.protocol || location.host != r_path.host){
      r_cors = true;
      if (!('withCredentials' in new XMLHttpRequest())) {
        alert("This browser does not support CORS. Try using Firefox or Chrome.");
      }
    }
    if(location.protocol == "https:" && r_path.protocol != "https:"){
      alert("Page is hosted on HTTPS but using a (non-SSL) HTTP OpenCPU server. This is insecure and most browsers will not allow this.")
    }
    if(r_cors){
      console.log("Setting path to CORS server " + r_path.href);
    } else {
      console.log("Setting path to local (non-CORS) server " + r_path.href);
    }
    // Probe the endpoint; a refused CORS request or bad URL surfaces via fail().
    $.get(r_path.href, function(resdata){
      console.log("Path updated. Available objects/functions:\n" + resdata);
    }).fail(function(xhr, textStatus, errorThrown){
      alert("Connection to OpenCPU failed:\n" + textStatus + "\n" + xhr.responseText + "\n" + errorThrown);
    });
  }
}
//exported functions
ocpu.call = r_fun_call;
ocpu.rpc = rpc;
ocpu.seturl = seturl;

//exported constructors
ocpu.Snippet = Snippet;
ocpu.Upload = Upload;

// Shim console for old Internet Explorer, where `console` is undefined until
// the developer tools are opened.
if (typeof console == "undefined") {
  this.console = {log: function() {}};
}
}( jQuery ));
| apache-2.0 |
madjam/mxnet | python/mxnet/gluon/data/dataset.py | 2933 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset container."""
__all__ = ['Dataset', 'ArrayDataset', 'RecordFileDataset']
import os
from ... import recordio, ndarray
class Dataset(object):
    """Base class defining the dataset interface.

    Concrete datasets must implement ``__getitem__`` (random access to the
    i-th sample) and ``__len__`` (the total number of samples).

    .. note:: An mxnet or numpy array can be directly used as a dataset.
    """

    def __len__(self):
        # Subclasses must report how many samples they contain.
        raise NotImplementedError

    def __getitem__(self, idx):
        # Subclasses must return the idx-th sample.
        raise NotImplementedError
class ArrayDataset(Dataset):
    """A dataset of multiple arrays.

    The i-th sample is `(x1[i], x2[i], ...)`.

    Parameters
    ----------
    *args : one or more arrays
        The data arrays. All arrays must have the same length.
    """
    def __init__(self, *args):
        assert len(args) > 0, "Needs at least 1 arrays"
        self._length = len(args[0])
        self._data = []
        for i, data in enumerate(args):
            # Bug fix: the original message referenced an undefined name
            # `length`, so a length mismatch raised NameError instead of the
            # intended AssertionError (and used %s for an integer).
            assert len(data) == self._length, \
                "All arrays must have the same length. But the first has %d " \
                "while the %d-th has %d." % (self._length, i + 1, len(data))
            if isinstance(data, ndarray.NDArray) and len(data.shape) == 1:
                # Convert 1-D NDArrays up front so per-sample indexing returns
                # plain scalars instead of creating NDArray slices.
                data = data.asnumpy()
            self._data.append(data)

    def __getitem__(self, idx):
        # With a single source array, return the element itself rather than a
        # 1-tuple, matching the documented sample layout.
        if len(self._data) == 1:
            return self._data[0][idx]
        else:
            return tuple(data[idx] for data in self._data)

    def __len__(self):
        return self._length
class RecordFileDataset(Dataset):
    """A dataset wrapping over a RecordIO (.rec) file.

    Each sample is a string representing the raw content of an record.

    Parameters
    ----------
    filename : str
        Path to rec file. A companion index file with the same basename and a
        ``.idx`` extension must exist next to it.
    """
    def __init__(self, filename):
        # The .idx file maps sample indices to byte offsets in the .rec file.
        idx_file = os.path.splitext(filename)[0] + '.idx'
        self._record = recordio.MXIndexedRecordIO(idx_file, filename, 'r')

    def __getitem__(self, idx):
        # Translate the dense index into the record key, then read that record.
        return self._record.read_idx(self._record.keys[idx])

    def __len__(self):
        return len(self._record.keys)
| apache-2.0 |
juvchan/azure-sdk-for-net | src/ResourceManagement/SiteRecovery/SiteRecovery.Tests/ScenarioTests/FailoverTests.cs | 11780 | //
// Copyright (c) Microsoft. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
using Microsoft.Azure.Management.SiteRecovery.Models;
using Microsoft.Azure.Management.SiteRecovery;
using Microsoft.Azure.Test;
using System.Linq;
using System.Net;
using Xunit;
using System;
namespace SiteRecovery.Tests
{
public class FailoverTests : SiteRecoveryTestsBase
{
/// <summary>
/// Scenario test: runs a planned failover (primary to recovery) for the first
/// protected item in a hard-coded fabric/container pair.
/// </summary>
public void E2EFailover()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        // Fixed ids of a pre-provisioned fabric and protection container.
        string fabricId = "6adf9420-b02f-4377-8ab7-ff384e6d792f";
        string containerId = "4f94127d-2eb3-449d-a708-250752e93cb4";
        var pgs = client.ReplicationProtectedItem.List(fabricId, containerId, RequestHeaders);

        PlannedFailoverInputProperties pfoProp = new PlannedFailoverInputProperties()
        {
            FailoverDirection = "PrimaryToRecovery",
            //ProviderConfigurationSettings = new ProviderSpecificFailoverInput()
        };

        PlannedFailoverInput pfoInput = new PlannedFailoverInput()
        {
            Properties = pfoProp
        };

        var failoverExecution = client.ReplicationProtectedItem.PlannedFailover(fabricId, containerId, pgs.ReplicationProtectedItems[0].Name, pfoInput, RequestHeaders);
    }
}
/// <summary>
/// Scenario test: commits a previously executed failover for the first
/// protected item in the hard-coded fabric/container pair.
/// </summary>
public void CommitFailover()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        string fabricId = "6adf9420-b02f-4377-8ab7-ff384e6d792f";
        string containerId = "4f94127d-2eb3-449d-a708-250752e93cb4";
        var pgs = client.ReplicationProtectedItem.List(fabricId, containerId, RequestHeaders);

        var commitResp = client.ReplicationProtectedItem.CommitFailover(fabricId, containerId, pgs.ReplicationProtectedItems[0].Name, RequestHeaders);
    }
}
/// <summary>
/// Scenario test: re-protects (reverse replication) the first protected item
/// after a failover, using default reverse-replication input.
/// </summary>
public void RR()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        string fabricId = "6adf9420-b02f-4377-8ab7-ff384e6d792f";
        string containerId = "4f94127d-2eb3-449d-a708-250752e93cb4";
        var pgs = client.ReplicationProtectedItem.List(fabricId, containerId, RequestHeaders);

        var commitResp = client.ReplicationProtectedItem.Reprotect(fabricId, containerId, pgs.ReplicationProtectedItems[0].Name, new ReverseReplicationInput(), RequestHeaders);
    }
}
/// <summary>
/// Scenario test: starts a test failover (recovery to primary), locates the
/// resulting job via MonitoringHelper, then resumes it to complete the drill.
/// </summary>
public void E2ETFO()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        string fabricId = "6adf9420-b02f-4377-8ab7-ff384e6d792f";
        string containerId = "4f94127d-2eb3-449d-a708-250752e93cb4";
        var pgs = client.ReplicationProtectedItem.List(fabricId, containerId, RequestHeaders);

        TestFailoverInputProperties tfoProp = new TestFailoverInputProperties()
        {
            FailoverDirection = "RecoveryToPrimary",
            ProviderSpecificDetails = new ProviderSpecificFailoverInput()
        };

        TestFailoverInput tfoInput = new TestFailoverInput()
        {
            Properties = tfoProp
        };

        // Timestamp taken before the call so the job lookup below can filter
        // for jobs started by this test run.
        DateTime startTfoTime = DateTime.UtcNow;
        var tfoResp = client.ReplicationProtectedItem.TestFailover(fabricId, containerId, pgs.ReplicationProtectedItems[0].Name, tfoInput, RequestHeaders);

        Job tfoJob = MonitoringHelper.GetJobId(
            MonitoringHelper.TestFailoverJobName,
            startTfoTime,
            client,
            RequestHeaders);

        ResumeJobParamsProperties resJobProp = new ResumeJobParamsProperties()
        {
            Comments = "ResumeTfo"
        };

        ResumeJobParams resumeJobParams = new ResumeJobParams()
        {
            Properties = resJobProp
        };

        // A test failover job pauses for validation; resuming completes it.
        var resumeJob = client.Jobs.Resume(tfoJob.Name, resumeJobParams, RequestHeaders);
    }
}
/// <summary>
/// Scenario test: runs an unplanned failover (recovery to primary) with no
/// source-site cleanup for the first protected item.
/// </summary>
public void E2EUFO()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        string fabricId = "6adf9420-b02f-4377-8ab7-ff384e6d792f";
        string containerId = "4f94127d-2eb3-449d-a708-250752e93cb4";
        var pgs = client.ReplicationProtectedItem.List(fabricId, containerId, RequestHeaders);

        UnplannedFailoverInputProperties ufoProp = new UnplannedFailoverInputProperties()
        {
            FailoverDirection = "RecoveryToPrimary",
            SourceSiteOperations = "NotRequired",
            ProviderSpecificDetails = new ProviderSpecificFailoverInput()
        };

        UnplannedFailoverInput ufoInput = new UnplannedFailoverInput()
        {
            Properties = ufoProp
        };

        var ufoResp = client.ReplicationProtectedItem.UnplannedFailover(fabricId, containerId, pgs.ReplicationProtectedItems[0].Name, ufoInput, RequestHeaders);
    }
}
/// <summary>
/// Scenario test: finds the first VMM fabric and a primary container with
/// protected items, then applies the second-newest recovery point to the
/// first protected item (HyperV Replica Azure provider).
/// </summary>
public void ApplyRecoveryPoint()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        var fabrics = client.Fabrics.List(RequestHeaders);

        Fabric selectedFabric = null;
        ProtectionContainer selectedContainer = null;

        foreach (var fabric in fabrics.Fabrics)
        {
            if (fabric.Properties.CustomDetails.InstanceType.Contains("VMM"))
            {
                selectedFabric = fabric;
                break;
            }
        }

        // NOTE(review): selectedFabric is assumed to exist in the test
        // environment; a missing VMM fabric would throw NullReferenceException.
        var containers = client.ProtectionContainer.List(selectedFabric.Name, RequestHeaders);

        foreach (var container in containers.ProtectionContainers)
        {
            if (container.Properties.ProtectedItemCount > 0
                && container.Properties.Role.Equals("Primary"))
            {
                selectedContainer = container;
                break;
            }
        }

        string fabricId = selectedFabric.Name;

        // Bug fix: selectedContainer.Name was dereferenced BEFORE the null
        // check below, so a missing container crashed with a
        // NullReferenceException and the "Container not found." branch was
        // unreachable. The dereference now happens inside the guarded branch.
        if (selectedContainer != null)
        {
            string containerId = selectedContainer.Name;

            var pgs = client.ReplicationProtectedItem.List(fabricId, containerId, RequestHeaders);
            var rps = client.RecoveryPoint.List(fabricId, containerId, pgs.ReplicationProtectedItems[0].Name, RequestHeaders);

            // Pick the second-newest recovery point.
            ApplyRecoveryPointInputProperties applyRpProp = new ApplyRecoveryPointInputProperties()
            {
                RecoveryPointId = rps.RecoveryPoints[rps.RecoveryPoints.Count - 2].Id,
                ProviderSpecificDetails = new HyperVReplicaAzureApplyRecoveryPointInput()
                {
                    VaultLocation = "SoutheastAsia"
                }
            };

            ApplyRecoveryPointInput applyRpInput = new ApplyRecoveryPointInput()
            {
                Properties = applyRpProp
            };

            var applyRpResp = client.ReplicationProtectedItem.ApplyRecoveryPoint(
                fabricId,
                containerId,
                pgs.ReplicationProtectedItems[0].Name,
                applyRpInput,
                RequestHeaders);
        }
        else
        {
            throw new System.Exception("Container not found.");
        }
    }
}
/// <summary>
/// Scenario test: locates the VMware fabric, verifies its first container has
/// VMwareAzureV2-protected items, then fires an unplanned failover
/// (primary to recovery) for the first protected item.
/// </summary>
public void VMwareAzureV2UnplannedFailover()
{
    using (UndoContext context = UndoContext.Current)
    {
        context.Start();
        var client = GetSiteRecoveryClient(CustomHttpHandler);

        var responseServers = client.Fabrics.List(RequestHeaders);

        Assert.True(
            responseServers.Fabrics.Count > 0,
            "Servers count can't be less than 1");

        // First() throws if no VMware fabric exists - that is the intended
        // failure mode for a misconfigured test environment.
        var vmWareFabric = responseServers.Fabrics.First(
            fabric => fabric.Properties.CustomDetails.InstanceType == "VMware");
        Assert.NotNull(vmWareFabric);

        var containersResponse = client.ProtectionContainer.List(
            vmWareFabric.Name,
            RequestHeaders);
        Assert.NotNull(containersResponse);
        Assert.True(
            containersResponse.ProtectionContainers.Count > 0,
            "Containers count can't be less than 1.");

        var protectedItemsResponse = client.ReplicationProtectedItem.List(
            vmWareFabric.Name,
            containersResponse.ProtectionContainers[0].Name,
            RequestHeaders);
        Assert.NotNull(protectedItemsResponse);
        Assert.NotEmpty(protectedItemsResponse.ReplicationProtectedItems);

        var protectedItem = protectedItemsResponse.ReplicationProtectedItems[0];
        Assert.NotNull(protectedItem.Properties.ProviderSpecificDetails);

        // Sanity-check the provider type before requesting the failover.
        var vmWareAzureV2Details = protectedItem.Properties.ProviderSpecificDetails
            as VMwareAzureV2ProviderSpecificSettings;
        Assert.NotNull(vmWareAzureV2Details);

        // Empty RecoveryPointId means the service picks the latest point.
        UnplannedFailoverInput ufoInput = new UnplannedFailoverInput()
        {
            Properties = new UnplannedFailoverInputProperties()
            {
                FailoverDirection = "PrimaryToRecovery",
                ProviderSpecificDetails = new VMWareAzureV2FailoverProviderInput
                {
                    RecoveryPointId = "",
                    VaultLocation = "Southeast Asia"
                },
                SourceSiteOperations = ""
            }
        };

        var failoverExecution = client.ReplicationProtectedItem.UnplannedFailover(
            vmWareFabric.Name,
            containersResponse.ProtectionContainers[0].Name,
            protectedItem.Name,
            ufoInput,
            RequestHeaders);
    }
}
}
}
| apache-2.0 |
projectcypress/health-data-standards | test/unit/import/cda/section_importer_test.rb | 1705 | require 'test_helper'
# Unit tests for the CDA SectionImporter: entries are extracted from the
# fixture document test/fixtures/section_importer.xml via the XPath
# '/cda:simple/cda:entry', and each test inspects one entry by index.
class SectionImporterTest < Minitest::Test
  def setup
    @si = HealthDataStandards::Import::CDA::SectionImporter.new(HealthDataStandards::Import::CDA::EntryFinder.new('/cda:simple/cda:entry'))
    @si.status_xpath = './cda:status'
    @doc = Nokogiri::XML(File.new('test/fixtures/section_importer.xml'))
    # Register the CDA namespace so the XPath expressions resolve.
    @doc.root.add_namespace_definition('cda', 'urn:hl7-org:v3')
  end

  # A point-in-time entry gets its timestamp and SNOMED code extracted.
  def test_create_entries_with_date
    entries = @si.create_entries(@doc)
    entry = entries[1]
    assert_equal 1026777600, entry.time
    assert entry.codes['SNOMED-CT'].include?('314443004')
  end

  # An entry with a value element also captures the scalar value and status.
  def test_create_entries_with_date_values
    entries = @si.create_entries(@doc)
    entry = entries[2]
    assert_equal 1026777600, entry.time
    assert entry.codes['SNOMED-CT'].include?('314443004')
    assert_equal 'eleventeen', entry.values.first.scalar
    assert_equal 'active', entry.status
  end

  # low/high effectiveTime bounds become a start/end range.
  def test_create_entries_with_date_ranges
    entries = @si.create_entries(@doc)
    entry = entries[0]
    assert_equal 1026777600, entry.start_time
    assert_equal 1189814400, entry.end_time
    assert entry.is_date_range?
  end

  # Translation code elements are merged into the entry's code list.
  def test_extracting_translations
    entries = @si.create_entries(@doc)
    entry = entries[1]
    assert_equal 1026777600, entry.time
    assert entry.codes['SNOMED-CT'].include?('12345')
  end

  # An effectiveTime given only as a center point is used as the entry time.
  def test_dealing_with_center_times
    entries = @si.create_entries(@doc)
    entry = entries[3]
    assert_equal 1026777600, entry.time
  end

  # The CDA id element's root/extension pair is captured on the entry.
  def test_extracting_identifiers
    entries = @si.create_entries(@doc)
    entry = entries[0]
    assert_equal "1.2.3.4", entry.cda_identifier.root
    assert_equal "abcdef", entry.cda_identifier.extension
  end
end
jpodeszwik/mifos | serviceInterfaces/src/main/java/org/mifos/application/admin/servicefacade/RolesPermissionServiceFacade.java | 2687 | /*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.application.admin.servicefacade;
import java.util.List;
import org.mifos.dto.domain.ActivityRestrictionDto;
import org.mifos.dto.screen.ListElement;
import org.mifos.framework.exceptions.PersistenceException;
import org.springframework.security.access.prepost.PreAuthorize;
/**
 * Service facade for administering roles and their permitted activities.
 * Every operation requires a fully authenticated caller (Spring Security
 * {@code @PreAuthorize}).
 */
@SuppressWarnings("PMD")
public interface RolesPermissionServiceFacade {

    /** Lists all roles as id/name elements. */
    @PreAuthorize("isFullyAuthenticated()")
    List<ListElement> retrieveAllRoles();

    /** Creates a role granting the given activities. */
    @PreAuthorize("isFullyAuthenticated()")
    void createRole(Short userId, String name, List<Short> ActivityIds) throws Exception;

    /** Creates a role with activities and per-activity restrictions. */
    @PreAuthorize("isFullyAuthenticated()")
    void createRole(Short userId, String name, List<Short> ActivityIds, List<ActivityRestrictionDto> activityRestrictionDtoList) throws Exception;

    /** Renames a role and replaces its activity grants. */
    @PreAuthorize("isFullyAuthenticated()")
    void updateRole(Short roleId, Short userId, String name, List<Short> ActivityIds) throws Exception;

    /** Renames a role, replacing activity grants and restrictions. */
    @PreAuthorize("isFullyAuthenticated()")
    void updateRole(Short roleId, Short userId, String name, List<Short> ActivityIds, List<ActivityRestrictionDto> activityRestrictions) throws Exception;

    /** Deletes a role; versionNo guards against concurrent modification. */
    @PreAuthorize("isFullyAuthenticated()")
    void deleteRole(Integer versionNo, Short roleId) throws Exception;

    /** Whether the current user is allowed to perform the given activity. */
    @PreAuthorize("isFullyAuthenticated()")
    boolean hasUserAccessForActivity(Short activityID) throws Exception;

    /** Returns the restriction entries configured for a role's activities. */
    @PreAuthorize("isFullyAuthenticated()")
    List<ActivityRestrictionDto> getRoleActivitiesRestrictions(Short roleId);

    /** Computes the next free id for a dynamically created activity. */
    @PreAuthorize("isFullyAuthenticated()")
    int calculateDynamicActivityId() throws Exception;

    /** Registers a new activity for a question group under the given parent. */
    @PreAuthorize("isFullyAuthenticated()")
    int createActivityForQuestionGroup(short parentActivity, String lookUpDescription) throws Exception;

    /** Updates the display text of an activity's lookup value. */
    @PreAuthorize("isFullyAuthenticated()")
    void updateLookUpValue(int newActivityId, String activityNameHead, String title) throws PersistenceException;
}
| apache-2.0 |
vesense/incubator-rocketmq | example/src/main/java/org/apache/rocketmq/example/openmessaging/SimplePullConsumer.java | 2351 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.rocketmq.example.openmessaging;
import io.openmessaging.Message;
import io.openmessaging.MessageHeader;
import io.openmessaging.MessagingAccessPoint;
import io.openmessaging.MessagingAccessPointFactory;
import io.openmessaging.OMS;
import io.openmessaging.PullConsumer;
import io.openmessaging.rocketmq.domain.NonStandardKeys;
/**
 * OpenMessaging pull-consumer example: connects to RocketMQ name servers via
 * an OMS access point, then polls OMS_HELLO_TOPIC forever and acknowledges
 * every message that arrives. Terminated only by killing the JVM; a shutdown
 * hook tears the consumer and the access point down cleanly.
 */
public class SimplePullConsumer {
    public static void main(String[] args) {
        final MessagingAccessPoint accessPoint = MessagingAccessPointFactory
            .getMessagingAccessPoint("openmessaging:rocketmq://IP1:9876,IP2:9876/namespace");

        final PullConsumer pullConsumer = accessPoint.createPullConsumer("OMS_HELLO_TOPIC",
            OMS.newKeyValue().put(NonStandardKeys.CONSUMER_GROUP, "OMS_CONSUMER"));

        accessPoint.startup();
        System.out.printf("MessagingAccessPoint startup OK%n");

        // Release resources on JVM exit: consumer first, then the access point.
        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            @Override
            public void run() {
                pullConsumer.shutdown();
                accessPoint.shutdown();
            }
        }));

        pullConsumer.startup();
        System.out.printf("Consumer startup OK%n");

        for (; ; ) {
            final Message message = pullConsumer.poll();
            if (message == null) {
                continue; // nothing available right now; poll again
            }
            final String msgId = message.headers().getString(MessageHeader.MESSAGE_ID);
            System.out.printf("Received one message: %s%n", msgId);
            pullConsumer.ack(msgId);
        }
    }
}
| apache-2.0 |
brettsam/azure-mobile-apps-net-server | test/Microsoft.Azure.Mobile.Server.Authentication.Test/Properties/AssemblyInfo.cs | 367 | // ----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// ----------------------------------------------------------------------------
using System.Reflection;
[assembly: AssemblyTitle("Microsoft.Azure.Mobile.Server.Authentication.Test")]
[assembly: AssemblyDescription("")] | apache-2.0 |
tekul/spring-security | web/src/test/java/org/springframework/security/web/authentication/session/RegisterSessionAuthenticationStrategyTests.java | 2333 | /*
* Copyright 2002-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.web.authentication.session;
import static org.mockito.Mockito.verify;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import org.springframework.security.authentication.TestingAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.session.SessionRegistry;
/**
 * Unit tests for {@code RegisterSessionAuthenticationStrategy}: the strategy
 * must reject a null registry and must register the current HTTP session with
 * the {@link SessionRegistry} on successful authentication.
 *
 * @author Rob Winch
 */
@RunWith(MockitoJUnitRunner.class)
public class RegisterSessionAuthenticationStrategyTests {

    @Mock
    private SessionRegistry sessionRegistry;

    private RegisterSessionAuthenticationStrategy strategy;

    private Authentication authentication;

    private MockHttpServletRequest request;

    private MockHttpServletResponse response;

    @Before
    public void setup() {
        // Fresh mock servlet objects and a simple token for every test.
        request = new MockHttpServletRequest();
        response = new MockHttpServletResponse();
        authentication = new TestingAuthenticationToken("user", "password", "ROLE_USER");
        strategy = new RegisterSessionAuthenticationStrategy(sessionRegistry);
    }

    @Test(expected = IllegalArgumentException.class)
    public void constructorNullRegistry() {
        new RegisterSessionAuthenticationStrategy(null);
    }

    @Test
    public void onAuthenticationRegistersSession() {
        strategy.onAuthentication(authentication, request, response);
        // The session id and the principal must be forwarded to the registry.
        verify(sessionRegistry).registerNewSession(request.getSession().getId(), authentication.getPrincipal());
    }
}
| apache-2.0 |
ol-loginov/intellij-community | jps/jps-builders/src/org/jetbrains/jps/cmdline/BuildSession.java | 27757 | /*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.jps.cmdline;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.util.Ref;
import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream;
import com.intellij.openapi.util.io.FileSystemUtil;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.util.concurrency.SequentialTaskExecutor;
import com.intellij.util.io.DataOutputStream;
import io.netty.channel.Channel;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.jps.TimingLog;
import org.jetbrains.jps.api.*;
import org.jetbrains.jps.builders.*;
import org.jetbrains.jps.builders.java.JavaModuleBuildTargetType;
import org.jetbrains.jps.builders.java.dependencyView.Callbacks;
import org.jetbrains.jps.incremental.MessageHandler;
import org.jetbrains.jps.incremental.RebuildRequestedException;
import org.jetbrains.jps.incremental.TargetTypeRegistry;
import org.jetbrains.jps.incremental.Utils;
import org.jetbrains.jps.incremental.fs.BuildFSState;
import org.jetbrains.jps.incremental.messages.*;
import org.jetbrains.jps.incremental.storage.Timestamps;
import org.jetbrains.jps.model.module.JpsModule;
import org.jetbrains.jps.model.serialization.CannotLoadJpsModelException;
import org.jetbrains.jps.service.SharedThreadPool;
import java.io.*;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.jetbrains.jps.api.CmdlineRemoteProto.Message.ControllerMessage.ParametersMessage.TargetTypeBuildScope;
/**
* @author Eugene Zhuravlev
* Date: 4/17/12
*/
// One JPS build request from the IDE: loads (or reuses preloaded) project
// data, runs the build, streams builder messages back over the Netty channel,
// and persists the file-system state for the next incremental build.
final class BuildSession implements Runnable, CanceledStatus {
  private static final Logger LOG = Logger.getInstance("#org.jetbrains.jps.cmdline.BuildSession");
  // Name of the per-project file where BuildFSState (+ last event ordinal) is persisted.
  public static final String FS_STATE_FILE = "fs_state.dat";
  // Identifies this build request; echoed back in every message to the IDE.
  private final UUID mySessionId;
  // Netty channel used to send builder messages back to the IDE process.
  private final Channel myChannel;
  @Nullable
  private final PreloadedData myPreloadedData;
  // Set asynchronously by cancel(); polled by the build via isCanceled().
  private volatile boolean myCanceled = false;
  private final String myProjectPath;
  // FS delta accumulated by the IDE since the last build; null forces a full FS rescan.
  @Nullable
  private CmdlineRemoteProto.Message.ControllerMessage.FSEvent myInitialFSDelta;
  // state
  // Serial executor that applies incoming FS events strictly in order.
  private final EventsProcessor myEventsProcessor = new EventsProcessor();
  // Ordinal of the last FS event applied; saved to disk so stale deltas can be detected.
  private volatile long myLastEventOrdinal;
  private volatile ProjectDescriptor myProjectDescriptor;
  // Pending constant-search requests sent to the IDE, keyed by (class name, field name).
  private final Map<Pair<String, String>, ConstantSearchFuture> mySearchTasks = Collections.synchronizedMap(new HashMap<Pair<String, String>, ConstantSearchFuture>());
  private final ConstantSearch myConstantSearch = new ConstantSearch();
  @NotNull
  private final BuildRunner myBuildRunner;
  // When true, the up-to-date shortcut in runBuild() is disabled.
  private final boolean myForceModelLoading;
  private final BuildType myBuildType;
  private final List<TargetTypeBuildScope> myScopes;
  /**
   * Wires the session from the IDE's parameters message. Reuses the
   * preloaded {@link BuildRunner} when one is available, otherwise creates a
   * fresh runner backed by a JPS model loader for this project.
   */
  BuildSession(UUID sessionId,
               Channel channel,
               CmdlineRemoteProto.Message.ControllerMessage.ParametersMessage params,
               @Nullable CmdlineRemoteProto.Message.ControllerMessage.FSEvent delta, @Nullable PreloadedData preloaded) {
    mySessionId = sessionId;
    myChannel = channel;
    myPreloadedData = preloaded;
    final CmdlineRemoteProto.Message.ControllerMessage.GlobalSettings globals = params.getGlobalSettings();
    myProjectPath = FileUtil.toCanonicalPath(params.getProjectId());
    String globalOptionsPath = FileUtil.toCanonicalPath(globals.getGlobalOptionsPath());
    myBuildType = convertCompileType(params.getBuildType());
    myScopes = params.getScopeList();
    List<String> filePaths = params.getFilePathList();
    // Flatten the protobuf key/value pairs into a plain map of builder parameters.
    final Map<String, String> builderParams = new HashMap<String, String>();
    for (CmdlineRemoteProto.Message.KeyValuePair pair : params.getBuilderParameterList()) {
      builderParams.put(pair.getKey(), pair.getValue());
    }
    myInitialFSDelta = delta;
    if (preloaded == null || preloaded.getRunner() == null) {
      // No preloaded runner: build one that will load the JPS model on demand.
      myBuildRunner = new BuildRunner(new JpsModelLoaderImpl(myProjectPath, globalOptionsPath, null));
    }
    else {
      myBuildRunner = preloaded.getRunner();
    }
    myBuildRunner.setFilePaths(filePaths);
    myBuildRunner.setBuilderParams(builderParams);
    myForceModelLoading = Boolean.parseBoolean(builderParams.get(BuildParametersKeys.FORCE_MODEL_LOADING));
  }
  /**
   * Entry point of the session thread: runs the build, converting every
   * {@link BuildMessage} produced by builders into a protobuf response that is
   * flushed to the IDE, and finally reports the overall build status.
   */
  @Override
  public void run() {
    Throwable error = null;
    // Mutable flags updated from the message-handler callback below.
    final Ref<Boolean> hasErrors = new Ref<Boolean>(false);
    final Ref<Boolean> doneSomething = new Ref<Boolean>(false);
    try {
      ProfilingHelper profilingHelper = null;
      if (Utils.IS_PROFILING_MODE) {
        profilingHelper = new ProfilingHelper();
        profilingHelper.startProfiling();
      }
      runBuild(new MessageHandler() {
        @Override
        public void processMessage(BuildMessage buildMessage) {
          // Translate the internal message into a protobuf response; null means "do not send".
          final CmdlineRemoteProto.Message.BuilderMessage response;
          if (buildMessage instanceof FileGeneratedEvent) {
            final Collection<Pair<String, String>> paths = ((FileGeneratedEvent)buildMessage).getPaths();
            response = !paths.isEmpty() ? CmdlineProtoUtil.createFileGeneratedEvent(paths) : null;
          }
          else if (buildMessage instanceof DoneSomethingNotification) {
            doneSomething.set(true);
            response = null;
          }
          else if (buildMessage instanceof CompilerMessage) {
            doneSomething.set(true);
            final CompilerMessage compilerMessage = (CompilerMessage)buildMessage;
            final String compilerName = compilerMessage.getCompilerName();
            // Prefix the text with the compiler name when one is present.
            final String text = !StringUtil.isEmptyOrSpaces(compilerName)? compilerName + ": " + compilerMessage.getMessageText() : compilerMessage.getMessageText();
            final BuildMessage.Kind kind = compilerMessage.getKind();
            if (kind == BuildMessage.Kind.ERROR) {
              hasErrors.set(true);
            }
            response = CmdlineProtoUtil.createCompileMessage(
              kind, text, compilerMessage.getSourcePath(),
              compilerMessage.getProblemBeginOffset(), compilerMessage.getProblemEndOffset(),
              compilerMessage.getProblemLocationOffset(), compilerMessage.getLine(), compilerMessage.getColumn(),
              -1.0f);
          }
          else if (buildMessage instanceof CustomBuilderMessage) {
            CustomBuilderMessage builderMessage = (CustomBuilderMessage)buildMessage;
            response = CmdlineProtoUtil.createCustomBuilderMessage(builderMessage.getBuilderId(), builderMessage.getMessageType(), builderMessage.getMessageText());
          }
          else if (buildMessage instanceof BuilderStatisticsMessage) {
            // Statistics are only logged locally, never forwarded to the IDE.
            BuilderStatisticsMessage message = (BuilderStatisticsMessage)buildMessage;
            int srcCount = message.getNumberOfProcessedSources();
            long time = message.getElapsedTimeMs();
            if (srcCount != 0 || time > 50) {
              LOG.info("Build duration: '" + message.getBuilderName() + "' builder took " + time + " ms, " + srcCount + " sources processed");
            }
            response = null;
          }
          else if (!(buildMessage instanceof BuildingTargetProgressMessage)) {
            // Any other message becomes a generic progress message; -1 means "fraction unknown".
            float done = -1.0f;
            if (buildMessage instanceof ProgressMessage) {
              done = ((ProgressMessage)buildMessage).getDone();
            }
            response = CmdlineProtoUtil.createCompileProgressMessageResponse(buildMessage.getMessageText(), done);
          }
          else {
            response = null;
          }
          if (response != null) {
            myChannel.writeAndFlush(CmdlineProtoUtil.toMessage(mySessionId, response));
          }
        }
      }, this);
      if (profilingHelper != null) {
        profilingHelper.stopProfiling();
      }
    }
    catch (Throwable e) {
      LOG.info(e);
      error = e;
    }
    finally {
      // Always send a final status (success / errors / canceled / failure) to the IDE.
      finishBuild(error, hasErrors.get(), doneSomething.get());
    }
  }
  /**
   * Loads (or reuses) the project descriptor and FS state, applies the initial
   * FS delta, and runs the actual build. Contains an optimization: when the
   * saved FS state proves there is no work to do and the delta carries no
   * changes, the build is skipped entirely and only the state file is updated.
   */
  private void runBuild(final MessageHandler msgHandler, CanceledStatus cs) throws Throwable {
    final File dataStorageRoot = Utils.getDataStorageRoot(myProjectPath);
    if (dataStorageRoot == null) {
      msgHandler.processMessage(new CompilerMessage("build", BuildMessage.Kind.ERROR, "Cannot determine build data storage root for project " + myProjectPath));
      return;
    }
    if (!dataStorageRoot.exists()) {
      // invoked the very first time for this project
      myBuildRunner.setForceCleanCaches(true);
    }
    final ProjectDescriptor preloadedProject = myPreloadedData != null? myPreloadedData.getProjectDescriptor() : null;
    // Only read the saved FS state when there is no preloaded descriptor and a delta exists.
    final DataInputStream fsStateStream =
      preloadedProject != null || myInitialFSDelta == null /*this will force FS rescan*/? null : createFSDataStream(dataStorageRoot, myInitialFSDelta.getOrdinal());
    if (fsStateStream != null || myPreloadedData != null) {
      // optimization: check whether we can skip the build
      final boolean hasWorkFlag = fsStateStream != null? fsStateStream.readBoolean() : myPreloadedData.hasWorkToDo();
      final boolean hasWorkToDoWithModules = hasWorkFlag || myInitialFSDelta == null;
      if (!myForceModelLoading && (myBuildType == BuildType.BUILD || myBuildType == BuildType.UP_TO_DATE_CHECK) && !hasWorkToDoWithModules
          && scopeContainsModulesOnlyForIncrementalMake(myScopes) && !containsChanges(myInitialFSDelta)) {
        final DataInputStream storedFsData;
        if (myPreloadedData != null) {
          storedFsData = createFSDataStream(dataStorageRoot, myInitialFSDelta.getOrdinal());
          if (storedFsData != null) {
            storedFsData.readBoolean(); // skip hasWorkToDo flag
          }
        }
        else {
          storedFsData = fsStateStream;
        }
        if (storedFsData != null) {
          // Re-save the state with the new ordinal so the next delta chains correctly.
          updateFsStateOnDisk(dataStorageRoot, storedFsData, myInitialFSDelta.getOrdinal());
          LOG.info("No changes found since last build. Exiting.");
          if (preloadedProject != null) {
            preloadedProject.release();
          }
          return;
        }
      }
    }
    final BuildFSState fsState = preloadedProject != null? preloadedProject.fsState : new BuildFSState(false);
    try {
      final ProjectDescriptor pd;
      if (preloadedProject != null) {
        pd = preloadedProject;
        final List<BuildMessage> preloadMessages = myPreloadedData.getLoadMessages();
        if (!preloadMessages.isEmpty()) {
          // replay preload-time messages, so that they are delivered to the IDE
          for (BuildMessage message : preloadMessages) {
            msgHandler.processMessage(message);
          }
        }
        if (myInitialFSDelta == null || myPreloadedData.getFsEventOrdinal() + 1L != myInitialFSDelta.getOrdinal()) {
          // FS rescan was forced
          fsState.clearAll();
        }
        else {
          // apply events to already loaded state
          try {
            applyFSEvent(pd, myInitialFSDelta, false);
          }
          catch (Throwable e) {
            LOG.error(e);
            fsState.clearAll();
          }
        }
      }
      else {
        // standard case
        pd = myBuildRunner.load(msgHandler, dataStorageRoot, fsState);
        TimingLog.LOG.debug("Project descriptor loaded");
        if (fsStateStream != null) {
          try {
            try {
              fsState.load(fsStateStream, pd.getModel(), pd.getBuildRootIndex());
              applyFSEvent(pd, myInitialFSDelta, false);
              TimingLog.LOG.debug("FS Delta loaded");
            }
            finally {
              fsStateStream.close();
            }
          }
          catch (Throwable e) {
            // Corrupted or incompatible state: fall back to a full rescan.
            LOG.error(e);
            fsState.clearAll();
          }
        }
      }
      myProjectDescriptor = pd;
      myLastEventOrdinal = myInitialFSDelta != null? myInitialFSDelta.getOrdinal() : 0L;
      // free memory
      myInitialFSDelta = null;
      // ensure events from controller are processed after FSState initialization
      myEventsProcessor.startProcessing();
      myBuildRunner.runBuild(pd, cs, myConstantSearch, msgHandler, myBuildType, myScopes, false);
      TimingLog.LOG.debug("Build finished");
    }
    finally {
      saveData(fsState, dataStorageRoot);
    }
  }
private static boolean scopeContainsModulesOnlyForIncrementalMake(List<TargetTypeBuildScope> scopes) {
TargetTypeRegistry typeRegistry = null;
for (TargetTypeBuildScope scope : scopes) {
if (scope.getForceBuild()) return false;
final String typeId = scope.getTypeId();
if (isJavaModuleBuildType(typeId)) { // fast check
continue;
}
if (typeRegistry == null) {
// lazy init
typeRegistry = TargetTypeRegistry.getInstance();
}
final BuildTargetType<?> targetType = typeRegistry.getTargetType(typeId);
if (targetType != null && !(targetType instanceof ModuleBasedBuildTargetType)) {
return false;
}
}
return true;
}
private static boolean isJavaModuleBuildType(String typeId) {
for (JavaModuleBuildTargetType moduleBuildTargetType : JavaModuleBuildTargetType.ALL_TYPES) {
if (moduleBuildTargetType.getTypeId().equals(typeId)) {
return true;
}
}
return false;
}
private void saveData(final BuildFSState fsState, File dataStorageRoot) {
final boolean wasInterrupted = Thread.interrupted();
try {
saveFsState(dataStorageRoot, fsState);
final ProjectDescriptor pd = myProjectDescriptor;
if (pd != null) {
pd.release();
}
}
finally {
if (wasInterrupted) {
Thread.currentThread().interrupt();
}
}
}
public void processFSEvent(final CmdlineRemoteProto.Message.ControllerMessage.FSEvent event) {
myEventsProcessor.submit(new Runnable() {
@Override
public void run() {
try {
applyFSEvent(myProjectDescriptor, event, true);
myLastEventOrdinal += 1;
}
catch (IOException e) {
LOG.error(e);
}
}
});
}
public void processConstantSearchResult(CmdlineRemoteProto.Message.ControllerMessage.ConstantSearchResult result) {
final ConstantSearchFuture future = mySearchTasks.remove(Pair.create(result.getOwnerClassName(), result.getFieldName()));
if (future != null) {
if (result.getIsSuccess()) {
final List<String> paths = result.getPathList();
final List<File> files = new ArrayList<File>(paths.size());
for (String path : paths) {
files.add(new File(path));
}
future.setResult(files);
LOG.debug("Constant search result: " + files.size() + " affected files found");
}
else {
future.setDone();
LOG.debug("Constant search failed");
}
}
}
  /**
   * Applies a single FS delta to the project's build state: registers deleted
   * files and marks changed files dirty for every build root that contains
   * them. Paths outside any known build root are skipped. The FS cache is
   * cleared lazily, only when the event actually affects known roots.
   */
  private static void applyFSEvent(ProjectDescriptor pd, @Nullable CmdlineRemoteProto.Message.ControllerMessage.FSEvent event,
                                   final boolean saveEventStamp) throws IOException {
    if (event == null) {
      return;
    }
    final Timestamps timestamps = pd.timestamps.getStorage();
    boolean cacheCleared = false;
    for (String deleted : event.getDeletedPathsList()) {
      final File file = new File(deleted);
      Collection<BuildRootDescriptor> descriptor = pd.getBuildRootIndex().findAllParentDescriptors(file, null, null);
      if (!descriptor.isEmpty()) {
        if (!cacheCleared) {
          pd.getFSCache().clear();
          cacheCleared = true;
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Applying deleted path from fs event: " + file.getPath());
        }
        for (BuildRootDescriptor rootDescriptor : descriptor) {
          pd.fsState.registerDeleted(rootDescriptor.getTarget(), file, timestamps);
        }
      }
      else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping deleted path: " + file.getPath());
        }
      }
    }
    for (String changed : event.getChangedPathsList()) {
      final File file = new File(changed);
      Collection<BuildRootDescriptor> descriptors = pd.getBuildRootIndex().findAllParentDescriptors(file, null, null);
      if (!descriptors.isEmpty()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Applying dirty path from fs event: " + changed);
        }
        long fileStamp = -1L;
        for (BuildRootDescriptor descriptor : descriptors) {
          if (!descriptor.isGenerated()) { // ignore generates sources as they are processed at the time of generation
            if (fileStamp == -1L) {
              fileStamp = FileSystemUtil.lastModified(file); // lazy init
            }
            // Only mark dirty when the stored timestamp disagrees with the file on disk.
            final long stamp = timestamps.getStamp(file, descriptor.getTarget());
            if (stamp != fileStamp) {
              if (!cacheCleared) {
                pd.getFSCache().clear();
                cacheCleared = true;
              }
              pd.fsState.markDirty(null, file, descriptor, timestamps, saveEventStamp);
            }
            else {
              if (LOG.isDebugEnabled()) {
                LOG.debug(descriptor.getTarget() + ": Path considered up-to-date: " + changed + "; timestamp= " + stamp);
              }
            }
          }
        }
      }
      else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping dirty path: " + file.getPath());
        }
      }
    }
  }
  /**
   * Rewrites fs_state.dat with a new header (version, the given event ordinal,
   * hasWorkToDo=false) followed by a byte-for-byte copy of the remainder of
   * the previously saved state. Used on the skip-build path so the next delta
   * chains onto the right ordinal. On any failure the state file is deleted,
   * which forces a full FS rescan next time.
   */
  private static void updateFsStateOnDisk(File dataStorageRoot, DataInputStream original, final long ordinal) {
    final File file = new File(dataStorageRoot, FS_STATE_FILE);
    try {
      // Build the full content in memory first, then write it out atomically-ish.
      final BufferExposingByteArrayOutputStream bytes = new BufferExposingByteArrayOutputStream();
      final DataOutputStream out = new DataOutputStream(bytes);
      try {
        out.writeInt(BuildFSState.VERSION);
        out.writeLong(ordinal);
        out.writeBoolean(false);
        // Copy the rest of the original stream unchanged.
        while (true) {
          final int b = original.read();
          if (b == -1) {
            break;
          }
          out.write(b);
        }
      }
      finally {
        out.close();
      }
      saveOnDisk(bytes, file);
    }
    catch (Throwable e) {
      LOG.error(e);
      FileUtil.delete(file);
    }
  }
  /**
   * Serializes the current FS state to fs_state.dat: version, the ordinal of
   * the last applied event, a hasWorkToDo flag, then the state itself. On any
   * failure the file is deleted so the next build falls back to a full rescan.
   */
  private void saveFsState(File dataStorageRoot, BuildFSState state) {
    final ProjectDescriptor pd = myProjectDescriptor;
    final File file = new File(dataStorageRoot, FS_STATE_FILE);
    try {
      final BufferExposingByteArrayOutputStream bytes = new BufferExposingByteArrayOutputStream();
      final DataOutputStream out = new DataOutputStream(bytes);
      try {
        out.writeInt(BuildFSState.VERSION);
        out.writeLong(myLastEventOrdinal);
        // Precomputed so the next session can skip the build without loading the project.
        out.writeBoolean(hasWorkToDo(state, pd));
        state.save(out);
      }
      finally {
        out.close();
      }
      saveOnDisk(bytes, file);
    }
    catch (Throwable e) {
      LOG.error(e);
      FileUtil.delete(file);
    }
  }
private static boolean hasWorkToDo(BuildFSState state, @Nullable ProjectDescriptor pd) {
if (pd == null) {
return true; // assuming worst case
}
final BuildTargetIndex targetIndex = pd.getBuildTargetIndex();
for (JpsModule module : pd.getProject().getModules()) {
for (ModuleBasedTarget<?> target : targetIndex.getModuleBasedTargets(module, BuildTargetRegistry.ModuleTargetSelector.ALL)) {
if (!pd.getBuildTargetIndex().isDummy(target) && state.hasWorkToDo(target)) {
return true;
}
}
}
return false;
}
  /**
   * Writes the buffered bytes to {@code file}. If the file (or its parent
   * directory) does not exist yet, the first open fails with
   * FileNotFoundException; the file is then created and the open is retried.
   */
  private static void saveOnDisk(BufferExposingByteArrayOutputStream bytes, final File file) throws IOException {
    FileOutputStream fos = null;
    try {
      //noinspection IOResourceOpenedButNotSafelyClosed
      fos = new FileOutputStream(file);
    }
    catch (FileNotFoundException ignored) {
      // Create missing file/parents, then retry below.
      FileUtil.createIfDoesntExist(file);
    }
    if (fos == null) {
      fos = new FileOutputStream(file);
    }
    try {
      // Write the internal buffer directly to avoid an extra copy.
      fos.write(bytes.getInternalBuffer(), 0, bytes.size());
    }
    finally {
      fos.close();
    }
  }
  /**
   * Opens the saved fs_state.dat and validates its header. Returns a stream
   * positioned just after the (version, ordinal) header, or null when the
   * file is missing, has a different format version, or its saved ordinal
   * does not immediately precede {@code currentEventOrdinal} (i.e. events
   * were lost and a full rescan is required).
   */
  @Nullable
  private static DataInputStream createFSDataStream(File dataStorageRoot, final long currentEventOrdinal) {
    try {
      final File file = new File(dataStorageRoot, FS_STATE_FILE);
      byte[] bytes;
      final InputStream fs = new FileInputStream(file);
      try {
        bytes = FileUtil.loadBytes(fs, (int)file.length());
      }
      finally {
        fs.close();
      }
      final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
      final int version = in.readInt();
      if (version != BuildFSState.VERSION) {
        return null;
      }
      final long savedOrdinal = in.readLong();
      if (savedOrdinal + 1L != currentEventOrdinal) {
        // The incoming delta does not chain onto the saved state.
        return null;
      }
      return in;
    }
    catch (FileNotFoundException ignored) {
      // First build for this project: no saved state yet.
    }
    catch (Throwable e) {
      LOG.error(e);
    }
    return null;
  }
private static boolean containsChanges(CmdlineRemoteProto.Message.ControllerMessage.FSEvent event) {
return event.getChangedPathsCount() != 0 || event.getDeletedPathsCount() != 0;
}
  /**
   * Sends the final message of the session to the IDE: a readable error for a
   * broken project configuration, a failure with stack trace for any other
   * error, or a build-completed event with the computed status. The write is
   * awaited so the message is flushed before the session terminates.
   */
  private void finishBuild(final Throwable error, boolean hadBuildErrors, boolean doneSomething) {
    CmdlineRemoteProto.Message lastMessage = null;
    try {
      if (error instanceof CannotLoadJpsModelException) {
        String text = "Failed to load project configuration: " + StringUtil.decapitalize(error.getMessage());
        String path = ((CannotLoadJpsModelException)error).getFile().getAbsolutePath();
        lastMessage = CmdlineProtoUtil.toMessage(mySessionId, CmdlineProtoUtil.createCompileMessage(BuildMessage.Kind.ERROR, text, path, -1, -1, -1, -1, -1, -1.0f));
      }
      else if (error != null) {
        // Report the root cause with its full stack trace.
        Throwable cause = error.getCause();
        if (cause == null) {
          cause = error;
        }
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        final PrintStream stream = new PrintStream(out);
        try {
          cause.printStackTrace(stream);
        }
        finally {
          stream.close();
        }
        final StringBuilder messageText = new StringBuilder();
        messageText.append("Internal error: (").append(cause.getClass().getName()).append(") ").append(cause.getMessage());
        final String trace = out.toString();
        if (!trace.isEmpty()) {
          messageText.append("\n").append(trace);
        }
        if (error instanceof RebuildRequestedException) {
          messageText.append("\n").append("Please perform full project rebuild (Build | Rebuild Project)");
        }
        lastMessage = CmdlineProtoUtil.toMessage(mySessionId, CmdlineProtoUtil.createFailure(messageText.toString(), cause));
      }
      else {
        // No exception: derive the status from the cancel/error/progress flags.
        CmdlineRemoteProto.Message.BuilderMessage.BuildEvent.Status status = CmdlineRemoteProto.Message.BuilderMessage.BuildEvent.Status.SUCCESS;
        if (myCanceled) {
          status = CmdlineRemoteProto.Message.BuilderMessage.BuildEvent.Status.CANCELED;
        }
        else if (hadBuildErrors) {
          status = CmdlineRemoteProto.Message.BuilderMessage.BuildEvent.Status.ERRORS;
        }
        else if (!doneSomething){
          status = CmdlineRemoteProto.Message.BuilderMessage.BuildEvent.Status.UP_TO_DATE;
        }
        lastMessage = CmdlineProtoUtil.toMessage(mySessionId, CmdlineProtoUtil.createBuildCompletedEvent("build completed", status));
      }
    }
    catch (Throwable e) {
      lastMessage = CmdlineProtoUtil.toMessage(mySessionId, CmdlineProtoUtil.createFailure(e.getMessage(), e));
    }
    finally {
      try {
        myChannel.writeAndFlush(lastMessage).await();
      }
      catch (InterruptedException e) {
        LOG.info(e);
      }
    }
  }
  /** Requests cancellation; the build polls {@link #isCanceled()} and stops cooperatively. */
  public void cancel() {
    myCanceled = true;
  }

  @Override
  public boolean isCanceled() {
    return myCanceled;
  }
private static BuildType convertCompileType(CmdlineRemoteProto.Message.ControllerMessage.ParametersMessage.Type compileType) {
switch (compileType) {
case CLEAN: return BuildType.CLEAN;
case BUILD: return BuildType.BUILD;
case UP_TO_DATE_CHECK: return BuildType.UP_TO_DATE_CHECK;
}
return BuildType.BUILD;
}
  /**
   * Serial task executor whose queue is gated: tasks submitted before
   * {@link #startProcessing()} are accepted but not executed, guaranteeing
   * that FS events are applied only after FSState initialization completes.
   */
  private static class EventsProcessor extends SequentialTaskExecutor {
    private final AtomicBoolean myProcessingEnabled = new AtomicBoolean(false);
    EventsProcessor() {
      super(SharedThreadPool.getInstance());
    }
    public void startProcessing() {
      // Flip the gate once and flush whatever has been queued so far.
      if (!myProcessingEnabled.getAndSet(true)) {
        super.processQueue();
      }
    }
    @Override
    protected void processQueue() {
      // No-op until the gate is opened by startProcessing().
      if (myProcessingEnabled.get()) {
        super.processQueue();
      }
    }
  }
  /**
   * Delegates "which files are affected by this constant change" queries to
   * the IDE: sends a CONSTANT_SEARCH_TASK message over the channel and returns
   * a future that is completed later by processConstantSearchResult().
   */
  private class ConstantSearch implements Callbacks.ConstantAffectionResolver {
    private ConstantSearch() {
    }
    @Nullable @Override
    public Future<Callbacks.ConstantAffection> request(String ownerClassName, String fieldName, int accessFlags, boolean fieldRemoved, boolean accessChanged) {
      final CmdlineRemoteProto.Message.BuilderMessage.ConstantSearchTask.Builder task =
        CmdlineRemoteProto.Message.BuilderMessage.ConstantSearchTask.newBuilder();
      task.setOwnerClassName(ownerClassName);
      task.setFieldName(fieldName);
      task.setAccessFlags(accessFlags);
      task.setIsAccessChanged(accessChanged);
      task.setIsFieldRemoved(fieldRemoved);
      final ConstantSearchFuture future = new ConstantSearchFuture(BuildSession.this);
      // A newer request for the same (class, field) supersedes the previous one.
      final ConstantSearchFuture prev = mySearchTasks.put(Pair.create(ownerClassName, fieldName), future);
      if (prev != null) {
        prev.setDone();
      }
      myChannel.writeAndFlush(CmdlineProtoUtil.toMessage(mySessionId, CmdlineRemoteProto.Message.BuilderMessage.newBuilder()
        .setType(CmdlineRemoteProto.Message.BuilderMessage.Type.CONSTANT_SEARCH_TASK).setConstantSearchTask(task.build()).build()));
      return future;
    }
  }
  /**
   * Future for a constant-search request. Defaults to an EMPTY affection so a
   * canceled build or a failed search still yields a safe result. The no-arg
   * get() polls in 300 ms slices so it can bail out when the build is canceled.
   */
  private static class ConstantSearchFuture extends BasicFuture<Callbacks.ConstantAffection> {
    private volatile Callbacks.ConstantAffection myResult = Callbacks.ConstantAffection.EMPTY;
    private final CanceledStatus myCanceledStatus;
    private ConstantSearchFuture(CanceledStatus canceledStatus) {
      myCanceledStatus = canceledStatus;
    }
    public void setResult(final Collection<File> affectedFiles) {
      myResult = new Callbacks.ConstantAffection(affectedFiles);
      setDone();
    }
    @Override
    public Callbacks.ConstantAffection get() throws InterruptedException, ExecutionException {
      while (true) {
        try {
          return get(300L, TimeUnit.MILLISECONDS);
        }
        catch (TimeoutException ignored) {
          // keep polling; checked for cancellation below
        }
        if (myCanceledStatus.isCanceled()) {
          return myResult;
        }
      }
    }
    @Override
    public Callbacks.ConstantAffection get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
      super.get(timeout, unit);
      return myResult;
    }
  }
}
| apache-2.0 |
QuantConnect/Lean | ToolBox/Properties/AssemblyInfo.cs | 865 | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("QuantConnect.ToolBox")]
[assembly: AssemblyProduct("QuantConnect.ToolBox")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("5c3d0688-07ac-4cd4-8f2e-e74ebcf32a88")]
[assembly: InternalsVisibleTo("QuantConnect.Tests")]
| apache-2.0 |
clicktravel-chris/Cheddar | cheddar/cheddar-messaging/src/main/java/com/clicktravel/cheddar/infrastructure/messaging/exception/MessageHandlingException.java | 932 | /*
* Copyright 2014 Click Travel Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.clicktravel.cheddar.infrastructure.messaging.exception;
/**
 * Thrown when a received message cannot be processed by its handler; the
 * original failure is preserved as the cause.
 */
public class MessageHandlingException extends MessagingException {

    private static final long serialVersionUID = 5991755885193413206L;

    /**
     * @param message description of the handling failure
     * @param cause   underlying exception thrown by the handler
     */
    public MessageHandlingException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| apache-2.0 |
romankagan/DDBWorkbench | plugins/groovy/src/org/jetbrains/plugins/groovy/gpp/GppExpectedTypesContributor.java | 3841 | package org.jetbrains.plugins.groovy.gpp;
import com.intellij.openapi.util.Pair;
import com.intellij.psi.*;
import com.intellij.util.NullableFunction;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.plugins.groovy.lang.psi.api.GroovyResolveResult;
import org.jetbrains.plugins.groovy.lang.psi.api.auxiliary.GrListOrMap;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.arguments.GrNamedArgument;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.blocks.GrClosableBlock;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrExpression;
import org.jetbrains.plugins.groovy.lang.psi.expectedTypes.GroovyExpectedTypesContributor;
import org.jetbrains.plugins.groovy.lang.psi.expectedTypes.GroovyExpectedTypesProvider;
import org.jetbrains.plugins.groovy.lang.psi.expectedTypes.SubtypeConstraint;
import org.jetbrains.plugins.groovy.lang.psi.expectedTypes.TypeConstraint;
import org.jetbrains.plugins.groovy.lang.psi.impl.GrTupleType;
import org.jetbrains.plugins.groovy.lang.psi.impl.signatures.GrClosureSignatureUtil;
import org.jetbrains.plugins.groovy.lang.psi.util.PsiUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * Contributes expected types for expressions in Groovy++ (Gpp) literal-list
 * constructor calls: an element of a typed list literal (or of a "super"
 * named argument in a map literal) is expected to match the corresponding
 * constructor parameter of the target class.
 *
 * @author peter
 */
public class GppExpectedTypesContributor extends GroovyExpectedTypesContributor {
  @Override
  public List<TypeConstraint> calculateTypeConstraints(@NotNull GrExpression expression) {
    final PsiElement parent = expression.getParent();
    if (parent instanceof GrListOrMap) {
      final GrListOrMap list = (GrListOrMap)parent;
      if (!list.isMap()) {
        // Only tuple-typed list literals can be treated as constructor argument lists.
        final PsiType listType = list.getType();
        if (!(listType instanceof GrTupleType)) {
          return Collections.emptyList();
        }
        return addExpectedConstructorParameters(list, list.getInitializers(), expression);
      }
    }
    if (parent instanceof GrNamedArgument) {
      final PsiElement map = parent.getParent();
      if (map instanceof GrListOrMap && "super".equals(((GrNamedArgument)parent).getLabelName())) {
        //todo expected property types
        return addExpectedConstructorParameters((GrListOrMap)map, new GrExpression[]{expression}, expression);
      }
    }
    return Collections.emptyList();
  }

  /**
   * For every expected class type of the literal, resolves matching
   * constructors against the argument types and, when {@code arg} maps to a
   * parameter, adds that parameter's type as a subtype constraint.
   */
  private static List<TypeConstraint> addExpectedConstructorParameters(GrListOrMap list,
                                                                       GrExpression[] args,
                                                                       GrExpression arg) {
    PsiType[] argTypes = ContainerUtil.map2Array(args, PsiType.class, new NullableFunction<GrExpression, PsiType>() {
      @Override
      public PsiType fun(GrExpression grExpression) {
        return grExpression.getType();
      }
    });
    final ArrayList<TypeConstraint> result = new ArrayList<TypeConstraint>();
    for (PsiType type : GroovyExpectedTypesProvider.getDefaultExpectedTypes(list)) {
      if (type instanceof PsiClassType) {
        for (GroovyResolveResult resolveResult : PsiUtil.getConstructorCandidates((PsiClassType)type, argTypes, list)) {
          final PsiElement method = resolveResult.getElement();
          if (method instanceof PsiMethod && ((PsiMethod)method).isConstructor()) {
            // Map each literal element to the constructor parameter it would bind to.
            final Map<GrExpression,Pair<PsiParameter,PsiType>> map = GrClosureSignatureUtil
              .mapArgumentsToParameters(resolveResult, list, false, true, GrNamedArgument.EMPTY_ARRAY, args, GrClosableBlock.EMPTY_ARRAY);
            if (map != null) {
              final Pair<PsiParameter, PsiType> pair = map.get(arg);
              if (pair != null) {
                result.add(SubtypeConstraint.create(pair.second));
              }
            }
          }
        }
      }
    }
    return result;
  }
}
| apache-2.0 |
forjobshunting/hh2016 | app/lib/jsPDFMod/TiJSPDF.js | 336 | /*jslint maxerr:1000 */
/*
 * CommonJS convenience wrapper around jsPDF.
 *
 * Loads the sprintf and base64 helper libraries plus the jsPDF core and its
 * plugins into the current Titanium context via Ti.include(), then re-exports
 * the resulting global jsPDF constructor as a CommonJS module.
 */
Ti.include(
    '/jsPDFMod/libs/sprintf.js',
    '/jsPDFMod/libs/base64.js'
);
// Core jsPDF implementation (defines the global `jsPDF`).
Ti.include('/jsPDFMod/plugins/jspdf.js');
// Plugins: image embedding and document saving.
Ti.include('/jsPDFMod/plugins/jspdf.plugin.addimage.js');
Ti.include('/jsPDFMod/plugins/jspdf.plugin.save.js');
module.exports = jsPDF;
Mohitsahu123/magento-elasticsearch | lib/Elastica/Filter/Or.php | 875 | <?php
/**
 * "Or" filter: matches documents that satisfy at least one of the added
 * sub-filters.
 *
 * @uses Elastica_Filter_Abstract
 * @category Xodoa
 * @package Elastica
 * @author Nicolas Ruflin <spam@ruflin.com>
 * @link http://www.elasticsearch.com/docs/elasticsearch/rest_api/query_dsl/or_filter/
 */
class Elastica_Filter_Or extends Elastica_Filter_Abstract {
    /**
     * Serialized sub-filters composing this "or" filter.
     *
     * @var array Filter
     */
    protected $_filters = array();

    /**
     * Adds a sub-filter to this "or" filter.
     *
     * Note: the filter is serialized immediately, so later mutations of the
     * passed object are not reflected here.
     *
     * @param Elastica_Filter_Abstract $filter Filter object
     * @return Elastica_Filter_Or Filter object (fluent interface)
     */
    public function addFilter(Elastica_Filter_Abstract $filter) {
        $this->_filters[] = $filter->toArray();
        return $this;
    }

    /**
     * Converts the current object to an array.
     *
     * @see Elastica_Filter_Abstract::toArray()
     * @return array Or array
     */
    public function toArray() {
        $this->setParams($this->_filters);
        return parent::toArray();
    }
}
| apache-2.0 |
paulswithers/XPagesExtensionLibrary | extlib-des/lwp/openntf/tools/xpages-doc-generator/eclipse/plugins/com.ibm.designer.xsp.docgenerator/src/com/ibm/xsp/eclipse/tools/ui/RunGeneratorAction.java | 1562 | /*
* © Copyright IBM Corp. 2010
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.ibm.xsp.eclipse.tools.ui;
import org.eclipse.jface.action.Action;
import org.eclipse.jface.action.IAction;
import org.eclipse.jface.viewers.ISelection;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.ui.IObjectActionDelegate;
import org.eclipse.ui.IWorkbenchPart;
import org.eclipse.ui.PlatformUI;
/**
 * Workbench object-contribution action that opens the XPages documentation
 * generator dialog.
 */
public class RunGeneratorAction extends Action implements IObjectActionDelegate {
  /**
   * Creates the action with its contribution id.
   */
  public RunGeneratorAction() {
    super("com.ibm.designer.domino.generate.doc.action");
  }

  public void run(IAction action) {
    // Open the generator dialog on top of the active workbench window.
    Shell shell = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell();
    DocGeneratorDialog dialog = new DocGeneratorDialog(shell);
    dialog.open();
  }

  // The active part does not influence this action; intentionally a no-op.
  public void setActivePart(IAction action, IWorkbenchPart part) {
  }

  // The action is always enabled regardless of selection; intentionally a no-op.
  public void selectionChanged(IAction action, ISelection selection) {
  }
}
| apache-2.0 |
xtern/ignite | modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/RetryCauseMessageSelfTest.java | 18784 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.query.h2.twostep;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import javax.cache.CacheException;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteException;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cache.query.SqlQuery;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridReservable;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.index.AbstractIndexingCommonTest;
import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
import org.apache.ignite.internal.processors.query.GridQueryProcessor;
import org.apache.ignite.internal.processors.query.h2.H2Utils;
import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
import org.apache.ignite.lang.IgniteInClosure;
import org.apache.ignite.plugin.extensions.communication.Message;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.testframework.junits.WithSystemProperty;
import org.junit.Ignore;
import org.junit.Test;
import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT;
import static org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion.NONE;
import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.JOIN_SQL;
import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Organization;
import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Person;
/**
 * Verifies the human-readable cause messages produced for the six
 * distributed-query retry scenarios: missing cache, group/replicated/
 * partitioned reservation failures, non-collocated query retry, and
 * query/update topology-mapping timeouts.
 */
@WithSystemProperty(key = IGNITE_SQL_RETRY_TIMEOUT, value = "5000")
public class RetryCauseMessageSelfTest extends AbstractIndexingCommonTest {
    /** Number of grid nodes started for each test. */
    private static final int NODES_COUNT = 2;

    /** Query over the replicated organization cache. */
    private static final String ORG_SQL = "select * from Organization";

    /** Update statement used by the mapping-failure tests. */
    static final String UPDATE_SQL = "UPDATE Person SET name=lower(?) ";

    /** Name of the replicated organization cache. */
    private static final String ORG = "org";

    /** Partitioned person cache. */
    private IgniteCache<String, Person> personCache;

    /** Replicated organization cache. */
    private IgniteCache<String, Organization> orgCache;

    /** H2 indexing of the first node; tests swap its internals with mocks. */
    private IgniteH2Indexing h2Idx;

    /** {@inheritDoc} */
    @Override protected long getTestTimeout() {
        return 600 * 1000;
    }

    /**
     * Failed to reserve partitions for query (cache is not found on local node)
     */
    @Test
    public void testSynthCacheWasNotFoundMessage() {
        GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");

        // Wrap the real executor so every map-query request carries a bogus cache id.
        GridTestUtils.setFieldValue(h2Idx, "mapQryExec",
            new MockGridMapQueryExecutor() {
                @Override public void onQueryRequest(ClusterNode node, GridH2QueryRequest qryReq)
                    throws IgniteCheckedException {
                    qryReq.caches().add(Integer.MAX_VALUE);

                    startedExecutor.onQueryRequest(node, qryReq);

                    qryReq.caches().remove(qryReq.caches().size() - 1);
                }
            }.insertRealExecutor(mapQryExec));

        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");

        qry.setDistributedJoins(true);
        try {
            personCache.query(qry).getAll();
        }
        catch (CacheException e) {
            assertTrue(
                e.getMessage(),
                e.getMessage().contains("Failed to reserve partitions for query (cache is not found on local node) [")
            );

            return;
        }
        finally {
            // Always restore the real executor for subsequent tests.
            GridTestUtils.setFieldValue(h2Idx, "mapQryExec", mapQryExec);
        }
        fail();
    }

    /**
     * Failed to reserve partitions for query (group reservation failed)
     */
    @Test
    public void testGrpReservationFailureMessage() {
        final GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");

        final ConcurrentMap<PartitionReservationKey, GridReservable> reservations = reservations(h2Idx);

        // Register a reservation for the ORG group that always refuses to reserve.
        GridTestUtils.setFieldValue(h2Idx, "mapQryExec",
            new MockGridMapQueryExecutor() {
                @Override public void onQueryRequest(ClusterNode node, GridH2QueryRequest qryReq)
                    throws IgniteCheckedException {
                    final PartitionReservationKey grpKey = new PartitionReservationKey(ORG, null);

                    reservations.put(grpKey, new GridReservable() {
                        @Override public boolean reserve() {
                            return false;
                        }

                        @Override public void release() {}
                    });
                    startedExecutor.onQueryRequest(node, qryReq);
                }
            }.insertRealExecutor(mapQryExec));

        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");

        qry.setDistributedJoins(true);
        try {
            personCache.query(qry).getAll();
        }
        catch (CacheException e) {
            assertTrue(e.getMessage().contains("Failed to reserve partitions for query (group reservation failed) ["));

            return;
        }
        finally {
            GridTestUtils.setFieldValue(h2Idx, "mapQryExec", mapQryExec);
        }
        fail();
    }

    /**
     * Failed to reserve partitions for query (partition of REPLICATED cache is not in OWNING state)
     */
    @Ignore("https://issues.apache.org/jira/browse/IGNITE-7039")
    @Test
    public void testReplicatedCacheReserveFailureMessage() {
        GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");

        final GridKernalContext ctx = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "ctx");

        // Temporarily flip the local partition state (via its internal state field)
        // while the request is being processed, then restore it.
        GridTestUtils.setFieldValue(h2Idx, "mapQryExec",
            new MockGridMapQueryExecutor() {
                @Override public void onQueryRequest(ClusterNode node, GridH2QueryRequest qryReq) throws IgniteCheckedException {
                    GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(qryReq.caches().get(0));

                    GridDhtLocalPartition part = cctx.topology().localPartition(0, NONE, false);

                    AtomicLong aState = GridTestUtils.getFieldValue(part, GridDhtLocalPartition.class, "state");

                    long stateVal = aState.getAndSet(2);

                    startedExecutor.onQueryRequest(node, qryReq);

                    aState.getAndSet(stateVal);
                }
            }.insertRealExecutor(mapQryExec));

        SqlQuery<String, Organization> qry = new SqlQuery<>(Organization.class, ORG_SQL);

        qry.setDistributedJoins(true);
        try {
            orgCache.query(qry).getAll();
        }
        catch (CacheException e) {
            assertTrue(
                e.getMessage().contains("Failed to reserve partitions for query (partition of REPLICATED cache is not in OWNING state) [")
            );

            return;
        }
        finally {
            GridTestUtils.setFieldValue(h2Idx, "mapQryExec", mapQryExec);
        }
        fail();
    }

    /**
     * Failed to reserve partitions for query (partition of PARTITIONED cache cannot be reserved)
     */
    @Test
    public void testPartitionedCacheReserveFailureMessage() {
        GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");

        final GridKernalContext ctx = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "ctx");

        // Same state-flipping trick as above, but against the partitioned cache.
        GridTestUtils.setFieldValue(h2Idx, "mapQryExec",
            new MockGridMapQueryExecutor() {
                @Override public void onQueryRequest(ClusterNode node, GridH2QueryRequest qryReq)
                    throws IgniteCheckedException {
                    GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(qryReq.caches().get(0));

                    GridDhtLocalPartition part = cctx.topology().localPartition(0, NONE, false);

                    AtomicLong aState = GridTestUtils.getFieldValue(part, GridDhtLocalPartition.class, "state");

                    long stateVal = aState.getAndSet(2);

                    startedExecutor.onQueryRequest(node, qryReq);

                    aState.getAndSet(stateVal);
                }
            }.insertRealExecutor(mapQryExec));

        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");

        qry.setDistributedJoins(true);
        try {
            personCache.query(qry).getAll();
        }
        catch (CacheException e) {
            assertTrue(e.getMessage().contains("Failed to reserve partitions for query (partition of PARTITIONED " +
                "cache is not found or not in OWNING state) "));

            return;
        }
        finally {
            GridTestUtils.setFieldValue(h2Idx, "mapQryExec", mapQryExec);
        }
        fail();
    }

    /**
     * Failed to execute non-collocated query (will retry)
     */
    @Test
    public void testNonCollocatedFailureMessage() {
        final GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");

        final ConcurrentMap<PartitionReservationKey, GridReservable> reservations = reservations(h2Idx);

        // A reservation that throws the special retry exception triggers the
        // "will retry" code path.
        GridTestUtils.setFieldValue(h2Idx, "mapQryExec",
            new MockGridMapQueryExecutor() {
                @Override public void onQueryRequest(ClusterNode node, GridH2QueryRequest qryReq)
                    throws IgniteCheckedException {
                    final PartitionReservationKey grpKey = new PartitionReservationKey(ORG, null);

                    reservations.put(grpKey, new GridReservable() {
                        @Override public boolean reserve() {
                            throw H2Utils.retryException("test retry exception");
                        }

                        @Override public void release() {}
                    });

                    startedExecutor.onQueryRequest(node, qryReq);
                }
            }.insertRealExecutor(mapQryExec));

        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");

        qry.setDistributedJoins(true);
        try {
            personCache.query(qry).getAll();
        }
        catch (CacheException e) {
            assertTrue(e.getMessage().contains("Failed to execute non-collocated query (will retry) ["));

            return;
        }
        finally {
            GridTestUtils.setFieldValue(h2Idx, "mapQryExec", mapQryExec);
        }
        fail();
    }

    /**
     * Test query remap failure reason.
     */
    @Test
    public void testQueryMappingFailureMessage() {
        final GridReduceQueryExecutor rdcQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "rdcQryExec");
        final ReducePartitionMapper mapper = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "mapper");

        final IgniteLogger logger = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "log");
        final GridKernalContext ctx = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "ctx");

        // Mapper that always reports an empty node set, so the query can never be mapped.
        GridTestUtils.setFieldValue(rdcQryExec, "mapper",
            new ReducePartitionMapper(ctx, logger) {
                @Override public ReducePartitionMapResult nodesForPartitions(List<Integer> cacheIds,
                    AffinityTopologyVersion topVer, int[] parts, boolean isReplicatedOnly) {
                    final ReducePartitionMapResult res = super.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

                    return new ReducePartitionMapResult(Collections.emptyList(), res.partitionsMap(), res.queryPartitionsMap());
                }
            });

        try {
            SqlFieldsQuery qry = new SqlFieldsQuery(JOIN_SQL).setArgs("Organization #0");

            final Throwable throwable = GridTestUtils.assertThrows(log, () -> {
                return personCache.query(qry).getAll();
            }, CacheException.class, "Failed to map SQL query to topology during timeout:");

            throwable.printStackTrace();
        }
        finally {
            GridTestUtils.setFieldValue(rdcQryExec, "mapper", mapper);
        }
    }

    /**
     * Test update query remap failure reason.
     */
    @Test
    public void testUpdateQueryMappingFailureMessage() {
        final GridReduceQueryExecutor rdcQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "rdcQryExec");
        final ReducePartitionMapper mapper = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "mapper");

        final IgniteLogger logger = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "log");
        final GridKernalContext ctx = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "ctx");

        GridTestUtils.setFieldValue(rdcQryExec, "mapper",
            new ReducePartitionMapper(ctx, logger) {
                @Override public ReducePartitionMapResult nodesForPartitions(List<Integer> cacheIds,
                    AffinityTopologyVersion topVer, int[] parts, boolean isReplicatedOnly) {
                    final ReducePartitionMapResult res = super.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

                    return new ReducePartitionMapResult(Collections.emptyList(), res.partitionsMap(), res.queryPartitionsMap());
                }
            });

        try {
            final SqlFieldsQueryEx qry = new SqlFieldsQueryEx(UPDATE_SQL, false)
                .setArgs("New Name");

            // Regular update goes through the reducer and fails with a mapping timeout.
            GridTestUtils.assertThrows(log, () -> {
                return personCache.query(qry).getAll();
            }, CacheException.class, "Failed to map SQL query to topology during timeout");

            qry.setArgs("Another Name");
            qry.setSkipReducerOnUpdate(true);

            // Reducer-skipping update reports a different, update-specific message.
            GridTestUtils.assertThrows(log, () -> {
                return personCache.query(qry).getAll();
            }, CacheException.class, "Failed to determine nodes participating in the update. ");
        }
        finally {
            GridTestUtils.setFieldValue(rdcQryExec, "mapper", mapper);
        }
    }

    /** {@inheritDoc} */
    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
        IgniteConfiguration cfg = super.getConfiguration(gridName);

        cfg.setCommunicationSpi(new TcpCommunicationSpi() {
            /** {@inheritDoc} */
            @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
                assert msg != null;

                super.sendMessage(node, msg, ackC);
            }
        });

        return cfg;
    }

    /** {@inheritDoc} */
    @Override protected void beforeTest() throws Exception {
        Ignite ignite = startGridsMultiThreaded(NODES_COUNT, false);

        GridQueryProcessor qryProc = grid(ignite.name()).context().query();

        h2Idx = GridTestUtils.getFieldValue(qryProc, GridQueryProcessor.class, "idx");

        personCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Person>("pers")
            .setQueryEntities(JoinSqlTestHelper.personQueryEntity())
        );
        orgCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Organization>(ORG)
            .setCacheMode(CacheMode.REPLICATED)
            .setQueryEntities(JoinSqlTestHelper.organizationQueryEntity())
        );

        awaitPartitionMapExchange();

        JoinSqlTestHelper.populateDataIntoOrg(orgCache);

        JoinSqlTestHelper.populateDataIntoPerson(personCache);
    }

    /** {@inheritDoc} */
    @Override protected void afterTest() throws Exception {
        stopAllGrids();
    }

    /**
     * @param h2Idx Indexing.
     * @return Current reservations.
     */
    private static ConcurrentMap<PartitionReservationKey, GridReservable> reservations(IgniteH2Indexing h2Idx) {
        PartitionReservationManager partReservationMgr = h2Idx.partitionReservationManager();

        return GridTestUtils.getFieldValue(partReservationMgr, PartitionReservationManager.class, "reservations");
    }

    /**
     * Wrapper around @{GridMapQueryExecutor} that delegates to a real executor,
     * allowing tests to intercept query requests.
     */
    private abstract static class MockGridMapQueryExecutor extends GridMapQueryExecutor {
        /** Wrapped executor */
        GridMapQueryExecutor startedExecutor;

        /**
         * @param startedExecutor Started executor.
         * @return Mocked map query executor.
         */
        MockGridMapQueryExecutor insertRealExecutor(GridMapQueryExecutor startedExecutor) {
            this.startedExecutor = startedExecutor;

            return this;
        }

        /** {@inheritDoc} */
        @Override public void onQueryRequest(ClusterNode node, GridH2QueryRequest req) throws IgniteCheckedException {
            startedExecutor.onQueryRequest(node, req);
        }
    }
}
| apache-2.0 |
chetanmeh/jackrabbit-oak | oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryTest.java | 14206 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.jackrabbit.oak.plugins.document;
import java.util.Map;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.jackrabbit.oak.api.CommitFailedException;
import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
import org.apache.jackrabbit.oak.plugins.document.util.Utils;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
import org.apache.jackrabbit.oak.stats.Clock;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import static com.google.common.collect.Lists.newArrayList;
import static org.apache.jackrabbit.oak.plugins.document.Collection.CLUSTER_NODES;
import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
import static org.apache.jackrabbit.oak.plugins.document.util.Utils.getRootDocument;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Tests for the {@code _lastRev} recovery machinery: two DocumentNodeStores
 * share one in-memory document store, a virtual clock lets tests expire
 * cluster-node leases, and crashes are simulated by re-creating clusterNodes
 * entries without a proper shutdown.
 */
public class LastRevRecoveryTest {
    @Rule
    public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider();

    // Virtual clock controlling lease expiry.
    private Clock clock;

    // Two cluster nodes sharing the same document store.
    private DocumentNodeStore ds1;
    private DocumentNodeStore ds2;
    private int c1Id;
    private int c2Id;
    private MemoryDocumentStore sharedStore;

    @Before
    public void setUp() throws Exception {
        clock = new Clock.Virtual();
        clock.waitUntil(System.currentTimeMillis());
        Revision.setClock(clock);
        ClusterNodeInfo.setClock(clock);
        // disable lease check because we fiddle with the virtual clock
        final boolean leaseCheck = false;
        sharedStore = new MemoryDocumentStore();
        ds1 = builderProvider.newBuilder()
                .clock(clock)
                .setLeaseCheck(leaseCheck)
                .setAsyncDelay(0)
                .setDocumentStore(sharedStore)
                .setClusterId(1)
                .getNodeStore();
        c1Id = ds1.getClusterId();
        ds2 = builderProvider.newBuilder()
                .clock(clock)
                .setLeaseCheck(leaseCheck)
                .setAsyncDelay(0)
                .setDocumentStore(sharedStore)
                .setClusterId(2)
                .getNodeStore();
        c2Id = ds2.getClusterId();
    }

    @After
    public void tearDown() {
        ds1.dispose();
        ds2.dispose();
        // Restore the real clocks for other tests in the same JVM.
        ClusterNodeInfo.resetClockToDefault();
        Revision.resetClockToDefault();
    }

    @Test
    public void testRecover() throws Exception {
        //1. Create base structure /x/y
        NodeBuilder b1 = ds1.getRoot().builder();
        b1.child("x").child("y");
        ds1.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        ds1.runBackgroundOperations();

        //lastRev are persisted directly for new nodes. In case of
        // updates they are persisted via background jobs

        //1.2 Get last rev populated for root node for ds2
        ds2.runBackgroundOperations();
        NodeBuilder b2 = ds2.getRoot().builder();
        b2.child("x").setProperty("f1","b1");
        ds2.merge(b2, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        ds2.runBackgroundOperations();

        //2. Add a new node /x/y/z
        b2 = ds2.getRoot().builder();
        b2.child("x").child("y").child("z").setProperty("foo", "bar");
        ds2.merge(b2, EmptyHook.INSTANCE, CommitInfo.EMPTY);

        //Refresh DS1
        ds1.runBackgroundOperations();

        NodeDocument z1 = getDocument(ds1, "/x/y/z");
        NodeDocument y1 = getDocument(ds1, "/x/y");
        NodeDocument x1 = getDocument(ds1, "/x");

        Revision zlastRev2 = z1.getLastRev().get(c2Id);
        // /x/y/z is a new node and does not have a _lastRev
        assertNull(zlastRev2);

        Revision head2 = ds2.getHeadRevision().getRevision(c2Id);

        //lastRev should not be updated for C #2
        assertNull(y1.getLastRev().get(c2Id));

        LastRevRecoveryAgent recovery = new LastRevRecoveryAgent(ds1);

        //Do not pass y1 but still y1 should be updated
        recovery.recover(Lists.newArrayList(x1, z1), c2Id);

        //Post recovery the lastRev should be updated for /x/y and /x
        assertEquals(head2, getDocument(ds1, "/x/y").getLastRev().get(c2Id));
        assertEquals(head2, getDocument(ds1, "/x").getLastRev().get(c2Id));
        assertEquals(head2, getDocument(ds1, "/").getLastRev().get(c2Id));
    }

    // OAK-3079
    @Test
    public void recoveryWithoutRootUpdate() throws Exception {
        String clusterId = String.valueOf(c1Id);
        ClusterNodeInfoDocument doc = sharedStore.find(CLUSTER_NODES, clusterId);

        NodeBuilder builder = ds1.getRoot().builder();
        builder.child("x").child("y").child("z");
        merge(ds1, builder);

        ds1.dispose();

        // reset clusterNodes entry to simulate a crash
        sharedStore.remove(CLUSTER_NODES, clusterId);
        sharedStore.create(CLUSTER_NODES, newArrayList(updateOpFromDocument(doc)));

        // 'wait' until lease expires
        clock.waitUntil(doc.getLeaseEndTime() + 1);

        // run recovery on ds2
        LastRevRecoveryAgent agent = new LastRevRecoveryAgent(ds2);
        Iterable<Integer> clusterIds = agent.getRecoveryCandidateNodes();
        assertTrue(Iterables.contains(clusterIds, c1Id));
        assertEquals("must not recover any documents",
                0, agent.recover(c1Id));
    }

    // OAK-3488
    @Test
    public void recoveryWithTimeout() throws Exception {
        String clusterId = String.valueOf(c1Id);
        ClusterNodeInfoDocument doc = sharedStore.find(CLUSTER_NODES, clusterId);

        NodeBuilder builder = ds1.getRoot().builder();
        builder.child("x").child("y").child("z");
        merge(ds1, builder);

        ds1.dispose();

        // reset clusterNodes entry to simulate a crash
        sharedStore.remove(CLUSTER_NODES, clusterId);
        sharedStore.create(CLUSTER_NODES, newArrayList(updateOpFromDocument(doc)));

        // 'wait' until lease expires
        clock.waitUntil(doc.getLeaseEndTime() + 1);

        // simulate ongoing recovery by cluster node 2
        MissingLastRevSeeker seeker = new MissingLastRevSeeker(sharedStore, clock);
        seeker.acquireRecoveryLock(c1Id, c2Id);

        // run recovery from ds1
        LastRevRecoveryAgent a1 = new LastRevRecoveryAgent(ds1);
        // use current time -> do not wait for recovery of other agent
        assertEquals(-1, a1.recover(c1Id, clock.getTime()));

        seeker.releaseRecoveryLock(c1Id, true);

        assertEquals(0, a1.recover(c1Id, clock.getTime() + 1000));
    }

    // OAK-3488
    @Test
    public void failStartupOnRecoveryTimeout() throws Exception {
        String clusterId = String.valueOf(c1Id);
        ClusterNodeInfoDocument doc = sharedStore.find(CLUSTER_NODES, clusterId);

        NodeBuilder builder = ds1.getRoot().builder();
        builder.child("x").child("y").child("z");
        merge(ds1, builder);

        ds1.dispose();

        // reset clusterNodes entry to simulate a crash
        sharedStore.remove(CLUSTER_NODES, clusterId);
        sharedStore.create(CLUSTER_NODES, newArrayList(updateOpFromDocument(doc)));

        // 'wait' until lease expires
        clock.waitUntil(doc.getLeaseEndTime() + 1);

        // make sure ds2 lease is still fine
        ds2.getClusterInfo().renewLease();

        // simulate ongoing recovery by cluster node 2
        MissingLastRevSeeker seeker = new MissingLastRevSeeker(sharedStore, clock);
        assertTrue(seeker.acquireRecoveryLock(c1Id, c2Id));

        // attempt to restart ds1 while lock is acquired
        try {
            ds1 = new DocumentMK.Builder()
                    .clock(clock)
                    .setDocumentStore(sharedStore)
                    .setClusterId(c1Id)
                    .getNodeStore();
            fail("DocumentStoreException expected");
        } catch (DocumentStoreException e) {
            // expected
        }
        seeker.releaseRecoveryLock(c1Id, true);
    }

    // OAK-3488
    @Test
    public void breakRecoveryLockWithExpiredLease() throws Exception {
        String clusterId = String.valueOf(c1Id);
        ClusterNodeInfoDocument info1 = sharedStore.find(CLUSTER_NODES, clusterId);

        NodeBuilder builder = ds1.getRoot().builder();
        builder.child("x").child("y").child("z");
        merge(ds1, builder);

        ds1.dispose();

        // reset clusterNodes entry to simulate a crash of ds1
        sharedStore.remove(CLUSTER_NODES, clusterId);
        sharedStore.create(CLUSTER_NODES, newArrayList(updateOpFromDocument(info1)));

        // 'wait' until lease expires
        clock.waitUntil(info1.getLeaseEndTime() + 1);

        // make sure ds2 lease is still fine
        ds2.getClusterInfo().renewLease();

        // start of recovery by ds2
        MissingLastRevSeeker seeker = new MissingLastRevSeeker(sharedStore, clock);
        assertTrue(seeker.acquireRecoveryLock(c1Id, c2Id));

        // simulate crash of ds2
        ClusterNodeInfoDocument info2 = sharedStore.find(CLUSTER_NODES, String.valueOf(c2Id));
        ds2.dispose();

        // reset clusterNodes entry
        sharedStore.remove(CLUSTER_NODES, String.valueOf(c2Id));
        sharedStore.create(CLUSTER_NODES, newArrayList(updateOpFromDocument(info2)));

        // 'wait' until ds2's lease expires
        clock.waitUntil(info2.getLeaseEndTime() + 1);

        info1 = sharedStore.find(CLUSTER_NODES, clusterId);
        assertTrue(seeker.isRecoveryNeeded(info1));
        assertTrue(info1.isBeingRecovered());

        // restart ds1; its own startup recovery must break the stale lock
        ds1 = builderProvider.newBuilder()
                .clock(clock)
                .setLeaseCheck(false)
                .setAsyncDelay(0)
                .setDocumentStore(sharedStore)
                .setClusterId(1)
                .getNodeStore();

        info1 = sharedStore.find(CLUSTER_NODES, clusterId);
        assertFalse(seeker.isRecoveryNeeded(info1));
        assertFalse(info1.isBeingRecovered());
    }

    @Test
    public void recoveryMustNotPerformInitialSweep() throws Exception {
        String clusterId = String.valueOf(c1Id);
        ClusterNodeInfoDocument info1 = sharedStore.find(CLUSTER_NODES, clusterId);

        NodeBuilder builder = ds1.getRoot().builder();
        builder.child("x").child("y").child("z");
        merge(ds1, builder);

        ds1.dispose();

        // reset clusterNodes entry to simulate a crash of ds1
        sharedStore.remove(CLUSTER_NODES, clusterId);
        sharedStore.create(CLUSTER_NODES, newArrayList(updateOpFromDocument(info1)));

        // remove the sweep revision as well
        UpdateOp op = new UpdateOp(Utils.getIdFromPath("/"), false);
        op.removeMapEntry("_sweepRev", new Revision(0, 0, c1Id));
        assertNotNull(sharedStore.findAndUpdate(NODES, op));

        NodeDocument doc = getRootDocument(sharedStore);
        assertNull(doc.getSweepRevisions().getRevision(c1Id));

        // 'wait' until lease expires
        clock.waitUntil(info1.getLeaseEndTime() + 1);

        // make sure ds2 lease is still fine
        ds2.getClusterInfo().renewLease();

        // run recovery on ds2 for ds1
        LastRevRecoveryAgent agent = new LastRevRecoveryAgent(ds2);
        Iterable<Integer> clusterIds = agent.getRecoveryCandidateNodes();
        assertTrue(Iterables.contains(clusterIds, c1Id));

        // nothing to recover
        assertEquals("must not recover any documents",
                0, agent.recover(c1Id));

        // must not set sweep revision
        doc = getRootDocument(sharedStore);
        assertNull(doc.getSweepRevisions().getRevision(c1Id));
    }

    /**
     * Reads the raw node document for the given path from the store backing
     * {@code nodeStore}.
     */
    private NodeDocument getDocument(DocumentNodeStore nodeStore, String path) {
        return nodeStore.getDocumentStore().find(Collection.NODES, Utils.getIdFromPath(path));
    }

    /** Merges the builder's changes into the store without hooks. */
    private static void merge(NodeStore store, NodeBuilder builder)
            throws CommitFailedException {
        store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    }

    /**
     * Turns an existing document into an {@link UpdateOp} that re-creates it
     * verbatim (minus the id, which the op carries itself). Used to restore a
     * clusterNodes entry after removing it to fake a crash.
     */
    private static UpdateOp updateOpFromDocument(Document doc) {
        UpdateOp op = new UpdateOp(doc.getId(), true);
        for (String key : doc.keySet()) {
            if (key.equals(Document.ID)) {
                continue;
            }
            Object obj = doc.get(key);
            if (obj instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<Revision, String> map = (Map<Revision, String>) obj;
                for (Map.Entry<Revision, String> entry : map.entrySet()) {
                    op.setMapEntry(key, entry.getKey(), entry.getValue());
                }
            } else {
                // Preserve the stored value type where UpdateOp distinguishes it.
                if (obj instanceof Boolean) {
                    op.set(key, (Boolean) obj);
                } else if (obj instanceof Number) {
                    op.set(key, ((Number) obj).longValue());
                } else if (obj != null) {
                    op.set(key, obj.toString());
                } else {
                    op.set(key, null);
                }
            }
        }
        return op;
    }
}
| apache-2.0 |
MER-GROUP/intellij-community | java/java-tests/testData/inspection/dataFlow/fixture/VolatileFieldNPEFixes.java | 298 | import org.jetbrains.annotations.Nullable;
// Inspection test data (not production code): the embedded <warning> and
// <caret> markers encode the highlighting the dataflow inspection is expected
// to produce — a possible NPE is still reported because the volatile field may
// change between the null check and the use.  Do not "fix" this code.
class Test {
  @Nullable volatile String x;
  public void foo() {
    if (x != null) {
      System.out.println(<warning descr="Method invocation 'x.substring(1)' may produce 'java.lang.NullPointerException'">x.sub<caret>string(1)</warning>);
    }
  }
}
kba/clstm | test-cderiv.cc | 12059 | #include <assert.h>
#include <math.h>
#include <cmath>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "clstm.h"
#include "clstm_compute.h"
#include "extras.h"
#include "utils.h"
using std_string = std::string;
#define string std_string
using std::vector;
using std::shared_ptr;
using std::unique_ptr;
using std::to_string;
using std::make_pair;
using std::cout;
using std::stoi;
using namespace Eigen;
using namespace ocropus;
typedef vector<Params> ParamVec;
double sqr(double x) { return x * x; }
// Deterministic pseudo-random generator used to initialize test data.
// Walks the sequence cos(1*3.7), cos(2*3.7), ... and returns the next value
// whose magnitude exceeds 0.1 (small values are skipped so relative
// derivative errors stay well-conditioned). The counter is a local static,
// so successive calls yield a reproducible sequence.
double randu() {
  static int count = 1;
  double x;
  do {
    x = cos(count * 3.7);
    count++;
  } while (fabs(x) <= 0.1);
  return x;
}
// Fills sequence `a` with N timesteps of n x m matrices, initializing both
// the values (v) and the deltas (d) of every element.
// If the "finit" environment flag is set, elements get the structured value
// 10000*t + 100*i + j (deltas offset by 0.5) so that individual entries can
// be identified when debugging; otherwise both are drawn from randu().
void randseq(Sequence &a, int N, int n, int m) {
  bool finit = getienv("finit", 0);
  a.resize(N, n, m);
  for (int t = 0; t < N; t++) {
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        if (finit) {
          a[t].v(i, j) = 10000 * t + 100 * i + j;
          a[t].d(i, j) = 10000 * t + 100 * i + j + 0.5;
        } else {
          a[t].v(i, j) = randu();
          a[t].d(i, j) = randu();
        }
      }
    }
  }
  a.check();
}
// Resizes `a` to one Params per spec and fills each parameter's values and
// deltas with randu(); specs[k] = {rows, cols} gives the shape of the k-th
// parameter.
void randparams(ParamVec &a, const vector<vector<int>> &specs) {
  int N = specs.size();
  a.resize(N);
  for (int k = 0; k < N; k++) {
    int n = specs[k][0];
    int m = specs[k][1];
    a[k].setZero(n, m);
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        a[k].v(i, j) = randu();
        a[k].d(i, j) = randu();
      }
    }
  }
}
// Returns the largest absolute element-wise difference between the values
// of `out` and `target`, which must agree in length, rows, and columns.
double maxerr(Sequence &out, Sequence &target) {
  assert(out.size() == target.size());
  assert(out.rows() == target.rows());
  assert(out.cols() == target.cols());
  int N = out.size(), n = out.rows(), m = out.cols();
  double worst = 0.0;
  for (int t = 0; t < N; t++) {
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        double diff = fabs(target[t].v(i, j) - out[t].v(i, j));
        if (diff > worst) worst = diff;
      }
    }
  }
  return worst;
}
// Returns the mean absolute element-wise difference between `out` and
// `target`, which must agree in length, rows, and columns.
// Returns 0.0 for empty sequences; the previous version divided by a zero
// count and produced NaN in that case.
double avgerr(Sequence &out, Sequence &target) {
  assert(out.size() == target.size());
  assert(out.rows() == target.rows());
  assert(out.cols() == target.cols());
  int N = out.size(), n = out.rows(), m = out.cols();
  double total = 0.0;
  int count = 0;
  for (int t = 0; t < N; t++) {
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        double delta = target[t].v(i, j) - out[t].v(i, j);
        total += fabs(delta);
        count++;
      }
    }
  }
  if (count == 0) return 0.0;  // avoid 0/0 -> NaN on empty input
  return total / count;
}
// Returns the summed squared error between `out` and `target`.
// Side effect: seeds backpropagation by zeroing each output's gradients and
// storing the residual (target - out) into out[t].d. Note this residual is
// the derivative of the loss only up to a factor of -2; test_net's error
// formula divides by -2 to compensate.
double mse(Sequence &out, Sequence &target) {
  assert(out.size() == target.size());
  assert(out.rows() == target.rows());
  assert(out.cols() == target.cols());
  int N = out.size(), n = out.rows(), m = out.cols();
  double total = 0.0;
  for (int t = 0; t < N; t++) {
    out[t].zeroGrad();
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        double delta = target[t].v(i, j) - out[t].v(i, j);
        out[t].d(i, j) = delta;
        total += sqr(delta);
      }
    }
  }
  return total;
}
// Tracks the smallest value seen so far together with the parameter
// (e.g. a step size h) that produced it.
struct Minimizer {
  double value = INFINITY;
  double param = 0;
  // Record (value, param) if value improves on the current minimum.
  void add(double value, double param = NAN) {
    bool improved = !(value >= this->value);
    if (improved) {
      this->value = value;
      this->param = param;
    }
  }
};
// Tracks the largest value seen so far together with the parameter that
// produced it.
struct Maximizer {
  double value = -INFINITY;
  double param = 0;
  // Record (value, param) if value improves on the current maximum.
  void add(double value, double param = NAN) {
    bool improved = !(value <= this->value);
    if (improved) {
      this->value = value;
      this->param = param;
    }
  }
};
struct Testcase;
vector<Testcase *> testcases;
// Base class for gradient-checking test cases. A subclass supplies initial
// data via init() and wraps a single forward/backward computation; test_net
// then compares analytic derivatives against numeric finite differences.
struct Testcase {
  virtual ~Testcase() {}
  Sequence inputs;   // input sequence fed to the op under test
  ParamVec ps;       // trainable parameters of the op (may be empty)
  Sequence outputs;  // filled by forward()
  Sequence targets;  // random targets defining the mse loss
  // Implementation-defined (mangled) type name; used only for logging.
  virtual string name() { return typeid(*this).name(); }
  // Store random initial test values appropriate for
  // the test case into inputs, ps, and targets
  virtual void init() {
    // reasonable defaults
    randseq(inputs, 1, 7, 4);
    randseq(targets, 1, 3, 4);
    randparams(ps, {{3, 7}});
  }
  // Perform forward and backward steps using inputs,
  // outputs, and ps.
  virtual void forward() = 0;
  virtual void backward() = 0;
};
// Finite-difference gradient checker. For every input element and every
// parameter element of the test case, perturbs the value by several step
// sizes h, re-runs forward(), and compares the numeric derivative
// (out1 - out) / h of the mse loss against the analytic derivative left in
// the gradients by backward(); the smallest relative error over all h is
// kept. Input-derivative errors are asserted to be below 0.1.
void test_net(Testcase &tc) {
  int verbose = getienv("verbose", 0);
  print("testing", tc.name());
  tc.init();
  // make backups for computing derivatives
  Sequence inputs = tc.inputs;
  Sequence targets = tc.targets;
  ParamVec ps = tc.ps;
  Maximizer maxinerr;
  int N = inputs.size();
  int ninput = inputs.rows();
  int bs = inputs.cols();
  for (int t = 0; t < N; t++) {
    for (int i = 0; i < ninput; i++) {
      for (int b = 0; b < bs; b++) {
        Minimizer minerr;
        for (float h = 1e-6; h < 1.0; h *= 10) {
          // restore pristine inputs, then one forward/backward pass
          tc.inputs = inputs;
          tc.outputs.like(targets);
          tc.forward();
          double out = mse(tc.outputs, targets);
          tc.inputs.zeroGrad();
          for (Params &p : tc.ps) p.zeroGrad();
          tc.backward();
          double a_deriv = tc.inputs[t].d(i, b);
          // numeric derivative via forward difference at step h
          tc.inputs[t].v(i, b) += h;
          tc.forward();
          double out1 = mse(tc.outputs, targets);
          double num_deriv = (out1 - out) / h;
          // mse() seeds outputs.d with the residual delta, which is -1/2 of
          // d(mse)/d(out); dividing by -2 compensates for that factor.
          double error = fabs(1.0 - num_deriv / a_deriv / -2.0);
          if (verbose > 1)
            print(t, i, b, ":", error, h, "num:", num_deriv, "analytic:",
                  a_deriv, "out:", out1, out);
          minerr.add(error, h);
        }
        if (verbose) print("deltas", t, i, b, minerr.value, minerr.param);
        assert(minerr.value < 0.1);
        maxinerr.add(minerr.value);
      }
    }
  }
  // Same check for every parameter element.
  // NOTE(review): unlike the input loop above, parameter errors are only
  // accumulated into maxparamerr and never asserted — confirm intentional.
  Maximizer maxparamerr;
  for (int k = 0; k < ps.size(); k++) {
    int n = ps[k].rows();
    int m = ps[k].cols();
    for (int i = 0; i < n; i++) {
      for (int j = 0; j < m; j++) {
        Minimizer minerr;
        for (float h = 1e-6; h < 1.0; h *= 10) {
          tc.ps = ps;
          tc.inputs = inputs;
          tc.outputs.like(targets);
          tc.forward();
          double out = mse(tc.outputs, targets);
          tc.inputs.zeroGrad();
          for (Params &p : tc.ps) p.zeroGrad();
          tc.backward();
          double a_deriv = tc.ps[k].d(i, j);
          tc.ps[k].v(i, j) += h;
          tc.forward();
          double out1 = mse(tc.outputs, targets);
          double num_deriv = (out1 - out) / h;
          double error = fabs(1.0 - num_deriv / a_deriv / -2.0);
          if (verbose > 1)
            print(k, i, j, ":", error, h, "/", num_deriv, a_deriv, out1, out);
          minerr.add(error, h);
        }
        maxparamerr.add(minerr.value);
      }
    }
  }
  // restore the original state of the test case
  tc.inputs = inputs;
  tc.ps = ps;
  tc.targets = targets;
  print("OK", maxinerr.value, maxparamerr.value);
}
// Checks derivatives of the fully-connected layer with sigmoid
// nonlinearity (forward_full1/backward_full1 with SIG).
struct TestFull1Sigmoid : Testcase {
  virtual void init() {
    randseq(inputs, 1, 7, 4);
    randseq(targets, 1, 3, 4);
    // weight shape 3x8 for 7 inputs — presumably the extra column is a
    // bias consumed by forward_full1; confirm against clstm_compute.
    randparams(ps, {{3, 8}});
  }
  void forward() { forward_full1(outputs[0], ps[0], inputs[0], SIG); }
  void backward() { backward_full1(outputs[0], ps[0], inputs[0], SIG); }
};
struct TestFull1Tanh : Testcase {
virtual void init() {
randseq(inputs, 1, 7, 4);
randseq(targets, 1, 3, 4);
randparams(ps, {{3, 8}});
}
void forward() { forward_full1(outputs[0], ps[0], inputs[0], TANH); }
void backward() { backward_full1(outputs[0], ps[0], inputs[0], TANH); }
};
struct TestFull1Logmag : Testcase {
virtual void init() {
randseq(inputs, 1, 7, 4);
randseq(targets, 1, 3, 4);
randparams(ps, {{3, 8}});
}
void forward() { forward_full1(outputs[0], ps[0], inputs[0], LOGMAG); }
void backward() { backward_full1(outputs[0], ps[0], inputs[0], LOGMAG); }
};
struct TestStack : Testcase {
virtual void init() {
randseq(inputs, 2, 7, 4);
randseq(targets, 1, 14, 4);
randparams(ps, {});
}
void forward() { forward_stack(outputs[0], inputs[0], inputs[1]); }
void backward() { backward_stack(outputs[0], inputs[0], inputs[1]); }
};
struct TestStackDelay : Testcase {
virtual void init() {
randseq(inputs, 2, 7, 4);
randseq(targets, 1, 14, 4);
randparams(ps, {});
}
void forward() { forward_stack_delay(outputs[0], inputs[0], inputs, 1); }
void backward() { backward_stack_delay(outputs[0], inputs[0], inputs, 1); }
};
#ifdef DEPRECATED
struct TestFullSigmoid : Testcase {
void forward() { forward_full<SigmoidNonlin>(outputs[0], ps[0], inputs[0]); }
void backward() {
backward_full<SigmoidNonlin>(outputs[0], ps[0], inputs[0]);
}
};
struct TestFullTanh : Testcase {
void forward() { forward_full<SigmoidNonlin>(outputs[0], ps[0], inputs[0]); }
void backward() {
backward_full<SigmoidNonlin>(outputs[0], ps[0], inputs[0]);
}
};
struct TestStack1Delay : Testcase {
virtual void init() {
randseq(inputs, 2, 7, 4);
randseq(targets, 1, 15, 4);
randparams(ps, {});
}
void forward() { forward_stack1(outputs[0], inputs[0], inputs, 1); }
void backward() { backward_stack1(outputs[0], inputs[0], inputs, 1); }
};
#endif
struct TestReverse : Testcase {
virtual void init() {
randseq(inputs, 5, 7, 4);
randseq(targets, 5, 7, 4);
randparams(ps, {});
}
void forward() { forward_reverse(outputs, inputs); }
void backward() { backward_reverse(outputs, inputs); }
};
struct TestBtswitch : Testcase {
virtual void init() {
randseq(inputs, 5, 7, 4);
randseq(targets, 4, 7, 5);
randparams(ps, {});
}
void forward() { forward_btswitch(outputs, inputs); }
void backward() { backward_btswitch(outputs, inputs); }
};
struct TestBatchstack : Testcase {
virtual void init() {
randseq(inputs, 5, 4, 11);
randseq(targets, 5, 12, 11);
randparams(ps, {});
}
void forward() { forward_batchstack(outputs, inputs, 1, 1); }
void backward() { backward_batchstack(outputs, inputs, 1, 1); }
};
struct TestStatemem : Testcase {
virtual void init() {
randseq(inputs, 4, 7, 4);
randseq(targets, 1, 7, 4);
randparams(ps, {});
}
void forward() {
forward_statemem(outputs[0], inputs[0], inputs[1], inputs, 2, inputs[3]);
}
void backward() {
backward_statemem(outputs[0], inputs[0], inputs[1], inputs, 2, inputs[3]);
}
};
// Checks derivatives of the nonlinearity gate with tanh, which combines
// two 7x4 input batches into one 7x4 output batch.
struct TestNonlingate : Testcase {
  virtual void init() {
    randseq(inputs, 2, 7, 4);
    randseq(targets, 1, 7, 4);
    randparams(ps, {});
  }
  void forward() { forward_nonlingate(outputs[0], inputs[0], inputs[1], TANH); }
  void backward() {
    backward_nonlingate(outputs[0], inputs[0], inputs[1], TANH);
  }
};
inline Eigen::array<ptrdiff_t, 1> indexes(int i) {
return Eigen::array<ptrdiff_t, 1>({i});
}
inline Eigen::array<ptrdiff_t, 2> indexes(int i, int j) {
return Eigen::array<ptrdiff_t, 2>({i, j});
}
#ifdef DEPRECATED
void test_full() {
print("comparing full and full1");
Sequence inputs;
ParamVec ps;
Sequence outputs;
randseq(inputs, 1, 7, 4);
randparams(ps, {{3, 8}});
randseq(outputs, 2, 3, 4);
Batch inputs1;
inputs1.resize(8, 4);
inputs1.v().slice(indexes(0, 0), indexes(1, 4)).setConstant(Float(1));
inputs1.v().slice(indexes(1, 0), indexes(7, 4)) = inputs[0].v();
forward_full1<SigmoidNonlin>(outputs[0], ps[0], inputs[0]);
forward_full<SigmoidNonlin>(outputs[1], ps[0], inputs1);
EigenTensor1 err = (outputs[0].v() - outputs[1].v()).abs().maximum();
assert(err(0) < 0.001);
print("OK", err(0));
backward_full1<SigmoidNonlin>(outputs[0], ps[0], inputs[0]);
backward_full<SigmoidNonlin>(outputs[1], ps[0], inputs1);
EigenTensor1 derr =
(inputs[0].d() - inputs1.d().slice(indexes(1, 0), indexes(7, 4)))
.abs()
.maximum();
// assert(derr(0) < 0.001);
print("OK", derr(0));
}
#endif
// Runs the derivative checks for every supported primitive; the DEPRECATED
// variants are only compiled and tested when built with -DDEPRECATED.
int main(int argc, char **argv) {
  TRY {
    test_net(*new TestBatchstack);
    test_net(*new TestFull1Sigmoid);
    test_net(*new TestFull1Tanh);
    test_net(*new TestFull1Logmag);
    test_net(*new TestStack);
    test_net(*new TestStackDelay);
    test_net(*new TestReverse);
    test_net(*new TestBtswitch);
    test_net(*new TestStatemem);
    test_net(*new TestNonlingate);
#ifdef DEPRECATED
    test_net(*new TestFullSigmoid);
    test_net(*new TestFullTanh);
    test_net(*new TestStack1Delay);
    test_full();
#endif
  }
  CATCH(const char *message) { print("ERROR", message); }
}
| apache-2.0 |
mycFelix/heron | heron/api/src/java/org/apache/heron/streamlet/impl/operators/UnionOperator.java | 1444 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.heron.streamlet.impl.operators;
import org.apache.heron.api.tuple.Tuple;
import org.apache.heron.api.tuple.Values;
/**
 * UnionOperator is the class that implements the union functionality.
 * It's a very simple bolt that re-emits every tuple that it sees.
 */
public class UnionOperator<I> extends StreamletOperator<I, I> {
  private static final long serialVersionUID = -7326832064961413315L;

  public UnionOperator() {
  }

  /**
   * Re-emits the first (and only) value of the incoming tuple unchanged,
   * then acks the tuple.
   */
  @SuppressWarnings("unchecked")
  @Override
  public void execute(Tuple tuple) {
    I obj = (I) tuple.getValue(0);
    collector.emit(new Values(obj));
    collector.ack(tuple);
  }
}
| apache-2.0 |
naveedaz/azure-powershell | src/Common/Commands.Common.Authentication.Abstractions/Interfaces/IDataStore.cs | 8337 | // ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using System.IO;
using System.Security.Cryptography.X509Certificates;
using System.Text;
namespace Microsoft.Azure.Commands.Common.Authentication
{
    /// <summary>
    /// An abstraction over the file system
    /// </summary>
    public interface IDataStore
    {
        /// <summary>
        /// Write the given contents to the specified file
        /// </summary>
        /// <param name="path">The file path</param>
        /// <param name="contents">The file contents</param>
        void WriteFile(string path, string contents);

        /// <summary>
        /// Write the given contents to the specified file, using the specified encoding
        /// </summary>
        /// <param name="path">The file path</param>
        /// <param name="content">The file contents</param>
        /// <param name="encoding">The encoding to use</param>
        void WriteFile(string path, string content, Encoding encoding);

        /// <summary>
        /// Write the given binary contents to the specified file
        /// </summary>
        /// <param name="path">The file path</param>
        /// <param name="contents">The binary contents</param>
        void WriteFile(string path, byte[] contents);

        /// <summary>
        /// Return the contents of the given file as a text string
        /// </summary>
        /// <param name="path">The file path</param>
        /// <returns>The contents of the given file as a string. Throws an exception if the file is not found.</returns>
        string ReadFileAsText(string path);

        /// <summary>
        /// Return the contents of the given file as a stream
        /// </summary>
        /// <param name="path">The file path</param>
        /// <returns>The contents of the given file as a stream. Throws an exception if the file is not found.</returns>
        Stream ReadFileAsStream(string path);

        /// <summary>
        /// Return the contents of the given file as a byte array
        /// </summary>
        /// <param name="path">The file path</param>
        /// <returns>The contents of the given file as a byte array. Throws an exception if the file is not found.</returns>
        byte[] ReadFileAsBytes(string path);

        /// <summary>
        /// Move the file to the specified location. Overwrites the file if it exists
        /// </summary>
        /// <param name="oldPath">Source file path</param>
        /// <param name="newPath">Target file path</param>
        void RenameFile(string oldPath, string newPath);

        /// <summary>
        /// Open the file for shared Read access
        /// </summary>
        /// <param name="path">Path to the file</param>
        /// <returns>A FileStream pointing to the beginning of the file</returns>
        Stream OpenForSharedRead(string path);

        /// <summary>
        /// Open the file for exclusive read/write access
        /// </summary>
        /// <param name="path">Path to the file to open</param>
        /// <returns>A FileStream pointing to the beginning of the file</returns>
        Stream OpenForExclusiveWrite(string path);

        /// <summary>
        /// Copy the given file to the target path. Overwrites the file if it exists
        /// </summary>
        /// <param name="oldPath">Source file path</param>
        /// <param name="newPath">Target file path</param>
        void CopyFile(string oldPath, string newPath);

        /// <summary>
        /// Checks if the given file exists
        /// </summary>
        /// <param name="path">The file path to check</param>
        /// <returns>True if the file exists, false otherwise</returns>
        bool FileExists(string path);

        /// <summary>
        /// Remove the given file
        /// </summary>
        /// <param name="path">The path of the file to delete</param>
        void DeleteFile(string path);

        /// <summary>
        /// Remove the given directory
        /// </summary>
        /// <param name="dir">The directory path</param>
        void DeleteDirectory(string dir);

        /// <summary>
        /// Remove all files from the given directory
        /// </summary>
        /// <param name="dirPath">The directory to empty</param>
        void EmptyDirectory(string dirPath);

        /// <summary>
        /// Check for existence of the given directory
        /// </summary>
        /// <param name="path">The directory path to check</param>
        /// <returns>True if the directory exists, otherwise false</returns>
        bool DirectoryExists(string path);

        /// <summary>
        /// Create a directory at the given path
        /// </summary>
        /// <param name="path">The directory path</param>
        void CreateDirectory(string path);

        /// <summary>
        /// Get the set of directories inside the given directory path
        /// </summary>
        /// <param name="sourceDirName">The directory to list directory contents of</param>
        /// <returns>The paths of the contained directories</returns>
        string[] GetDirectories(string sourceDirName);

        /// <summary>
        /// Get directories at the given path matching the given pattern and search options
        /// </summary>
        /// <param name="startDirectory">The directory to list directory contents of</param>
        /// <param name="filePattern">The pattern of directory names to include</param>
        /// <param name="options">Directory search options</param>
        /// <returns>The path to all contained directories</returns>
        string[] GetDirectories(string startDirectory, string filePattern, SearchOption options);

        /// <summary>
        /// Get the files in the given directory
        /// </summary>
        /// <param name="sourceDirName">The directory path to check</param>
        /// <returns>The list of file paths in the given directory</returns>
        string[] GetFiles(string sourceDirName);

        /// <summary>
        /// Get files at the given path matching the given pattern and search options
        /// </summary>
        /// <param name="startDirectory">The directory to list file contents of</param>
        /// <param name="filePattern">The pattern of file names to include</param>
        /// <param name="options">File search options</param>
        /// <returns>The path to all contained files</returns>
        string[] GetFiles(string startDirectory, string filePattern, SearchOption options);

        /// <summary>
        /// Get the file system attributes for the given file
        /// </summary>
        /// <param name="path">The file path</param>
        /// <returns>The file system attributes associated with the file</returns>
        FileAttributes GetFileAttributes(string path);

        /// <summary>
        /// Search for the given certificate from the CurrentUser and LocalSystem 'My' directory stores
        /// </summary>
        /// <param name="thumbprint">The thumbprint of the certificate to look for</param>
        /// <returns>The certificate matching the given thumbprint</returns>
        X509Certificate2 GetCertificate(string thumbprint);

        /// <summary>
        /// Add the given certificate to the CurrentUser 'My' store
        /// </summary>
        /// <param name="cert">The certificate to add</param>
        void AddCertificate(X509Certificate2 cert);

        /// <summary>
        /// Remove the given certificate from the CurrentUser 'My' directory store
        /// </summary>
        /// <param name="thumbprint">The thumbprint of the certificate to look for</param>
        void RemoveCertificate(string thumbprint);
    }
}
| apache-2.0 |
Gayany/product-mdm | modules/mobile-agents/windows/jax-rs/src/main/java/org/wso2/carbon/mdm/mobileservices/windows/operations/AlertTag.java | 1913 | /*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.mdm.mobileservices.windows.operations;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.wso2.carbon.mdm.mobileservices.windows.operations.util.Constants;
/**
* Inform an event occurred from device to server.
*/
/**
 * Represents a SyncML {@code Alert} element used to inform the server of an
 * event that occurred on the device. The command id is optional; a value of
 * -1 means "unset" and suppresses the CmdID child element.
 */
public class AlertTag {
    int commandId = -1;
    String data;

    public int getCommandId() {
        return commandId;
    }

    public void setCommandId(int commandId) {
        this.commandId = commandId;
    }

    public String getData() {
        return data;
    }

    public void setData(String data) {
        this.data = data;
    }

    /**
     * Appends an {@code Alert} element for this object to the given root
     * element, adding CmdID and Data children only when they have been set.
     */
    public void buildAlertElement(Document doc, Element rootElement) {
        Element alertElement = doc.createElement(Constants.ALERT);
        rootElement.appendChild(alertElement);
        if (getCommandId() != -1) {
            Element commandIdElement = doc.createElement(Constants.COMMAND_ID);
            commandIdElement.appendChild(doc.createTextNode(String.valueOf(getCommandId())));
            alertElement.appendChild(commandIdElement);
        }
        if (getData() != null) {
            Element dataElement = doc.createElement(Constants.DATA);
            dataElement.appendChild(doc.createTextNode(getData()));
            alertElement.appendChild(dataElement);
        }
    }
}
| apache-2.0 |
Microsoft/TypeScript | tests/baselines/reference/super.js | 2504 | //// [super.ts]
class Base {
constructor() {
var x;
}
public foo() {
return "base";
}
public bar() {
return "basebar";
}
}
class Sub1 extends Base {
public foo() {
return "sub1" + super.foo() + super.bar();
}
}
class SubSub1 extends Sub1 {
public foo() {
return "subsub1" + super.foo();
}
}
class Base2 {
public foo() {
super.foo();
}
}
var s = new Sub1();
var ss = new SubSub1();
s.foo() + ss.foo();
//// [super.js]
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
if (typeof b !== "function" && b !== null)
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var Base = /** @class */ (function () {
function Base() {
var x;
}
Base.prototype.foo = function () {
return "base";
};
Base.prototype.bar = function () {
return "basebar";
};
return Base;
}());
var Sub1 = /** @class */ (function (_super) {
__extends(Sub1, _super);
function Sub1() {
return _super !== null && _super.apply(this, arguments) || this;
}
Sub1.prototype.foo = function () {
return "sub1" + _super.prototype.foo.call(this) + _super.prototype.bar.call(this);
};
return Sub1;
}(Base));
var SubSub1 = /** @class */ (function (_super) {
__extends(SubSub1, _super);
function SubSub1() {
return _super !== null && _super.apply(this, arguments) || this;
}
SubSub1.prototype.foo = function () {
return "subsub1" + _super.prototype.foo.call(this);
};
return SubSub1;
}(Sub1));
var Base2 = /** @class */ (function () {
function Base2() {
}
Base2.prototype.foo = function () {
_super.prototype.foo.call(this);
};
return Base2;
}());
var s = new Sub1();
var ss = new SubSub1();
s.foo() + ss.foo();
| apache-2.0 |
lyogavin/spark | streaming/src/test/scala/spark/streaming/TestSuiteBase.scala | 11725 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.streaming
import spark.streaming.dstream.{InputDStream, ForEachDStream}
import spark.streaming.util.ManualClock
import spark.{RDD, Logging}
import collection.mutable.ArrayBuffer
import collection.mutable.SynchronizedBuffer
import java.io.{ObjectInputStream, IOException}
import org.scalatest.{BeforeAndAfter, FunSuite}
/**
 * This is a input stream just for the testsuites. This is equivalent to a checkpointable,
 * replayable, reliable message queue like Kafka. It requires a sequence as input, and
 * returns the i_th element at the i_th batch under manual clock.
 */
class TestInputStream[T: ClassManifest](ssc_ : StreamingContext, input: Seq[Seq[T]], numPartitions: Int)
  extends InputDStream[T](ssc_) {

  def start() {}

  def stop() {}

  def compute(validTime: Time): Option[RDD[T]] = {
    logInfo("Computing RDD for time " + validTime)
    // Index of the batch to replay: batches are counted from zeroTime in
    // steps of slideDuration.
    val index = ((validTime - zeroTime) / slideDuration - 1).toInt
    // Past the end of the supplied input, produce empty batches.
    val selectedInput = if (index < input.size) input(index) else Seq[T]()

    // lets us test cases where RDDs are not created
    if (selectedInput == null)
      return None

    val rdd = ssc.sc.makeRDD(selectedInput, numPartitions)
    logInfo("Created RDD " + rdd.id + " with " + selectedInput)
    Some(rdd)
  }
}
/**
 * This is a output stream just for the testsuites. All the output is collected into a
 * ArrayBuffer. This buffer is wiped clean on being restored from checkpoint.
 */
class TestOutputStream[T: ClassManifest](parent: DStream[T], val output: ArrayBuffer[Seq[T]])
  extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    val collected = rdd.collect()
    output += collected
  }) {

  // This is to clear the output buffer every time it is read from a checkpoint
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream) {
    ois.defaultReadObject()
    output.clear()
  }
}
/**
* This is the base trait for Spark Streaming testsuites. This provides basic functionality
* to run user-defined set of input on user-defined stream operations, and verify the output.
*/
trait TestSuiteBase extends FunSuite with BeforeAndAfter with Logging {
// Name of the framework for Spark context
def framework = "TestSuiteBase"
// Master for Spark context
def master = "local[2]"
// Batch duration
def batchDuration = Seconds(1)
// Directory where the checkpoint data will be saved
def checkpointDir = "checkpoint"
// Number of partitions of the input parallel collections created for testing
def numInputPartitions = 2
// Maximum time to wait before the test times out
def maxWaitTimeMillis = 10000
// Whether to actually wait in real time before changing manual clock
def actuallyWait = false
/**
* Set up required DStreams to test the DStream operation using the two sequences
* of input collections.
*/
def setupStreams[U: ClassManifest, V: ClassManifest](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V]
): StreamingContext = {
// Create StreamingContext
val ssc = new StreamingContext(master, framework, batchDuration)
if (checkpointDir != null) {
ssc.checkpoint(checkpointDir)
}
// Setup the stream computation
val inputStream = new TestInputStream(ssc, input, numInputPartitions)
val operatedStream = operation(inputStream)
val outputStream = new TestOutputStream(operatedStream, new ArrayBuffer[Seq[V]] with SynchronizedBuffer[Seq[V]])
ssc.registerInputStream(inputStream)
ssc.registerOutputStream(outputStream)
ssc
}
/**
* Set up required DStreams to test the binary operation using the sequence
* of input collections.
*/
def setupStreams[U: ClassManifest, V: ClassManifest, W: ClassManifest](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W]
): StreamingContext = {
// Create StreamingContext
val ssc = new StreamingContext(master, framework, batchDuration)
if (checkpointDir != null) {
ssc.checkpoint(checkpointDir)
}
// Setup the stream computation
val inputStream1 = new TestInputStream(ssc, input1, numInputPartitions)
val inputStream2 = new TestInputStream(ssc, input2, numInputPartitions)
val operatedStream = operation(inputStream1, inputStream2)
val outputStream = new TestOutputStream(operatedStream, new ArrayBuffer[Seq[W]] with SynchronizedBuffer[Seq[W]])
ssc.registerInputStream(inputStream1)
ssc.registerInputStream(inputStream2)
ssc.registerOutputStream(outputStream)
ssc
}
/**
* Runs the streams set up in `ssc` on manual clock for `numBatches` batches and
* returns the collected output. It will wait until `numExpectedOutput` number of
* output data has been collected or timeout (set by `maxWaitTimeMillis`) is reached.
*/
def runStreams[V: ClassManifest](
ssc: StreamingContext,
numBatches: Int,
numExpectedOutput: Int
): Seq[Seq[V]] = {
assert(numBatches > 0, "Number of batches to run stream computation is zero")
assert(numExpectedOutput > 0, "Number of expected outputs after " + numBatches + " is zero")
logInfo("numBatches = " + numBatches + ", numExpectedOutput = " + numExpectedOutput)
// Get the output buffer
val outputStream = ssc.graph.getOutputStreams.head.asInstanceOf[TestOutputStream[V]]
val output = outputStream.output
try {
// Start computation
ssc.start()
// Advance manual clock
val clock = ssc.scheduler.clock.asInstanceOf[ManualClock]
logInfo("Manual clock before advancing = " + clock.time)
if (actuallyWait) {
for (i <- 1 to numBatches) {
logInfo("Actually waiting for " + batchDuration)
clock.addToTime(batchDuration.milliseconds)
Thread.sleep(batchDuration.milliseconds)
}
} else {
clock.addToTime(numBatches * batchDuration.milliseconds)
}
logInfo("Manual clock after advancing = " + clock.time)
// Wait until expected number of output items have been generated
val startTime = System.currentTimeMillis()
while (output.size < numExpectedOutput && System.currentTimeMillis() - startTime < maxWaitTimeMillis) {
logInfo("output.size = " + output.size + ", numExpectedOutput = " + numExpectedOutput)
Thread.sleep(100)
}
val timeTaken = System.currentTimeMillis() - startTime
assert(timeTaken < maxWaitTimeMillis, "Operation timed out after " + timeTaken + " ms")
assert(output.size === numExpectedOutput, "Unexpected number of outputs generated")
Thread.sleep(500) // Give some time for the forgetting old RDDs to complete
} catch {
case e: Exception => e.printStackTrace(); throw e;
} finally {
ssc.stop()
}
output
}
/**
* Verify whether the output values after running a DStream operation
* is same as the expected output values, by comparing the output
* collections either as lists (order matters) or sets (order does not matter)
*/
def verifyOutput[V: ClassManifest](
output: Seq[Seq[V]],
expectedOutput: Seq[Seq[V]],
useSet: Boolean
) {
logInfo("--------------------------------")
logInfo("output.size = " + output.size)
logInfo("output")
output.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("expected output.size = " + expectedOutput.size)
logInfo("expected output")
expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("--------------------------------")
// Match the output with the expected output
assert(output.size === expectedOutput.size, "Number of outputs do not match")
for (i <- 0 until output.size) {
if (useSet) {
assert(output(i).toSet === expectedOutput(i).toSet)
} else {
assert(output(i).toList === expectedOutput(i).toList)
}
}
logInfo("Output verified successfully")
}
/**
* Test unary DStream operation with a list of inputs, with number of
* batches to run same as the number of expected output values
*/
  // Convenience overload: runs one batch per expected output collection.
  def testOperation[U: ClassManifest, V: ClassManifest](
      input: Seq[Seq[U]],
      operation: DStream[U] => DStream[V],
      expectedOutput: Seq[Seq[V]],
      useSet: Boolean = false
    ) {
    // -1 tells the full overload to derive the batch count from expectedOutput.size.
    testOperation[U, V](input, operation, expectedOutput, -1, useSet)
  }
/**
* Test unary DStream operation with a list of inputs
* @param input Sequence of input collections
 * @param operation Unary DStream operation to be applied to the input
* @param expectedOutput Sequence of expected output collections
* @param numBatches Number of batches to run the operation for
 * @param useSet Compare the output values with the expected output values
 *               as sets (order does not matter) or as lists (order matters)
*/
def testOperation[U: ClassManifest, V: ClassManifest](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]],
numBatches: Int,
useSet: Boolean
) {
val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size
val ssc = setupStreams[U, V](input, operation)
val output = runStreams[V](ssc, numBatches_, expectedOutput.size)
verifyOutput[V](output, expectedOutput, useSet)
}
/**
* Test binary DStream operation with two lists of inputs, with number of
* batches to run same as the number of expected output values
*/
  // Convenience overload for binary operations: runs one batch per expected
  // output collection.
  def testOperation[U: ClassManifest, V: ClassManifest, W: ClassManifest](
      input1: Seq[Seq[U]],
      input2: Seq[Seq[V]],
      operation: (DStream[U], DStream[V]) => DStream[W],
      expectedOutput: Seq[Seq[W]],
      useSet: Boolean
    ) {
    // -1 tells the full overload to derive the batch count from expectedOutput.size.
    testOperation[U, V, W](input1, input2, operation, expectedOutput, -1, useSet)
  }
/**
* Test binary DStream operation with two lists of inputs
* @param input1 First sequence of input collections
* @param input2 Second sequence of input collections
* @param operation Binary DStream operation to be applied to the 2 inputs
* @param expectedOutput Sequence of expected output collections
* @param numBatches Number of batches to run the operation for
 * @param useSet Compare the output values with the expected output values
 *               as sets (order does not matter) or as lists (order matters)
*/
def testOperation[U: ClassManifest, V: ClassManifest, W: ClassManifest](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]],
numBatches: Int,
useSet: Boolean
) {
val numBatches_ = if (numBatches > 0) numBatches else expectedOutput.size
val ssc = setupStreams[U, V, W](input1, input2, operation)
val output = runStreams[W](ssc, numBatches_, expectedOutput.size)
verifyOutput[W](output, expectedOutput, useSet)
}
}
| apache-2.0 |
tst-lsavoie/earthenterprise | earth_enterprise/src/fusion/geindexgen/Todo.cpp | 3687 | // Copyright 2017 Google Inc.
// Copyright 2020 The Open GEE Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Todo.h"
#include <khException.h>
#include <khFileUtils.h>
#include <packetfile/packetfile.h>
#include <packetfile/packetindex.h>
namespace geindexgen {
// ****************************************************************************
// *** BlendTodo
// ****************************************************************************
// Build a Todo list for a new index (no delta)
// Walks every level of every inset in the blend stack, registers each
// non-empty packet file with the index writer, and queues a reader for it.
// total_tile_visits_ accumulates the total packet count so callers can
// report overall progress.
BlendTodo::BlendTodo(geindex::BlendWriter &writer, geFilePool &file_pool,
                     const BlendStack &stack) :
    total_tile_visits_(0)
{
  fprintf(stdout, "Calculating work to be done ... ");
  fflush(stdout);
  for (unsigned int i = 0; i < stack.insets_.size(); ++i) {
    for (unsigned int j = 0; j < stack.insets_[i].levels_.size(); ++j) {
      const std::string &packet_filename =
          stack.insets_[i].levels_[j].packetfile_;
      std::uint64_t packet_count = PacketIndexReader::NumPackets(
          PacketFile::IndexFilename(packet_filename));
      // degenerate case w/ no packets. Just skip it.
      if (packet_count == 0) continue;
      std::uint32_t file_num = writer.AddExternalPacketFile(packet_filename);
      // extra defaults to 0. We add 1 so our stack order is distinct from
      // the default. It probably doesn't matter anyway, but it keeps things
      // clean.
      writer.SetPacketExtra(file_num, i+1);
      // Reader takes ownership; file_num ties it back to the writer's
      // external packet-file table registered above.
      readers_.push_back(
          TransferOwnership(
              new BlendPacketReader(file_pool, packet_filename, file_num,
                                    stack.insets_[i].levels_[j].index_version_,
                                    BlendPacketReader::PutOp)));
      total_tile_visits_ += packet_count;
    }
  }
  fprintf(stdout, "Finished.\n");
  fflush(stdout);
}
// ****************************************************************************
// *** VectorTodo
// ****************************************************************************
// Build a Todo list for a new index (no delta)
// Builds the todo list for a fresh vector index: every non-empty layer in
// the stack gets registered with the writer and paired with a reader.
VectorTodo::VectorTodo(geindex::VectorWriter &writer, geFilePool &file_pool,
                       const VectorStack &stack) :
    total_tile_visits_(0)
{
  fprintf(stdout, "Calculating work to be done ... ");
  fflush(stdout);
  for (unsigned int layer = 0; layer < stack.layers_.size(); ++layer) {
    const std::string &packet_filename = stack.layers_[layer].packetfile_;
    const std::uint64_t packet_count = PacketIndexReader::NumPackets(
        PacketFile::IndexFilename(packet_filename));
    if (packet_count == 0) {
      // Layer has no packets; nothing to index, so skip it entirely.
      continue;
    }
    const std::uint32_t file_num = writer.AddExternalPacketFile(packet_filename);
    // Reader takes ownership; file_num ties it to the writer's table entry.
    readers_.push_back(
        TransferOwnership(
            new VectorPacketReader(file_pool, packet_filename, file_num,
                                   stack.layers_[layer].index_version_,
                                   stack.layers_[layer].channel_id_,
                                   VectorPacketReader::PutOp)));
    total_tile_visits_ += packet_count;
  }
  fprintf(stdout, "Finished.\n");
  fflush(stdout);
}
} // namespace geindexgen
| apache-2.0 |
shankarh/geode | geode-core/src/main/java/org/apache/geode/admin/SystemMemberRegion.java | 9030 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.admin;
import org.apache.geode.cache.*;
import java.io.File;
/**
* Administrative interface that represent's the {@link SystemMember}'s view of one of its cache's
* {@link org.apache.geode.cache.Region}s. If the region in the remote system member is closed or
* destroyed, the methods of <code>SystemMemberRegion</code> will throw
* {@link RegionNotFoundException}.
*
* @since GemFire 3.5
* @deprecated as of 7.0 use the <code><a href=
* "{@docRoot}/org/apache/geode/management/package-summary.html">management</a></code>
* package instead
*/
public interface SystemMemberRegion {
  // attributes
  /**
   * Returns the name that identifies this region in its cache.
   *
   * @see org.apache.geode.cache.Region#getName
   */
  public String getName();

  /**
   * Returns the full path name that identifies this region in its cache.
   *
   * @see org.apache.geode.cache.Region#getFullPath
   */
  public String getFullPath();

  /**
   * Returns the names of all the subregions of this region.
   */
  public java.util.Set getSubregionNames();

  /**
   * Returns the full path of each of the subregions of this region. These paths are suitable for
   * use with {@link SystemMemberCache#getRegion}.
   */
  public java.util.Set getSubregionFullPaths();

  /**
   * Returns a description of any user attribute associated with this region. The description
   * includes the classname of the user attribute object as well as its <code>toString</code>
   * representation.
   */
  public String getUserAttribute();

  /**
   * Returns a description of any CacheLoader associated with this region.
   */
  public String getCacheLoader();

  /**
   * Returns a description of any CacheWriter associated with this region.
   */
  public String getCacheWriter();

  /**
   * Returns the <code>EvictionAttributes</code> that configure how entries in the region are
   * evicted
   */
  public EvictionAttributes getEvictionAttributes();

  /**
   * Returns a description of the CacheListener in this region's attributes. If there is more than 1
   * CacheListener defined for a region this method will return the description of the 1st
   * CacheListener returned from {@link #getCacheListeners}
   *
   * @deprecated as of 6.0 use getCacheListeners() instead
   */
  @Deprecated
  public String getCacheListener();

  /**
   * This method will return an empty array if there are no CacheListeners defined on the region. If
   * there are one or more than 1 CacheListeners defined, this method will return an array which has
   * the names of all the CacheListeners
   *
   * @return String[] the region's <code>CacheListeners</code> as a String array
   * @since GemFire 6.0
   */
  public String[] getCacheListeners();

  /**
   * Returns the KeyConstraint in this region's attributes.
   */
  public String getKeyConstraint();

  /**
   * Returns the ValueConstraint in this region's attributes.
   */
  public String getValueConstraint();

  /**
   * Returns the RegionTimeToLive time limit in this region's attributes.
   */
  public int getRegionTimeToLiveTimeLimit();

  /**
   * Returns the RegionTimeToLive action in this region's attributes.
   */
  public ExpirationAction getRegionTimeToLiveAction();

  /**
   * Returns the EntryTimeToLive time limit in this region's attributes.
   */
  public int getEntryTimeToLiveTimeLimit();

  /**
   * Returns the EntryTimeToLive action in this region's attributes.
   */
  public ExpirationAction getEntryTimeToLiveAction();

  /**
   * string describing the CustomExpiry for entry-time-to-live
   *
   * @return the CustomExpiry for entry-time-to-live
   */
  public String getCustomEntryTimeToLive();

  /**
   * Returns the RegionIdleTimeout time limit in this region's attributes.
   */
  public int getRegionIdleTimeoutTimeLimit();

  /**
   * Returns the RegionIdleTimeout action in this region's attributes.
   */
  public ExpirationAction getRegionIdleTimeoutAction();

  /**
   * Returns the EntryIdleTimeout time limit in this region's attributes.
   */
  public int getEntryIdleTimeoutTimeLimit();

  /**
   * Returns the EntryIdleTimeout action in this region's attributes.
   */
  public ExpirationAction getEntryIdleTimeoutAction();

  /**
   * string describing the CustomExpiry for entry-idle-timeout
   *
   * @return the CustomExpiry for entry-idle-timeout
   */
  public String getCustomEntryIdleTimeout();

  /**
   * Returns the MirrorType in this region's attributes.
   *
   * @deprecated as of 5.0, you should use getDataPolicy instead
   */
  @Deprecated
  public MirrorType getMirrorType();

  /**
   * Returns the DataPolicy in this region's attributes.
   */
  public DataPolicy getDataPolicy();

  /**
   * Returns the Scope in this region's attributes.
   */
  public Scope getScope();

  /**
   * Returns the InitialCapacity in this region's attributes.
   */
  public int getInitialCapacity();

  /**
   * Returns the LoadFactor in this region's attributes.
   */
  public float getLoadFactor();

  /**
   * Returns the ConcurrencyLevel in this region's attributes.
   */
  public int getConcurrencyLevel();

  /**
   * Returns whether or not conflicting concurrent operations on this region are prevented
   */
  public boolean getConcurrencyChecksEnabled();

  /**
   * Returns the StatisticsEnabled in this region's attributes.
   */
  public boolean getStatisticsEnabled();

  /**
   * Returns whether or not a persistent backup should be made of the region (as opposed to just
   * writing the overflow data to disk).
   */
  public boolean getPersistBackup();

  /**
   * Returns the <code>DiskWriteAttributes</code> that configure how the region is written to disk.
   */
  public DiskWriteAttributes getDiskWriteAttributes();

  /**
   * Returns the directories to which the region's data are written. If multiple directories are
   * used, GemFire will attempt to distribute the data evenly amongst them.
   */
  public File[] getDiskDirs();

  /**
   * Returns the number of entries currently in this region.
   */
  public int getEntryCount();

  /**
   * Returns the number of subregions currently in this region.
   */
  public int getSubregionCount();

  /**
   * Returns the LastModifiedTime obtained from this region's statistics.
   */
  public long getLastModifiedTime();

  /**
   * Returns the LastAccessedTime obtained from this region's statistics.
   */
  public long getLastAccessedTime();

  /**
   * Returns the HitCount obtained from this region's statistics.
   */
  public long getHitCount();

  /**
   * Returns the MissCount obtained from this region's statistics.
   */
  public long getMissCount();

  /**
   * Returns the HitRatio obtained from this region's statistics.
   */
  public float getHitRatio();

  /**
   * Returns whether or not acks are sent after an update is processed.
   *
   * @return False if acks are sent after updates are processed; true if acks are sent before
   *         updates are processed.
   *
   * @since GemFire 4.1
   */
  public boolean getEarlyAck();

  // operations
  /**
   * Updates the state of this region instance. Note that once a cache instance is closed refresh
   * will never change the state of its regions.
   */
  public void refresh();

  /**
   * Creates a subregion of this region.
   *
   * @param name The name of the region to create
   * @param attrs The attributes of the root region
   *
   * @throws AdminException If the region cannot be created
   *
   * @since GemFire 4.0
   */
  public SystemMemberRegion createSubregion(String name, RegionAttributes attrs)
      throws AdminException;

  /**
   * Returns the <code>MembershipAttributes</code> that configure required roles for reliable access
   * to the region.
   *
   * @deprecated this API is scheduled to be removed
   */
  public MembershipAttributes getMembershipAttributes();

  /**
   * Returns the <code>SubscriptionAttributes</code> for the region.
   *
   * @since GemFire 5.0
   */
  public SubscriptionAttributes getSubscriptionAttributes();

  /**
   * Returns the <code>PartitionAttributes</code> for the region.
   *
   * @since GemFire 5.7
   */
  public PartitionAttributes getPartitionAttributes();
}
| apache-2.0 |
sekikn/ambari | ambari-admin/src/main/resources/ui/admin-web/test/e2e/signout.js | 1654 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
describe('Ambari sign out from Admin view', function () {
  describe('Admin view', function () {
    var ptor = protractor.getInstance();
    beforeEach(function () {
      ptor.get('app/index.html');
      ptor.waitForAngular();
    });
    it('should navigate to login page on clicking "Sign out" action', function () {
      var userDropdownBtn = element(by.binding('currentUser'));
      var signOutAction = element(by.css('[ng-click="signOut()"]'));
      //Action-1: Click on user dropdown menu and
      //Action-2: Click on SignOut action link
      userDropdownBtn.click().then(function () {
        return signOutAction.click();
      }).then(function () {
        // Validation. BUG FIX: the assertion was previously wrapped in a raw
        // setTimeout, which fires only after the spec has already completed,
        // so the expectation was never actually enforced. Protractor's
        // control flow waits for the promise returned by getCurrentUrl(),
        // so asserting directly here is both synchronized and checked.
        expect(ptor.getCurrentUrl()).toContain('#/login');
      });
    });
  });
});
| apache-2.0 |
awillis/dirsrv-cookbook | recipes/_vagrant_consumer.rb | 1596 | #
# Cookbook Name:: dirsrv
# Recipe:: _vagrant_consumer
#
# Copyright 2014 Riot Games, Inc.
# Author:: Alan Willis <alwillis@riotgames.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Base 389 Directory Server setup plus the static host entries shared by the
# vagrant test topology.
include_recipe "dirsrv"
include_recipe "dirsrv::_vagrant_hosts"
# Create (and start) a consumer instance on the default LDAP port, registered
# against the shared configuration directory at 172.31.255.10.
dirsrv_instance node[:hostname] + '_389' do
  has_cfgdir true
  cfgdir_addr '172.31.255.10'
  cfgdir_domain "vagrant"
  cfgdir_ldap_port 389
  host node[:hostname] + '.vagrant'
  suffix 'o=vagrant'
  action [ :create, :start ]
end
include_recipe "dirsrv::_vagrant_replication"
# o=vagrant replica
# Mark this instance as a read-only consumer (replica id 6) for the suffix.
dirsrv_replica 'o=vagrant' do
  instance node[:hostname] + '_389'
  id 6
  role :consumer
end
# link back to proxyhub
# Replication agreement FROM this consumer (.15) TO the proxyhub (.14).
dirsrv_agreement 'consumer-proxyhub' do
  host '172.31.255.15'
  suffix 'o=vagrant'
  replica_host '172.31.255.14'
  replica_credentials 'CopyCat!'
end
# Request initialization from proxyhub
# The :create_and_initialize action triggers a full re-init of this consumer
# from the proxyhub's copy of o=vagrant.
dirsrv_agreement 'proxyhub-consumer' do
  host '172.31.255.14'
  suffix 'o=vagrant'
  replica_host '172.31.255.15'
  replica_credentials 'CopyCat!'
  action :create_and_initialize
end
| apache-2.0 |
hqstevenson/camel | components-starter/camel-core-starter/src/main/java/org/apache/camel/impl/springboot/StringDataFormatAutoConfiguration.java | 4733 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.impl.springboot;
import java.util.HashMap;
import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.CamelContextAware;
import org.apache.camel.impl.StringDataFormat;
import org.apache.camel.util.IntrospectionSupport;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionMessage;
import org.springframework.boot.autoconfigure.condition.ConditionOutcome;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.SpringBootCondition;
import org.springframework.boot.bind.RelaxedPropertyResolver;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Scope;
import org.springframework.core.type.AnnotatedTypeMetadata;
/**
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Configuration
@ConditionalOnBean(type = "org.apache.camel.spring.boot.CamelAutoConfiguration")
@Conditional(StringDataFormatAutoConfiguration.Condition.class)
@AutoConfigureAfter(name = "org.apache.camel.spring.boot.CamelAutoConfiguration")
@EnableConfigurationProperties(StringDataFormatConfiguration.class)
public class StringDataFormatAutoConfiguration {

    // Prototype scope: each injection point gets a freshly configured dataformat.
    @Bean(name = "string-dataformat")
    @Scope("prototype")
    @ConditionalOnClass(CamelContext.class)
    @ConditionalOnMissingBean(StringDataFormat.class)
    public StringDataFormat configureStringDataFormat(
            CamelContext camelContext,
            StringDataFormatConfiguration configuration) throws Exception {
        StringDataFormat dataformat = new StringDataFormat();
        // Inject the CamelContext if the dataformat is context-aware.
        if (CamelContextAware.class.isAssignableFrom(StringDataFormat.class)) {
            CamelContextAware contextAware = CamelContextAware.class
                    .cast(dataformat);
            if (contextAware != null) {
                contextAware.setCamelContext(camelContext);
            }
        }
        // Copy all properties from the Spring Boot configuration onto the dataformat.
        Map<String, Object> parameters = new HashMap<>();
        IntrospectionSupport.getProperties(configuration, parameters, null,
                false);
        IntrospectionSupport.setProperties(camelContext,
                camelContext.getTypeConverter(), dataformat, parameters);
        return dataformat;
    }

    // Matches when camel.dataformat.string.enabled is true, falling back to
    // the group-level camel.dataformat.enabled (default true).
    public static class Condition extends SpringBootCondition {
        @Override
        public ConditionOutcome getMatchOutcome(
                ConditionContext conditionContext,
                AnnotatedTypeMetadata annotatedTypeMetadata) {
            boolean groupEnabled = isEnabled(conditionContext,
                    "camel.dataformat.", true);
            ConditionMessage.Builder message = ConditionMessage
                    .forCondition("camel.dataformat.string");
            if (isEnabled(conditionContext, "camel.dataformat.string.",
                    groupEnabled)) {
                return ConditionOutcome.match(message.because("enabled"));
            }
            return ConditionOutcome.noMatch(message.because("not enabled"));
        }
        // Reads "<prefix>enabled" with relaxed binding, defaulting as given.
        private boolean isEnabled(
                org.springframework.context.annotation.ConditionContext context,
                java.lang.String prefix, boolean defaultValue) {
            RelaxedPropertyResolver resolver = new RelaxedPropertyResolver(
                    context.getEnvironment(), prefix);
            return resolver.getProperty("enabled", Boolean.class, defaultValue);
        }
    }
}
blois/AndroidSDKCloneMin | sdk/sources/android-20/com/android/rs/image/LevelsV4.java | 5183 | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.rs.image;
import java.lang.Math;
import android.renderscript.Allocation;
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.Matrix3f;
import android.renderscript.Script;
import android.renderscript.ScriptC;
import android.renderscript.Type;
import android.util.Log;
import android.widget.SeekBar;
import android.widget.TextView;
/**
 * RenderScript image test that applies a levels adjustment (black/white
 * point remap) combined with a saturation matrix. Two kernels exist
 * (full vs. relaxed float precision), each with a scalar and a vector4
 * variant; the constructor flags choose which combination runTest() uses.
 */
public class LevelsV4 extends TestBase {
    private ScriptC_levels_relaxed mScriptR;
    private ScriptC_levels_full mScriptF;
    // Levels parameters, in 0..255 pixel units.
    private float mInBlack = 0.0f;
    private float mOutBlack = 0.0f;
    private float mInWhite = 255.0f;
    private float mOutWhite = 255.0f;
    // 1.0 = unchanged saturation; 0.0 = grayscale.
    private float mSaturation = 1.0f;

    Matrix3f satMatrix = new Matrix3f();
    // Precomputed values derived from the in/out black/white points.
    float mInWMinInB;
    float mOutWMinOutB;
    float mOverInWMinInB;

    // true -> full-precision kernel; false -> relaxed-precision kernel.
    boolean mUseFull;
    // true -> float4 vectorized root; false -> scalar root.
    boolean mUseV4;

    LevelsV4(boolean useFull, boolean useV4) {
        mUseFull = useFull;
        mUseV4 = useV4;
    }

    // Pushes the derived levels constants into both scripts.
    private void setLevels() {
        mInWMinInB = mInWhite - mInBlack;
        mOutWMinOutB = mOutWhite - mOutBlack;
        mOverInWMinInB = 1.f / mInWMinInB;

        mScriptR.set_inBlack(mInBlack);
        mScriptR.set_outBlack(mOutBlack);
        mScriptR.set_inWMinInB(mInWMinInB);
        mScriptR.set_outWMinOutB(mOutWMinOutB);
        mScriptR.set_overInWMinInB(mOverInWMinInB);
        mScriptF.set_inBlack(mInBlack);
        mScriptF.set_outBlack(mOutBlack);
        mScriptF.set_inWMinInB(mInWMinInB);
        mScriptF.set_outWMinOutB(mOutWMinOutB);
        mScriptF.set_overInWMinInB(mOverInWMinInB);
    }

    // Builds the standard luminance-preserving saturation matrix
    // (lerp between the grayscale projection and identity) and uploads it.
    private void setSaturation() {
        float rWeight = 0.299f;
        float gWeight = 0.587f;
        float bWeight = 0.114f;
        float oneMinusS = 1.0f - mSaturation;

        satMatrix.set(0, 0, oneMinusS * rWeight + mSaturation);
        satMatrix.set(0, 1, oneMinusS * rWeight);
        satMatrix.set(0, 2, oneMinusS * rWeight);
        satMatrix.set(1, 0, oneMinusS * gWeight);
        satMatrix.set(1, 1, oneMinusS * gWeight + mSaturation);
        satMatrix.set(1, 2, oneMinusS * gWeight);
        satMatrix.set(2, 0, oneMinusS * bWeight);
        satMatrix.set(2, 1, oneMinusS * bWeight);
        satMatrix.set(2, 2, oneMinusS * bWeight + mSaturation);
        mScriptR.set_colorMat(satMatrix);
        mScriptF.set_colorMat(satMatrix);
    }

    // UI wiring: bar1 = saturation, bar2/3 = in/out black, bar4/5 = in/out white.
    public boolean onBar1Setup(SeekBar b, TextView t) {
        b.setProgress(50);
        t.setText("Saturation");
        return true;
    }
    public boolean onBar2Setup(SeekBar b, TextView t) {
        b.setMax(128);
        b.setProgress(0);
        t.setText("In Black");
        return true;
    }
    public boolean onBar3Setup(SeekBar b, TextView t) {
        b.setMax(128);
        b.setProgress(0);
        t.setText("Out Black");
        return true;
    }
    public boolean onBar4Setup(SeekBar b, TextView t) {
        b.setMax(128);
        b.setProgress(128);
        t.setText("In White");
        return true;
    }
    public boolean onBar5Setup(SeekBar b, TextView t) {
        b.setMax(128);
        b.setProgress(128);
        t.setText("Out White");
        return true;
    }

    // Progress 0..100 maps to saturation 0.0..2.0.
    public void onBar1Changed(int progress) {
        mSaturation = (float)progress / 50.0f;
        setSaturation();
    }
    // Black points: progress maps directly to 0..128.
    public void onBar2Changed(int progress) {
        mInBlack = (float)progress;
        setLevels();
    }
    public void onBar3Changed(int progress) {
        mOutBlack = (float)progress;
        setLevels();
    }
    // White points: progress 0..128 maps to 127..255.
    public void onBar4Changed(int progress) {
        mInWhite = (float)progress + 127.0f;
        setLevels();
    }
    public void onBar5Changed(int progress) {
        mOutWhite = (float)progress + 127.0f;
        setLevels();
    }

    public void createTest(android.content.res.Resources res) {
        mScriptR = new ScriptC_levels_relaxed(mRS, res, R.raw.levels_relaxed);
        mScriptF = new ScriptC_levels_full(mRS, res, R.raw.levels_full);
        setSaturation();
        setLevels();
    }

    // Dispatches to one of the four kernel variants selected at construction.
    public void runTest() {
        if (mUseFull) {
            if (mUseV4) {
                mScriptF.forEach_root4(mInPixelsAllocation, mOutPixelsAllocation);
            } else {
                mScriptF.forEach_root(mInPixelsAllocation, mOutPixelsAllocation);
            }
        } else {
            if (mUseV4) {
                mScriptR.forEach_root4(mInPixelsAllocation, mOutPixelsAllocation);
            } else {
                mScriptR.forEach_root(mInPixelsAllocation, mOutPixelsAllocation);
            }
        }
    }
}
| apache-2.0 |
jcamachor/hive | ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java | 18356 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.plan;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.exec.OperatorUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport.Support;
import org.apache.hadoop.hive.ql.parse.RuntimeValuesInfo;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
import org.apache.hadoop.hive.ql.optimizer.physical.VectorizerReason;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
/**
* BaseWork. Base class for any "work" that's being done on the cluster. Items like stats
* gathering that are commonly used regardless of the type of work live here.
*/
@SuppressWarnings({"serial"})
public abstract class BaseWork extends AbstractOperatorDesc {
protected static final Logger LOG = LoggerFactory.getLogger(BaseWork.class);
// dummyOps is a reference to all the HashTableDummy operators in the
// plan. These have to be separately initialized when we setup a task.
// Their function is mainly as root ops to give the mapjoin the correct
// schema info.
List<HashTableDummyOperator> dummyOps;
int tag = 0;
private final List<String> sortColNames = new ArrayList<String>();
private MapredLocalWork mrLocalWork;
  /** Default constructor, required for serialization frameworks. */
  public BaseWork() {}

  /** Creates a work unit with the given display name. */
  public BaseWork(String name) {
    setName(name);
  }
private boolean gatheringStats;
private String name;
/*
* Vectorization.
*/
// This will be true if a node was examined by the Vectorizer class.
protected boolean vectorizationExamined;
protected boolean vectorizationEnabled;
protected VectorizedRowBatchCtx vectorizedRowBatchCtx;
protected boolean useVectorizedInputFileFormat;
protected Set<Support> inputFormatSupportSet;
protected Set<Support> supportSetInUse;
protected List<String> supportRemovedReasons;
private transient VectorizerReason notVectorizedReason;
private boolean groupByVectorOutput;
private boolean allNative;
private boolean usesVectorUDFAdaptor;
protected long vectorizedVertexNum;
protected int vectorizedTestingReducerBatchSize;
private boolean isTestForcedVectorizationEnable;
private boolean isTestVectorizationSuppressExplainExecutionMode;
protected boolean llapMode = false;
protected boolean uberMode = false;
private int reservedMemoryMB = -1; // default to -1 means we leave it up to Tez to decide
// Used for value registry
private Map<String, RuntimeValuesInfo> inputSourceToRuntimeValuesInfo =
new HashMap<String, RuntimeValuesInfo>();
  /** Enables or disables statistics gathering for this work unit. */
  public void setGatheringStats(boolean gatherStats) {
    this.gatheringStats = gatherStats;
  }

  /** @return whether statistics gathering is enabled for this work unit */
  public boolean isGatheringStats() {
    return this.gatheringStats;
  }

  /** @return the display name of this work unit */
  public String getName() {
    return name;
  }

  /** Sets the display name of this work unit. */
  public void setName(String name) {
    this.name = name;
  }

  /** @return the HashTableDummy root operators registered for this work, or null */
  public List<HashTableDummyOperator> getDummyOps() {
    return dummyOps;
  }
public void setDummyOps(List<HashTableDummyOperator> dummyOps) {
if (this.dummyOps != null && !this.dummyOps.isEmpty()
&& (dummyOps == null || dummyOps.isEmpty())) {
LOG.info("Removing dummy operators from " + name + " " + this.getClass().getSimpleName());
}
this.dummyOps = dummyOps;
}
  /** Appends a HashTableDummy operator, lazily creating the backing list. */
  public void addDummyOp(HashTableDummyOperator dummyOp) {
    if (dummyOps == null) {
      dummyOps = new LinkedList<HashTableDummyOperator>();
    }
    dummyOps.add(dummyOp);
  }

  /** Replaces root operators according to the given old-to-new mapping. */
  public abstract void replaceRoots(Map<Operator<?>, Operator<?>> replacementMap);

  /** @return all root operators of this work's operator DAG */
  public abstract Set<Operator<? extends OperatorDesc>> getAllRootOperators();

  /** @return any single root operator of this work's operator DAG */
  public abstract Operator<? extends OperatorDesc> getAnyRootOperator();
public Set<Operator<?>> getAllOperators() {
Set<Operator<?>> returnSet = new LinkedHashSet<Operator<?>>();
Set<Operator<?>> opSet = getAllRootOperators();
Stack<Operator<?>> opStack = new Stack<Operator<?>>();
// add all children
opStack.addAll(opSet);
while(!opStack.empty()) {
Operator<?> op = opStack.pop();
returnSet.add(op);
if (op.getChildOperators() != null) {
opStack.addAll(op.getChildOperators());
}
}
return returnSet;
}
/**
* Returns a set containing all leaf operators from the operator tree in this work.
* @return a set containing all leaf operators in this operator tree.
*/
public Set<Operator<? extends OperatorDesc>> getAllLeafOperators() {
Set<Operator<?>> returnSet = new LinkedHashSet<Operator<?>>();
Set<Operator<?>> opSet = getAllRootOperators();
Stack<Operator<?>> opStack = new Stack<Operator<?>>();
// add all children
opStack.addAll(opSet);
while (!opStack.empty()) {
Operator<?> op = opStack.pop();
if (op.getNumChild() == 0) {
returnSet.add(op);
}
if (op.getChildOperators() != null) {
opStack.addAll(op.getChildOperators());
}
}
return returnSet;
}
  public void setVectorizedVertexNum(long vectorizedVertexNum) {
    this.vectorizedVertexNum = vectorizedVertexNum;
  }
  public long getVectorizedVertexNum() {
    return vectorizedVertexNum;
  }
  // Batch size override used by vectorization tests for reducers.
  public void setVectorizedTestingReducerBatchSize(int vectorizedTestingReducerBatchSize) {
    this.vectorizedTestingReducerBatchSize = vectorizedTestingReducerBatchSize;
  }
  public int getVectorizedTestingReducerBatchSize() {
    return vectorizedTestingReducerBatchSize;
  }
  // -----------------------------------------------------------------------------------------------
  // Flags recording the outcome of the vectorizer's examination of this work.
  public void setVectorizationExamined(boolean vectorizationExamined) {
    this.vectorizationExamined = vectorizationExamined;
  }
  public boolean getVectorizationExamined() {
    return vectorizationExamined;
  }
  public void setVectorizationEnabled(boolean vectorizationEnabled) {
    this.vectorizationEnabled = vectorizationEnabled;
  }
  public boolean getVectorizationEnabled() {
    return vectorizationEnabled;
  }
  /*
   * The vectorization context for creating the VectorizedRowBatch for the node.
   */
  public VectorizedRowBatchCtx getVectorizedRowBatchCtx() {
    return vectorizedRowBatchCtx;
  }
  public void setVectorizedRowBatchCtx(VectorizedRowBatchCtx vectorizedRowBatchCtx) {
    this.vectorizedRowBatchCtx = vectorizedRowBatchCtx;
  }
  // Why this work was not vectorized; only meaningful when vectorization was
  // enabled but vector mode was not selected.
  public void setNotVectorizedReason(VectorizerReason notVectorizedReason) {
    this.notVectorizedReason = notVectorizedReason;
  }
  public VectorizerReason getNotVectorizedReason() {
    return notVectorizedReason;
  }
  public void setUsesVectorUDFAdaptor(boolean usesVectorUDFAdaptor) {
    this.usesVectorUDFAdaptor = usesVectorUDFAdaptor;
  }
  public boolean getUsesVectorUDFAdaptor() {
    return usesVectorUDFAdaptor;
  }
  // True when every vectorized operator in this work is "native" vectorized.
  public void setAllNative(boolean allNative) {
    this.allNative = allNative;
  }
  public boolean getAllNative() {
    return allNative;
  }
  // Test-only switches controlling forced vectorization and EXPLAIN output.
  public void setIsTestForcedVectorizationEnable(boolean isTestForcedVectorizationEnable) {
    this.isTestForcedVectorizationEnable = isTestForcedVectorizationEnable;
  }
  public boolean getIsTestForcedVectorizationEnable() {
    return isTestForcedVectorizationEnable;
  }
  public void setIsTestVectorizationSuppressExplainExecutionMode(
      boolean isTestVectorizationSuppressExplainExecutionMode) {
    this.isTestVectorizationSuppressExplainExecutionMode =
        isTestVectorizationSuppressExplainExecutionMode;
  }
  public boolean getIsTestVectorizationSuppressExplainExecutionMode() {
    return isTestVectorizationSuppressExplainExecutionMode;
  }
  /**
   * Renders this work's vectorization information for EXPLAIN VECTORIZATION
   * output: summary flags (enabled/vectorized/allNative/...) and, at DETAIL
   * level, the row-batch context (columns, types, scratch columns).
   */
  public static class BaseExplainVectorization {
    private final BaseWork baseWork;
    public BaseExplainVectorization(BaseWork baseWork) {
      this.baseWork = baseWork;
    }
    /**
     * Formats each column as "projectionColumn:name:type", appending the data
     * type physical variation when it is present and not NONE.
     */
    public static List<String> getColumnAndTypes(
        int[] projectionColumns,
        String[] columnNames, TypeInfo[] typeInfos,
        DataTypePhysicalVariation[] dataTypePhysicalVariations) {
      final int size = columnNames.length;
      List<String> result = new ArrayList<String>(size);
      for (int i = 0; i < size; i++) {
        String displayString = projectionColumns[i] + ":" + columnNames[i] + ":" + typeInfos[i];
        if (dataTypePhysicalVariations != null &&
            dataTypePhysicalVariations[i] != DataTypePhysicalVariation.NONE) {
          displayString += "/" + dataTypePhysicalVariations[i].toString();
        }
        result.add(displayString);
      }
      return result;
    }
    @Explain(vectorization = Vectorization.SUMMARY, displayName = "enabled",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public boolean enabled() {
      return baseWork.getVectorizationEnabled();
    }
    // Returning null suppresses the attribute in the EXPLAIN output.
    @Explain(vectorization = Vectorization.SUMMARY, displayName = "vectorized",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public Boolean vectorized() {
      if (!baseWork.getVectorizationEnabled()) {
        return null;
      }
      return baseWork.getVectorMode();
    }
    // Only shown when vectorization was enabled but vector mode was not chosen.
    @Explain(vectorization = Vectorization.SUMMARY, displayName = "notVectorizedReason",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public String notVectorizedReason() {
      if (!baseWork.getVectorizationEnabled() || baseWork.getVectorMode()) {
        return null;
      }
      VectorizerReason notVectorizedReason = baseWork.getNotVectorizedReason();
      if (notVectorizedReason == null) {
        return "Unknown";
      }
      return notVectorizedReason.toString();
    }
    @Explain(vectorization = Vectorization.SUMMARY, displayName = "allNative",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public Boolean nativeVectorized() {
      if (!baseWork.getVectorMode()) {
        return null;
      }
      return baseWork.getAllNative();
    }
    /**
     * Formats count columns starting at startIndex as "name:type", appending
     * the data type physical variation when it is not NONE.
     */
    public static List<String> getColumns(VectorizedRowBatchCtx vectorizedRowBatchCtx,
        int startIndex, int count) {
      String[] rowColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
      TypeInfo[] rowColumnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
      DataTypePhysicalVariation[] dataTypePhysicalVariations =
          vectorizedRowBatchCtx.getRowdataTypePhysicalVariations();
      List<String> result = new ArrayList<String>(count);
      final int end = startIndex + count;
      for (int i = startIndex; i < end; i++) {
        String displayString = rowColumnNames[i] + ":" + rowColumnTypeInfos[i];
        if (dataTypePhysicalVariations != null &&
            dataTypePhysicalVariations[i] != DataTypePhysicalVariation.NONE) {
          displayString += "/" + dataTypePhysicalVariations[i].toString();
        }
        result.add(displayString);
      }
      return result;
    }
    /**
     * Renders the scratch column type names (with physical variations) as a
     * single bracketed list string, e.g. "[bigint, decimal(10,2)/DECIMAL_64]".
     */
    public static String getScratchColumns(VectorizedRowBatchCtx vectorizedRowBatchCtx) {
      String[] scratchColumnTypeNames = vectorizedRowBatchCtx.getScratchColumnTypeNames();
      DataTypePhysicalVariation[] scratchDataTypePhysicalVariations =
          vectorizedRowBatchCtx.getScratchDataTypePhysicalVariations();
      final int size = scratchColumnTypeNames.length;
      List<String> result = new ArrayList<String>(size);
      for (int i = 0; i < size; i++) {
        String displayString = scratchColumnTypeNames[i];
        if (scratchDataTypePhysicalVariations != null &&
            scratchDataTypePhysicalVariations[i] != DataTypePhysicalVariation.NONE) {
          displayString += "/" + scratchDataTypePhysicalVariations[i].toString();
        }
        result.add(displayString);
      }
      // Intentionally returns the List's toString() rendering.
      return result.toString();
    }
    @Explain(vectorization = Vectorization.SUMMARY, displayName = "usesVectorUDFAdaptor",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public Boolean usesVectorUDFAdaptor() {
      if (!baseWork.getVectorMode()) {
        return null;
      }
      return baseWork.getUsesVectorUDFAdaptor();
    }
    /** DETAIL-level EXPLAIN rendering of a VectorizedRowBatchCtx. */
    public static class RowBatchContextExplainVectorization {
      private final VectorizedRowBatchCtx vectorizedRowBatchCtx;
      public RowBatchContextExplainVectorization(VectorizedRowBatchCtx vectorizedRowBatchCtx) {
        this.vectorizedRowBatchCtx = vectorizedRowBatchCtx;
      }
      // Data columns occupy the first getDataColumnCount() slots of the batch.
      @Explain(vectorization = Vectorization.DETAIL, displayName = "dataColumns",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public List<String> getDataColumns() {
        return getColumns(
            vectorizedRowBatchCtx,
            0,
            vectorizedRowBatchCtx.getDataColumnCount());
      }
      // Partition columns follow the data columns in the batch layout.
      @Explain(vectorization = Vectorization.DETAIL, displayName = "partitionColumns",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public List<String> getPartitionColumns() {
        return getColumns(
            vectorizedRowBatchCtx,
            vectorizedRowBatchCtx.getDataColumnCount(),
            vectorizedRowBatchCtx.getPartitionColumnCount());
      }
      @Explain(vectorization = Vectorization.DETAIL, displayName = "includeColumns",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public String getDataColumnNums() {
        int[] dataColumnNums = vectorizedRowBatchCtx.getDataColumnNums();
        if (dataColumnNums == null) {
          return null;
        }
        return Arrays.toString(dataColumnNums);
      }
      @Explain(vectorization = Vectorization.DETAIL, displayName = "dataColumnCount",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public int getDataColumnCount() {
        return vectorizedRowBatchCtx.getDataColumnCount();
      }
      @Explain(vectorization = Vectorization.DETAIL, displayName = "partitionColumnCount",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public int getPartitionColumnCount() {
        return vectorizedRowBatchCtx.getPartitionColumnCount();
      }
      @Explain(vectorization = Vectorization.DETAIL, displayName = "scratchColumnTypeNames",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public String getScratchColumnTypeNames() {
        return getScratchColumns(vectorizedRowBatchCtx);
      }
      @Explain(vectorization = Vectorization.DETAIL, displayName = "neededVirtualColumns",
          explainLevels = { Level.DEFAULT, Level.EXTENDED })
      public String getNeededVirtualColumns() {
        VirtualColumn[] neededVirtualColumns = vectorizedRowBatchCtx.getNeededVirtualColumns();
        if (neededVirtualColumns == null || neededVirtualColumns.length == 0) {
          return null;
        }
        return Arrays.toString(neededVirtualColumns);
      }
    }
    // Row-batch context is only meaningful once the work is in vector mode.
    @Explain(vectorization = Vectorization.DETAIL, displayName = "rowBatchContext",
        explainLevels = { Level.DEFAULT, Level.EXTENDED })
    public RowBatchContextExplainVectorization vectorizedRowBatchContext() {
      if (!baseWork.getVectorMode()) {
        return null;
      }
      return new RowBatchContextExplainVectorization(baseWork.getVectorizedRowBatchCtx());
    }
  }
// -----------------------------------------------------------------------------------------------
  /**
   * @return the mapredLocalWork
   */
  @Explain(displayName = "Local Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
  public MapredLocalWork getMapRedLocalWork() {
    return mrLocalWork;
  }
  /**
   * @param mapLocalWork
   *          the mapredLocalWork to set
   */
  public void setMapRedLocalWork(final MapredLocalWork mapLocalWork) {
    this.mrLocalWork = mapLocalWork;
  }
  // Execution-mode flags (uber AM / LLAP) for this work.
  public void setUberMode(boolean uberMode) {
    this.uberMode = uberMode;
  }
  public boolean getUberMode() {
    return uberMode;
  }
  public void setLlapMode(boolean llapMode) {
    this.llapMode = llapMode;
  }
  public boolean getLlapMode() {
    return llapMode;
  }
  /** @return reserved memory in MB; -1 means no explicit reservation. */
  public int getReservedMemoryMB() {
    return reservedMemoryMB;
  }
  public void setReservedMemoryMB(int memoryMB) {
    reservedMemoryMB = memoryMB;
  }
  /**
   * Applies the table-specific JobConf configuration of every FileSinkOperator
   * reachable from this work's root operators.
   */
  public void configureJobConf(JobConf job) {
    OperatorUtils.findOperators(getAllRootOperators(), FileSinkOperator.class).forEach(fs -> {
      LOG.debug("Configuring JobConf for table {}.{}", fs.getConf().getTableInfo().getDbName(),
          fs.getConf().getTableInfo().getTableName());
      PlanUtils.configureJobConf(fs.getConf().getTableInfo(), job);
    });
  }
  public void setTag(int tag) {
    this.tag = tag;
  }
  @Explain(displayName = "tag", explainLevels = { Level.USER })
  public int getTag() {
    return tag;
  }
  // Names of the columns this work's output is sorted by.
  public void addSortCols(List<String> sortCols) {
    this.sortColNames.addAll(sortCols);
  }
  public List<String> getSortCols() {
    return sortColNames;
  }
  public Map<String, RuntimeValuesInfo> getInputSourceToRuntimeValuesInfo() {
    return inputSourceToRuntimeValuesInfo;
  }
  // Registers (or replaces) the runtime-values info for the given source work.
  public void setInputSourceToRuntimeValuesInfo(
      String workName, RuntimeValuesInfo runtimeValuesInfo) {
    inputSourceToRuntimeValuesInfo.put(workName, runtimeValuesInfo);
  }
}
| apache-2.0 |
cloudfoundry-incubator/bosh-google-cpi-release | src/bosh-google-cpi/google/instance_group_service/google_instance_group_service_find_by_instance.go | 586 | package instancegroup
// FindByInstance returns the name of the instance group (in the given zone)
// that contains the instance identified by vmLink. The boolean result reports
// whether such a group was found.
func (i GoogleInstanceGroupService) FindByInstance(vmLink string, zone string) (string, bool, error) {
	// Unfortunately, there is no direct way to find what instance group is attached to an instance,
	// so we need to list all instance groups and look up for the instance
	instanceGroups, err := i.List(zone)
	if err != nil {
		return "", false, err
	}
	for _, instanceGroup := range instanceGroups {
		for _, instance := range instanceGroup.Instances {
			if instance == vmLink {
				// Found: report the first group that references this instance.
				return instanceGroup.Name, true, nil
			}
		}
	}
	// Not an error: the instance simply belongs to no group in this zone.
	return "", false, nil
}
| apache-2.0 |
gopal1cloud/neutron | neutron/db/metering/metering_db.py | 10297 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import constants
from neutron.db import api as dbapi
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import metering
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
class MeteringLabelRule(model_base.BASEV2, models_v2.HasId):
    """DB model for a single traffic-matching rule of a metering label."""
    # Traffic direction this rule matches ('ingress' or 'egress').
    direction = sa.Column(sa.Enum('ingress', 'egress',
                                  name='meteringlabels_direction'))
    # CIDR the rule matches against.
    remote_ip_prefix = sa.Column(sa.String(64))
    # Owning label; rules are removed when the label is deleted (CASCADE).
    metering_label_id = sa.Column(sa.String(36),
                                  sa.ForeignKey("meteringlabels.id",
                                                ondelete="CASCADE"),
                                  nullable=False)
    # When True, matching traffic is excluded from the label's counters.
    excluded = sa.Column(sa.Boolean, default=False)
class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """DB model for a metering label grouping a set of metering rules."""
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    # Rules belonging to this label; eagerly loaded and cascade-deleted.
    rules = orm.relationship(MeteringLabelRule, backref="label",
                             cascade="delete", lazy="joined")
    # All routers of the same tenant; joined on tenant_id rather than a
    # foreign key, so a label applies to every router the tenant owns.
    routers = orm.relationship(
        l3_db.Router,
        primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
        foreign_keys='MeteringLabel.tenant_id',
        uselist=True)
class MeteringDbMixin(metering.MeteringPluginBase,
                      base_db.CommonDbMixin):
    """DB-backed implementation of the Neutron metering extension."""
    def __init__(self):
        dbapi.register_models()
        # RPC notifier used to push metering changes to metering agents.
        self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
    def _make_metering_label_dict(self, metering_label, fields=None):
        """Convert a MeteringLabel model into an API dict (filtered by fields)."""
        res = {'id': metering_label['id'],
               'name': metering_label['name'],
               'description': metering_label['description'],
               'tenant_id': metering_label['tenant_id']}
        return self._fields(res, fields)
    def create_metering_label(self, context, metering_label):
        """Create a metering label for the request's tenant."""
        m = metering_label['metering_label']
        tenant_id = self._get_tenant_id_for_create(context, m)
        with context.session.begin(subtransactions=True):
            metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
                                        description=m['description'],
                                        tenant_id=tenant_id,
                                        name=m['name'])
            context.session.add(metering_db)
        return self._make_metering_label_dict(metering_db)
    def delete_metering_label(self, context, label_id):
        """Delete a label; raises MeteringLabelNotFound if it does not exist."""
        with context.session.begin(subtransactions=True):
            try:
                label = self._get_by_id(context, MeteringLabel, label_id)
            except orm.exc.NoResultFound:
                raise metering.MeteringLabelNotFound(label_id=label_id)
            context.session.delete(label)
    def get_metering_label(self, context, label_id, fields=None):
        """Return one metering label as an API dict."""
        try:
            metering_label = self._get_by_id(context, MeteringLabel, label_id)
        except orm.exc.NoResultFound:
            raise metering.MeteringLabelNotFound(label_id=label_id)
        return self._make_metering_label_dict(metering_label, fields)
    def get_metering_labels(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        """Return a (possibly paginated/sorted) collection of labels."""
        marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
                                          marker)
        return self._get_collection(context, MeteringLabel,
                                    self._make_metering_label_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit,
                                    marker_obj=marker_obj,
                                    page_reverse=page_reverse)
    def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
        """Convert a MeteringLabelRule model into an API dict."""
        res = {'id': metering_label_rule['id'],
               'metering_label_id': metering_label_rule['metering_label_id'],
               'direction': metering_label_rule['direction'],
               'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
               'excluded': metering_label_rule['excluded']}
        return self._fields(res, fields)
    def get_metering_label_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        """Return a (possibly paginated/sorted) collection of label rules."""
        marker_obj = self._get_marker_obj(context, 'metering_label_rules',
                                          limit, marker)
        return self._get_collection(context, MeteringLabelRule,
                                    self._make_metering_label_rule_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit,
                                    marker_obj=marker_obj,
                                    page_reverse=page_reverse)
    def get_metering_label_rule(self, context, rule_id, fields=None):
        """Return one metering label rule as an API dict."""
        try:
            metering_label_rule = self._get_by_id(context,
                                                  MeteringLabelRule, rule_id)
        except orm.exc.NoResultFound:
            raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
        return self._make_metering_label_rule_dict(metering_label_rule, fields)
    def _validate_cidr(self, context, label_id, remote_ip_prefix,
                       direction, excluded):
        """Reject a new rule whose CIDR overlaps an existing rule's CIDR.

        Only rules of the same label with the same direction and excluded
        flag are considered for the overlap check.
        """
        r_ips = self.get_metering_label_rules(context,
                                              filters={'metering_label_id':
                                                       label_id,
                                                       'direction':
                                                       [direction],
                                                       'excluded':
                                                       [excluded]},
                                              fields=['remote_ip_prefix'])
        cidrs = [r['remote_ip_prefix'] for r in r_ips]
        new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
        # A non-empty intersection means the new prefix overlaps an existing one.
        if (netaddr.IPSet(cidrs) & new_cidr_ipset):
            raise metering.MeteringLabelRuleOverlaps(remote_ip_prefix=
                                                     remote_ip_prefix)
    def create_metering_label_rule(self, context, metering_label_rule):
        """Create a rule after validating that its CIDR does not overlap."""
        m = metering_label_rule['metering_label_rule']
        with context.session.begin(subtransactions=True):
            label_id = m['metering_label_id']
            ip_prefix = m['remote_ip_prefix']
            direction = m['direction']
            excluded = m['excluded']
            self._validate_cidr(context, label_id, ip_prefix, direction,
                                excluded)
            metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
                                            metering_label_id=label_id,
                                            direction=direction,
                                            excluded=m['excluded'],
                                            remote_ip_prefix=ip_prefix)
            context.session.add(metering_db)
        return self._make_metering_label_rule_dict(metering_db)
    def delete_metering_label_rule(self, context, rule_id):
        """Delete a rule; raises MeteringLabelRuleNotFound if it is missing."""
        with context.session.begin(subtransactions=True):
            try:
                rule = self._get_by_id(context, MeteringLabelRule, rule_id)
            except orm.exc.NoResultFound:
                raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
            context.session.delete(rule)
    def _get_metering_rules_dict(self, metering_label):
        """Return the label's rules as a list of API dicts."""
        rules = []
        for rule in metering_label.rules:
            rule_dict = self._make_metering_label_rule_dict(rule)
            rules.append(rule_dict)
        return rules
    def _make_router_dict(self, router):
        """Convert a router model to a dict with an empty metering-label slot."""
        res = {'id': router['id'],
               'name': router['name'],
               'tenant_id': router['tenant_id'],
               'admin_state_up': router['admin_state_up'],
               'status': router['status'],
               'gw_port_id': router['gw_port_id'],
               constants.METERING_LABEL_KEY: []}
        return res
    def _process_sync_metering_data(self, labels):
        """Group the labels (with their rules) by the routers they apply to."""
        routers_dict = {}
        for label in labels:
            routers = label.routers
            for router in routers:
                router_dict = routers_dict.get(
                    router['id'],
                    self._make_router_dict(router))
                rules = self._get_metering_rules_dict(label)
                data = {'id': label['id'], 'rules': rules}
                router_dict[constants.METERING_LABEL_KEY].append(data)
                routers_dict[router['id']] = router_dict
        return routers_dict.values()
    def get_sync_data_metering(self, context, label_id=None, router_ids=None):
        """Return per-router metering data for agent synchronization.

        label_id, when given, restricts to a single label; otherwise
        router_ids restricts to labels attached to those routers.
        """
        labels = context.session.query(MeteringLabel)
        if label_id:
            labels = labels.filter(MeteringLabel.id == label_id)
        elif router_ids:
            labels = (labels.join(MeteringLabel.routers).
                      filter(l3_db.Router.id.in_(router_ids)))
        return self._process_sync_metering_data(labels)
| apache-2.0 |
OscarSwanros/swift | lib/SILOptimizer/ARC/RCStateTransitionVisitors.cpp | 14052 | //===--- RCStateTransitionVisitors.cpp ------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arc-sequence-opts"
#include "RCStateTransitionVisitors.h"
#include "ARCBBState.h"
#include "swift/SILOptimizer/Analysis/ARCAnalysis.h"
#include "swift/SILOptimizer/Analysis/RCIdentityAnalysis.h"
#include "llvm/Support/Debug.h"
using namespace swift;
namespace {
// Shorthand for the per-basic-block ARC dataflow state.
using ARCBBState = ARCSequenceDataflowEvaluator::ARCBBState;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
/// Return true if this instruction is the epilogue release for the \p Arg.
/// false otherwise.
static bool isOwnedArgumentEpilogueRelease(SILInstruction *I, SILValue Arg,
                                           EpilogueARCFunctionInfo *EAFI) {
  auto Releases =
    EAFI->computeEpilogueARCInstructions(
      EpilogueARCContext::EpilogueARCKind::Release, Arg);
  // Non-empty set AND membership: I must be one of Arg's epilogue releases.
  return Releases.size() && Releases.count(I);
}
/// Return true if \p Arg has epilogue releases and \p I is NOT one of them,
/// i.e. \p I is guaranteed to execute before Arg's epilogue release(s).
static bool isGuaranteedSafetyByEpilogueRelease(SILInstruction *I, SILValue Arg,
                                                EpilogueARCFunctionInfo *EAFI) {
  auto Releases =
    EAFI->computeEpilogueARCInstructions(
      EpilogueARCContext::EpilogueARCKind::Release, Arg);
  return Releases.size() && !Releases.count(I);
}
//===----------------------------------------------------------------------===//
// BottomUpRCStateTransitionVisitor
//===----------------------------------------------------------------------===//
// Constructor: simply wires up the analyses, the dataflow state, and the
// output map used to record matched increment/decrement pairs.
template <class ARCState>
BottomUpDataflowRCStateVisitor<ARCState>::BottomUpDataflowRCStateVisitor(
    RCIdentityFunctionInfo *RCFI, EpilogueARCFunctionInfo *EAFI,
    ARCState &State, bool FreezeOwnedArgEpilogueReleases,
    IncToDecStateMapTy &IncToDecStateMap,
    ImmutablePointerSetFactory<SILInstruction> &SetFactory)
    : RCFI(RCFI), EAFI(EAFI), DataflowState(State),
      FreezeOwnedArgEpilogueReleases(FreezeOwnedArgEpilogueReleases),
      IncToDecStateMap(IncToDecStateMap), SetFactory(SetFactory) {}
// An autorelease-pool call can release arbitrary objects, so all tracked
// state must be conservatively invalidated.
template <class ARCState>
typename BottomUpDataflowRCStateVisitor<ARCState>::DataflowResult
BottomUpDataflowRCStateVisitor<ARCState>::
visitAutoreleasePoolCall(SILNode *N) {
  DataflowState.clear();
  // We just cleared our BB State so we have no more possible effects.
  return DataflowResult(RCStateTransitionDataflowResultKind::NoEffects);
}
// private helper method since C++ does not have extensions... *sigh*.
//
// TODO: This needs a better name.
//
// Decide whether the reference-count decrement \p I on root \p Op is "known
// safe" for the bottom-up dataflow, i.e. the object is guaranteed to stay
// alive independently of this decrement.
template <class ARCState>
static bool isKnownSafe(BottomUpDataflowRCStateVisitor<ARCState> *State,
                        SILInstruction *I, SILValue Op) {
  // If we are running with 'frozen' owned arg releases, check if we have a
  // frozen use in the side table. If so, this release must be known safe.
  if (State->FreezeOwnedArgEpilogueReleases)
    if (isGuaranteedSafetyByEpilogueRelease(I, Op, State->EAFI))
      return true;
  // A guaranteed function argument is guaranteed to outlive the function we are
  // processing. So bottom up for such a parameter, we are always known safe.
  if (auto *Arg = dyn_cast<SILFunctionArgument>(Op)) {
    if (Arg->hasConvention(SILArgumentConvention::Direct_Guaranteed)) {
      return true;
    }
  }
  // If Op is a load from an in_guaranteed parameter, it is guaranteed as well.
  if (auto *LI = dyn_cast<LoadInst>(Op)) {
    SILValue RCIdentity = State->RCFI->getRCIdentityRoot(LI->getOperand());
    if (auto *Arg = dyn_cast<SILFunctionArgument>(RCIdentity)) {
      if (Arg->hasConvention(SILArgumentConvention::Indirect_In_Guaranteed)) {
        return true;
      }
    }
  }
  return false;
}
// Bottom-up handler for a strong decrement (e.g. strong_release): start
// tracking state for the decremented RC root.
template <class ARCState>
typename BottomUpDataflowRCStateVisitor<ARCState>::DataflowResult
BottomUpDataflowRCStateVisitor<ARCState>::visitStrongDecrement(SILNode *N) {
  auto *I = dyn_cast<SILInstruction>(N);
  if (!I)
    return DataflowResult();
  SILValue Op = RCFI->getRCIdentityRoot(I->getOperand(0));
  // If this instruction is a post dominating release, skip it so we don't pair
  // it up with anything. Do make sure that it does not effect any other
  // instructions.
  if (FreezeOwnedArgEpilogueReleases && isOwnedArgumentEpilogueRelease(I, Op, EAFI))
    return DataflowResult(Op);
  BottomUpRefCountState &State = DataflowState.getBottomUpRefCountState(Op);
  bool NestingDetected = State.initWithMutatorInst(SetFactory.get(I), RCFI);
  if (isKnownSafe(this, I, Op)) {
    State.updateKnownSafe(true);
  }
  DEBUG(llvm::dbgs() << " REF COUNT DECREMENT! Known Safe: "
                     << (State.isKnownSafe() ? "yes" : "no") << "\n");
  // Continue on to see if our reference decrement could potentially affect
  // any other pointers via a use or a decrement.
  return DataflowResult(Op, NestingDetected);
}
// Bottom-up handler for a strong increment (e.g. strong_retain): try to pair
// it with a decrement tracked earlier in the (bottom-up) walk.
template <class ARCState>
typename BottomUpDataflowRCStateVisitor<ARCState>::DataflowResult
BottomUpDataflowRCStateVisitor<ARCState>::visitStrongIncrement(SILNode *N) {
  auto *I = dyn_cast<SILInstruction>(N);
  if (!I)
    return DataflowResult();
  // Look up the state associated with its operand...
  SILValue Op = RCFI->getRCIdentityRoot(I->getOperand(0));
  auto &RefCountState = DataflowState.getBottomUpRefCountState(Op);
  DEBUG(llvm::dbgs() << " REF COUNT INCREMENT!\n");
  // If we find a state initialized with a matching increment, pair this
  // decrement with a copy of the ref count state and then clear the ref
  // count state in preparation for any future pairs we may see on the same
  // pointer.
  if (RefCountState.isRefCountInstMatchedToTrackedInstruction(I)) {
    // Copy the current value of ref count state into the result map.
    IncToDecStateMap[I] = RefCountState;
    DEBUG(llvm::dbgs() << " MATCHING DECREMENT:"
                       << RefCountState.getRCRoot());
    // Clear the ref count state so it can be used for future pairs we may
    // see.
    RefCountState.clear();
  }
#ifndef NDEBUG
  else {
    if (RefCountState.isTrackingRefCountInst()) {
      DEBUG(llvm::dbgs() << " FAILED MATCH DECREMENT:"
                         << RefCountState.getRCRoot());
    } else {
      DEBUG(llvm::dbgs() << " FAILED MATCH DECREMENT. Not tracking a "
                            "decrement.\n");
    }
  }
#endif
  return DataflowResult(Op);
}
//===----------------------------------------------------------------------===//
// TopDownDataflowRCStateVisitor
//===----------------------------------------------------------------------===//
// Constructor: wires up the RC-identity analysis, the dataflow state, and the
// output map recording matched decrement -> increment pairs.
template <class ARCState>
TopDownDataflowRCStateVisitor<ARCState>::TopDownDataflowRCStateVisitor(
    RCIdentityFunctionInfo *RCFI, ARCState &DataflowState,
    DecToIncStateMapTy &DecToIncStateMap,
    ImmutablePointerSetFactory<SILInstruction> &SetFactory)
    : RCFI(RCFI), DataflowState(DataflowState),
      DecToIncStateMap(DecToIncStateMap), SetFactory(SetFactory) {}
// An autorelease-pool call can touch arbitrary objects; conservatively drop
// all tracked state.
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::
visitAutoreleasePoolCall(SILNode *N) {
  DataflowState.clear();
  // We just cleared our BB State so we have no more possible effects.
  return DataflowResult(RCStateTransitionDataflowResultKind::NoEffects);
}
// Top-down handler for a strong decrement: try to pair it with an increment
// tracked earlier in the (top-down) walk.
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::visitStrongDecrement(SILNode *N) {
  auto *I = dyn_cast<SILInstruction>(N);
  if (!I)
    return DataflowResult();
  // Look up the state associated with I's operand...
  SILValue Op = RCFI->getRCIdentityRoot(I->getOperand(0));
  auto &RefCountState = DataflowState.getTopDownRefCountState(Op);
  DEBUG(llvm::dbgs() << " REF COUNT DECREMENT!\n");
  // If we are tracking an increment on the ref count root associated with
  // the decrement and the decrement matches, pair this decrement with a
  // copy of the increment state and then clear the original increment state
  // so that we are ready to process further values.
  if (RefCountState.isRefCountInstMatchedToTrackedInstruction(I)) {
    // Copy the current value of ref count state into the result map.
    DecToIncStateMap[I] = RefCountState;
    DEBUG(llvm::dbgs() << " MATCHING INCREMENT:\n"
                       << RefCountState.getRCRoot());
    // Clear the ref count state in preparation for more pairs.
    RefCountState.clear();
  }
// Fixed: was '#if NDEBUG', which inverted the intent — the diagnostic branch
// was compiled only in release builds (where DEBUG output is disabled) and
// never in debug builds. Use '#ifndef NDEBUG' to match the bottom-up
// counterpart visitStrongIncrement above.
#ifndef NDEBUG
  else {
    if (RefCountState.isTrackingRefCountInst()) {
      // Also use getRCRoot() here as in the bottom-up visitor; the previous
      // getValue() call was never compiled in debug builds.
      DEBUG(llvm::dbgs() << " FAILED MATCH INCREMENT:\n"
                         << RefCountState.getRCRoot());
    } else {
      DEBUG(llvm::dbgs() << " FAILED MATCH. NO INCREMENT.\n");
    }
  }
#endif
  // Otherwise we continue processing the reference count decrement to see if
  // the decrement can affect any other pointers that we are tracking.
  return DataflowResult(Op);
}
// Top-down handler for a strong increment: begin tracking state for the
// incremented RC root.
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::visitStrongIncrement(SILNode *N) {
  auto *I = dyn_cast<SILInstruction>(N);
  if (!I)
    return DataflowResult();
  // Map the increment's operand to a newly initialized or reinitialized ref
  // count state and continue...
  SILValue Op = RCFI->getRCIdentityRoot(I->getOperand(0));
  auto &State = DataflowState.getTopDownRefCountState(Op);
  bool NestingDetected = State.initWithMutatorInst(SetFactory.get(I), RCFI);
  DEBUG(llvm::dbgs() << " REF COUNT INCREMENT! Known Safe: "
                     << (State.isKnownSafe() ? "yes" : "no") << "\n");
  // Continue processing in case this increment could be a CanUse for a
  // different pointer.
  return DataflowResult(Op, NestingDetected);
}
// Owned function arguments enter the function at +1, so they seed top-down
// tracking state. Non-owned arguments are ignored.
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::visitStrongEntranceArgument(
    SILFunctionArgument *Arg) {
  DEBUG(llvm::dbgs() << "VISITING ENTRANCE ARGUMENT: " << *Arg);
  if (!Arg->hasConvention(SILArgumentConvention::Direct_Owned)) {
    DEBUG(llvm::dbgs() << " Not owned! Bailing!\n");
    return DataflowResult();
  }
  DEBUG(llvm::dbgs() << " Initializing state.\n");
  auto &State = DataflowState.getTopDownRefCountState(Arg);
  State.initWithArg(Arg);
  return DataflowResult();
}
// An apply with an owned result introduces a +1 reference; seed tracking
// state for it. The caller is expected to have checked the ownership.
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::
visitStrongEntranceApply(ApplyInst *AI) {
  DEBUG(llvm::dbgs() << "VISITING ENTRANCE APPLY: " << *AI);
  // We should have checked earlier that AI has an owned result value. To
  // prevent mistakes, assert that here.
#ifndef NDEBUG
  bool hasOwnedResult = false;
  for (auto result : AI->getSubstCalleeConv().getDirectSILResults()) {
    if (result.getConvention() == ResultConvention::Owned)
      hasOwnedResult = true;
  }
  assert(hasOwnedResult && "Expected AI to be Owned here");
#endif
  // Otherwise, return a dataflow result containing a +1.
  DEBUG(llvm::dbgs() << " Initializing state.\n");
  auto &State = DataflowState.getTopDownRefCountState(AI);
  State.initWithEntranceInst(SetFactory.get(AI), AI);
  return DataflowResult(AI);
}
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::
visitStrongEntranceAllocRef(AllocRefInst *ARI) {
  // Alloc refs always introduce new references at +1.
  TopDownRefCountState &State = DataflowState.getTopDownRefCountState(ARI);
  State.initWithEntranceInst(SetFactory.get(ARI), ARI);
  return DataflowResult(ARI);
}
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::
visitStrongEntranceAllocRefDynamic(AllocRefDynamicInst *ARI) {
  // Alloc ref dynamic always introduce references at +1.
  auto &State = DataflowState.getTopDownRefCountState(ARI);
  State.initWithEntranceInst(SetFactory.get(ARI), ARI);
  return DataflowResult(ARI);
}
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::
visitStrongAllocBox(AllocBoxInst *ABI) {
  // Alloc box introduces a ref count of +1 on its container.
  auto &State = DataflowState.getTopDownRefCountState(ABI);
  State.initWithEntranceInst(SetFactory.get(ABI), ABI);
  return DataflowResult(ABI);
}
// Dispatch on the concrete kind of node that introduces a +1 reference.
template <class ARCState>
typename TopDownDataflowRCStateVisitor<ARCState>::DataflowResult
TopDownDataflowRCStateVisitor<ARCState>::
visitStrongEntrance(SILNode *N) {
  if (auto *Arg = dyn_cast<SILFunctionArgument>(N))
    return visitStrongEntranceArgument(Arg);
  if (auto *AI = dyn_cast<ApplyInst>(N))
    return visitStrongEntranceApply(AI);
  if (auto *ARI = dyn_cast<AllocRefInst>(N))
    return visitStrongEntranceAllocRef(ARI);
  if (auto *ARI = dyn_cast<AllocRefDynamicInst>(N))
    return visitStrongEntranceAllocRefDynamic(ARI);
  if (auto *ABI = dyn_cast<AllocBoxInst>(N))
    return visitStrongAllocBox(ABI);
  return DataflowResult();
}
//===----------------------------------------------------------------------===//
// Template Instantiation
//===----------------------------------------------------------------------===//
namespace swift {
// Explicitly instantiate the visitors for both the basic-block based and the
// region based ARC dataflow states, so their definitions may stay in this
// translation unit.
template class BottomUpDataflowRCStateVisitor<ARCBBState>;
template class BottomUpDataflowRCStateVisitor<ARCRegionState>;
template class TopDownDataflowRCStateVisitor<ARCBBState>;
template class TopDownDataflowRCStateVisitor<ARCRegionState>;
} // namespace swift
| apache-2.0 |
reaction1989/roslyn | src/EditorFeatures/Core/Extensibility/SignatureHelp/SignatureHelpItemEventArgs.cs | 579 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using Microsoft.CodeAnalysis.SignatureHelp;
namespace Microsoft.CodeAnalysis.Editor
{
/// <summary>
/// Event payload that carries the <see cref="SignatureHelpItem"/>
/// associated with a signature-help event.
/// </summary>
internal class SignatureHelpItemEventArgs : EventArgs
{
    /// <summary>The signature-help item this event refers to.</summary>
    public SignatureHelpItem SignatureHelpItem { get; }

    public SignatureHelpItemEventArgs(SignatureHelpItem signatureHelpItem)
    {
        SignatureHelpItem = signatureHelpItem;
    }
}
}
| apache-2.0 |
jpettitt/amphtml | extensions/amp-mustache/0.1/test/test-amp-mustache.js | 23337 | import * as sanitizer from '../../../../src/sanitizer';
import * as service from '../../../../src/service-helpers';
import {AmpMustache} from '../amp-mustache';
import mustache from '#third_party/mustache/mustache';
describes.repeated(
'amp-mustache 0.1',
{
'with script[type=text/plain][template=amp-mustache]': {
templateType: 'script',
},
'with template[type=amp-mustache]': {templateType: 'template'},
},
(name, variant, env) => {
let viewerCanRenderTemplates = false;
beforeEach(() => {
  // Stub the service locator so the code under test sees a viewer whose
  // template-rendering capability is controlled by the outer
  // `viewerCanRenderTemplates` flag.
  const getServiceForDocStub = env.sandbox.stub(
    service,
    'getServiceForDoc'
  );
  getServiceForDocStub.returns({
    hasCapability: (unused) => viewerCanRenderTemplates,
  });
});
let innerHtmlSetup;
let template;
let templateElement;
let textContentSetup;
let isTemplateType;
let isTemplateTypeScript;
beforeEach(() => {
  // Build the template element for the current variant:
  // <script type="amp-mustache"> or <template type="amp-mustache">.
  const {templateType} = variant;
  templateElement = document.createElement(templateType);
  if (templateType == 'script') {
    templateElement.setAttribute('type', 'amp-mustache');
  }
  template = new AmpMustache(templateElement);
  isTemplateTypeScript = templateType == 'script';
  isTemplateType = templateType == 'template';
  // Writes raw text into the variant-appropriate slot:
  // template.content for <template>, textContent for <script>.
  textContentSetup = (contents) => {
    if (isTemplateType) {
      templateElement.content.textContent = contents;
    } else if (isTemplateTypeScript) {
      templateElement.textContent = contents;
    }
  };
  // Writes markup; a <script> template stores it as plain text.
  innerHtmlSetup = (html) => {
    if (isTemplateType) {
      templateElement.innerHTML = html;
    } else if (isTemplateTypeScript) {
      templateElement.textContent = html;
    }
  };
});
afterEach(() => {
  // Reset the viewer-capability flag between tests.
  viewerCanRenderTemplates = false;
});
it('should render', () => {
  // Basic {{value}} interpolation, checked through both render()
  // (DOM output) and renderAsString().
  textContentSetup('value = {{value}}');
  template.compileCallback();
  const data = {value: 'abc'};
  expect(template.render(data).innerHTML).to.equal('value = abc');
  expect(template.renderAsString(data)).to.equal('value = abc');
});
it('should render {{.}} from string', () => {
  // {{.}} renders the data value itself when the data is a bare string.
  textContentSetup('value = {{.}}');
  template.compileCallback();
  const data = 'abc';
  expect(template.render(data).innerHTML).to.equal('value = abc');
  expect(template.renderAsString(data)).to.equal('value = abc');
});
it('should sanitize output', () => {
innerHtmlSetup('value = <a href="{{value}}">abc</a>');
template.compileCallback();
const data = {
value: /*eslint no-script-url: 0*/ 'javascript:alert();',
};
allowConsoleError(() => {
expect(template.render(data).innerHTML).to.equal(
'value = <a target="_top">abc</a>'
);
expect(template.renderAsString(data)).to.equal(
'value = <a target="_top">abc</a>'
);
});
});
it('should sanitize templated tag names', () => {
innerHtmlSetup(
'value = <{{value}} href="javascript:alert(0)">abc</{{value}}>'
);
template.compileCallback();
const data = {value: 'a'};
expect(template.render(data).innerHTML).to.not.equal(
'<a href="javascript:alert(0)">abc</a>'
);
expect(template.renderAsString(data)).to.not.equal(
'<a href="javascript:alert(0)">abc</a>'
);
});
it('should unwrap output on compile', () => {
  // When the rendered output is a single element, render() returns that
  // element directly instead of wrapping it in a container.
  innerHtmlSetup('<a>abc</a>');
  template.compileCallback();
  const result = template.render({});
  expect(result.tagName).to.equal('A');
  expect(result.innerHTML).to.equal('abc');
  expect(template.renderAsString({})).to.equal('<a>abc</a>');
});
it('should render fragments', () => {
innerHtmlSetup('<a>abc</a><a>def</a>');
template.compileCallback();
const result = template.render({});
expect(result.tagName).to.equal('DIV');
expect(result.innerHTML).to.equal('<a>abc</a><a>def</a>');
expect(template.renderAsString({})).to.equal('<a>abc</a><a>def</a>');
});
describe('Sanitizing data- attributes', () => {
it('should sanitize templated attribute names', () => {
innerHtmlSetup('value = <a {{value}}="javascript:alert(0)">abc</a>');
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: 'href',
});
expect(result).to.not.equal('<a href="javascript:alert(0)">abc</a>');
expect(result.firstElementChild.getAttribute('href')).to.be.null;
});
});
it('should sanitize templated bind attribute names', () => {
innerHtmlSetup('value = <p [{{value}}]="javascript:alert()">ALERT</p>');
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: 'onclick',
});
expect(result).to.not.equal(
'<p [onclick]="javascript:alert()">ALERT</p>'
);
expect(result.firstElementChild.getAttribute('[onclick]')).to.be.null;
expect(result.firstElementChild.getAttribute('onclick')).to.be.null;
});
});
it('should parse data-&style=value output correctly', () => {
innerHtmlSetup(
'value = <a href="{{value}}" data-&style="color:red;">abc</a>'
);
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: /*eslint no-script-url: 0*/ 'javascript:alert();',
});
expect(result.innerHTML).to.equal(
'value = <a data-="" style="color:red;" target="_top">abc</a>'
);
});
});
it('should parse data-&attr=value output correctly', () => {
innerHtmlSetup('value = <a data-&href="{{value}}">abc</a>');
template.compileCallback();
const result = template.render({
value: 'https://google.com/',
});
expect(result.innerHTML).to.equal(
'value = <a data-=""' +
' href="https://google.com/" target="_top">abc</a>'
);
});
it('should allow for data-attr=value to output correctly', () => {
innerHtmlSetup(
'value = ' +
'<a data-my-attr="{{invalidValue}}"' +
'data-my-id="{{value}}">abc</a>'
);
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: 'myid',
invalidValue: /*eslint no-script-url: 0*/ 'javascript:alert();',
});
expect(result.innerHTML).to.equal(
'value = <a data-my-id="myid">abc</a>'
);
});
});
});
describe('Rendering Form Fields', () => {
it('should allow rendering inputs', () => {
innerHtmlSetup(
'value = ' +
'<input value="{{value}}"' +
'type="text" onchange="{{invalidValue}}">'
);
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: 'myid',
invalidValue: /*eslint no-script-url: 0*/ 'javascript:alert();',
});
expect(result.innerHTML).to.equal(
'value = <input value="myid" type="text">'
);
});
});
it('should allow rendering textarea', () => {
innerHtmlSetup('value = <textarea>{{value}}</textarea>');
template.compileCallback();
const data = {value: 'Cool story bro.'};
expect(template.render(data).innerHTML).to.equal(
'value = <textarea>Cool story bro.</textarea>'
);
expect(template.renderAsString(data)).to.equal(
'value = <textarea>Cool story bro.</textarea>'
);
});
it('should not allow image/file input types rendering', () => {
innerHtmlSetup('value = <input value="{{value}}" type="{{type}}">');
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: 'myid',
type: 'image',
});
expect(result.innerHTML).to.equal('value = <input value="myid">');
});
allowConsoleError(() => {
const result = template.render({
value: 'myid',
type: 'button',
});
expect(result.innerHTML).to.equal('value = <input value="myid">');
});
const fileResult = template.render({
value: 'myid',
type: 'file',
});
expect(fileResult.innerHTML).to.equal(
'value = <input value="myid" type="file">'
);
const passwordResult = template.render({
value: 'myid',
type: 'password',
});
expect(passwordResult.innerHTML).to.equal(
'value = <input value="myid" type="password">'
);
});
it('should allow text input type rendering', () => {
innerHtmlSetup('value = <input value="{{value}}" type="{{type}}">');
template.compileCallback();
const result = template.render({
value: 'myid',
type: 'text',
});
expect(result.innerHTML).to.equal(
'value = <input value="myid" type="text">'
);
});
it('should sanitize form-related attrs properly', () => {
innerHtmlSetup(
'value = ' +
'<input value="{{value}}" ' +
'formaction="javascript:javascript:alert(1)" ' +
'formmethod="get" form="form1" formtarget="blank" formnovalidate ' +
'formenctype="">'
);
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value: 'myid',
});
expect(result.innerHTML).to.equal('value = <input value="myid">');
});
});
it('should not sanitize form tags', () => {
innerHtmlSetup(
'value = ' +
'<form><input value="{{value}}"></form><input value="hello">'
);
template.compileCallback();
const result = template.render({
value: 'myid',
});
expect(result.innerHTML).to.equal(
'value = <form><input value="myid"></form><input value="hello">'
);
});
});
describe('Nested templates', () => {
it('should not sanitize nested amp-mustache templates', () => {
innerHtmlSetup(
'text before a template ' +
'<template type="amp-mustache">text inside template</template> ' +
'text after a template'
);
template.compileCallback();
const result = template.render({});
expect(result.innerHTML).to.equal(
'text before a template ' +
'<template type="amp-mustache">text inside template</template> ' +
'text after a template'
);
});
if (isTemplateType) {
it('should sanitize nested templates without type="amp-mustache"', () => {
innerHtmlSetup(
'text before a template ' +
'<template>text inside template</template> ' +
'text after a template'
);
template.compileCallback();
const result = template.render({});
expect(result.innerHTML).to.equal(
'text before a template text after a template'
);
});
it('should not render variables inside a nested template', () => {
innerHtmlSetup(
'outer: {{outerOnlyValue}} {{mutualValue}} ' +
'<template type="amp-mustache">nested: {{nestedOnlyValue}}' +
' {{mutualValue}}</template>'
);
template.compileCallback();
const result = template.render({
outerOnlyValue: 'Outer',
mutualValue: 'Mutual',
nestedOnlyValue: 'Nested',
});
expect(result.innerHTML).to.equal(
'outer: Outer Mutual ' +
'<template type="amp-mustache">nested: {{nestedOnlyValue}}' +
' {{mutualValue}}</template>'
);
});
it('should compile and render nested templates when invoked', () => {
const outerTemplateElement = document.createElement('template');
outerTemplateElement.innerHTML =
'outer: {{value}} ' +
'<template type="amp-mustache">nested: {{value}}</template>';
const outerTemplate = new AmpMustache(outerTemplateElement);
outerTemplate.compileCallback();
const outerResult = outerTemplate.render({
value: 'Outer',
});
const nestedTemplateElement = outerResult.querySelector('template');
const nestedTemplate = new AmpMustache(nestedTemplateElement);
nestedTemplate.compileCallback();
const nestedResult = nestedTemplate.render({
value: 'Nested',
});
expect(nestedResult.innerHTML).to.equal('nested: Nested');
});
it('should sanitize the inner template when it gets rendered', () => {
const outerTemplateElement = document.createElement('template');
outerTemplateElement.innerHTML =
'outer: {{value}} ' +
'<template type="amp-mustache">' +
'<div onclick="javascript:alert(\'I am evil\')">nested</div>: ' +
'{{value}}</template>';
const outerTemplate = new AmpMustache(outerTemplateElement);
outerTemplate.compileCallback();
const outerResult = outerTemplate.render({
value: 'Outer',
});
const nestedTemplateElement = outerResult.querySelector('template');
const nestedTemplate = new AmpMustache(nestedTemplateElement);
nestedTemplate.compileCallback();
allowConsoleError(() => {
const nestedResult = nestedTemplate.render({
value: 'Nested',
});
expect(nestedResult.innerHTML).to.equal(
'<div>nested</div>: Nested'
);
});
});
it(
'should not allow users to pass data having key that starts with ' +
'__AMP_NESTED_TEMPLATE_0 when there is a nested template',
() => {
templateElement.innerHTML =
'outer: {{value}} ' +
'<template type="amp-mustache">nested: {{value}}</template>';
template.compileCallback();
const result = template.render({
__AMP_NESTED_TEMPLATE_0: 'MUST NOT RENDER THIS',
value: 'Outer',
});
expect(result.innerHTML).to.equal(
'outer: Outer ' +
'<template type="amp-mustache">nested: {{value}}</template>'
);
}
);
it(
'should render user data with a key __AMP_NESTED_TEMPLATE_0 when' +
' there are no nested templates, even though it is not a weird name' +
' for a template variable',
() => {
templateElement.innerHTML = '{{__AMP_NESTED_TEMPLATE_0}}';
template.compileCallback();
const result = template.render({
__AMP_NESTED_TEMPLATE_0: '123',
});
expect(result.innerHTML).to.equal('123');
}
);
}
});
describe('triple-mustache', () => {
it('should sanitize formatting related elements', () => {
textContentSetup('value = {{{value}}}');
template.compileCallback();
const result = template.render({
value:
'<b>abc</b><img><div>def</div>' +
'<br><code></code><del></del><em></em>' +
'<i></i><ins></ins><mark></mark><s></s>' +
'<small></small><strong></strong><sub></sub>' +
'<sup></sup><time></time><u></u><hr>',
});
expect(result.innerHTML).to.equal(
'value = <b>abc</b><div>def</div>' +
'<br><code></code><del></del><em></em>' +
'<i></i><ins></ins><mark></mark><s></s>' +
'<small></small><strong></strong><sub></sub>' +
'<sup></sup><time></time><u></u><hr>'
);
});
it('should sanitize table related elements and anchor tags', () => {
textContentSetup('value = {{{value}}}');
template.compileCallback();
const result = template.render({
value:
'<table class="valid-class">' +
'<colgroup><col><col></colgroup>' +
'<caption>caption</caption>' +
'<thead><tr><th colspan="2">header</th></tr></thead>' +
'<tbody><tr><td>' +
'<a href="http://www.google.com">google</a>' +
'</td></tr></tbody>' +
'<tfoot><tr>' +
'<td colspan="2"><span>footer</span></td>' +
'</tr></tfoot>' +
'</table>',
});
expect(result.innerHTML).to.equal(
'value = <table class="valid-class">' +
'<colgroup><col><col></colgroup>' +
'<caption>caption</caption>' +
'<thead><tr><th colspan="2">header</th></tr></thead>' +
'<tbody><tr><td>' +
'<a href="http://www.google.com/" target="_top">google</a>' +
'</td></tr></tbody>' +
'<tfoot><tr>' +
'<td colspan="2"><span>footer</span></td>' +
'</tr></tfoot>' +
'</table>'
);
});
it('should sanitize tags, removing unsafe attributes', () => {
textContentSetup('value = {{{value}}}');
template.compileCallback();
allowConsoleError(() => {
const result = template.render({
value:
'<a href="javascript:alert(\'XSS\')">test</a>' +
'<img src="x" onerror="alert(\'XSS\')" />',
});
expect(result.innerHTML).to.equal(
'value = <a target="_top">test</a>'
);
});
});
});
describe('tables', () => {
beforeEach(() => {
textContentSetup(
'<table>' +
'<tbody>' +
'<tr>' +
'<td>{{content}}</td>' +
'</tr>' +
'{{#replies}}' +
'<tr>' +
'<td>{{content}}</td>' +
'</tr>' +
'{{/replies}}' +
'</tbody>' +
'</table>'
);
template.compileCallback();
});
if (isTemplateTypeScript) {
it('should not foster text nodes in script template', () => {
return allowConsoleError(() => {
const data = {
'content': 'Howdy',
'replies': [{'content': 'hi'}],
};
const result =
'<tbody>' +
'<tr>' +
'<td>Howdy</td>' +
'</tr>' +
'<tr>' +
'<td>hi</td>' +
'</tr>' +
'</tbody>';
expect(template.render(data).innerHTML).to.equal(result);
expect(template.renderAsString(data)).to.equal(result);
});
});
}
if (isTemplateType) {
it(
'should foster text nodes in template[type="amp-mustache"] ' +
'destroying the templating',
() => {
return allowConsoleError(() => {
const result = template.render({
'content': 'Howdy',
'replies': [{'content': 'hi'}],
});
// Given the mustache markup {{#replies}} is hoisted.
// Expect the rendered HTML not to be what's expected.
expect(result.innerHTML).to.equal(
'<tbody>' +
'<tr>' +
'<td>Comment:</td>' +
'<td>Howdy</td>' +
'</tr>' +
'<tr>' +
'<td>Reply:</td>' +
'<td>Howdy</td>' +
'</tr>' +
'</tbody>'
);
});
}
);
}
});
describe('viewer can render templates', () => {
beforeEach(() => {
viewerCanRenderTemplates = true;
});
it('should not call mustache parsing', () => {
env.sandbox.spy(mustache, 'parse');
template.compileCallback();
expect(mustache.parse).to.have.not.been.called;
});
it('should not mustache render but still sanitize html', () => {
env.sandbox.spy(sanitizer, 'sanitizeHtml');
env.sandbox.spy(mustache, 'render');
template.setHtml('<div>test</div>');
expect(mustache.render).to.have.not.been.called;
expect(sanitizer.sanitizeHtml).to.have.been.called;
});
});
describe('setHtml()', () => {
it('should unwrap singular element output', () => {
template.compileCallback();
const result = template.setHtml('<a>abc</a>');
expect(result).to.have.length(1);
expect(result[0].tagName).to.equal('A');
expect(result[0].innerHTML).to.equal('abc');
});
it('should wrap singular text node output', () => {
template.compileCallback();
const result = template.setHtml('abc');
expect(result).to.have.length(1);
expect(result[0].tagName).to.equal('DIV');
expect(result[0].innerHTML).to.equal('abc');
});
it('should unwrap output with many elements', () => {
template.compileCallback();
const result = template.setHtml('<a>abc</a><a>def</a>');
expect(result).to.have.length(2);
const {0: first, 1: second} = result;
expect(first.tagName).to.equal('A');
expect(first.innerHTML).to.equal('abc');
expect(second.tagName).to.equal('A');
expect(second.innerHTML).to.equal('def');
});
it('should unwrap output with many elements and wrap text nodes', () => {
const html = `<a>abc</a>
def
<a>ghi </a>`;
template.compileCallback();
const result = template.setHtml(html);
expect(result).to.have.length(3);
const {0: first, 1: second, 2: third} = result;
expect(first.tagName).to.equal('A');
expect(first.innerHTML).to.equal('abc');
expect(second.tagName).to.equal('DIV');
expect(second.innerHTML).to.equal('def');
expect(third.tagName).to.equal('A');
expect(third.innerHTML).to.equal('ghi ');
});
it('should unwrap output with many elements and preserve subtrees', () => {
const html = `
<div>
<a>abc</a>
</div>
def
<a>ghi </a>`;
template.compileCallback();
const result = template.setHtml(html);
expect(result).to.have.length(3);
const {0: first, 1: second, 2: third} = result;
expect(first.tagName).to.equal('DIV');
expect(first.children).to.have.length(1);
expect(first.firstElementChild.tagName).to.equal('A');
expect(first.firstElementChild.innerHTML).to.equal('abc');
expect(second.tagName).to.equal('DIV');
expect(second.innerHTML).to.equal('def');
expect(third.tagName).to.equal('A');
expect(third.innerHTML).to.equal('ghi ');
});
});
}
);
| apache-2.0 |
bitbouncer/avro | lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroMultipleOutputs.java | 22063 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.List;
import java.util.Set;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Collections;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
import org.apache.avro.Schema;
import org.apache.hadoop.io.NullWritable;
/**
* The AvroMultipleOutputs class simplifies writing Avro output data
* to multiple outputs
*
* <p>
* Case one: writing to additional outputs other than the job default output.
*
* Each additional output, or named output, may be configured with its own
* <code>Schema</code> and <code>OutputFormat</code>.
* A named output can be a single file or a multi file. The later is refered as
* a multi named output which is an unbound set of files all sharing the same
* <code>Schema</code>.
* </p>
* <p>
* Case two: to write data to different files provided by user
* </p>
*
* <p>
* AvroMultipleOutputs supports counters, by default they are disabled. The
* counters group is the {@link AvroMultipleOutputs} class name. The names of the
* counters are the same as the output name. These count the number of records
* written to each output name. For multi
* named outputs the name of the counter is the concatenation of the named
* output, and underscore '_' and the multiname.
* </p>
*
* Usage pattern for job submission:
* <pre>
*
* JobConf job = new JobConf();
*
* FileInputFormat.setInputPath(job, inDir);
* FileOutputFormat.setOutputPath(job, outDir);
*
* job.setMapperClass(MyAvroMapper.class);
* job.setReducerClass(HadoopReducer.class);
* job.set("avro.reducer",MyAvroReducer.class);
* ...
*
* Schema schema;
* ...
* // Defines additional single output 'avro1' for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro1", AvroOutputFormat.class,
* schema);
*
* // Defines additional output 'avro2' with different schema for the job
* AvroMultipleOutputs.addNamedOutput(job, "avro2",
* AvroOutputFormat.class,
* null); // if Schema is specified as null then the default output schema is used
* ...
*
* job.waitForCompletion(true);
* ...
* </pre>
* <p>
* Usage in Reducer:
* <pre>
*
* public class MyAvroReducer extends
* AvroReducer<K, V, OUT> {
* private MultipleOutputs amos;
*
*
* public void configure(JobConf conf) {
* ...
* amos = new AvroMultipleOutputs(conf);
* }
*
* public void reduce(K, Iterator<V> values,
* AvroCollector<OUT>, Reporter reporter)
* throws IOException {
* ...
* amos.collect("avro1", reporter,datum);
* amos.getCollector("avro2", "A", reporter).collect(datum);
* amos.collect("avro1",reporter,schema,datum,"testavrofile");// this creates a file testavrofile and writes data with schema "schema" into it
* and uses other values from namedoutput "avro1" like outputclass etc.
* amos.collect("avro1",reporter,schema,datum,"testavrofile1");
* ...
* }
*
* public void close() throws IOException {
* amos.close();
* ...
* }
*
* }
* </pre>
*/
public class AvroMultipleOutputs {
private static final String NAMED_OUTPUTS = "mo.namedOutputs";
private static final String MO_PREFIX = "mo.namedOutput.";
private static final String FORMAT = ".avro";
private static final String MULTI = ".multi";
private static final String COUNTERS_ENABLED = "mo.counters";
/**
* Counters group used by the counters of MultipleOutputs.
*/
private static final String COUNTERS_GROUP = AvroMultipleOutputs.class.getName();
/**
* Checks if a named output is alreadyDefined or not.
*
* @param conf job conf
* @param namedOutput named output names
* @param alreadyDefined whether the existence/non-existence of
* the named output is to be checked
* @throws IllegalArgumentException if the output name is alreadyDefined or
* not depending on the value of the
* 'alreadyDefined' parameter
*/
private static void checkNamedOutput(JobConf conf, String namedOutput,
    boolean alreadyDefined) {
  List<String> definedChannels = getNamedOutputsList(conf);
  // alreadyDefined == true: the name must be new (used when adding).
  // alreadyDefined == false: the name must already exist (used when reading).
  if (alreadyDefined && definedChannels.contains(namedOutput)) {
    throw new IllegalArgumentException("Named output '" + namedOutput +
      "' already alreadyDefined");
  } else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
    throw new IllegalArgumentException("Named output '" + namedOutput +
      "' not defined");
  }
}
/**
* Checks if a named output name is valid token.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
/*
 * Validates that a named-output name is a non-empty run of ASCII letters
 * and digits; throws IllegalArgumentException otherwise.
 */
private static void checkTokenName(String namedOutput) {
  // Reject null/empty names up front.
  if (namedOutput == null || namedOutput.length() == 0) {
    throw new IllegalArgumentException(
      "Name cannot be NULL or empty");
  }
  // Only ASCII letters and digits are permitted (no Unicode categories).
  for (int i = 0; i < namedOutput.length(); i++) {
    char ch = namedOutput.charAt(i);
    boolean valid = (ch >= 'A' && ch <= 'Z')
        || (ch >= 'a' && ch <= 'z')
        || (ch >= '0' && ch <= '9');
    if (!valid) {
      throw new IllegalArgumentException(
        "Name cannot have a '" + ch + "' char");
    }
  }
}
/**
* Checks if a named output name is valid.
*
* @param namedOutput named output Name
* @throws IllegalArgumentException if the output name is not valid.
*/
/*
 * Validates a named-output name: must be a letters/digits token and must
 * not collide with the default output prefix "part".
 */
private static void checkNamedOutputName(String namedOutput) {
  checkTokenName(namedOutput);
  // "part" is reserved for the job's default output files.
  if ("part".equals(namedOutput)) {
    throw new IllegalArgumentException(
      "Named output name cannot be 'part'");
  }
}
/**
* Returns list of channel names.
*
* @param conf job conf
* @return List of channel Names
*/
public static List<String> getNamedOutputsList(JobConf conf) {
  // Named outputs are stored as one space-separated string under
  // NAMED_OUTPUTS; split it back into individual names.
  List<String> names = new ArrayList<String>();
  StringTokenizer st = new StringTokenizer(conf.get(NAMED_OUTPUTS, ""), " ");
  while (st.hasMoreTokens()) {
    names.add(st.nextToken());
  }
  return names;
}
/**
* Returns if a named output is multiple.
*
* @param conf job conf
* @param namedOutput named output
* @return <code>true</code> if the name output is multi, <code>false</code>
* if it is single. If the name output is not defined it returns
* <code>false</code>
*/
public static boolean isMultiNamedOutput(JobConf conf, String namedOutput) {
  // Throws IllegalArgumentException if the named output was never added.
  checkNamedOutput(conf, namedOutput, false);
  return conf.getBoolean(MO_PREFIX + namedOutput + MULTI, false);
}
/**
* Returns the named output OutputFormat.
*
* @param conf job conf
* @param namedOutput named output
* @return namedOutput OutputFormat
*/
public static Class<? extends OutputFormat> getNamedOutputFormatClass(
    JobConf conf, String namedOutput) {
  // Throws IllegalArgumentException if the named output was never added.
  checkNamedOutput(conf, namedOutput, false);
  return conf.getClass(MO_PREFIX + namedOutput + FORMAT, null,
    OutputFormat.class);
}
/**
* Adds a named output for the job.
* <p/>
*
* @param conf job conf to add the named output
* @param namedOutput named output name, it has to be a word, letters
* and numbers only, cannot be the word 'part' as
* that is reserved for the
* default output.
* @param outputFormatClass OutputFormat class.
* @param schema Schema to used for this namedOutput
*/
public static void addNamedOutput(JobConf conf, String namedOutput,
    Class<? extends OutputFormat> outputFormatClass,
    Schema schema) {
  // Convenience overload for a single (non-multi) named output.
  addNamedOutput(conf, namedOutput, false, outputFormatClass, schema);
}
/**
* Adds a multi named output for the job.
* <p/>
*
* @param conf job conf to add the named output
* @param namedOutput named output name, it has to be a word, letters
* and numbers only, cannot be the word 'part' as
* that is reserved for the
* default output.
* @param outputFormatClass OutputFormat class.
* @param schema Schema to used for this namedOutput
*/
public static void addMultiNamedOutput(JobConf conf, String namedOutput,
    Class<? extends OutputFormat> outputFormatClass,
    Schema schema) {
  // Convenience overload for a multi named output (unbounded file set).
  addNamedOutput(conf, namedOutput, true, outputFormatClass, schema);
}
/**
* Adds a named output for the job.
* <p/>
*
* @param conf job conf to add the named output
* @param namedOutput named output name, it has to be a word, letters
* and numbers only, cannot be the word 'part' as
* that is reserved for the
* default output.
* @param multi indicates if the named output is multi
* @param outputFormatClass OutputFormat class.
* @param schema Schema to used for this namedOutput
*/
/*
 * Registers a named output in the job conf: records its (optional) schema,
 * appends the name to the NAMED_OUTPUTS list, and stores its OutputFormat
 * class and multi flag under the MO_PREFIX keys.
 *
 * Fix: removed the unused local `isMapOnly` (dead read of
 * conf.getNumReduceTasks()) and braced the single-statement if.
 */
private static void addNamedOutput(JobConf conf, String namedOutput,
    boolean multi,
    Class<? extends OutputFormat> outputFormatClass,
    Schema schema) {
  checkNamedOutputName(namedOutput);
  checkNamedOutput(conf, namedOutput, true);
  // A null schema means "use the job's default output schema" at runtime.
  if (schema != null) {
    conf.set(MO_PREFIX + namedOutput + ".schema", schema.toString());
  }
  conf.set(NAMED_OUTPUTS, conf.get(NAMED_OUTPUTS, "") + " " + namedOutput);
  conf.setClass(MO_PREFIX + namedOutput + FORMAT, outputFormatClass,
      OutputFormat.class);
  conf.setBoolean(MO_PREFIX + namedOutput + MULTI, multi);
}
/**
* Enables or disables counters for the named outputs.
* <p/>
* By default these counters are disabled.
* <p/>
* MultipleOutputs supports counters; by default they are disabled.
* The counters group is the {@link AvroMultipleOutputs} class name.
* </p>
* The names of the counters are the same as the named outputs. For multi
* named outputs the name of the counter is the concatenation of the named
* output, and underscore '_' and the multiname.
*
* @param conf job conf in which to enable or disable the counters.
* @param enabled indicates if the counters will be enabled or not.
*/
public static void setCountersEnabled(JobConf conf, boolean enabled) {
  // The flag is read once per instance in the AvroMultipleOutputs ctor.
  conf.setBoolean(COUNTERS_ENABLED, enabled);
}
/**
* Returns if the counters for the named outputs are enabled or not.
* <p/>
* By default these counters are disabled.
* <p/>
* MultipleOutputs supports counters; by default they are disabled.
* The counters group is the {@link AvroMultipleOutputs} class name.
* </p>
* The names of the counters are the same as the named outputs. For multi
* named outputs the name of the counter is the concatenation of the named
* output, and underscore '_' and the multiname.
*
*
* @param conf job conf from which to read the counters flag.
* @return TRUE if the counters are enabled, FALSE if they are disabled.
*/
public static boolean getCountersEnabled(JobConf conf) {
  // Counters are off unless explicitly enabled via setCountersEnabled().
  return conf.getBoolean(COUNTERS_ENABLED, false);
}
// instance code, to be used from Mapper/Reducer code
private JobConf conf;
private OutputFormat outputFormat;
private Set<String> namedOutputs;
private Map<String, RecordWriter> recordWriters;
private boolean countersEnabled;
/**
* Creates and initializes multiple named outputs support, it should be
* instantiated in the Mapper/Reducer configure method.
*
* @param job the job configuration object
*/
public AvroMultipleOutputs(JobConf job) {
  this.conf = job;
  // All named outputs are written through this wrapper output format.
  outputFormat = new InternalFileOutputFormat();
  // Immutable snapshot of the configured named-output names.
  namedOutputs = Collections.unmodifiableSet(
    new HashSet<String>(AvroMultipleOutputs.getNamedOutputsList(job)));
  // Cache of record writers, keyed by base file name (see getRecordWriter).
  recordWriters = new HashMap<String, RecordWriter>();
  countersEnabled = getCountersEnabled(job);
}
/**
* Returns iterator with the defined name outputs.
*
* @return iterator with the defined named outputs
*/
public Iterator<String> getNamedOutputs() {
  // Iterates over the unmodifiable snapshot taken at construction time.
  return namedOutputs.iterator();
}
// by being synchronized MultipleOutputTask can be use with a
// MultithreaderMapRunner.
/*
 * Lazily creates (and caches) the RecordWriter for a given base file name.
 * Synchronized so the instance can be shared by a multithreaded map runner.
 *
 * Fix: the "Counters are enabled, Reporter cannot be NULL" validation was
 * performed twice on the creation path; it is now checked once up front.
 *
 * @param namedOutput  named output whose format/schema settings to use
 * @param baseFileName output file name, also the cache key
 * @param reporter     reporter; must be non-null when counters are enabled
 * @param schema       optional per-call schema override (may be null)
 * @throws IOException if the underlying OutputFormat cannot create a writer
 */
private synchronized RecordWriter getRecordWriter(String namedOutput,
    String baseFileName,
    final Reporter reporter, Schema schema)
    throws IOException {
  RecordWriter writer = recordWriters.get(baseFileName);
  if (writer == null) {
    // Validate once; the original duplicated this check after creation.
    if (countersEnabled && reporter == null) {
      throw new IllegalArgumentException(
        "Counters are enabled, Reporter cannot be NULL");
    }
    // Allow a per-call schema override for this named output.
    if (schema != null) {
      conf.set(MO_PREFIX + namedOutput + ".schema", schema.toString());
    }
    JobConf jobConf = new JobConf(conf);
    jobConf.set(InternalFileOutputFormat.CONFIG_NAMED_OUTPUT, namedOutput);
    FileSystem fs = FileSystem.get(conf);
    writer = outputFormat.getRecordWriter(fs, jobConf, baseFileName, reporter);
    if (countersEnabled) {
      // Wrap so every write also bumps the per-output counter.
      writer = new RecordWriterWithCounter(writer, baseFileName, reporter);
    }
    recordWriters.put(baseFileName, writer);
  }
  return writer;
}
// Delegating RecordWriter that increments a counter in COUNTERS_GROUP on
// every write. The counter name is the base file name of the output.
private static class RecordWriterWithCounter implements RecordWriter {
  private RecordWriter writer;
  private String counterName;
  private Reporter reporter;

  public RecordWriterWithCounter(RecordWriter writer, String counterName,
      Reporter reporter) {
    this.writer = writer;
    this.counterName = counterName;
    this.reporter = reporter;
  }

  @SuppressWarnings({"unchecked"})
  public void write(Object key, Object value) throws IOException {
    // Count first, then delegate the actual write.
    reporter.incrCounter(COUNTERS_GROUP, counterName, 1);
    writer.write(key, value);
  }

  public void close(Reporter reporter) throws IOException {
    writer.close(reporter);
  }
}
/**
 * Output Collector for the default schema.
 * <p/>
 *
 * @param namedOutput the named output name (also used as the output file name)
 * @param reporter the reporter
 * @param datum output data
 * @throws IOException thrown if output collector could not be created
 */
public void collect(String namedOutput, Reporter reporter, Object datum) throws IOException {
  getCollector(namedOutput, reporter).collect(datum);
}
/**
 * OutputCollector with custom schema.
 * <p/>
 *
 * @param namedOutput the named output name (this will be the output file name)
 * @param reporter the reporter
 * @param datum output data
 * @param schema schema to use for this output
 * @throws IOException thrown if output collector could not be created
 */
public void collect(String namedOutput, Reporter reporter, Schema schema, Object datum) throws IOException {
  getCollector(namedOutput, reporter, schema).collect(datum);
}
/**
 * OutputCollector with custom schema and file name.
 * <p/>
 *
 * @param namedOutput the named output name
 * @param reporter the reporter
 * @param baseOutputPath output file name to use
 * @param datum output data
 * @param schema schema to use for this output
 * @throws IOException thrown if output collector could not be created
 */
public void collect(String namedOutput, Reporter reporter, Schema schema, Object datum, String baseOutputPath) throws IOException {
  getCollector(namedOutput, null, reporter, baseOutputPath, schema).collect(datum);
}
/**
 * Gets the output collector for a named output.
 * <p/>
 *
 * @param namedOutput the named output name
 * @param reporter the reporter
 * @return the output collector for the given named output
 * @throws IOException thrown if output collector could not be created
 * @deprecated Use {@link #collect} method for collecting output
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public AvroCollector getCollector(String namedOutput, Reporter reporter)
    throws IOException {
  // No multi-name and no custom schema: the named output doubles as the file name.
  return getCollector(namedOutput, null, reporter, namedOutput, null);
}
// Convenience overload: named output with an explicit schema, file named after
// the output itself.
@SuppressWarnings("rawtypes")
private AvroCollector getCollector(String namedOutput, Reporter reporter, Schema schema)
    throws IOException {
  return getCollector(namedOutput, null, reporter, namedOutput, schema);
}
/**
 * Gets the output collector for a named output.
 * <p/>
 *
 * @param namedOutput the named output name
 * @param reporter the reporter
 * @param multiName the multi name part (appended to the named output for the file name)
 * @return the output collector for the given named output
 * @throws IOException thrown if output collector could not be created
 */
@SuppressWarnings("rawtypes")
public AvroCollector getCollector(String namedOutput, String multiName, Reporter reporter)
    throws IOException {
  return getCollector(namedOutput, multiName, reporter, namedOutput, null);
}
// Convenience overload: explicit schema and base file name, no multi-name part.
@SuppressWarnings("rawtypes")
private AvroCollector getCollector(String namedOutput, Schema schema, Reporter reporter, String baseFileName)
    throws IOException {
  return getCollector(namedOutput, null, reporter, baseFileName, schema);
}
/**
 * Gets the output collector for a multi named output.
 * <p/>
 * This is the workhorse all other getCollector/collect overloads delegate to:
 * it validates the named output, resolves the target file name, obtains (or
 * reuses) the record writer and wraps it in an {@link AvroCollector}.
 *
 * @param namedOutput the named output name
 * @param multiName the multi name part; must be null unless the output was
 *        defined as "multi"
 * @param reporter the reporter
 * @param baseOutputFileName file name used when the output is not "multi"
 * @param schema optional schema for this output; may be null
 * @return the output collector for the given named output
 * @throws IOException thrown if output collector could not be created
 */
@SuppressWarnings({"unchecked"})
private AvroCollector getCollector(String namedOutput, String multiName,
                                   Reporter reporter, String baseOutputFileName, Schema schema)
    throws IOException {
  checkNamedOutputName(namedOutput);
  if (!namedOutputs.contains(namedOutput)) {
    throw new IllegalArgumentException("Undefined named output '" +
        namedOutput + "'");
  }
  boolean multi = isMultiNamedOutput(conf, namedOutput);

  if (!multi && multiName != null) {
    throw new IllegalArgumentException("Name output '" + namedOutput +
        "' has not been defined as multi");
  }
  if (multi) {
    checkTokenName(multiName);
  }

  // Multi outputs write to "<namedOutput>_<multiName>"; plain outputs use the
  // caller-supplied base file name.
  String baseFileName = (multi) ? namedOutput + "_" + multiName : baseOutputFileName;

  final RecordWriter writer =
      getRecordWriter(namedOutput, baseFileName, reporter, schema);

  return new AvroCollector() {

    // Single-datum collect: wrap in AvroWrapper, value slot unused.
    @SuppressWarnings({"unchecked"})
    public void collect(Object key) throws IOException {
      AvroWrapper wrapper = new AvroWrapper(key);
      writer.write(wrapper, NullWritable.get());
    }

    // Key/value collect: passed straight through to the record writer.
    public void collect(Object key, Object value) throws IOException {
      writer.write(key, value);
    }

  };
}
/**
 * Closes all the opened named outputs.
 * <p/>
 * If overriden subclasses must invoke <code>super.close()</code> at the
 * end of their <code>close()</code>
 *
 * @throws java.io.IOException thrown if any of the MultipleOutput files
 * could not be closed properly.
 */
public void close() throws IOException {
  for (RecordWriter writer : recordWriters.values()) {
    // Writers are closed without a reporter; counters were already updated on write.
    writer.close(null);
  }
}
/**
 * Internal output format that resolves the real per-named-output format and
 * schema from the job configuration and delegates record-writer creation to it.
 */
private static class InternalFileOutputFormat extends FileOutputFormat<Object, Object> {
  // Conf key carrying the named output this writer instance serves.
  public static final String CONFIG_NAMED_OUTPUT = "mo.config.namedOutput";

  @SuppressWarnings({"unchecked", "deprecation"})
  public RecordWriter<Object, Object> getRecordWriter(FileSystem fs, JobConf job, String baseFileName, Progressable arg3) throws IOException {
    String nameOutput = job.get(CONFIG_NAMED_OUTPUT, null);
    String fileName = getUniqueName(job, baseFileName);
    // Recover the schema published by getRecordWriter(...) in the outer class, if any.
    Schema schema = null;
    String schemastr = job.get(MO_PREFIX + nameOutput + ".schema", null);
    if (schemastr != null)
      schema = Schema.parse(schemastr);
    JobConf outputConf = new JobConf(job);
    outputConf.setOutputFormat(getNamedOutputFormatClass(job, nameOutput));
    // Map-only jobs register the schema as the map output schema instead.
    boolean isMapOnly = job.getNumReduceTasks() == 0;
    if (schema != null) {
      if (isMapOnly)
        AvroJob.setMapOutputSchema(outputConf, schema);
      else
        AvroJob.setOutputSchema(outputConf, schema);
    }
    OutputFormat outputFormat = outputConf.getOutputFormat();
    return outputFormat.getRecordWriter(fs, outputConf, fileName, arg3);
  }
}
}
| apache-2.0 |
DariusX/camel | components/camel-lra/src/test/java/org/apache/camel/service/lra/AbstractLRATestSupport.java | 3766 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.service.lra;
import java.io.IOException;
import java.io.InputStream;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.Response;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.camel.CamelContext;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.test.AvailablePortFinder;
import org.apache.camel.test.junit4.CamelTestSupport;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.Matchers.equalTo;
/**
 * Base class for LRA based tests.
 *
 * <p>Records the number of active LRAs at the coordinator before each test and
 * verifies after the test that no LRA was left pending. Also wires a
 * {@link LRASagaService} (pointing at the coordinator from the
 * {@code LRA_COORDINATOR_URL} environment variable) into the Camel context.
 */
public abstract class AbstractLRATestSupport extends CamelTestSupport {

    // Lazily-chosen free local port the participant REST endpoints listen on.
    private Integer serverPort;

    // Number of LRAs active at the coordinator before the test started.
    private int activeLRAs;

    @Before
    public void getActiveLRAs() throws IOException {
        this.activeLRAs = getNumberOfActiveLRAs();
    }

    @After
    public void checkActiveLRAs() throws IOException {
        // Give in-flight LRAs up to 2 seconds to settle back to the pre-test count.
        await().atMost(2, SECONDS).until(() -> getNumberOfActiveLRAs(), equalTo(activeLRAs));
        Assert.assertEquals("Some LRA have been left pending", activeLRAs, getNumberOfActiveLRAs());
    }

    @Override
    protected CamelContext createCamelContext() throws Exception {
        CamelContext context = super.createCamelContext();
        context.addService(createLRASagaService());

        // Expose the REST participant endpoints on the locally chosen port.
        context.addRoutes(new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                restConfiguration()
                        .port(getServerPort());
            }
        });

        return context;
    }

    protected LRASagaService createLRASagaService() {
        LRASagaService sagaService = new LRASagaService();
        sagaService.setCoordinatorUrl(getCoordinatorURL());
        sagaService.setLocalParticipantUrl("http://localhost:" + getServerPort());
        return sagaService;
    }

    /**
     * Queries the LRA coordinator for the number of currently active LRAs.
     *
     * @throws IOException if the coordinator's JSON response cannot be parsed
     */
    protected int getNumberOfActiveLRAs() throws IOException {
        Client client = ClientBuilder.newClient();
        try {
            Response response = client.target(getCoordinatorURL() + "/lra-coordinator")
                    .request()
                    .accept("application/json")
                    .get();

            ObjectMapper mapper = new ObjectMapper();
            JsonNode lras = mapper.readTree(InputStream.class.cast(response.getEntity()));
            return lras.size();
        } finally {
            // Fix: the JAX-RS client was never closed before, leaking its
            // connection resources on every call (this method runs both before
            // and after every test).
            client.close();
        }
    }

    private String getCoordinatorURL() {
        String url = System.getenv("LRA_COORDINATOR_URL");
        if (url == null) {
            throw new IllegalStateException("Cannot run test: environment variable LRA_COORDINATOR_URL is missing");
        }
        return url;
    }

    protected int getServerPort() {
        if (serverPort == null) {
            serverPort = AvailablePortFinder.getNextAvailable();
        }
        return serverPort;
    }
}
| apache-2.0 |
adup-tech/amphtml | extensions/amp-3q-player/0.1/test/test-amp-3q-player.js | 3374 | /**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import '../amp-3q-player';
import {Services} from '../../../../src/services';
import {VideoEvents} from '../../../../src/video-interface';
import {createElementWithAttributes} from '../../../../src/dom';
import {listenOncePromise} from '../../../../src/event-helper';
describes.realWin(
  'amp-3q-player',
  {
    amp: {
      extensions: ['amp-3q-player'],
    },
  },
  function (env) {
    let win;
    let doc;
    let timer;

    beforeEach(() => {
      win = env.win;
      doc = win.document;
      timer = Services.timerFor(win);
    });

    // Builds an <amp-3q-player>, runs build/layout, and fakes the SDN "ready"
    // handshake from the iframe so the component behaves as if loaded.
    async function get3QElement(playoutId) {
      const player = createElementWithAttributes(doc, 'amp-3q-player', {
        width: 300,
        height: 200,
      });
      if (playoutId) {
        player.setAttribute('data-id', playoutId);
      }
      doc.body.appendChild(player);
      await player.build();
      player.layoutCallback();
      const iframe = player.querySelector('iframe');
      player.implementation_.sdnBridge_({
        source: iframe.contentWindow,
        data: JSON.stringify({data: 'ready'}),
      });
      return player;
    }

    it('renders', async () => {
      const player = await get3QElement('c8dbe7f4-7f7f-11e6-a407-0cc47a188158');
      const iframe = player.querySelector('iframe');
      expect(iframe).to.not.be.null;
      expect(iframe.src).to.equal(
        'https://playout.3qsdn.com/c8dbe7f4-7f7f-11e6-a407-0cc47a188158?autoplay=false&=true'
      );
    });

    it('requires data-id', () => {
      return allowConsoleError(() => {
        return get3QElement('').should.eventually.be.rejectedWith(
          /The data-id attribute is required/
        );
      });
    });

    it('should forward events from amp-3q-player to the amp element', async () => {
      const player = await get3QElement('c8dbe7f4-7f7f-11e6-a407-0cc47a188158');
      const iframe = player.querySelector('iframe');
      await Promise.resolve();
      const p1 = listenOncePromise(player, VideoEvents.MUTED);
      sendFakeMessage(player, iframe, 'muted');
      await p1;
      const p2 = listenOncePromise(player, VideoEvents.PLAYING);
      sendFakeMessage(player, iframe, 'playing');
      await p2;
      const p3 = listenOncePromise(player, VideoEvents.PAUSE);
      sendFakeMessage(player, iframe, 'paused');
      await p3;
      const p4 = listenOncePromise(player, VideoEvents.UNMUTED);
      sendFakeMessage(player, iframe, 'unmuted');
      // UNMUTED may have fired already; racing against a short timer keeps the
      // test from hanging if the event was delivered before we listened.
      const successTimeout = timer.promise(10);
      return Promise.race([p4, successTimeout]);
    });

    // Injects a fake SDN postMessage-style event into the player bridge.
    function sendFakeMessage(player, iframe, command) {
      player.implementation_.sdnBridge_({
        source: iframe.contentWindow,
        data: JSON.stringify({data: command}),
      });
    }
  }
);
| apache-2.0 |
mbrukman/gcloud-java | google-cloud-pubsub/src/test/java/com/google/cloud/pubsub/v1/FakeSubscriberServiceImpl.java | 13984 | /*
* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.pubsub.v1;
import com.google.api.client.util.Preconditions;
import com.google.api.core.InternalApi;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.Empty;
import com.google.pubsub.v1.AcknowledgeRequest;
import com.google.pubsub.v1.GetSubscriptionRequest;
import com.google.pubsub.v1.ModifyAckDeadlineRequest;
import com.google.pubsub.v1.PublisherGrpc.PublisherImplBase;
import com.google.pubsub.v1.PullRequest;
import com.google.pubsub.v1.PullResponse;
import com.google.pubsub.v1.StreamingPullRequest;
import com.google.pubsub.v1.StreamingPullResponse;
import com.google.pubsub.v1.SubscriberGrpc.SubscriberImplBase;
import com.google.pubsub.v1.Subscription;
import io.grpc.Status;
import io.grpc.Status.Code;
import io.grpc.StatusException;
import io.grpc.stub.StreamObserver;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A fake implementation of {@link PublisherImplBase}, that can be used to test clients of a Cloud
 * Pub/Sub Publisher.
 *
 * <p>Thread-safety: state is guarded by per-collection monitors (openedStreams, closedStreams,
 * acks, modAckDeadlines) plus the subscriptionInitialized and messageAckDeadline monitors; test
 * helpers block with wait/notifyAll until expected state is reached.
 */
class FakeSubscriberServiceImpl extends SubscriberImplBase {
  // Set once the first StreamingPull request carries a subscription name.
  private final AtomicBoolean subscriptionInitialized = new AtomicBoolean(false);
  private String subscription = "";
  private final AtomicInteger messageAckDeadline =
      new AtomicInteger(Subscriber.MIN_ACK_DEADLINE_SECONDS);
  private final AtomicInteger getSubscriptionCalled = new AtomicInteger();
  private final List<Stream> openedStreams = new ArrayList<>();
  private final List<Stream> closedStreams = new ArrayList<>();
  private final List<String> acks = new ArrayList<>();
  private final List<ModifyAckDeadline> modAckDeadlines = new ArrayList<>();
  private final List<PullRequest> receivedPullRequest = new ArrayList<>();
  private final BlockingQueue<PullResponse> pullResponses = new LinkedBlockingDeque<>();
  // Round-robin cursor over openedStreams used when pushing responses/errors.
  private int currentStream;

  public static enum CloseSide {
    SERVER,
    CLIENT
  }

  /** Value object recording a (ackId, deadline-seconds) pair received from a client. */
  public static final class ModifyAckDeadline {
    private final String ackId;
    private final long seconds;

    public ModifyAckDeadline(String ackId, long seconds) {
      Preconditions.checkNotNull(ackId);
      this.ackId = ackId;
      this.seconds = seconds;
    }

    public String getAckId() {
      return ackId;
    }

    public long getSeconds() {
      return seconds;
    }

    @Override
    public boolean equals(Object obj) {
      if (!(obj instanceof ModifyAckDeadline)) {
        return false;
      }
      ModifyAckDeadline other = (ModifyAckDeadline) obj;
      return other.ackId.equals(this.ackId) && other.seconds == this.seconds;
    }

    @Override
    public int hashCode() {
      return ackId.hashCode();
    }

    @Override
    public String toString() {
      return "Ack ID: " + ackId + ", deadline seconds: " + seconds;
    }
  }

  // Pairs the request observer the server hands back with the client-facing
  // response observer, so either side of a streaming pull can be reached.
  private static class Stream {
    private StreamObserver<StreamingPullRequest> requestObserver;
    private StreamObserver<StreamingPullResponse> responseObserver;
  }

  /** Handles the client half of a StreamingPull: validates and records each request. */
  private class StreamingPullRequestObserver implements StreamObserver<StreamingPullRequest> {
    private final Stream stream;
    private final StreamObserver<StreamingPullResponse> responseObserver;

    StreamingPullRequestObserver(
        Stream stream, StreamObserver<StreamingPullResponse> responseObserver) {
      this.stream = stream;
      this.responseObserver = responseObserver;
    }

    @Override
    public void onNext(StreamingPullRequest request) {
      synchronized (stream) {
        if (!request.getSubscription().isEmpty()) {
          // Only one subscription may ever be used against this fake.
          if (!subscription.isEmpty() && !subscription.equals(request.getSubscription())) {
            responseObserver.onError(
                new StatusException(
                    Status.fromCode(Code.ABORTED)
                        .withDescription("Can only set one subscription.")));
            return;
          }

          synchronized (subscriptionInitialized) {
            if (subscription.isEmpty()) {
              // The very first (initial) request must also carry an ack deadline.
              if (request.getStreamAckDeadlineSeconds() == 0) {
                responseObserver.onError(
                    new StatusException(
                        Status.fromCode(Code.INVALID_ARGUMENT)
                            .withDescription("A stream must be initialized with a ack deadline.")));
              }
              subscription = request.getSubscription();
              subscriptionInitialized.set(true);
              subscriptionInitialized.notifyAll();
            }
          }

          addOpenedStream(stream);
          stream.notifyAll();
        }
        if (request.getStreamAckDeadlineSeconds() > 0) {
          synchronized (messageAckDeadline) {
            messageAckDeadline.set(request.getStreamAckDeadlineSeconds());
            messageAckDeadline.notifyAll();
          }
        }
        if (subscription.isEmpty()) {
          // Non-initial request arrived before any initial request set the subscription.
          closeStream(stream);
          responseObserver.onError(
              new StatusException(
                  Status.fromCode(Code.ABORTED)
                      .withDescription(
                          "The stream has not been properly initialized with a "
                              + "subscription.")));
          return;
        }
        if (request.getAckIdsCount() > 0) {
          addReceivedAcks(request.getAckIdsList());
        }
        if (request.getModifyDeadlineAckIdsCount() > 0) {
          // Ack-id list and seconds list must be parallel arrays of equal length.
          if (request.getModifyDeadlineAckIdsCount() != request.getModifyDeadlineSecondsCount()) {
            closeStream(stream);
            responseObserver.onError(
                new StatusException(
                    Status.fromCode(Code.ABORTED)
                        .withDescription("Invalid modify ack deadline request.")));
            return;
          }
          Iterator<String> ackIds = request.getModifyDeadlineAckIdsList().iterator();
          Iterator<Integer> seconds = request.getModifyDeadlineSecondsList().iterator();
          while (ackIds.hasNext() && seconds.hasNext()) {
            addReceivedModifyAckDeadline(new ModifyAckDeadline(ackIds.next(), seconds.next()));
          }
        }
      }
    }

    @Override
    public void onError(Throwable error) {
      closeStream(stream);
    }

    @Override
    public void onCompleted() {
      closeStream(stream);
      stream.responseObserver.onCompleted();
    }
  }

  @Override
  public StreamObserver<StreamingPullRequest> streamingPull(
      StreamObserver<StreamingPullResponse> responseObserver) {
    Stream stream = new Stream();
    stream.requestObserver = new StreamingPullRequestObserver(stream, responseObserver);
    stream.responseObserver = responseObserver;
    return stream.requestObserver;
  }

  /** Pushes a response down the next open stream (round-robin); blocks until one exists. */
  public void sendStreamingResponse(StreamingPullResponse pullResponse)
      throws InterruptedException {
    waitForRegistedSubscription();
    synchronized (openedStreams) {
      waitForOpenedStreams(1);
      openedStreams.get(getAndAdvanceCurrentStream()).responseObserver.onNext(pullResponse);
    }
  }

  public void setMessageAckDeadlineSeconds(int ackDeadline) {
    messageAckDeadline.set(ackDeadline);
  }

  // Queues a response to be returned by the next unary pull() call.
  public void enqueuePullResponse(PullResponse response) {
    pullResponses.add(response);
  }

  @Override
  public void getSubscription(
      GetSubscriptionRequest request, StreamObserver<Subscription> responseObserver) {
    getSubscriptionCalled.incrementAndGet();
    responseObserver.onNext(
        Subscription.newBuilder()
            .setName(request.getSubscription())
            .setAckDeadlineSeconds(messageAckDeadline.get())
            .setTopic("fake-topic")
            .build());
    responseObserver.onCompleted();
  }

  /** Returns the number of times getSubscription is called. */
  @InternalApi
  int getSubscriptionCalledCount() {
    return getSubscriptionCalled.get();
  }

  @Override
  public void pull(PullRequest request, StreamObserver<PullResponse> responseObserver) {
    synchronized (receivedPullRequest) {
      receivedPullRequest.add(request);
    }
    try {
      // Blocks until a response is enqueued via enqueuePullResponse().
      responseObserver.onNext(pullResponses.take());
      responseObserver.onCompleted();
    } catch (InterruptedException e) {
      responseObserver.onError(e);
    }
  }

  @Override
  public void acknowledge(
      AcknowledgeRequest request, io.grpc.stub.StreamObserver<Empty> responseObserver) {
    addReceivedAcks(request.getAckIdsList());
    responseObserver.onNext(Empty.getDefaultInstance());
    responseObserver.onCompleted();
  }

  @Override
  public void modifyAckDeadline(
      ModifyAckDeadlineRequest request, StreamObserver<Empty> responseObserver) {
    for (String ackId : request.getAckIdsList()) {
      addReceivedModifyAckDeadline(new ModifyAckDeadline(ackId, request.getAckDeadlineSeconds()));
    }
    responseObserver.onNext(Empty.getDefaultInstance());
    responseObserver.onCompleted();
  }

  /** Fails the next open stream (round-robin) with the given error and closes it. */
  public void sendError(Throwable error) throws InterruptedException {
    waitForRegistedSubscription();
    synchronized (openedStreams) {
      waitForOpenedStreams(1);
      Stream stream = openedStreams.get(getAndAdvanceCurrentStream());
      stream.responseObserver.onError(error);
      closeStream(stream);
    }
  }

  /** Blocks until a StreamingPull request has registered a subscription, then returns it. */
  public String waitForRegistedSubscription() throws InterruptedException {
    synchronized (subscriptionInitialized) {
      while (!subscriptionInitialized.get()) {
        subscriptionInitialized.wait();
      }
    }
    return subscription;
  }

  /** Blocks until at least expectedCount acks arrived, removes and returns them. */
  public List<String> waitAndConsumeReceivedAcks(int expectedCount) throws InterruptedException {
    synchronized (acks) {
      waitAtLeast(acks, expectedCount);
      List<String> receivedAcksCopy = ImmutableList.copyOf(acks.subList(0, expectedCount));
      acks.subList(0, expectedCount).clear();
      return receivedAcksCopy;
    }
  }

  /** Blocks until at least expectedCount deadline changes arrived, removes and returns them. */
  public List<ModifyAckDeadline> waitAndConsumeModifyAckDeadlines(int expectedCount)
      throws InterruptedException {
    synchronized (modAckDeadlines) {
      waitAtLeast(modAckDeadlines, expectedCount);
      List<ModifyAckDeadline> modAckDeadlinesCopy =
          ImmutableList.copyOf(modAckDeadlines.subList(0, expectedCount));
      modAckDeadlines.subList(0, expectedCount).clear();
      return modAckDeadlinesCopy;
    }
  }

  public int waitForClosedStreams(int expectedCount) throws InterruptedException {
    synchronized (closedStreams) {
      waitAtLeast(closedStreams, expectedCount);
      return closedStreams.size();
    }
  }

  public int waitForOpenedStreams(int expectedCount) throws InterruptedException {
    synchronized (openedStreams) {
      waitAtLeast(openedStreams, expectedCount);
      return openedStreams.size();
    }
  }

  // wait until the collection has at least target number of elements.
  // caller MUST hold the monitor for the collection.
  private static void waitAtLeast(Collection<?> collection, int target)
      throws InterruptedException {
    // Bounded wait (20s total) so a stuck test fails with the observed state.
    long untilMillis = System.currentTimeMillis() + 20_000;
    while (collection.size() < target) {
      long now = System.currentTimeMillis();
      if (now >= untilMillis) {
        throw new IllegalStateException("timed out, last state: " + collection);
      }
      collection.wait(untilMillis - now);
    }
  }

  public void waitForStreamAckDeadline(int expectedValue) throws InterruptedException {
    synchronized (messageAckDeadline) {
      while (messageAckDeadline.get() != expectedValue) {
        messageAckDeadline.wait();
      }
    }
  }

  public int getOpenedStreamsCount() {
    return openedStreams.size();
  }

  public int getClosedStreamsCount() {
    return closedStreams.size();
  }

  public List<String> getAcks() {
    return acks;
  }

  public List<ModifyAckDeadline> getModifyAckDeadlines() {
    return modAckDeadlines;
  }

  /** Resets all recorded state; locks are taken in a fixed nesting order to avoid deadlock. */
  public void reset() {
    synchronized (subscriptionInitialized) {
      synchronized (openedStreams) {
        synchronized (acks) {
          synchronized (modAckDeadlines) {
            openedStreams.clear();
            closedStreams.clear();
            acks.clear();
            modAckDeadlines.clear();
            subscriptionInitialized.set(false);
            subscription = "";
            pullResponses.clear();
            receivedPullRequest.clear();
            currentStream = 0;
          }
        }
      }
    }
  }

  private void addOpenedStream(Stream stream) {
    synchronized (openedStreams) {
      openedStreams.add(stream);
      openedStreams.notifyAll();
    }
  }

  private void closeStream(Stream stream) {
    // NOTE(review): closedStreams is mutated here under the openedStreams
    // monitor, while waitForClosedStreams reads it under the closedStreams
    // monitor — presumably benign for these tests, but looks racy; verify.
    synchronized (openedStreams) {
      openedStreams.remove(stream);
      closedStreams.add(stream);
    }
    synchronized (closedStreams) {
      closedStreams.notifyAll();
    }
  }

  private int getAndAdvanceCurrentStream() {
    int current = currentStream;
    synchronized (openedStreams) {
      currentStream = (currentStream + 1) % openedStreams.size();
    }
    return current;
  }

  private void addReceivedAcks(Collection<String> newAckIds) {
    synchronized (acks) {
      acks.addAll(newAckIds);
      acks.notifyAll();
    }
  }

  private void addReceivedModifyAckDeadline(ModifyAckDeadline newAckDeadline) {
    synchronized (modAckDeadlines) {
      modAckDeadlines.add(newAckDeadline);
      modAckDeadlines.notifyAll();
    }
  }
}
| apache-2.0 |
gombadi/aws-tools | vendor/github.com/aws/aws-sdk-go/service/ssm/api.go | 51120 | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package ssm provides a client for Amazon Simple Systems Management Service.
package ssm
import (
"time"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
// opCancelCommand is the API operation name for CancelCommand.
const opCancelCommand = "CancelCommand"

// CancelCommandRequest generates a request for the CancelCommand operation.
// The request is not sent; call Send on it (or use CancelCommand) to execute.
func (c *SSM) CancelCommandRequest(input *CancelCommandInput) (req *request.Request, output *CancelCommandOutput) {
	op := &request.Operation{
		Name:       opCancelCommand,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CancelCommandInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CancelCommandOutput{}
	req.Data = output
	return
}

// Attempts to cancel the command specified by the Command ID. There is no guarantee
// that the command will be terminated and the underlying process stopped.
func (c *SSM) CancelCommand(input *CancelCommandInput) (*CancelCommandOutput, error) {
	req, out := c.CancelCommandRequest(input)
	err := req.Send()
	return out, err
}
// opCreateAssociation is the API operation name for CreateAssociation.
const opCreateAssociation = "CreateAssociation"

// CreateAssociationRequest generates a request for the CreateAssociation operation.
// The request is not sent; call Send on it (or use CreateAssociation) to execute.
func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *request.Request, output *CreateAssociationOutput) {
	op := &request.Operation{
		Name:       opCreateAssociation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateAssociationInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreateAssociationOutput{}
	req.Data = output
	return
}

// Associates the specified SSM document with the specified instance.
//
// When you associate an SSM document with an instance, the configuration agent
// on the instance processes the document and configures the instance as specified.
//
// If you associate a document with an instance that already has an associated
// document, the system throws the AssociationAlreadyExists exception.
func (c *SSM) CreateAssociation(input *CreateAssociationInput) (*CreateAssociationOutput, error) {
	req, out := c.CreateAssociationRequest(input)
	err := req.Send()
	return out, err
}
// opCreateAssociationBatch is the API operation name for CreateAssociationBatch.
const opCreateAssociationBatch = "CreateAssociationBatch"

// CreateAssociationBatchRequest generates a request for the CreateAssociationBatch operation.
// The request is not sent; call Send on it (or use CreateAssociationBatch) to execute.
func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput) (req *request.Request, output *CreateAssociationBatchOutput) {
	op := &request.Operation{
		Name:       opCreateAssociationBatch,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateAssociationBatchInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreateAssociationBatchOutput{}
	req.Data = output
	return
}

// Associates the specified SSM document with the specified instances.
//
// When you associate an SSM document with an instance, the configuration agent
// on the instance processes the document and configures the instance as specified.
//
// If you associate a document with an instance that already has an associated
// document, the system throws the AssociationAlreadyExists exception.
func (c *SSM) CreateAssociationBatch(input *CreateAssociationBatchInput) (*CreateAssociationBatchOutput, error) {
	req, out := c.CreateAssociationBatchRequest(input)
	err := req.Send()
	return out, err
}
// opCreateDocument is the API operation name for CreateDocument.
const opCreateDocument = "CreateDocument"

// CreateDocumentRequest generates a request for the CreateDocument operation.
// The request is not sent; call Send on it (or use CreateDocument) to execute.
func (c *SSM) CreateDocumentRequest(input *CreateDocumentInput) (req *request.Request, output *CreateDocumentOutput) {
	op := &request.Operation{
		Name:       opCreateDocument,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateDocumentInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreateDocumentOutput{}
	req.Data = output
	return
}

// Creates an SSM document.
//
// After you create an SSM document, you can use CreateAssociation to associate
// it with one or more running instances.
func (c *SSM) CreateDocument(input *CreateDocumentInput) (*CreateDocumentOutput, error) {
	req, out := c.CreateDocumentRequest(input)
	err := req.Send()
	return out, err
}
// opDeleteAssociation is the API operation name for DeleteAssociation.
const opDeleteAssociation = "DeleteAssociation"

// DeleteAssociationRequest generates a request for the DeleteAssociation operation.
// The request is not sent; call Send on it (or use DeleteAssociation) to execute.
func (c *SSM) DeleteAssociationRequest(input *DeleteAssociationInput) (req *request.Request, output *DeleteAssociationOutput) {
	op := &request.Operation{
		Name:       opDeleteAssociation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteAssociationInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DeleteAssociationOutput{}
	req.Data = output
	return
}

// Disassociates the specified SSM document from the specified instance.
//
// When you disassociate an SSM document from an instance, it does not change
// the configuration of the instance. To change the configuration state of an
// instance after you disassociate a document, you must create a new document
// with the desired configuration and associate it with the instance.
func (c *SSM) DeleteAssociation(input *DeleteAssociationInput) (*DeleteAssociationOutput, error) {
	req, out := c.DeleteAssociationRequest(input)
	err := req.Send()
	return out, err
}
// opDeleteDocument is the API operation name for DeleteDocument.
const opDeleteDocument = "DeleteDocument"

// DeleteDocumentRequest generates a request for the DeleteDocument operation.
// The request is not sent; call Send on it (or use DeleteDocument) to execute.
func (c *SSM) DeleteDocumentRequest(input *DeleteDocumentInput) (req *request.Request, output *DeleteDocumentOutput) {
	op := &request.Operation{
		Name:       opDeleteDocument,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteDocumentInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DeleteDocumentOutput{}
	req.Data = output
	return
}

// Deletes the SSM document and all instance associations to the document.
//
// Before you delete the SSM document, we recommend that you use DeleteAssociation
// to disassociate all instances that are associated with the document.
func (c *SSM) DeleteDocument(input *DeleteDocumentInput) (*DeleteDocumentOutput, error) {
	req, out := c.DeleteDocumentRequest(input)
	err := req.Send()
	return out, err
}
// opDescribeAssociation is the API operation name for DescribeAssociation.
const opDescribeAssociation = "DescribeAssociation"

// DescribeAssociationRequest generates a request for the DescribeAssociation operation.
// The request is not sent; call Send on it (or use DescribeAssociation) to execute.
func (c *SSM) DescribeAssociationRequest(input *DescribeAssociationInput) (req *request.Request, output *DescribeAssociationOutput) {
	op := &request.Operation{
		Name:       opDescribeAssociation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeAssociationInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeAssociationOutput{}
	req.Data = output
	return
}

// Describes the associations for the specified SSM document or instance.
func (c *SSM) DescribeAssociation(input *DescribeAssociationInput) (*DescribeAssociationOutput, error) {
	req, out := c.DescribeAssociationRequest(input)
	err := req.Send()
	return out, err
}
// opDescribeDocument is the API operation name for DescribeDocument.
const opDescribeDocument = "DescribeDocument"

// DescribeDocumentRequest generates a request for the DescribeDocument operation.
// The request is not sent; call Send on it (or use DescribeDocument) to execute.
func (c *SSM) DescribeDocumentRequest(input *DescribeDocumentInput) (req *request.Request, output *DescribeDocumentOutput) {
	op := &request.Operation{
		Name:       opDescribeDocument,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeDocumentInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeDocumentOutput{}
	req.Data = output
	return
}

// Describes the specified SSM document.
func (c *SSM) DescribeDocument(input *DescribeDocumentInput) (*DescribeDocumentOutput, error) {
	req, out := c.DescribeDocumentRequest(input)
	err := req.Send()
	return out, err
}
const opDescribeInstanceInformation = "DescribeInstanceInformation"

// DescribeInstanceInformationRequest generates a request for the DescribeInstanceInformation operation.
func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformationInput) (req *request.Request, output *DescribeInstanceInformationOutput) {
	op := &request.Operation{
		Name:       opDescribeInstanceInformation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeInstanceInformationInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &DescribeInstanceInformationOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Describes one or more of your instances. You can use this to get information
// about instances like the operating system platform, the SSM agent version,
// status etc. If you specify one or more instance IDs, it returns information
// for those instances. If you do not specify instance IDs, it returns information
// for all your instances. If you specify an instance ID that is not valid or
// an instance that you do not own, you receive an error.
func (c *SSM) DescribeInstanceInformation(input *DescribeInstanceInformationInput) (*DescribeInstanceInformationOutput, error) {
	req, out := c.DescribeInstanceInformationRequest(input)
	return out, req.Send()
}
const opGetDocument = "GetDocument"

// GetDocumentRequest generates a request for the GetDocument operation.
func (c *SSM) GetDocumentRequest(input *GetDocumentInput) (req *request.Request, output *GetDocumentOutput) {
	op := &request.Operation{
		Name:       opGetDocument,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetDocumentInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &GetDocumentOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Gets the contents of the specified SSM document.
func (c *SSM) GetDocument(input *GetDocumentInput) (*GetDocumentOutput, error) {
	req, out := c.GetDocumentRequest(input)
	return out, req.Send()
}
const opListAssociations = "ListAssociations"

// ListAssociationsRequest generates a request for the ListAssociations operation.
func (c *SSM) ListAssociationsRequest(input *ListAssociationsInput) (req *request.Request, output *ListAssociationsOutput) {
	op := &request.Operation{
		Name:       opListAssociations,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListAssociationsInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &ListAssociationsOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Lists the associations for the specified SSM document or instance.
func (c *SSM) ListAssociations(input *ListAssociationsInput) (*ListAssociationsOutput, error) {
	req, out := c.ListAssociationsRequest(input)
	return out, req.Send()
}
const opListCommandInvocations = "ListCommandInvocations"

// ListCommandInvocationsRequest generates a request for the ListCommandInvocations operation.
func (c *SSM) ListCommandInvocationsRequest(input *ListCommandInvocationsInput) (req *request.Request, output *ListCommandInvocationsOutput) {
	op := &request.Operation{
		Name:       opListCommandInvocations,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListCommandInvocationsInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &ListCommandInvocationsOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// An invocation is copy of a command sent to a specific instance. A command
// can apply to one or more instances. A command invocation applies to one instance.
// For example, if a user executes SendCommand against three instances, then
// a command invocation is created for each requested instance ID. ListCommandInvocations
// provide status about command execution.
func (c *SSM) ListCommandInvocations(input *ListCommandInvocationsInput) (*ListCommandInvocationsOutput, error) {
	req, out := c.ListCommandInvocationsRequest(input)
	return out, req.Send()
}
const opListCommands = "ListCommands"

// ListCommandsRequest generates a request for the ListCommands operation.
func (c *SSM) ListCommandsRequest(input *ListCommandsInput) (req *request.Request, output *ListCommandsOutput) {
	op := &request.Operation{
		Name:       opListCommands,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListCommandsInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &ListCommandsOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Lists the commands requested by users of the AWS account.
func (c *SSM) ListCommands(input *ListCommandsInput) (*ListCommandsOutput, error) {
	req, out := c.ListCommandsRequest(input)
	return out, req.Send()
}
const opListDocuments = "ListDocuments"

// ListDocumentsRequest generates a request for the ListDocuments operation.
func (c *SSM) ListDocumentsRequest(input *ListDocumentsInput) (req *request.Request, output *ListDocumentsOutput) {
	op := &request.Operation{
		Name:       opListDocuments,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListDocumentsInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &ListDocumentsOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Describes one or more of your SSM documents.
func (c *SSM) ListDocuments(input *ListDocumentsInput) (*ListDocumentsOutput, error) {
	req, out := c.ListDocumentsRequest(input)
	return out, req.Send()
}
const opSendCommand = "SendCommand"

// SendCommandRequest generates a request for the SendCommand operation.
func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request, output *SendCommandOutput) {
	op := &request.Operation{
		Name:       opSendCommand,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &SendCommandInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &SendCommandOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Executes commands on one or more remote instances.
func (c *SSM) SendCommand(input *SendCommandInput) (*SendCommandOutput, error) {
	req, out := c.SendCommandRequest(input)
	return out, req.Send()
}
const opUpdateAssociationStatus = "UpdateAssociationStatus"

// UpdateAssociationStatusRequest generates a request for the UpdateAssociationStatus operation.
func (c *SSM) UpdateAssociationStatusRequest(input *UpdateAssociationStatusInput) (req *request.Request, output *UpdateAssociationStatusOutput) {
	op := &request.Operation{
		Name:       opUpdateAssociationStatus,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateAssociationStatusInput{}
	}

	// Allocate output before building the request so newRequest receives a
	// non-nil data target; req.Data is still set to preserve the original
	// contract.
	output = &UpdateAssociationStatusOutput{}
	req = c.newRequest(op, input, output)
	req.Data = output
	return
}

// Updates the status of the SSM document associated with the specified instance.
func (c *SSM) UpdateAssociationStatus(input *UpdateAssociationStatusInput) (*UpdateAssociationStatusOutput, error) {
	req, out := c.UpdateAssociationStatusRequest(input)
	return out, req.Send()
}
// Describes an association of an SSM document and an instance.
//
// Returned in the Associations list of ListAssociationsOutput.
type Association struct {
	// Protocol marker field; never populated.
	_ struct{} `type:"structure"`
	// The ID of the instance.
	InstanceId *string `min:"10" type:"string"`
	// The name of the SSM document.
	Name *string `type:"string"`
}
// String returns the string representation
func (s Association) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Association) GoString() string {
	return s.String()
}
// Describes the parameters for a document.
//
// Returned by CreateAssociation and DescribeAssociation, and in the
// Successful list of CreateAssociationBatchOutput.
type AssociationDescription struct {
	_ struct{} `type:"structure"`
	// The date when the association was made.
	Date *time.Time `type:"timestamp" timestampFormat:"unix"`
	// The ID of the instance.
	InstanceId *string `min:"10" type:"string"`
	// The name of the SSM document.
	Name *string `type:"string"`
	// A description of the parameters for a document.
	Parameters map[string][]*string `type:"map"`
	// The association status.
	Status *AssociationStatus `type:"structure"`
}
// String returns the string representation
func (s AssociationDescription) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AssociationDescription) GoString() string {
	return s.String()
}
// Describes a filter.
//
// Used in the AssociationFilterList of ListAssociationsInput. Both fields
// are required (see struct tags).
type AssociationFilter struct {
	_ struct{} `type:"structure"`
	// The name of the filter.
	Key *string `locationName:"key" type:"string" required:"true" enum:"AssociationFilterKey"`
	// The filter value.
	Value *string `locationName:"value" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s AssociationFilter) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AssociationFilter) GoString() string {
	return s.String()
}
// Describes an association status.
//
// Date, Message, and Name are required (see struct tags).
type AssociationStatus struct {
	_ struct{} `type:"structure"`
	// A user-defined string.
	AdditionalInfo *string `type:"string"`
	// The date when the status changed.
	Date *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"`
	// The reason for the status.
	Message *string `type:"string" required:"true"`
	// The status.
	Name *string `type:"string" required:"true" enum:"AssociationStatusName"`
}
// String returns the string representation
func (s AssociationStatus) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AssociationStatus) GoString() string {
	return s.String()
}
// Input for the CancelCommand operation.
type CancelCommandInput struct {
	_ struct{} `type:"structure"`
	// The ID of the command you want to cancel.
	//
	// Required; minimum length 36 characters (see struct tag).
	CommandId *string `min:"36" type:"string" required:"true"`
	// (Optional) A list of instance IDs on which you want to cancel the command.
	// If not provided, the command is canceled on every instance on which it was
	// requested.
	InstanceIds []*string `min:"1" type:"list"`
}
// String returns the string representation
func (s CancelCommandInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CancelCommandInput) GoString() string {
	return s.String()
}
// Whether or not the command was successfully canceled. There is no guarantee
// that a request can be canceled.
//
// Carries no fields beyond the protocol marker.
type CancelCommandOutput struct {
	_ struct{} `type:"structure"`
}
// String returns the string representation
func (s CancelCommandOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CancelCommandOutput) GoString() string {
	return s.String()
}
// Describes a command request.
type Command struct {
	_ struct{} `type:"structure"`
	// A unique identifier for this command.
	CommandId *string `min:"36" type:"string"`
	// User-specified information about the command, such as a brief description
	// of what the command should do.
	Comment *string `type:"string"`
	// The name of the SSM document requested for execution.
	DocumentName *string `type:"string"`
	// If this time is reached and the command has not already started executing,
	// it will not execute. Calculated based on the ExpiresAfter user input provided
	// as part of the SendCommand API.
	ExpiresAfter *time.Time `type:"timestamp" timestampFormat:"unix"`
	// The instance IDs against which this command was requested.
	InstanceIds []*string `min:"1" type:"list"`
	// The S3 bucket where the responses to the command executions should be stored.
	// This was requested when issuing the command.
	OutputS3BucketName *string `min:"3" type:"string"`
	// The S3 directory path inside the bucket where the responses to the command
	// executions should be stored. This was requested when issuing the command.
	OutputS3KeyPrefix *string `type:"string"`
	// The parameter values to be inserted in the SSM document when executing the
	// command.
	Parameters map[string][]*string `type:"map"`
	// The date and time the command was requested.
	RequestedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
	// The status of the command. Values come from the CommandStatus enum (see
	// struct tag).
	Status *string `type:"string" enum:"CommandStatus"`
}
// String returns the string representation
func (s Command) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Command) GoString() string {
	return s.String()
}
// Describes a command filter.
//
// Used in the Filters lists of ListCommandsInput and
// ListCommandInvocationsInput. Both fields are required (see struct tags).
type CommandFilter struct {
	_ struct{} `type:"structure"`
	// The name of the filter. For example, requested date and time.
	Key *string `locationName:"key" type:"string" required:"true" enum:"CommandFilterKey"`
	// The filter value. For example: June 30, 2015.
	Value *string `locationName:"value" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s CommandFilter) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CommandFilter) GoString() string {
	return s.String()
}
// An invocation is copy of a command sent to a specific instance. A command
// can apply to one or more instances. A command invocation applies to one instance.
// For example, if a user executes SendCommand against three instances, then
// a command invocation is created for each requested instance ID. A command
// invocation returns status and detail information about a command you executed.
type CommandInvocation struct {
	_ struct{} `type:"structure"`
	// The command against which this invocation was requested.
	CommandId *string `min:"36" type:"string"`
	// Per-plugin execution details for this invocation; see CommandPlugin.
	CommandPlugins []*CommandPlugin `type:"list"`
	// User-specified information about the command, such as a brief description
	// of what the command should do.
	Comment *string `type:"string"`
	// The document name that was requested for execution.
	DocumentName *string `type:"string"`
	// The instance ID in which this invocation was requested.
	InstanceId *string `min:"10" type:"string"`
	// The time and date the request was sent to this instance.
	RequestedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
	// Whether or not the invocation succeeded, failed, or is pending.
	Status *string `type:"string" enum:"CommandInvocationStatus"`
	// Gets the trace output sent by the agent.
	TraceOutput *string `type:"string"`
}
// String returns the string representation
func (s CommandInvocation) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CommandInvocation) GoString() string {
	return s.String()
}
// Describes plugin details.
//
// Appears in CommandInvocation.CommandPlugins.
type CommandPlugin struct {
	_ struct{} `type:"structure"`
	// The name of the plugin. Must be one of the following: AWS-JoinDirectoryServiceDomain,
	// AWS-InstallApplication, AWS-RunPowerShellScript, AWS-InstallPowerShellModule,
	// AWS-ConfigureCloudWatch.
	Name *string `min:"4" type:"string"`
	// Output of the plugin execution.
	Output *string `type:"string"`
	// The S3 bucket where the responses to the command executions should be stored.
	// This was requested when issuing the command.
	OutputS3BucketName *string `min:"3" type:"string"`
	// The S3 directory path inside the bucket where the responses to the command
	// executions should be stored. This was requested when issuing the command.
	OutputS3KeyPrefix *string `type:"string"`
	// A numeric response code generated after executing the plugin.
	ResponseCode *int64 `type:"integer"`
	// The time the plugin stopped executing. Could stop prematurely if, for example,
	// a cancel command was sent.
	ResponseFinishDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
	// The time the plugin started executing.
	ResponseStartDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
	// The status of this plugin. You can execute a document with multiple plugins.
	Status *string `type:"string" enum:"CommandPluginStatus"`
}
// String returns the string representation
func (s CommandPlugin) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CommandPlugin) GoString() string {
	return s.String()
}
// Input for the CreateAssociationBatch operation.
type CreateAssociationBatchInput struct {
	_ struct{} `type:"structure"`
	// One or more associations.
	//
	// Entries is a required field (see struct tag).
	Entries []*CreateAssociationBatchRequestEntry `locationNameList:"entries" type:"list" required:"true"`
}
// String returns the string representation
func (s CreateAssociationBatchInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAssociationBatchInput) GoString() string {
	return s.String()
}
// Output of CreateAssociationBatch; splits results into failed and
// successful entries.
type CreateAssociationBatchOutput struct {
	_ struct{} `type:"structure"`
	// Information about the associations that failed.
	Failed []*FailedCreateAssociation `locationNameList:"FailedCreateAssociationEntry" type:"list"`
	// Information about the associations that succeeded.
	Successful []*AssociationDescription `locationNameList:"AssociationDescription" type:"list"`
}
// String returns the string representation
func (s CreateAssociationBatchOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAssociationBatchOutput) GoString() string {
	return s.String()
}
// Describes the association of an SSM document and an instance.
//
// One element of CreateAssociationBatchInput.Entries; also echoed back in
// FailedCreateAssociation.Entry.
type CreateAssociationBatchRequestEntry struct {
	_ struct{} `type:"structure"`
	// The ID of the instance.
	InstanceId *string `min:"10" type:"string"`
	// The name of the configuration document.
	Name *string `type:"string"`
	// A description of the parameters for a document.
	Parameters map[string][]*string `type:"map"`
}
// String returns the string representation
func (s CreateAssociationBatchRequestEntry) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAssociationBatchRequestEntry) GoString() string {
	return s.String()
}
// Input for the CreateAssociation operation.
type CreateAssociationInput struct {
	_ struct{} `type:"structure"`
	// The instance ID.
	//
	// InstanceId is a required field (see struct tag).
	InstanceId *string `min:"10" type:"string" required:"true"`
	// The name of the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
	// The parameters for the document’s runtime configuration.
	Parameters map[string][]*string `type:"map"`
}
// String returns the string representation
func (s CreateAssociationInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAssociationInput) GoString() string {
	return s.String()
}
// Output of the CreateAssociation operation.
type CreateAssociationOutput struct {
	_ struct{} `type:"structure"`
	// Information about the association.
	AssociationDescription *AssociationDescription `type:"structure"`
}
// String returns the string representation
func (s CreateAssociationOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAssociationOutput) GoString() string {
	return s.String()
}
// Input for the CreateDocument operation.
type CreateDocumentInput struct {
	_ struct{} `type:"structure"`
	// A valid JSON string. For more information about the contents of this string,
	// see SSM Document (http://docs.aws.amazon.com/ssm/latest/APIReference/aws-ssm-document.html).
	//
	// Content is required; minimum length 1 (see struct tag).
	Content *string `min:"1" type:"string" required:"true"`
	// A name for the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateDocumentInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDocumentInput) GoString() string {
	return s.String()
}
// Output of the CreateDocument operation.
type CreateDocumentOutput struct {
	_ struct{} `type:"structure"`
	// Information about the SSM document.
	DocumentDescription *DocumentDescription `type:"structure"`
}
// String returns the string representation
func (s CreateDocumentOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDocumentOutput) GoString() string {
	return s.String()
}
// Input for the DeleteAssociation operation.
type DeleteAssociationInput struct {
	_ struct{} `type:"structure"`
	// The ID of the instance.
	//
	// InstanceId is a required field (see struct tag).
	InstanceId *string `min:"10" type:"string" required:"true"`
	// The name of the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteAssociationInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteAssociationInput) GoString() string {
	return s.String()
}
// Output of DeleteAssociation; carries no fields beyond the protocol marker.
type DeleteAssociationOutput struct {
	_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteAssociationOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteAssociationOutput) GoString() string {
	return s.String()
}
// Input for the DeleteDocument operation.
type DeleteDocumentInput struct {
	_ struct{} `type:"structure"`
	// The name of the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteDocumentInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteDocumentInput) GoString() string {
	return s.String()
}
// Output of DeleteDocument; carries no fields beyond the protocol marker.
type DeleteDocumentOutput struct {
	_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteDocumentOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteDocumentOutput) GoString() string {
	return s.String()
}
// Input for the DescribeAssociation operation.
type DescribeAssociationInput struct {
	_ struct{} `type:"structure"`
	// The ID of the instance.
	//
	// InstanceId is a required field (see struct tag).
	InstanceId *string `min:"10" type:"string" required:"true"`
	// The name of the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeAssociationInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeAssociationInput) GoString() string {
	return s.String()
}
// Output of the DescribeAssociation operation.
type DescribeAssociationOutput struct {
	_ struct{} `type:"structure"`
	// Information about the association.
	AssociationDescription *AssociationDescription `type:"structure"`
}
// String returns the string representation
func (s DescribeAssociationOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeAssociationOutput) GoString() string {
	return s.String()
}
// Input for the DescribeDocument operation.
type DescribeDocumentInput struct {
	_ struct{} `type:"structure"`
	// The name of the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeDocumentInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeDocumentInput) GoString() string {
	return s.String()
}
// Output of the DescribeDocument operation.
type DescribeDocumentOutput struct {
	_ struct{} `type:"structure"`
	// Information about the SSM document.
	Document *DocumentDescription `type:"structure"`
}
// String returns the string representation
func (s DescribeDocumentOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeDocumentOutput) GoString() string {
	return s.String()
}
// Input for the DescribeInstanceInformation operation. Supports pagination
// via MaxResults and NextToken.
type DescribeInstanceInformationInput struct {
	_ struct{} `type:"structure"`
	// One or more filters. Use a filter to return a more specific list of instances.
	InstanceInformationFilterList []*InstanceInformationFilter `locationNameList:"InstanceInformationFilter" min:"1" type:"list"`
	// The maximum number of items to return for this call. The call also returns
	// a token that you can specify in a subsequent call to get the next set of
	// results.
	MaxResults *int64 `min:"5" type:"integer"`
	// The token for the next set of items to return. (You received this token from
	// a previous call.)
	NextToken *string `type:"string"`
}
// String returns the string representation
func (s DescribeInstanceInformationInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeInstanceInformationInput) GoString() string {
	return s.String()
}
// Output of the DescribeInstanceInformation operation.
type DescribeInstanceInformationOutput struct {
	_ struct{} `type:"structure"`
	// The instance information list.
	InstanceInformationList []*InstanceInformation `locationNameList:"InstanceInformation" type:"list"`
	// The token to use when requesting the next set of items. If there are no additional
	// items to return, the string is empty.
	NextToken *string `type:"string"`
}
// String returns the string representation
func (s DescribeInstanceInformationOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeInstanceInformationOutput) GoString() string {
	return s.String()
}
// Describes an SSM document.
//
// Returned in CreateDocumentOutput.DocumentDescription and
// DescribeDocumentOutput.Document.
type DocumentDescription struct {
	_ struct{} `type:"structure"`
	// The date when the SSM document was created.
	CreatedDate *time.Time `type:"timestamp" timestampFormat:"unix"`
	// A description of the document.
	Description *string `type:"string"`
	// The name of the SSM document.
	Name *string `type:"string"`
	// A description of the parameters for a document.
	Parameters []*DocumentParameter `locationNameList:"DocumentParameter" type:"list"`
	// The list of OS platforms compatible with this SSM document.
	PlatformTypes []*string `locationNameList:"PlatformType" type:"list"`
	// The SHA1 hash of the document, which you can use for verification purposes.
	Sha1 *string `type:"string"`
	// The status of the SSM document.
	Status *string `type:"string" enum:"DocumentStatus"`
}
// String returns the string representation
func (s DocumentDescription) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DocumentDescription) GoString() string {
	return s.String()
}
// Describes a filter.
//
// Used to narrow ListDocuments results. Both fields are required (see
// struct tags).
type DocumentFilter struct {
	_ struct{} `type:"structure"`
	// The name of the filter.
	Key *string `locationName:"key" type:"string" required:"true" enum:"DocumentFilterKey"`
	// The value of the filter.
	Value *string `locationName:"value" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DocumentFilter) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DocumentFilter) GoString() string {
	return s.String()
}
// Describes the name of an SSM document.
type DocumentIdentifier struct {
	_ struct{} `type:"structure"`
	// The name of the SSM document.
	Name *string `type:"string"`
	// The operating system platform.
	PlatformTypes []*string `locationNameList:"PlatformType" type:"list"`
}
// String returns the string representation
func (s DocumentIdentifier) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DocumentIdentifier) GoString() string {
	return s.String()
}
// Describes one parameter of an SSM document; appears in
// DocumentDescription.Parameters.
type DocumentParameter struct {
	_ struct{} `type:"structure"`
	// If specified, the default values for the parameters. Parameters without a
	// default value are required. Parameters with a default value are optional.
	DefaultValue *string `type:"string"`
	// A description of what the parameter does, how to use it, the default value,
	// and whether or not the parameter is optional.
	Description *string `type:"string"`
	// The name of the parameter.
	Name *string `type:"string"`
	// The type of parameter. The type can be either “String” or “StringList”.
	Type *string `type:"string" enum:"DocumentParameterType"`
}
// String returns the string representation
func (s DocumentParameter) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DocumentParameter) GoString() string {
	return s.String()
}
// Describes a failed association.
//
// Appears in the Failed list of CreateAssociationBatchOutput.
type FailedCreateAssociation struct {
	_ struct{} `type:"structure"`
	// The association.
	Entry *CreateAssociationBatchRequestEntry `type:"structure"`
	// The source of the failure.
	Fault *string `type:"string" enum:"Fault"`
	// A description of the failure.
	Message *string `type:"string"`
}
// String returns the string representation
func (s FailedCreateAssociation) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s FailedCreateAssociation) GoString() string {
	return s.String()
}
// Input for the GetDocument operation.
type GetDocumentInput struct {
	_ struct{} `type:"structure"`
	// The name of the SSM document.
	//
	// Name is a required field (see struct tag).
	Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s GetDocumentInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetDocumentInput) GoString() string {
	return s.String()
}
// Output of the GetDocument operation.
type GetDocumentOutput struct {
	_ struct{} `type:"structure"`
	// The contents of the SSM document.
	Content *string `min:"1" type:"string"`
	// The name of the SSM document.
	Name *string `type:"string"`
}
// String returns the string representation
func (s GetDocumentOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetDocumentOutput) GoString() string {
	return s.String()
}
// Describes an instance managed by SSM; appears in
// DescribeInstanceInformationOutput.InstanceInformationList.
// (NOTE(review): the generated comment said "Describes a filter" — the
// fields clearly describe an instance, not a filter.)
type InstanceInformation struct {
	_ struct{} `type:"structure"`
	// The version of the SSM agent running on your instance.
	AgentVersion *string `type:"string"`
	// The instance ID.
	InstanceId *string `min:"10" type:"string"`
	// Indicates whether latest version of the SSM agent is running on your instance.
	IsLatestVersion *bool `type:"boolean"`
	// The date and time when agent last pinged SSM service.
	LastPingDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
	// Connection status of the SSM agent.
	PingStatus *string `type:"string" enum:"PingStatus"`
	// The name of the operating system platform running on your instance.
	PlatformName *string `type:"string"`
	// The operating system platform type.
	PlatformType *string `type:"string" enum:"PlatformType"`
	// The version of the OS platform running on your instance.
	PlatformVersion *string `type:"string"`
}
// String returns the string representation
func (s InstanceInformation) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InstanceInformation) GoString() string {
	return s.String()
}
// Describes a filter for a specific list of instances.
//
// Used in DescribeInstanceInformationInput.InstanceInformationFilterList.
// Both fields are required (see struct tags).
type InstanceInformationFilter struct {
	_ struct{} `type:"structure"`
	// The name of the filter.
	Key *string `locationName:"key" type:"string" required:"true" enum:"InstanceInformationFilterKey"`
	// The filter values.
	ValueSet []*string `locationName:"valueSet" locationNameList:"InstanceInformationFilterValue" min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s InstanceInformationFilter) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InstanceInformationFilter) GoString() string {
	return s.String()
}
// Input for the ListAssociations operation. Supports pagination via
// MaxResults and NextToken.
type ListAssociationsInput struct {
	_ struct{} `type:"structure"`
	// One or more filters. Use a filter to return a more specific list of results.
	//
	// AssociationFilterList is a required field (see struct tag).
	AssociationFilterList []*AssociationFilter `locationNameList:"AssociationFilter" min:"1" type:"list" required:"true"`
	// The maximum number of items to return for this call. The call also returns
	// a token that you can specify in a subsequent call to get the next set of
	// results.
	MaxResults *int64 `min:"1" type:"integer"`
	// The token for the next set of items to return. (You received this token from
	// a previous call.)
	NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListAssociationsInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListAssociationsInput) GoString() string {
	return s.String()
}
// Output of the ListAssociations operation.
type ListAssociationsOutput struct {
	_ struct{} `type:"structure"`
	// The associations.
	Associations []*Association `locationNameList:"Association" type:"list"`
	// The token to use when requesting the next set of items. If there are no additional
	// items to return, the string is empty.
	NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListAssociationsOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListAssociationsOutput) GoString() string {
	return s.String()
}
// ListCommandInvocationsInput is the request shape for the
// ListCommandInvocations API operation. All fields are optional narrowing
// criteria; pagination uses MaxResults/NextToken.
type ListCommandInvocationsInput struct {
	_ struct{} `type:"structure"`

	// (Optional) The invocations for a specific command ID.
	CommandId *string `min:"36" type:"string"`

	// (Optional) If set this returns the response of the command executions. By
	// default this is set to False.
	Details *bool `type:"boolean"`

	// (Optional) One or more filters. Use a filter to return a more specific list
	// of results.
	Filters []*CommandFilter `min:"1" type:"list"`

	// (Optional) The command execution details for a specific instance ID.
	InstanceId *string `min:"10" type:"string"`

	// (Optional) The maximum number of items to return for this call. The call
	// also returns a token that you can specify in a subsequent call to get the
	// next set of results.
	MaxResults *int64 `min:"1" type:"integer"`

	// (Optional) The token for the next set of items to return. (You received this
	// token from a previous call.)
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListCommandInvocationsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListCommandInvocationsInput) GoString() string {
	return s.String()
}
// ListCommandInvocationsOutput is the response shape for the
// ListCommandInvocations API operation.
type ListCommandInvocationsOutput struct {
	_ struct{} `type:"structure"`

	// (Optional) A list of all invocations.
	CommandInvocations []*CommandInvocation `type:"list"`

	// (Optional) The token for the next set of items to return. (You received this
	// token from a previous call.)
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListCommandInvocationsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListCommandInvocationsOutput) GoString() string {
	return s.String()
}
// ListCommandsInput is the request shape for the ListCommands API operation.
// All fields are optional narrowing criteria; pagination uses
// MaxResults/NextToken.
type ListCommandsInput struct {
	_ struct{} `type:"structure"`

	// (Optional) If provided, lists only the specified command.
	CommandId *string `min:"36" type:"string"`

	// (Optional) One or more filters. Use a filter to return a more specific list
	// of results.
	Filters []*CommandFilter `min:"1" type:"list"`

	// (Optional) Lists commands issued against this instance ID.
	InstanceId *string `min:"10" type:"string"`

	// (Optional) The maximum number of items to return for this call. The call
	// also returns a token that you can specify in a subsequent call to get the
	// next set of results.
	MaxResults *int64 `min:"1" type:"integer"`

	// (Optional) The token for the next set of items to return. (You received this
	// token from a previous call.)
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListCommandsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListCommandsInput) GoString() string {
	return s.String()
}
// ListCommandsOutput is the response shape for the ListCommands API operation.
type ListCommandsOutput struct {
	_ struct{} `type:"structure"`

	// (Optional) The list of commands requested by the user.
	Commands []*Command `type:"list"`

	// (Optional) The token for the next set of items to return. (You received this
	// token from a previous call.)
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListCommandsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListCommandsOutput) GoString() string {
	return s.String()
}
// ListDocumentsInput is the request shape for the ListDocuments API operation
// (paginated via MaxResults/NextToken).
type ListDocumentsInput struct {
	_ struct{} `type:"structure"`

	// One or more filters. Use a filter to return a more specific list of results.
	DocumentFilterList []*DocumentFilter `locationNameList:"DocumentFilter" min:"1" type:"list"`

	// The maximum number of items to return for this call. The call also returns
	// a token that you can specify in a subsequent call to get the next set of
	// results.
	MaxResults *int64 `min:"1" type:"integer"`

	// The token for the next set of items to return. (You received this token from
	// a previous call.)
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListDocumentsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListDocumentsInput) GoString() string {
	return s.String()
}
// ListDocumentsOutput is the response shape for the ListDocuments API
// operation.
type ListDocumentsOutput struct {
	_ struct{} `type:"structure"`

	// The names of the SSM documents.
	DocumentIdentifiers []*DocumentIdentifier `locationNameList:"DocumentIdentifier" type:"list"`

	// The token to use when requesting the next set of items. If there are no additional
	// items to return, the string is empty.
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListDocumentsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListDocumentsOutput) GoString() string {
	return s.String()
}
// SendCommandInput is the request shape for the SendCommand API operation.
// DocumentName and InstanceIds are required; the remaining fields are
// optional.
type SendCommandInput struct {
	_ struct{} `type:"structure"`

	// User-specified information about the command, such as a brief description
	// of what the command should do.
	Comment *string `type:"string"`

	// Required. The name of the SSM document to execute. This can be an SSM public
	// document or a custom document.
	DocumentName *string `type:"string" required:"true"`

	// Required. The instance IDs where the command should execute.
	InstanceIds []*string `min:"1" type:"list" required:"true"`

	// The name of the S3 bucket where command execution responses should be stored.
	OutputS3BucketName *string `min:"3" type:"string"`

	// The directory structure within the S3 bucket where the responses should be
	// stored.
	OutputS3KeyPrefix *string `type:"string"`

	// The required and optional parameters specified in the SSM document being
	// executed.
	Parameters map[string][]*string `type:"map"`

	// If this time is reached and the command has not already started executing,
	// it will not execute.
	TimeoutSeconds *int64 `min:"30" type:"integer"`
}

// String returns the string representation
func (s SendCommandInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SendCommandInput) GoString() string {
	return s.String()
}
// SendCommandOutput is the response shape for the SendCommand API operation.
type SendCommandOutput struct {
	_ struct{} `type:"structure"`

	// The request as it was received by SSM. Also provides the command ID which
	// can be used future references to this request.
	Command *Command `type:"structure"`
}

// String returns the string representation
func (s SendCommandOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SendCommandOutput) GoString() string {
	return s.String()
}
// UpdateAssociationStatusInput is the request shape for the
// UpdateAssociationStatus API operation. All three fields are required.
type UpdateAssociationStatusInput struct {
	_ struct{} `type:"structure"`

	// The association status.
	AssociationStatus *AssociationStatus `type:"structure" required:"true"`

	// The ID of the instance.
	InstanceId *string `min:"10" type:"string" required:"true"`

	// The name of the SSM document.
	Name *string `type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateAssociationStatusInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAssociationStatusInput) GoString() string {
	return s.String()
}
// UpdateAssociationStatusOutput is the response shape for the
// UpdateAssociationStatus API operation.
type UpdateAssociationStatusOutput struct {
	_ struct{} `type:"structure"`

	// Information about the association.
	AssociationDescription *AssociationDescription `type:"structure"`
}

// String returns the string representation
func (s UpdateAssociationStatusOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAssociationStatusOutput) GoString() string {
	return s.String()
}
// Valid values for the AssociationFilterKey enum.
const (
	// @enum AssociationFilterKey
	AssociationFilterKeyInstanceId = "InstanceId"
	// @enum AssociationFilterKey
	AssociationFilterKeyName = "Name"
)

// Valid values for the AssociationStatusName enum.
const (
	// @enum AssociationStatusName
	AssociationStatusNamePending = "Pending"
	// @enum AssociationStatusName
	AssociationStatusNameSuccess = "Success"
	// @enum AssociationStatusName
	AssociationStatusNameFailed = "Failed"
)

// Valid values for the CommandFilterKey enum.
const (
	// @enum CommandFilterKey
	CommandFilterKeyInvokedAfter = "InvokedAfter"
	// @enum CommandFilterKey
	CommandFilterKeyInvokedBefore = "InvokedBefore"
	// @enum CommandFilterKey
	CommandFilterKeyStatus = "Status"
)

// Valid values for the CommandInvocationStatus enum.
const (
	// @enum CommandInvocationStatus
	CommandInvocationStatusPending = "Pending"
	// @enum CommandInvocationStatus
	CommandInvocationStatusInProgress = "InProgress"
	// @enum CommandInvocationStatus
	CommandInvocationStatusCancelling = "Cancelling"
	// @enum CommandInvocationStatus
	CommandInvocationStatusSuccess = "Success"
	// @enum CommandInvocationStatus
	CommandInvocationStatusTimedOut = "TimedOut"
	// @enum CommandInvocationStatus
	CommandInvocationStatusCancelled = "Cancelled"
	// @enum CommandInvocationStatus
	CommandInvocationStatusFailed = "Failed"
)

// Valid values for the CommandPluginStatus enum.
const (
	// @enum CommandPluginStatus
	CommandPluginStatusPending = "Pending"
	// @enum CommandPluginStatus
	CommandPluginStatusInProgress = "InProgress"
	// @enum CommandPluginStatus
	CommandPluginStatusSuccess = "Success"
	// @enum CommandPluginStatus
	CommandPluginStatusTimedOut = "TimedOut"
	// @enum CommandPluginStatus
	CommandPluginStatusCancelled = "Cancelled"
	// @enum CommandPluginStatus
	CommandPluginStatusFailed = "Failed"
)

// Valid values for the CommandStatus enum.
const (
	// @enum CommandStatus
	CommandStatusPending = "Pending"
	// @enum CommandStatus
	CommandStatusInProgress = "InProgress"
	// @enum CommandStatus
	CommandStatusCancelling = "Cancelling"
	// @enum CommandStatus
	CommandStatusSuccess = "Success"
	// @enum CommandStatus
	CommandStatusTimedOut = "TimedOut"
	// @enum CommandStatus
	CommandStatusCancelled = "Cancelled"
	// @enum CommandStatus
	CommandStatusFailed = "Failed"
)

// Valid values for the DocumentFilterKey enum.
const (
	// @enum DocumentFilterKey
	DocumentFilterKeyName = "Name"
	// @enum DocumentFilterKey
	DocumentFilterKeyOwner = "Owner"
	// @enum DocumentFilterKey
	DocumentFilterKeyPlatformTypes = "PlatformTypes"
)

// Valid values for the DocumentParameterType enum.
const (
	// @enum DocumentParameterType
	DocumentParameterTypeString = "String"
	// @enum DocumentParameterType
	DocumentParameterTypeStringList = "StringList"
)

// Valid values for the DocumentStatus enum.
const (
	// @enum DocumentStatus
	DocumentStatusCreating = "Creating"
	// @enum DocumentStatus
	DocumentStatusActive = "Active"
	// @enum DocumentStatus
	DocumentStatusDeleting = "Deleting"
)

// Valid values for the Fault enum.
const (
	// @enum Fault
	FaultClient = "Client"
	// @enum Fault
	FaultServer = "Server"
	// @enum Fault
	FaultUnknown = "Unknown"
)

// Valid values for the InstanceInformationFilterKey enum.
const (
	// @enum InstanceInformationFilterKey
	InstanceInformationFilterKeyInstanceIds = "InstanceIds"
	// @enum InstanceInformationFilterKey
	InstanceInformationFilterKeyAgentVersion = "AgentVersion"
	// @enum InstanceInformationFilterKey
	InstanceInformationFilterKeyPingStatus = "PingStatus"
	// @enum InstanceInformationFilterKey
	InstanceInformationFilterKeyPlatformTypes = "PlatformTypes"
)

// Valid values for the PingStatus enum.
const (
	// @enum PingStatus
	PingStatusOnline = "Online"
	// @enum PingStatus
	PingStatusConnectionLost = "ConnectionLost"
	// @enum PingStatus
	PingStatusInactive = "Inactive"
)

// Valid values for the PlatformType enum.
const (
	// @enum PlatformType
	PlatformTypeWindows = "Windows"
	// @enum PlatformType
	PlatformTypeLinux = "Linux"
)
| apache-2.0 |
Saulis/gerrit | gerrit-server/src/main/java/com/google/gerrit/common/Version.java | 1674 | // Copyright (C) 2009 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.common;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
/**
 * Reads the build version from the {@code Version} classpath resource that sits
 * next to this class, caching it for the lifetime of the JVM.
 */
public class Version {
  private static final Logger log = LoggerFactory.getLogger(Version.class);

  // Loaded once at class initialization; a static initializer block is not
  // needed for a single-expression assignment.
  private static final String version = loadVersion();

  /** @return the version string, or {@code null} if it could not be determined. */
  public static String getVersion() {
    return version;
  }

  private static String loadVersion() {
    try (InputStream in = Version.class.getResourceAsStream("Version")) {
      if (in == null) {
        // Resource absent (e.g. development build without a generated Version file).
        return null;
      }
      // StandardCharsets.UTF_8 instead of the "UTF-8" string literal: no
      // charset-name typo risk and no UnsupportedEncodingException path.
      try (BufferedReader r =
          new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
        String vs = r.readLine();
        // Strip the conventional "v" tag prefix (e.g. "v2.16" -> "2.16").
        if (vs != null && vs.startsWith("v")) {
          vs = vs.substring(1);
        }
        // Treat an empty line the same as a missing version.
        if (vs != null && vs.isEmpty()) {
          vs = null;
        }
        return vs;
      }
    } catch (IOException e) {
      log.error(e.getMessage(), e);
      return null;
    }
  }

  /** Static-only utility class; not instantiable. */
  private Version() {
  }
}
| apache-2.0 |
cgruber/dagger | javatests/dagger/functional/modules/subpackage/PackagePrivateModule.java | 899 | /*
* Copyright (C) 2019 The Dagger Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dagger.functional.modules.subpackage;
import dagger.Binds;
import dagger.Module;
import dagger.Provides;
@Module
abstract class PackagePrivateModule {
@Binds
abstract Object bindObject(String string);
@Provides
static String provideString(int i) {
return "foo" + i;
}
}
| apache-2.0 |
anjalshireesh/gluster-ovirt-poc | backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/vds/VdsStaticDAOHibernateImpl.java | 510 | package org.ovirt.engine.core.dao.vds;
import org.ovirt.engine.core.common.businessentities.VdsStatic;
import org.ovirt.engine.core.compat.NGuid;
import org.ovirt.engine.core.dao.BaseDAOHibernateImpl;
/**
 * <code>VdsStaticDAOHibernateImpl</code> extends {@link BaseDAOHibernateImpl} to work with instances of
 * {@link VdsStatic}, keyed by {@link NGuid}. All CRUD behavior comes from the generic base class.
 */
public class VdsStaticDAOHibernateImpl extends BaseDAOHibernateImpl<VdsStatic, NGuid> {

    /** Registers {@link VdsStatic} as the entity class handled by the base DAO. */
    public VdsStaticDAOHibernateImpl() {
        super(VdsStatic.class);
    }
}
| apache-2.0 |