code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30
values | license stringclasses 15
values | size int64 3 1.01M |
|---|---|---|---|---|---|
<?php
// Entry-point guard: this file may only be loaded through a SugarCRM/SuiteCRM
// entry script that defines the 'sugarEntry' flag; direct web access is refused.
if (!defined('sugarEntry') || !sugarEntry) {
    die('Not A Valid Entry Point');
}
/**
*
* SugarCRM Community Edition is a customer relationship management program developed by
* SugarCRM, Inc. Copyright (C) 2004-2013 SugarCRM Inc.
*
* SuiteCRM is an extension to SugarCRM Community Edition developed by SalesAgility Ltd.
* Copyright (C) 2011 - 2018 SalesAgility Ltd.
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License version 3 as published by the
* Free Software Foundation with the addition of the following permission added
* to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK
* IN WHICH THE COPYRIGHT IS OWNED BY SUGARCRM, SUGARCRM DISCLAIMS THE WARRANTY
* OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License along with
* this program; if not, see http://www.gnu.org/licenses or write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*
* You can contact SugarCRM, Inc. headquarters at 10050 North Wolfe Road,
* SW2-130, Cupertino, CA 95014, USA. or at email address contact@sugarcrm.com.
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Public License version 3.
*
* In accordance with Section 7(b) of the GNU Affero General Public License version 3,
* these Appropriate Legal Notices must retain the display of the "Powered by
* SugarCRM" logo and "Supercharged by SuiteCRM" logo. If the display of the logos is not
* reasonably feasible for technical reasons, the Appropriate Legal Notices must
* display the words "Powered by SugarCRM" and "Supercharged by SuiteCRM".
*/
/*********************************************************************************
* Description: TODO: To be written.
* Portions created by SugarCRM are Copyright (C) SugarCRM, Inc.
* All Rights Reserved.
* Contributor(s): ______________________________________..
********************************************************************************/
require_once('include/DetailView/DetailView.php');
require_once('modules/Campaigns/Charts.php');

global $mod_strings;
global $app_strings;
global $app_list_strings;
global $sugar_version, $sugar_config;
global $theme;
global $current_user;

$GLOBALS['log']->info("Campaign detail view");

// Template for the campaign ROI popup window.
$xtpl = new XTemplate('modules/Campaigns/PopupCampaignRoi.html');

$campaign = new Campaign();

// SECURITY: the campaign id comes straight from the request and is interpolated
// into SQL strings below, so escape it through the DB abstraction layer first.
$campaign_id = $campaign->db->quote($_REQUEST['id']);

// ROI summary for this campaign: count of "Closed Won" opportunities plus
// revenue/investment totals and the resulting ROI percentage.
$opp_query1 = "select camp.name, camp.actual_cost,camp.budget,camp.expected_revenue,count(*) opp_count,SUM(opp.amount) as Revenue, SUM(camp.actual_cost) as Investment,
ROUND((SUM(opp.amount) - SUM(camp.actual_cost))/(SUM(camp.actual_cost)), 2)*100 as ROI";
$opp_query1 .= " from opportunities opp";
$opp_query1 .= " right join campaigns camp on camp.id = opp.campaign_id";
$opp_query1 .= " where opp.sales_stage = 'Closed Won' and camp.id='$campaign_id'";
$opp_query1 .= " group by camp.name";
$opp_result1 = $campaign->db->query($opp_query1);
$opp_data1 = $campaign->db->fetchByAssoc($opp_result1);

// Count campaign-tracker click-throughs recorded in the campaign log.
$query_click = "SELECT count(*) hits ";
$query_click .= " FROM campaign_log ";
$query_click .= " WHERE campaign_id = '$campaign_id' AND activity_type='link' AND related_type='CampaignTrackers' AND archived=0 AND deleted=0";
// NOTE(review): $marketing_id is never assigned in this script (the code that
// used to populate it was commented out), so this filter is currently inert.
if (!empty($marketing_id)) {
    $query_click .= " AND marketing_id ='$marketing_id'";
}
$query_click .= " GROUP BY activity_type, target_type";
$query_click .= " ORDER BY activity_type, target_type";
// NOTE(review): the click-through result is executed but its rows are never
// read by this popup; retained to avoid changing observable behaviour.
$result = $campaign->db->query($query_click);

$xtpl->assign("OPP_COUNT", $opp_data1['opp_count']);
$xtpl->assign("ACTUAL_COST", $opp_data1['actual_cost']);
$xtpl->assign("PLANNED_BUDGET", $opp_data1['budget']);
$xtpl->assign("EXPECTED_REVENUE", $opp_data1['expected_revenue']);

// Resolve the currency label (ISO 4217 code + symbol); fall back to the system
// default when no usable currency is attached. $focus, if present, is expected
// to be supplied by the including scope -- TODO confirm; when it is unset the
// default-currency branch runs.
$currency = new Currency();
if (!empty($focus->currency_id)) {
    $currency->retrieve($focus->currency_id);
    if ($currency->deleted != 1) {
        $xtpl->assign("CURRENCY", $currency->iso4217 . ' ' . $currency->symbol);
    } else {
        $xtpl->assign("CURRENCY", $currency->getDefaultISO4217() . ' ' . $currency->getDefaultCurrencySymbol());
    }
} else {
    $xtpl->assign("CURRENCY", $currency->getDefaultISO4217() . ' ' . $currency->getDefaultCurrencySymbol());
}

// Admins browsing with edit-in-place enabled get a link to the layout editor.
if (is_admin($current_user) && $_REQUEST['module'] != 'DynamicLayout' && !empty($_SESSION['editinplace'])) {
    $xtpl->assign("ADMIN_EDIT", "<a href='index.php?action=index&module=DynamicLayout&from_action=".$_REQUEST['action'] ."&from_module=".$_REQUEST['module'] ."&record=".$_REQUEST['record']. "'>".SugarThemeRegistry::current()->getImage("EditLayout", "border='0' align='bottom'", null, null, '.gif', $mod_strings['LBL_EDIT_LAYOUT'])."</a>");
}

// Build the per-user, per-day cache file name for the ROI chart XML.
$seps = array("-", "/");
$dates = array(date($GLOBALS['timedate']->dbDayFormat), $GLOBALS['timedate']->dbDayFormat);
$dateFileNameSafe = str_replace($seps, "_", $dates);
$cache_file_name_roi = $current_user->getUserPrivGuid() . "_campaign_response_by_roi_" . $dateFileNameSafe[0] . "_" . $dateFileNameSafe[1] . ".xml";

$chart = new campaign_charts();
// NOTE(review): the original code branched on $latest_marketing_id here, but
// both branches issued the identical call and the variable was only ever
// assigned in commented-out code, so the branch has been collapsed.
$xtpl->assign("MY_CHART_ROI", $chart->campaign_response_roi_popup($app_list_strings['roi_type_dom'], $app_list_strings['roi_type_dom'], $campaign_id, sugar_cached("xml/") . $cache_file_name_roi, true));

$xtpl->parse("main");
$xtpl->out("main");
| gcoop-libre/SuiteCRM | modules/Campaigns/PopupCampaignRoi.php | PHP | agpl-3.0 | 9,312 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Renames ``Notification.location`` to ``Notification.facility``.

    Performed as alter -> rename -> alter so the field ends up pointing at
    ``organizations.Facility`` under its new name with no leftover
    ``verbose_name`` override.
    """

    dependencies = [
        ('organizations', '0002_migrate_locations_to_facilities'),
        ('notifications', '0003_auto_20150912_2049'),
    ]

    operations = [
        # Point the still-named 'location' field at organizations.Facility,
        # labelling it 'facility' via verbose_name.
        migrations.AlterField(
            model_name='notification',
            name='location',
            field=models.ForeignKey(verbose_name='facility', to='organizations.Facility'),
        ),
        # Rename the field to match its new target model.
        migrations.RenameField(
            model_name='notification',
            old_name='location',
            new_name='facility',
        ),
        # Drop the now-redundant verbose_name override.
        migrations.AlterField(
            model_name='notification',
            name='facility',
            field=models.ForeignKey(to='organizations.Facility'),
        ),
    ]
| alper/volunteer_planner | notifications/migrations/0004_auto_20151003_2033.py | Python | agpl-3.0 | 847 |
/* Hide default login label, so it doesn't visibly switch from one to the other with the JS */
#login_form label[for=pseudonym_session_unique_id]>span { display: none; } | mattattack7/canvas-contrib | Branding/JS_and_CSS/change_login_username/change_login_username.css | CSS | agpl-3.0 | 169 |
/*
Theme Name: Recline
Description: Layout and styling for reclinejs.com
Author: Sam Smith
Author URI: http://www.mintcanary.com/
*/
/* --------------------------------------------------
Table of Contents
-----------------------------------------------------
:: General Styles
:: Layout
::
::
::
*/
/* ---------------------------------------------------
General Styles
--------------------------------------------------- */
@import url(http://fonts.googleapis.com/css?family=Open+Sans:400,400italic,700);
h1, h2, h3, h4, h5, h6 {
font-family:'Open Sans', Helvetica, Arial, sans-serif;
}
a {
color: #c7231d;
}
a:hover {
color: #bc130e;
}
/* ---------------------------------------------------
Layout
--------------------------------------------------- */
/* Top navigation bar: logo, brand text and menu items. */
.navbar .logo-icon img {
margin-top: 6px;
margin-right: 8px;
height: 34px;
}
.navbar .brand {
font-family:'Open Sans', Helvetica, Arial, sans-serif;
font-style:italic;
font-size:18px;
font-weight:400;
letter-spacing:-1px;
line-height: 32px;
}
.navbar .nav > li > a {
padding: 15px 10px;
}
.navbar .divider-vertical {
height: 50px;
}
/* Dark navbar background; vendor-prefixed gradients cover legacy browsers. */
.navbar-inner {
height:50px;
background: #303030; /* Old browsers */
background: -moz-linear-gradient(top, #303030 0%, #2d2d2d 100%); /* FF3.6+ */
background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#303030), color-stop(100%,#2d2d2d)); /* Chrome,Safari4+ */
background: -webkit-linear-gradient(top, #303030 0%,#2d2d2d 100%); /* Chrome10+,Safari5.1+ */
background: -o-linear-gradient(top, #303030 0%,#2d2d2d 100%); /* Opera 11.10+ */
background: -ms-linear-gradient(top, #303030 0%,#2d2d2d 100%); /* IE10+ */
background: linear-gradient(top, #303030 0%,#2d2d2d 100%); /* W3C */
filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#303030', endColorstr='#2d2d2d',GradientType=0 ); /* IE6-9 */
-webkit-box-shadow:none;
-moz-box-shadow: none;
box-shadow: none;
}
/* Offset the page body below the fixed navbar. */
body {
padding-top: 60px;
}
section {
padding-top:20px;
}
/* Front-page hero: dark gradient header with background screenshot. */
.home-page.page-header {
margin-top:-10px;
background: #2d2d2d; /* Old browsers */
background: -moz-linear-gradient(top, #2d2d2d 0%, #040404 100%); /* FF3.6+ */
background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#2d2d2d), color-stop(100%,#040404)); /* Chrome,Safari4+ */
background: -webkit-linear-gradient(top, #2d2d2d 0%,#040404 100%); /* Chrome10+,Safari5.1+ */
background: -o-linear-gradient(top, #2d2d2d 0%,#040404 100%); /* Opera 11.10+ */
background: -ms-linear-gradient(top, #2d2d2d 0%,#040404 100%); /* IE10+ */
background: linear-gradient(top, #2d2d2d 0%,#040404 100%); /* W3C */
filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#2d2d2d', endColorstr='#040404',GradientType=0 ); /* IE6-9 */
color:#FFF;
padding:0px;
margin-bottom:0px;
border:none;
font-family:'Open Sans', Helvetica, Arial, sans-serif;
padding: 60px;
padding-bottom: 200px;
/* hide crocodile top to footer on front page */
margin-bottom: -30px;
}
.home-page.page-header a {
color:#FFF;
}
.home-page.page-header a.dotted {
border-color:#FFF;
}
.home-page.page-header .container {
background-image: url(images/header-screen.png);
background-repeat: no-repeat;
background-position: -3px 0px;
}
.home-page.page-header .inner {
padding:0px 0px 30px 40px;
font-size:16px;
line-height: 23px;
width: 400px;
}
.home-page.page-header:after {
margin-top:-14px;
}
section.grey {
background-color:#f5f5f5;
}
section.grey:after {
background-position: center -50px;
}
/* Site footer: dark background with a zigzag sprite along its top edge. */
.footer {
background-color:#040404;
color:#CCC;
margin-top: 30px;
}
/* Generated-content strip that draws the zigzag divider above the footer. */
.footer:before {
content: " ";
height:14px;
display:block;
background-image: url(images/zigzags.png);
background-repeat: repeat-x;
background-position: center -100px;
margin-top:-34px;
}
.footer:after {
display:none;
}
.footer .row {
margin-top:15px;
margin-bottom:15px;
}
.footer a {
color:#CCC;
}
.footer a.btn {
color:#333333;
}
.tutorials .well {
height: 60px;
}
.tutorials:last-child {
margin-bottom: 200px;
}
/** Code / Pre **/
.container pre {
padding: 10px 15px;
border: 1px solid #ccc;
background: white;
color: #444;
box-shadow: 0 0 15px #ddd;
-moz-box-shadow: 0 0 15px #ddd;
-webkit-box-shadow: 0 0 15px #ddd;
-webkit-border-radius: 0;
-moz-border-radius: 0;
border-radius: 0;
}
.doc-ex-rendered {
margin-bottom: 20px;
}
| datalocale/drupal7 | sites/all/libraries/recline/css/site/site.css | CSS | agpl-3.0 | 4,405 |
/*
Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Afrikaans ('af') UI strings for the CKEditor "blockquote" plugin.
CKEDITOR.plugins.setLang( 'blockquote', 'af', {
	toolbar: 'Sitaatblok'
} );
| ging/vish_orange | lib/plugins/ediphy/app/assets/javascripts/lib/ckeditor/plugins/blockquote/blockquote/lang/af.js | JavaScript | agpl-3.0 | 219 |
# Migration: add an integer app_id column to the emails table
# (presumably a reference to an apps record -- confirm against the model).
class AddAppIdToEmails < ActiveRecord::Migration
  def change
    add_column :emails, :app_id, :integer
  end
end
| WebsterFolksLabs/cuttlefish | db/migrate/20130425025753_add_app_id_to_emails.rb | Ruby | agpl-3.0 | 114 |
--
-- Tigase Jabber/XMPP Server
-- Copyright (C) 2004-2012 "Artur Hefczyc" <artur.hefczyc@tigase.org>
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as published by
-- the Free Software Foundation, either version 3 of the License.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU Affero General Public License for more details.
--
-- You should have received a copy of the GNU Affero General Public License
-- along with this program. Look for COPYING file in the top folder.
-- If not, see http://www.gnu.org/licenses/.
--
-- $Rev: $
-- Last modified by $Author: $
-- $Date: $
-- Database stored procedures and fucntions for Tigase schema version 4.0.0
-- QUERY START:
-- Bootstrap: install the plpgsql procedural language if this PostgreSQL
-- instance does not have it yet, then drop the one-shot helper function.
CREATE OR REPLACE FUNCTION public.create_plpgsql_language ()
        RETURNS TEXT
        AS $$
            CREATE LANGUAGE plpgsql;
            SELECT 'language plpgsql created'::TEXT;
        $$
LANGUAGE 'sql';
-- Only call the helper when pg_language shows plpgsql is missing.
SELECT CASE WHEN
              (SELECT true::BOOLEAN
                 FROM pg_language
                WHERE lanname='plpgsql')
            THEN
              (SELECT 'language already installed'::TEXT)
            ELSE
              (SELECT public.create_plpgsql_language())
            END;
DROP FUNCTION public.create_plpgsql_language ();
-- QUERY END:
-- QUERY START:
-- Database properties get - function
-- Returns the value stored under the given key for the pseudo-user
-- 'db-properties', or NULL when the key is absent.
create or replace function TigGetDBProperty(varchar(255)) returns text as '
declare
  _result text;
  _tkey alias for $1;
begin
	select pval into _result from tig_pairs, tig_users
	  where (pkey = _tkey) AND (lower(user_id) = lower(''db-properties''))
					AND (tig_pairs.uid = tig_users.uid);
	return _result;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Database properties set - procedure
-- Upserts a key/value pair under the pseudo-user 'db-properties':
-- updates the existing row when the key exists, inserts otherwise.
create or replace function TigPutDBProperty(varchar(255), text) returns void as '
declare
  _tkey alias for $1;
  _tval alias for $2;
begin
	if exists( select pval from tig_pairs, tig_users where
			(lower(user_id) = lower(''db-properties'')) AND (tig_users.uid = tig_pairs.uid)
			AND (pkey = _tkey))
	then
		update tig_pairs set pval = _tval from tig_users
			where (lower(tig_users.user_id) = lower(''db-properties''))
						AND (tig_users.uid = tig_pairs.uid)
						AND (pkey = _tkey);
	else
		insert into tig_pairs (pkey, pval, uid)
			select _tkey, _tval, uid from tig_users
				where (lower(user_id) = lower(''db-properties''));
	end if;
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- The initialization of the database.
-- The procedure should be called manually somehow before starting the
-- server. In theory the server could call the procedure automatically
-- at the startup time but I don't know yet how to solve the problem
-- with multiple cluster nodes starting at later time when the server
-- is already running.
-- Resets every user's online_status counter to 0 (clears stale sessions).
create or replace function TigInitdb() returns void as '
begin
  update tig_users set online_status = 0;
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Add a new user to the database assuming the user password is already
-- encoded properly according to the database settings.
-- If password is not encoded TigAddUserPlainPw should be used instead.
-- Returns the new user's uid, or NULL if an identical user_id/password
-- pair already exists. Also creates the user's 'root' node in tig_nodes.
create or replace function TigAddUser(varchar(2049), varchar(255))
	returns bigint as '
declare
  _user_id alias for $1;
  _user_pw alias for $2;
	_res_uid bigint;
begin
	if exists( select uid from tig_users where
			(lower(user_id) = lower(_user_id)) AND (user_pw = _user_pw) )
	then
		return null;
	else
		insert into tig_users (user_id, user_pw)
			values (_user_id, _user_pw);
		select currval(''tig_users_uid_seq'') into _res_uid;
		insert into tig_nodes (parent_nid, uid, node)
			values (NULL, _res_uid, ''root'');
		return _res_uid as uid;
	end if;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Takes plain text user password and converts it to internal representation
-- and creates a new user account.
-- The hashing scheme is taken from the 'password-encoding' DB property;
-- an unknown/unset scheme stores the password as given.
create or replace function TigAddUserPlainPw(varchar(2049), varchar(255))
	returns bigint as '
declare
  _user_id alias for $1;
  _user_pw alias for $2;
	_enc text;
	_res_uid bigint;
begin
	select TigGetDBProperty(''password-encoding'') into _enc;
	select
		case _enc
			when ''MD5-PASSWORD'' then TigAddUser(_user_id, MD5(_user_pw))
			when ''MD5-USERID-PASSWORD'' then TigAddUser(_user_id, MD5(_user_id || _user_pw))
			when ''MD5-USERNAME-PASSWORD'' then
				TigAddUser(_user_id, MD5(split_part(_user_id, ''@'', 1) || _user_pw))
			else TigAddUser(_user_id, _user_pw)
		end into _res_uid;
	return _res_uid as uid;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Low level database user id as big number. Used only for performance reasons
-- and save database space. Besides JID is too large to server as UID
-- Case-insensitive lookup; returns NULL if the user does not exist.
create or replace function TigGetUserDBUid(varchar(2049)) returns bigint as '
declare
  _user_id alias for $1;
	res_uid bigint;
begin
	select uid into res_uid from tig_users where lower(user_id) = lower(_user_id);
	return res_uid;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Removes a user from the database
-- Deletes the user's pairs and nodes first, then the user row itself.
create or replace function TigRemoveUser(varchar(2049)) returns void as '
declare
  _user_id alias for $1;
	res_uid bigint;
begin
	select uid into res_uid from tig_users where lower(user_id) = lower(_user_id);
	delete from tig_pairs where uid = res_uid;
	delete from tig_nodes where uid = res_uid;
	delete from tig_users where uid = res_uid;
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Returns user's password from the database
-- (as stored, i.e. possibly already hashed per 'password-encoding').
create or replace function TigGetPassword(varchar(2049)) returns varchar(255) as '
declare
  _user_id alias for $1;
	res_pw varchar(255);
begin
	select user_pw into res_pw from tig_users where lower(user_id) = lower(_user_id);
	return res_pw;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Takes plain text user password and converts it to internal representation
-- according to the 'password-encoding' DB property before storing it.
create or replace function TigUpdatePasswordPlainPw(varchar(2049), varchar(255))
	returns void as '
declare
  _user_id alias for $1;
  _user_pw alias for $2;
	_enc text;
begin
	select TigGetDBProperty(''password-encoding'') into _enc;
	perform
		case _enc
			when ''MD5-PASSWORD'' then TigUpdatePassword(_user_id, MD5(_user_pw))
			when ''MD5-USERID-PASSWORD'' then
				TigUpdatePassword(_user_id, MD5(_user_id || _user_pw))
			when ''MD5-USERNAME-PASSWORD'' then
				TigUpdatePassword(_user_id, MD5(split_part(_user_id, ''@'', 1) || _user_pw))
			else TigUpdatePassword(_user_id, _user_pw)
		end;
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Variant of TigUpdatePasswordPlainPw SP with parameters in reverse order.
-- Some implementations require the parameters to be in the same order as
-- the update query.
create or replace function TigUpdatePasswordPlainPwRev(varchar(255), varchar(2049))
	returns void as '
declare
  _user_pw alias for $1;
  _user_id alias for $2;
begin
	perform TigUpdatePasswordPlainPw(_user_id, _user_pw);
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Update user password
-- Stores the value verbatim; callers must pre-encode it if required.
create or replace function TigUpdatePassword(varchar(2049), varchar(255))
	returns void as '
declare
  _user_id alias for $1;
  _user_pw alias for $2;
begin
	update tig_users set user_pw = _user_pw where lower(user_id) = lower(_user_id);
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- List all online users
-- NOTE(review): the RETURN statement precedes the SELECT, so the SELECT is
-- unreachable and this void function is effectively a no-op as written.
create or replace function TigOnlineUsers() returns void as '
begin
	return;
	select user_id, last_login, last_logout, online_status, failed_logins, account_status
		from tig_users where online_status > 0;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- List all offline users
-- NOTE(review): same unreachable-SELECT pattern as TigOnlineUsers; no-op.
create or replace function TigOfflineUsers() returns void as '
begin
	return;
	select user_id, last_login, last_logout, online_status, failed_logins, account_status
		from tig_users where online_status = 0;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- List of all users in database
-- Returns a set of user_id values (plain SQL function).
create or replace function TigAllUsers() returns setof varchar(2049) as
	'select user_id from tig_users;'
LANGUAGE 'sql';
-- create or replace function TigAllUsers() returns void as '
-- begin
-- 	return;
-- 	select user_id, last_login, last_logout, online_status, failed_logins, account_status
-- 		from tig_users;
-- end;
-- ' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- All users count
create or replace function TigAllUsersCount() returns bigint as '
declare
	res_cnt bigint;
begin
	select count(*) into res_cnt from tig_users;
	return res_cnt;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Performs user login for a plain text password, converting it to an internal
-- representation if necessary
-- The hashing scheme is read from the 'password-encoding' DB property and the
-- encoded password is delegated to TigUserLogin. Returns the user_id on
-- success, NULL on failure.
create or replace function TigUserLoginPlainPw(varchar(2049), varchar(255))
	returns varchar(2049) as '
declare
  _user_id alias for $1;
  _user_pw alias for $2;
	res_user_id varchar(2049);
	_enc text;
begin
	select TigGetDBProperty(''password-encoding'') into _enc;
	select
		case _enc
			when ''MD5-PASSWORD'' then TigUserLogin(_user_id, MD5(_user_pw))
			when ''MD5-USERID-PASSWORD'' then
				TigUserLogin(_user_id, MD5(_user_id || _user_pw))
			when ''MD5-USERNAME-PASSWORD'' then
				TigUserLogin(_user_id, MD5(split_part(_user_id, ''@'', 1) || _user_pw))
			else TigUserLogin(_user_id, _user_pw)
		end into res_user_id;
	return res_user_id;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Perform user login. It returns user_id upon success and NULL
-- on failure.
-- If the login is successful it also increases online_status and sets
-- last_login time to the current timestamp.
-- FIX(review): the UPDATE statements now match the user row with
-- lower(user_id) = lower(_user_id), consistent with the EXISTS check above and
-- with every other procedure in this schema. Previously they compared
-- case-sensitively, so a login supplied with different letter case would
-- authenticate without updating online_status/last_login (and failed logins
-- would not be counted).
create or replace function TigUserLogin(varchar(2049), varchar(255))
	returns varchar(2049) as '
declare
	_user_id alias for $1;
	_user_pw alias for $2;
	res_user_id varchar(2049);
begin
	if exists(select user_id from tig_users
		where (account_status > 0) AND (lower(user_id) = lower(_user_id)) AND (user_pw = _user_pw))
	then
		-- Success: bump the session counter and stamp the login time.
		update tig_users
			set online_status = online_status + 1, last_login = now()
			where lower(user_id) = lower(_user_id);
		select _user_id into res_user_id;
	else
		-- Failure: count it against the account and signal NULL to the caller.
		update tig_users set failed_logins = failed_logins + 1
			where lower(user_id) = lower(_user_id);
		select NULL into res_user_id;
	end if;
	return res_user_id;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- It decreases online_status (never below zero) and sets last_logout time to
-- the current timestamp.
-- FIX(review): the row is now matched case-insensitively with lower(),
-- consistent with TigUserLogin's lookup and the rest of this schema;
-- previously a JID with different letter case left the counters untouched.
create or replace function TigUserLogout(varchar(2049)) returns void as '
declare
	_user_id alias for $1;
begin
	update tig_users
		set online_status = greatest(online_status - 1, 0),
			last_logout = now()
		where lower(user_id) = lower(_user_id);
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Disable user account
-- (account_status = 0 blocks logins in TigUserLogin's EXISTS check).
create or replace function TigDisableAccount(varchar(2049)) returns void as '
declare
  _user_id alias for $1;
begin
	update tig_users set account_status = 0 where lower(user_id) = lower(_user_id);
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Enable user account
create or replace function TigEnableAccount(varchar(2049)) returns void as '
declare
  _user_id alias for $1;
begin
	update tig_users set account_status = 1 where lower(user_id) = lower(_user_id);
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Get list of all active user accounts
-- NOTE(review): the RETURN precedes the SELECT, so the SELECT is unreachable
-- and this void function is effectively a no-op as written.
create or replace function TigActiveAccounts() returns void as '
begin
	return;
	select user_id, last_login, last_logout, online_status, failed_logins, account_status
		from tig_users where account_status > 0;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Get list of all disabled user accounts
-- NOTE(review): same unreachable-SELECT pattern as TigActiveAccounts; no-op.
create or replace function TigDisabledAccounts() returns void as '
begin
	return;
	select user_id, last_login, last_logout, online_status, failed_logins, account_status
		from tig_users where account_status = 0;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Helper procedure for adding a new node
-- Inserts a tig_nodes row under the given parent for the given user and
-- returns the freshly assigned nid.
create or replace function TigAddNode(bigint, bigint, varchar(255))
	returns bigint as '
declare
	_parent_nid alias for $1;
	_uid alias for $2;
	_node alias for $3;
	res_nid bigint;
begin
	insert into tig_nodes (parent_nid, uid, node)
		values (_parent_nid, _uid, _node);
	select currval(''tig_nodes_nid_seq'') into res_nid;
	return res_nid;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- One-off migration: re-encode every user's plaintext ''password'' pair
-- through TigUpdatePasswordPlainPw (Tigase 2.x -> 4.0 schema conversion).
create or replace function TigUsers2Ver4Convert() returns void as '
declare
	user_row RECORD;
begin
	for user_row in
		select user_id, pval as password
			from tig_users, tig_pairs
			where tig_users.uid = tig_pairs.uid and pkey = ''password'' loop
		perform TigUpdatePasswordPlainPw(user_row.user_id, user_row.password);
	END LOOP;
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
-- QUERY START:
-- Upsert into tig_pairs keyed by (nid, uid, pkey): update the value when the
-- key exists for that node/user, insert a new row otherwise.
create or replace function TigUpdatePairs(bigint, bigint, varchar(255), text) returns void as '
declare
  _nid alias for $1;
  _uid alias for $2;
  _tkey alias for $3;
  _tval alias for $4;
begin
	if exists(select 1 from tig_pairs where nid = _nid and uid = _uid and pkey = _tkey)
	then
		update tig_pairs set pval = _tval where nid = _nid and uid = _uid and pkey = _tkey;
	else
		insert into tig_pairs (nid, uid, pkey, pval) values (_nid, _uid, _tkey, _tval);
	end if;
	return;
end;
' LANGUAGE 'plpgsql';
-- QUERY END:
| zooldk/tigase-server | database/postgresql-schema-4-sp.sql | SQL | agpl-3.0 | 13,541 |
package org.cbioportal.service.impl;
import java.math.BigDecimal;
import java.util.*;
import org.cbioportal.model.*;
import org.cbioportal.model.meta.GenericAssayMeta;
import org.cbioportal.persistence.MolecularDataRepository;
import org.cbioportal.service.GeneService;
import org.cbioportal.service.GenericAssayService;
import org.cbioportal.service.MolecularProfileService;
import org.cbioportal.service.SampleService;
import org.cbioportal.service.exception.MolecularProfileNotFoundException;
import org.cbioportal.service.util.ExpressionEnrichmentUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class ExpressionEnrichmentServiceImplTest extends BaseServiceImplTest {
@InjectMocks
private ExpressionEnrichmentServiceImpl enrichmentServiceImpl;
@Mock
private SampleService sampleService;
@Mock
private MolecularProfileService molecularProfileService;
@Mock
private MolecularDataRepository molecularDataRepository;
@Mock
private GeneService geneService;
@Spy
@InjectMocks
private ExpressionEnrichmentUtil expressionEnrichmentUtil;
@Mock
private GenericAssayService genericAssayService;
CancerStudy cancerStudy = new CancerStudy();
MolecularProfile geneMolecularProfile = new MolecularProfile();
MolecularProfileSamples molecularProfileSamples = new MolecularProfileSamples();
List<Sample> samples = new ArrayList<>();
Map<String, List<MolecularProfileCaseIdentifier>> molecularProfileCaseSets = new HashMap<>();
Map<String, List<MolecularProfileCaseIdentifier>> molecularProfilePatientLevelCaseSets = new HashMap<>();
// patient level only data
public static final String SAMPLE_ID5 = "sample_id5";
@Before
// Builds the shared fixture: one study, one molecular profile, four samples,
// and two molecular-profile case sets ("altered samples" = sample 1-2,
// "unaltered samples" = sample 3-4). A patient-level variant of the case sets
// additionally includes SAMPLE_ID5, which has no sample row/internal id --
// presumably to exercise patient-level-only data handling in the tests.
public void setup() throws MolecularProfileNotFoundException {
    // Study and profile metadata used by every enrichment call.
    cancerStudy.setReferenceGenome(ReferenceGenome.HOMO_SAPIENS_DEFAULT_GENOME_NAME);
    cancerStudy.setCancerStudyIdentifier(STUDY_ID);
    geneMolecularProfile.setCancerStudyIdentifier(STUDY_ID);
    geneMolecularProfile.setStableId(MOLECULAR_PROFILE_ID);
    geneMolecularProfile.setCancerStudy(cancerStudy);
    // The profile's sample order matches the comma-separated internal ids 1-4.
    molecularProfileSamples.setMolecularProfileId(MOLECULAR_PROFILE_ID);
    molecularProfileSamples.setCommaSeparatedSampleIds("1,2,3,4");
    // Four samples, each its own patient, all in the same study.
    Sample sample1 = new Sample();
    sample1.setStableId(SAMPLE_ID1);
    sample1.setInternalId(1);
    sample1.setCancerStudyIdentifier(STUDY_ID);
    sample1.setPatientId(1);
    samples.add(sample1);
    Sample sample2 = new Sample();
    sample2.setStableId(SAMPLE_ID2);
    sample2.setInternalId(2);
    sample2.setCancerStudyIdentifier(STUDY_ID);
    sample2.setPatientId(2);
    samples.add(sample2);
    Sample sample3 = new Sample();
    sample3.setStableId(SAMPLE_ID3);
    sample3.setInternalId(3);
    sample3.setCancerStudyIdentifier(STUDY_ID);
    sample3.setPatientId(3);
    samples.add(sample3);
    Sample sample4 = new Sample();
    sample4.setStableId(SAMPLE_ID4);
    sample4.setInternalId(4);
    sample4.setCancerStudyIdentifier(STUDY_ID);
    sample4.setPatientId(4);
    samples.add(sample4);
    // Case identifiers grouping samples 1-2 as "altered" and 3-4 as "unaltered".
    List<MolecularProfileCaseIdentifier> alteredSampleIdentifieres = new ArrayList<>();
    List<MolecularProfileCaseIdentifier> unalteredSampleIdentifieres = new ArrayList<>();
    List<MolecularProfileCaseIdentifier> unalteredPatientLevelSampleIdentifieres = new ArrayList<>();
    MolecularProfileCaseIdentifier caseIdentifier1 = new MolecularProfileCaseIdentifier();
    caseIdentifier1.setMolecularProfileId(MOLECULAR_PROFILE_ID);
    caseIdentifier1.setCaseId(SAMPLE_ID1);
    alteredSampleIdentifieres.add(caseIdentifier1);
    MolecularProfileCaseIdentifier caseIdentifier2 = new MolecularProfileCaseIdentifier();
    caseIdentifier2.setMolecularProfileId(MOLECULAR_PROFILE_ID);
    caseIdentifier2.setCaseId(SAMPLE_ID2);
    alteredSampleIdentifieres.add(caseIdentifier2);
    MolecularProfileCaseIdentifier caseIdentifier3 = new MolecularProfileCaseIdentifier();
    caseIdentifier3.setMolecularProfileId(MOLECULAR_PROFILE_ID);
    caseIdentifier3.setCaseId(SAMPLE_ID3);
    unalteredSampleIdentifieres.add(caseIdentifier3);
    unalteredPatientLevelSampleIdentifieres.add(caseIdentifier3);
    MolecularProfileCaseIdentifier caseIdentifier4 = new MolecularProfileCaseIdentifier();
    caseIdentifier4.setMolecularProfileId(MOLECULAR_PROFILE_ID);
    caseIdentifier4.setCaseId(SAMPLE_ID4);
    unalteredSampleIdentifieres.add(caseIdentifier4);
    unalteredPatientLevelSampleIdentifieres.add(caseIdentifier4);
    // patient level only data
    MolecularProfileCaseIdentifier caseIdentifier5 = new MolecularProfileCaseIdentifier();
    caseIdentifier5.setMolecularProfileId(MOLECULAR_PROFILE_ID);
    caseIdentifier5.setCaseId(SAMPLE_ID5);
    unalteredPatientLevelSampleIdentifieres.add(caseIdentifier5);
    molecularProfileCaseSets.put("altered samples", alteredSampleIdentifieres);
    molecularProfileCaseSets.put("unaltered samples", unalteredSampleIdentifieres);
    molecularProfilePatientLevelCaseSets.put("altered samples", alteredSampleIdentifieres);
    molecularProfilePatientLevelCaseSets.put("unaltered samples", unalteredPatientLevelSampleIdentifieres);
    // Mockito stubs backing the service under test.
    Mockito.when(molecularProfileService.getMolecularProfile(MOLECULAR_PROFILE_ID))
            .thenReturn(geneMolecularProfile);
    Mockito.when(molecularDataRepository.getCommaSeparatedSampleIdsOfMolecularProfile(MOLECULAR_PROFILE_ID))
            .thenReturn(molecularProfileSamples);
    Mockito.when(sampleService.fetchSamples(Arrays.asList(STUDY_ID, STUDY_ID, STUDY_ID, STUDY_ID),
            Arrays.asList(SAMPLE_ID3, SAMPLE_ID4, SAMPLE_ID1, SAMPLE_ID2), "ID")).thenReturn(samples);
}
@Test
public void getGenomicEnrichments() throws Exception {
    // Switch the shared profile (built in setUp) to an expression type so the
    // genomic (gene-based) enrichment path is exercised.
    geneMolecularProfile.setMolecularAlterationType(MolecularProfile.MolecularAlterationType.MRNA_EXPRESSION);

    // Two gene rows; each comma-separated value list holds one value per sample
    // in the profile's sample order — presumably SAMPLE_ID1..SAMPLE_ID4, matching
    // molecularProfileSamples stubbed in setUp (confirm against that fixture).
    List<GeneMolecularAlteration> molecularDataList = new ArrayList<GeneMolecularAlteration>();
    GeneMolecularAlteration geneMolecularAlteration1 = new GeneMolecularAlteration();
    geneMolecularAlteration1.setEntrezGeneId(ENTREZ_GENE_ID_2);
    geneMolecularAlteration1.setValues("2,3,2.1,3");
    molecularDataList.add(geneMolecularAlteration1);
    GeneMolecularAlteration geneMolecularAlteration2 = new GeneMolecularAlteration();
    geneMolecularAlteration2.setEntrezGeneId(ENTREZ_GENE_ID_3);
    geneMolecularAlteration2.setValues("1.1,5,2.3,3");
    molecularDataList.add(geneMolecularAlteration2);
    Mockito.when(molecularDataRepository.getGeneMolecularAlterationsIterableFast(MOLECULAR_PROFILE_ID))
        .thenReturn(molecularDataList);

    // Gene metadata returned for the two entrez ids used above.
    List<Gene> expectedGeneList = new ArrayList<>();
    Gene gene1 = new Gene();
    gene1.setEntrezGeneId(ENTREZ_GENE_ID_2);
    gene1.setHugoGeneSymbol(HUGO_GENE_SYMBOL_2);
    expectedGeneList.add(gene1);
    Gene gene2 = new Gene();
    gene2.setEntrezGeneId(ENTREZ_GENE_ID_3);
    gene2.setHugoGeneSymbol(HUGO_GENE_SYMBOL_3);
    expectedGeneList.add(gene2);
    Mockito.when(geneService.fetchGenes(Arrays.asList("2", "3"), "ENTREZ_GENE_ID", "SUMMARY"))
        .thenReturn(expectedGeneList);

    // Exercise: sample-level enrichment over the "altered"/"unaltered" case sets.
    List<GenomicEnrichment> result = enrichmentServiceImpl.getGenomicEnrichments(MOLECULAR_PROFILE_ID,
        molecularProfileCaseSets, EnrichmentType.SAMPLE);

    // One enrichment per gene; per-group mean/stddev and the two-sided p-value
    // are pinned to exact BigDecimal string representations.
    Assert.assertEquals(2, result.size());
    GenomicEnrichment expressionEnrichment = result.get(0);
    Assert.assertEquals(ENTREZ_GENE_ID_2, expressionEnrichment.getEntrezGeneId());
    Assert.assertEquals(HUGO_GENE_SYMBOL_2, expressionEnrichment.getHugoGeneSymbol());
    Assert.assertEquals(null, expressionEnrichment.getCytoband());
    Assert.assertEquals(2, expressionEnrichment.getGroupsStatistics().size());
    // Group order: "unaltered samples" first, then "altered samples".
    GroupStatistics unalteredGroupStats = expressionEnrichment.getGroupsStatistics().get(0);
    Assert.assertEquals("unaltered samples", unalteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.55"), unalteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.6363961030678927"), unalteredGroupStats.getStandardDeviation());
    GroupStatistics alteredGroupStats = expressionEnrichment.getGroupsStatistics().get(1);
    Assert.assertEquals("altered samples", alteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.5"), alteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.7071067811865476"), alteredGroupStats.getStandardDeviation());
    Assert.assertEquals(new BigDecimal("0.9475795430163914"), expressionEnrichment.getpValue());
    // Second gene: same structure, different expected statistics.
    expressionEnrichment = result.get(1);
    Assert.assertEquals(ENTREZ_GENE_ID_3, expressionEnrichment.getEntrezGeneId());
    Assert.assertEquals(HUGO_GENE_SYMBOL_3, expressionEnrichment.getHugoGeneSymbol());
    Assert.assertEquals(null, expressionEnrichment.getCytoband());
    Assert.assertEquals(2, expressionEnrichment.getGroupsStatistics().size());
    unalteredGroupStats = expressionEnrichment.getGroupsStatistics().get(0);
    Assert.assertEquals("unaltered samples", unalteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.65"), unalteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.4949747468305834"), unalteredGroupStats.getStandardDeviation());
    alteredGroupStats = expressionEnrichment.getGroupsStatistics().get(1);
    Assert.assertEquals("altered samples", alteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("3.05"), alteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("2.7577164466275352"), alteredGroupStats.getStandardDeviation());
    Assert.assertEquals(new BigDecimal("0.8716148250471419"), expressionEnrichment.getpValue());
}
@Test
public void getGenericAssayEnrichments() throws Exception {
    // Same numeric fixture as the genomic test, but routed through the
    // GENERIC_ASSAY alteration type, keyed by stable id instead of entrez id.
    geneMolecularProfile.setMolecularAlterationType(MolecularProfile.MolecularAlterationType.GENERIC_ASSAY);

    // Two generic-assay rows; values per sample in profile sample order
    // (presumably SAMPLE_ID1..SAMPLE_ID4 — confirm against setUp's fixture).
    List<GenericAssayMolecularAlteration> molecularDataList = new ArrayList<GenericAssayMolecularAlteration>();
    GenericAssayMolecularAlteration genericAssayMolecularAlteration1 = new GenericAssayMolecularAlteration();
    genericAssayMolecularAlteration1.setGenericAssayStableId(HUGO_GENE_SYMBOL_1);
    genericAssayMolecularAlteration1.setValues("2,3,2.1,3");
    molecularDataList.add(genericAssayMolecularAlteration1);
    GenericAssayMolecularAlteration genericAssayMolecularAlteration2 = new GenericAssayMolecularAlteration();
    genericAssayMolecularAlteration2.setGenericAssayStableId(HUGO_GENE_SYMBOL_2);
    genericAssayMolecularAlteration2.setValues("1.1,5,2.3,3");
    molecularDataList.add(genericAssayMolecularAlteration2);
    Mockito.when(molecularDataRepository.getGenericAssayMolecularAlterationsIterable(MOLECULAR_PROFILE_ID, null,
        "SUMMARY")).thenReturn(molecularDataList);
    // Meta lookup is stubbed for both stable ids in the same profile.
    Mockito.when(genericAssayService.getGenericAssayMetaByStableIdsAndMolecularIds(
        Arrays.asList(HUGO_GENE_SYMBOL_1, HUGO_GENE_SYMBOL_2),
        Arrays.asList(MOLECULAR_PROFILE_ID, MOLECULAR_PROFILE_ID), "SUMMARY"))
        .thenReturn(Arrays.asList(new GenericAssayMeta(HUGO_GENE_SYMBOL_1),
            new GenericAssayMeta(HUGO_GENE_SYMBOL_2)));

    // Exercise: sample-level generic assay enrichment.
    List<GenericAssayEnrichment> result = enrichmentServiceImpl.getGenericAssayEnrichments(MOLECULAR_PROFILE_ID,
        molecularProfileCaseSets, EnrichmentType.SAMPLE);

    // Expected statistics are identical to the genomic test because the
    // underlying value matrix is the same.
    Assert.assertEquals(2, result.size());
    GenericAssayEnrichment genericAssayEnrichment = result.get(0);
    Assert.assertEquals(HUGO_GENE_SYMBOL_1, genericAssayEnrichment.getStableId());
    Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size());
    GroupStatistics unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0);
    Assert.assertEquals("unaltered samples", unalteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.55"), unalteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.6363961030678927"), unalteredGroupStats.getStandardDeviation());
    GroupStatistics alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1);
    Assert.assertEquals("altered samples", alteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.5"), alteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.7071067811865476"), alteredGroupStats.getStandardDeviation());
    Assert.assertEquals(new BigDecimal("0.9475795430163914"), genericAssayEnrichment.getpValue());
    genericAssayEnrichment = result.get(1);
    Assert.assertEquals(HUGO_GENE_SYMBOL_2, genericAssayEnrichment.getStableId());
    Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size());
    unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0);
    Assert.assertEquals("unaltered samples", unalteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.65"), unalteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.4949747468305834"), unalteredGroupStats.getStandardDeviation());
    alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1);
    Assert.assertEquals("altered samples", alteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("3.05"), alteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("2.7577164466275352"), alteredGroupStats.getStandardDeviation());
    Assert.assertEquals(new BigDecimal("0.8716148250471419"), genericAssayEnrichment.getpValue());
}
@Test
public void getGenericAssayPatientLevelEnrichments() throws Exception {
    // Patient-level variant: the profile is flagged patientLevel so samples of
    // the same patient must be de-duplicated before computing statistics.
    geneMolecularProfile.setMolecularAlterationType(MolecularProfile.MolecularAlterationType.GENERIC_ASSAY);
    geneMolecularProfile.setPatientLevel(true);

    // Value lists include extra entries for the patient-level 5th sample
    // (second row has one fewer value than the first — NOTE(review): looks
    // deliberate for the de-duplication logic; confirm against the service).
    List<GenericAssayMolecularAlteration> molecularDataList = new ArrayList<GenericAssayMolecularAlteration>();
    GenericAssayMolecularAlteration genericAssayMolecularAlteration1 = new GenericAssayMolecularAlteration();
    genericAssayMolecularAlteration1.setGenericAssayStableId(HUGO_GENE_SYMBOL_1);
    genericAssayMolecularAlteration1.setValues("2,3,2.1,3,3,3");
    molecularDataList.add(genericAssayMolecularAlteration1);
    GenericAssayMolecularAlteration genericAssayMolecularAlteration2 = new GenericAssayMolecularAlteration();
    genericAssayMolecularAlteration2.setGenericAssayStableId(HUGO_GENE_SYMBOL_2);
    genericAssayMolecularAlteration2.setValues("1.1,5,2.3,3,3");
    molecularDataList.add(genericAssayMolecularAlteration2);
    Mockito.when(molecularDataRepository.getGenericAssayMolecularAlterationsIterable(MOLECULAR_PROFILE_ID, null,
        "SUMMARY")).thenReturn(molecularDataList);
    Mockito.when(genericAssayService.getGenericAssayMetaByStableIdsAndMolecularIds(
        Arrays.asList(HUGO_GENE_SYMBOL_1, HUGO_GENE_SYMBOL_2),
        Arrays.asList(MOLECULAR_PROFILE_ID, MOLECULAR_PROFILE_ID), "SUMMARY"))
        .thenReturn(Arrays.asList(new GenericAssayMeta(HUGO_GENE_SYMBOL_1),
            new GenericAssayMeta(HUGO_GENE_SYMBOL_2)));
    // add 5th sample which is the second sample of patient 4
    Sample sample5 = new Sample();
    sample5.setStableId(SAMPLE_ID5);
    sample5.setInternalId(5);
    sample5.setCancerStudyIdentifier(STUDY_ID);
    sample5.setPatientId(4);
    samples.add(sample5);
    // Re-stub fetchSamples to cover the 5-sample request issued for this test.
    Mockito.when(sampleService.fetchSamples(Arrays.asList(STUDY_ID, STUDY_ID, STUDY_ID, STUDY_ID, STUDY_ID), Arrays.asList(SAMPLE_ID3, SAMPLE_ID4, SAMPLE_ID5, SAMPLE_ID1, SAMPLE_ID2), "ID")).thenReturn(samples);

    // Exercise with the patient-level case sets built in setUp.
    List<GenericAssayEnrichment> result = enrichmentServiceImpl.getGenericAssayEnrichments(MOLECULAR_PROFILE_ID, molecularProfilePatientLevelCaseSets, EnrichmentType.SAMPLE);

    // Statistics must equal the plain sample-level results: the duplicate
    // sample of patient 4 is expected to be collapsed away.
    Assert.assertEquals(2, result.size());
    GenericAssayEnrichment genericAssayEnrichment = result.get(0);
    Assert.assertEquals(HUGO_GENE_SYMBOL_1, genericAssayEnrichment.getStableId());
    Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size());
    GroupStatistics unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0);
    Assert.assertEquals("unaltered samples", unalteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.55"), unalteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.6363961030678927"), unalteredGroupStats.getStandardDeviation());
    GroupStatistics alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1);
    Assert.assertEquals("altered samples", alteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.5"), alteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.7071067811865476"), alteredGroupStats.getStandardDeviation());
    Assert.assertEquals(new BigDecimal("0.9475795430163914"), genericAssayEnrichment.getpValue());
    genericAssayEnrichment = result.get(1);
    Assert.assertEquals(HUGO_GENE_SYMBOL_2, genericAssayEnrichment.getStableId());
    Assert.assertEquals(2, genericAssayEnrichment.getGroupsStatistics().size());
    unalteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(0);
    Assert.assertEquals("unaltered samples", unalteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("2.65"), unalteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("0.4949747468305834"), unalteredGroupStats.getStandardDeviation());
    alteredGroupStats = genericAssayEnrichment.getGroupsStatistics().get(1);
    Assert.assertEquals("altered samples", alteredGroupStats.getName());
    Assert.assertEquals(new BigDecimal("3.05"), alteredGroupStats.getMeanExpression());
    Assert.assertEquals(new BigDecimal("2.7577164466275352"), alteredGroupStats.getStandardDeviation());
    Assert.assertEquals(new BigDecimal("0.8716148250471419"), genericAssayEnrichment.getpValue());
}
}
| onursumer/cbioportal | service/src/test/java/org/cbioportal/service/impl/ExpressionEnrichmentServiceImplTest.java | Java | agpl-3.0 | 18,869 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import erpnext
import unittest
from frappe.utils import nowdate, add_days
from erpnext.tests.utils import create_test_contact_and_address
from erpnext.stock.doctype.delivery_trip.delivery_trip import notify_customers, get_contact_and_address
class TestDeliveryTrip(unittest.TestCase):
	def setUp(self):
		"""Create the fixtures the trip needs; each helper is idempotent."""
		create_driver()
		create_vehicle()
		create_delivery_notfication()
		create_test_contact_and_address()

	def test_delivery_trip(self):
		"""Create a Delivery Trip with one stop and verify notify_customers
		does not mark the stop as notified (sending is expected to no-op in
		the test environment)."""
		contact = get_contact_and_address("_Test Customer")

		if not frappe.db.exists("Delivery Trip", "TOUR-00000"):
			delivery_trip = frappe.new_doc("Delivery Trip")
			delivery_trip.company = erpnext.get_default_company()
			delivery_trip.date = add_days(nowdate(), 5)
			delivery_trip.driver = "DRIVER-00001"
			delivery_trip.vehicle = "JB 007"
			delivery_trip.append("delivery_stops", {
				"customer": "_Test Customer",
				"address": contact.shipping_address.parent,
				"contact": contact.contact_person.parent
			})
			delivery_trip.delivery_notification = 'Delivery Notification'
			delivery_trip.insert()

			sender_email = frappe.db.get_value("User", frappe.session.user, "email")
			notify_customers(docname=delivery_trip.name, date=delivery_trip.date, driver=delivery_trip.driver,
				vehicle=delivery_trip.vehicle,
				sender_email=sender_email, delivery_notification=delivery_trip.delivery_notification)

			# assertEqual: assertEquals is a deprecated alias in unittest.
			self.assertEqual(delivery_trip.get("delivery_stops")[0].notified_by_email, 0)
def create_driver():
	"""Insert the fixture Driver record used by delivery trip tests.

	Idempotent: returns immediately when the record already exists.
	"""
	if frappe.db.exists("Driver", "Newton Scmander"):
		return
	doc = frappe.new_doc("Driver")
	doc.full_name = "Newton Scmander"
	doc.cell_number = "98343424242"
	doc.license_number = "B809"
	doc.insert()
def create_delivery_notfication():
	"""Insert the 'Delivery Notification' Standard Reply fixture if missing.

	NOTE: the function name keeps its original misspelling ("notfication")
	because callers reference it by that name.
	"""
	if frappe.db.exists("Standard Reply", "Delivery Notification"):
		return
	reply = frappe.get_doc({
		'doctype': 'Standard Reply',
		'name': 'Delivery Notification',
		'response': 'Test Delivery Trip',
		'subject': 'Test Subject',
		'owner': frappe.session.user
	})
	reply.insert()
def create_vehicle():
	"""Insert the fixture Vehicle 'JB 007' used by delivery trip tests.

	Idempotent: does nothing when the vehicle already exists.
	"""
	if frappe.db.exists("Vehicle", "JB 007"):
		return
	fields = {
		"doctype": "Vehicle",
		"license_plate": "JB 007",
		"make": "Maruti",
		"model": "PCM",
		"last_odometer": 5000,
		"acquisition_date": frappe.utils.nowdate(),
		"location": "Mumbai",
		"chassis_no": "1234ABCD",
		"uom": "Litre",
		"vehicle_value": frappe.utils.flt(500000)
	}
	frappe.get_doc(fields).insert()
| indictranstech/erpnext | erpnext/stock/doctype/delivery_trip/test_delivery_trip.py | Python | agpl-3.0 | 2,568 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from cStringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
from nupic.support.unittesthelpers.algorithm_test_helpers import getSeed
from nupic.encoders.random_distributed_scalar import (
RandomDistributedScalarEncoder
)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.random_distributed_scalar_capnp import (
RandomDistributedScalarEncoderProto
)
# Disable warnings about accessing protected members
# pylint: disable=W0212
def computeOverlap(x, y):
  """
  Return the overlap of two binary arrays: the count of positions i where
  both x[i] and y[i] are 1.
  """
  return numpy.sum(x & y)
def validateEncoder(encoder, subsampling):
  """
  Verify the encoder's overlap statistics: every checked bucket-index pair
  must satisfy encoder._overlapOK. The inner range is strided by
  `subsampling` so we don't check every combination (speed).
  Returns True when all checked pairs are OK, False at the first bad pair.
  """
  upper = encoder.maxIndex + 1
  return all(
      encoder._overlapOK(first, second)
      for first in range(encoder.minIndex, upper)
      for second in range(first + 1, upper, subsampling)
  )
class RandomDistributedScalarEncoderTest(unittest.TestCase):
  """
  Unit tests for RandomDistributedScalarEncoder class.
  """

  def testEncoding(self):
    """
    Test basic encoding functionality. Create encodings without crashing and
    check they contain the correct number of on and off bits. Check some
    encodings for expected overlap. Test that encodings for old values don't
    change once we generate new buckets.
    """
    # Initialize with non-default parameters and encode with a number close to
    # the offset
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=23, n=500, offset=0.0)
    e0 = encoder.encode(-0.1)

    self.assertEqual(e0.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e0.size, 500, "Width of the vector is incorrect")
    self.assertEqual(encoder.getBucketIndices(0.0)[0], encoder._maxBuckets / 2,
                     "Offset doesn't correspond to middle bucket")
    self.assertEqual(len(encoder.bucketMap), 1, "Number of buckets is not 1")

    # Encode with a number that is resolution away from offset. Now we should
    # have two buckets and this encoding should be one bit away from e0
    e1 = encoder.encode(1.0)
    self.assertEqual(len(encoder.bucketMap), 2, "Number of buckets is not 2")
    self.assertEqual(e1.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e1.size, 500, "Width of the vector is incorrect")
    self.assertEqual(computeOverlap(e0, e1), 22, "Overlap is not equal to w-1")

    # Encode with a number that is resolution*w away from offset. Now we should
    # have many buckets and this encoding should have very little overlap with
    # e0
    e25 = encoder.encode(25.0)
    self.assertGreater(len(encoder.bucketMap), 23,
                       "Number of buckets is not 2")
    self.assertEqual(e25.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e25.size, 500, "Width of the vector is incorrect")
    self.assertLess(computeOverlap(e0, e25), 4, "Overlap is too high")

    # Test encoding consistency. The encodings for previous numbers
    # shouldn't change even though we have added additional buckets
    self.assertTrue(numpy.array_equal(e0, encoder.encode(-0.1)),
                    "Encodings are not consistent - they have changed after new buckets "
                    "have been created")
    self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)),
                    "Encodings are not consistent - they have changed after new buckets "
                    "have been created")

  def testMissingValues(self):
    """
    Test that missing values and NaN return all zero's.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
    empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(empty.sum(), 0)
    empty = encoder.encode(float("nan"))
    self.assertEqual(empty.sum(), 0)

  def testResolution(self):
    """
    Test that numbers within the same resolution return the same encoding.
    Numbers outside the resolution should return different encodings.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)

    # Since 23.0 is the first encoded number, it will be the offset.
    # Since resolution is 1, 22.9 and 23.4 should have the same bucket index and
    # encoding.
    e23 = encoder.encode(23.0)
    e23p1 = encoder.encode(23.1)
    e22p9 = encoder.encode(22.9)
    e24 = encoder.encode(24.0)
    self.assertEqual(e23.sum(), encoder.w)
    self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(),
                     "Numbers within resolution don't have the same encoding")
    self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(),
                     "Numbers within resolution don't have the same encoding")
    self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(),
                        "Numbers outside resolution have the same encoding")
    # 22.5 is more than half a resolution away from the 23.0 offset, so it
    # falls in a different bucket.
    e22p9 = encoder.encode(22.5)
    self.assertNotEqual((e23 == e22p9).sum(), encoder.getWidth(),
                        "Numbers outside resolution have the same encoding")

  def testMapBucketIndexToNonZeroBits(self):
    """
    Test that mapBucketIndexToNonZeroBits works and that max buckets and
    clipping are handled properly.
    """
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
    # Set a low number of max buckets
    encoder._initializeBucketMap(10, None)
    encoder.encode(0.0)
    encoder.encode(-7.0)
    encoder.encode(7.0)

    self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets,
                     "_maxBuckets exceeded")
    # Out-of-range bucket indices are clipped to the first / last bucket.
    self.assertTrue(
        numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(-1),
                          encoder.bucketMap[0]),
        "mapBucketIndexToNonZeroBits did not handle negative"
        " index")
    self.assertTrue(
        numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000),
                          encoder.bucketMap[9]),
        "mapBucketIndexToNonZeroBits did not handle negative index")

    # Values beyond the bucket range clip to the same encoding.
    e23 = encoder.encode(23.0)
    e6 = encoder.encode(6)
    self.assertEqual((e23 == e6).sum(), encoder.getWidth(),
                     "Values not clipped correctly during encoding")

    ep8 = encoder.encode(-8)
    ep7 = encoder.encode(-7)
    self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(),
                     "Values not clipped correctly during encoding")

    self.assertEqual(encoder.getBucketIndices(-8)[0], 0,
                     "getBucketIndices returned negative bucket index")
    self.assertEqual(encoder.getBucketIndices(23)[0], encoder._maxBuckets - 1,
                     "getBucketIndices returned bucket index that is too"
                     " large")

  def testParameterChecks(self):
    """
    Test that some bad construction parameters get handled.
    """
    # n must be >= 6*w
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=int(5.9 * 21))

    # n must be an int
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=5.9 * 21)

    # w can't be negative
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, w=-1)

    # resolution can't be negative
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=-2)

  def testOverlapStatistics(self):
    """
    Check that the overlaps for the encodings are within the expected range.
    Here we ask the encoder to create a bunch of representations under somewhat
    stressful conditions, and then verify they are correct. We rely on the fact
    that the _overlapOK and _countOverlapIndices methods are working correctly.
    """
    seed = getSeed()

    # Generate about 600 encodings. Set n relatively low to increase
    # chance of false overlaps
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150,
                                             seed=seed)
    encoder.encode(0.0)
    encoder.encode(-300.0)
    encoder.encode(300.0)
    self.assertTrue(validateEncoder(encoder, subsampling=3),
                    "Illegal overlap encountered in encoder")

  def testGetMethods(self):
    """
    Test that the getWidth, getDescription, and getDecoderOutputFieldTypes
    methods work.
    """
    encoder = RandomDistributedScalarEncoder(name="theName", resolution=1.0, n=500)
    self.assertEqual(encoder.getWidth(), 500,
                     "getWidth doesn't return the correct result")

    self.assertEqual(encoder.getDescription(), [("theName", 0)],
                     "getDescription doesn't return the correct result")

    self.assertEqual(encoder.getDecoderOutputFieldTypes(),
                     (FieldMetaType.float, ),
                     "getDecoderOutputFieldTypes doesn't return the correct"
                     " result")

  def testOffset(self):
    """
    Test that offset is working properly
    """
    # When no offset is given, the first encoded value becomes the offset.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
    encoder.encode(23.0)
    self.assertEqual(encoder._offset, 23.0,
                     "Offset not specified and not initialized to first input")

    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             offset=25.0)
    encoder.encode(23.0)
    self.assertEqual(encoder._offset, 25.0,
                     "Offset not initialized to specified constructor"
                     " parameter")

  def testSeed(self):
    """
    Test that initializing twice with the same seed returns identical encodings
    and different when not specified
    """
    encoder1 = RandomDistributedScalarEncoder(name="encoder1", resolution=1.0,
                                              seed=42)
    encoder2 = RandomDistributedScalarEncoder(name="encoder2", resolution=1.0,
                                              seed=42)
    # seed=-1 means "pick a random seed" — two such encoders must differ.
    encoder3 = RandomDistributedScalarEncoder(name="encoder3", resolution=1.0,
                                              seed=-1)
    encoder4 = RandomDistributedScalarEncoder(name="encoder4", resolution=1.0,
                                              seed=-1)

    e1 = encoder1.encode(23.0)
    e2 = encoder2.encode(23.0)
    e3 = encoder3.encode(23.0)
    e4 = encoder4.encode(23.0)

    self.assertEqual((e1 == e2).sum(), encoder1.getWidth(),
                     "Same seed gives rise to different encodings")

    self.assertNotEqual((e1 == e3).sum(), encoder1.getWidth(),
                        "Different seeds gives rise to same encodings")

    self.assertNotEqual((e3 == e4).sum(), encoder1.getWidth(),
                        "seeds of -1 give rise to same encodings")

  def testCountOverlapIndices(self):
    """
    Test that the internal method _countOverlapIndices works as expected.
    """
    # Create a fake set of encodings: six consecutive buckets whose bit sets
    # are shifted by one each time, so neighbours overlap by w-1, w-2, ...
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=5, n=5 * 20)
    midIdx = encoder._maxBuckets / 2
    encoder.bucketMap[midIdx - 2] = numpy.array(range(3, 8))
    encoder.bucketMap[midIdx - 1] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx] = numpy.array(range(5, 10))
    encoder.bucketMap[midIdx + 1] = numpy.array(range(6, 11))
    encoder.bucketMap[midIdx + 2] = numpy.array(range(7, 12))
    encoder.bucketMap[midIdx + 3] = numpy.array(range(8, 13))
    encoder.minIndex = midIdx - 2
    encoder.maxIndex = midIdx + 3

    # Indices must exist
    with self.assertRaises(ValueError):
      encoder._countOverlapIndices(midIdx - 3, midIdx - 2)
    with self.assertRaises(ValueError):
      encoder._countOverlapIndices(midIdx - 2, midIdx - 3)

    # Test some overlaps
    self.assertEqual(encoder._countOverlapIndices(midIdx - 2, midIdx - 2), 5,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx - 1, midIdx - 2), 4,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx + 1, midIdx - 2), 2,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx - 2, midIdx + 3), 0,
                     "_countOverlapIndices didn't work")

  def testOverlapOK(self):
    """
    Test that the internal method _overlapOK works as expected.
    """
    # Create a fake set of encodings.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=5, n=5 * 20)
    midIdx = encoder._maxBuckets / 2
    # midIdx-3 duplicates midIdx-1's bits, so they are "too close" despite
    # being two buckets apart — not OK with midIdx-1.
    encoder.bucketMap[midIdx - 3] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx - 2] = numpy.array(range(3, 8))
    encoder.bucketMap[midIdx - 1] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx] = numpy.array(range(5, 10))
    encoder.bucketMap[midIdx + 1] = numpy.array(range(6, 11))
    encoder.bucketMap[midIdx + 2] = numpy.array(range(7, 12))
    encoder.bucketMap[midIdx + 3] = numpy.array(range(8, 13))
    encoder.minIndex = midIdx - 3
    encoder.maxIndex = midIdx + 3

    self.assertTrue(encoder._overlapOK(midIdx, midIdx - 1),
                    "_overlapOK didn't work")
    self.assertTrue(encoder._overlapOK(midIdx - 2, midIdx + 3),
                    "_overlapOK didn't work")
    self.assertFalse(encoder._overlapOK(midIdx - 3, midIdx - 1),
                     "_overlapOK didn't work")

    # We'll just use our own numbers (third argument is an explicit overlap
    # count instead of reading the bucket map).
    self.assertTrue(encoder._overlapOK(100, 50, 0),
                    "_overlapOK didn't work for far values")
    self.assertTrue(encoder._overlapOK(100, 50, encoder._maxOverlap),
                    "_overlapOK didn't work for far values")
    self.assertFalse(encoder._overlapOK(100, 50, encoder._maxOverlap + 1),
                     "_overlapOK didn't work for far values")
    self.assertTrue(encoder._overlapOK(50, 50, 5),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(48, 50, 3),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(46, 50, 1),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(45, 50, encoder._maxOverlap),
                    "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(48, 50, 4),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(48, 50, 2),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(46, 50, 2),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(50, 50, 6),
                     "_overlapOK didn't work for near values")

  def testCountOverlap(self):
    """
    Test that the internal method _countOverlap works as expected.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             n=500)

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 5, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 6,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 5, 7])
    self.assertEqual(encoder._countOverlap(r1, r2), 5,
                     "_countOverlap result is incorrect")

    # Overlap is set intersection — element order doesn't matter.
    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([6, 5, 4, 3, 2, 1])
    self.assertEqual(encoder._countOverlap(r1, r2), 6,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 8, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 9, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 4,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3])
    self.assertEqual(encoder._countOverlap(r1, r2), 3,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([7, 8, 9, 10, 11, 12])
    r2 = numpy.array([1, 2, 3, 4, 5, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 0,
                     "_countOverlap result is incorrect")

  def testVerbosity(self):
    """
    Test that nothing is printed out when verbosity=0
    """
    # Capture stdout for the duration of the encode/getBucketIndices calls.
    _stdout = sys.stdout
    sys.stdout = _stringio = StringIO()
    encoder = RandomDistributedScalarEncoder(name="mv", resolution=1.0,
                                             verbosity=0)
    output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype)
    encoder.encodeIntoArray(23.0, output)
    encoder.getBucketIndices(23.0)
    sys.stdout = _stdout

    self.assertEqual(len(_stringio.getvalue()), 0,
                     "zero verbosity doesn't lead to zero output")

  def testEncodeInvalidInputType(self):
    # Non-numeric input must raise TypeError.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             verbosity=0)
    with self.assertRaises(TypeError):
      encoder.encode("String")

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    # Round-trip the encoder through its capnp proto (via a temp file) and
    # verify the deserialized encoder is equivalent to the original.
    original = RandomDistributedScalarEncoder(
        name="encoder", resolution=1.0, w=23, n=500, offset=0.0)

    originalValue = original.encode(1)

    proto1 = RandomDistributedScalarEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = RandomDistributedScalarEncoderProto.read(f)

    encoder = RandomDistributedScalarEncoder.read(proto2)

    self.assertIsInstance(encoder, RandomDistributedScalarEncoder)
    self.assertEqual(encoder.resolution, original.resolution)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.minIndex, original.minIndex)
    self.assertEqual(encoder.maxIndex, original.maxIndex)
    encodedFromOriginal = original.encode(1)
    encodedFromNew = encoder.encode(1)
    self.assertTrue(numpy.array_equal(encodedFromNew, originalValue))
    self.assertEqual(original.decode(encodedFromNew),
                     encoder.decode(encodedFromOriginal))
    self.assertEqual(original.random.getSeed(), encoder.random.getSeed())

    for key, value in original.bucketMap.items():
      self.assertTrue(numpy.array_equal(value, encoder.bucketMap[key]))
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
| badlogicmanpreet/nupic | tests/unit/nupic/encoders/random_distributed_scalar_test.py | Python | agpl-3.0 | 19,742 |
/*
Cubesat Space Protocol - A small network-layer protocol designed for Cubesats
Copyright (C) 2012 Gomspace ApS (http://www.gomspace.com)
Copyright (C) 2012 AAUSAT3 Project (http://aausat3.space.aau.dk)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
Inspired by c-pthread-queue by Matthew Dickinson
http://code.google.com/p/c-pthread-queue/
*/
#include <pthread.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <mach/clock.h>
#include <mach/mach.h>
/* CSP includes */
#include "pthread_queue.h"
/* Allocate and initialize a queue holding at most `length` items of
 * `item_size` bytes each.
 *
 * Returns a pointer to the new queue, or NULL on allocation or
 * synchronization-primitive initialization failure.
 *
 * Fix: the original tore down with free() only, leaking an already
 * initialized mutex/condvar when a later pthread_*_init call failed.
 * Initialization is now sequential so each failure path destroys
 * exactly what was successfully created. */
pthread_queue_t * pthread_queue_create(int length, size_t item_size) {

	pthread_queue_t * q = malloc(sizeof(pthread_queue_t));
	if (q == NULL)
		return NULL;

	q->buffer = malloc(length * item_size);
	if (q->buffer == NULL) {
		free(q);
		return NULL;
	}

	q->size = length;
	q->item_size = item_size;
	q->items = 0;
	q->in = 0;
	q->out = 0;

	if (pthread_mutex_init(&(q->mutex), NULL) != 0) {
		free(q->buffer);
		free(q);
		return NULL;
	}

	if (pthread_cond_init(&(q->cond_full), NULL) != 0) {
		pthread_mutex_destroy(&(q->mutex));
		free(q->buffer);
		free(q);
		return NULL;
	}

	if (pthread_cond_init(&(q->cond_empty), NULL) != 0) {
		pthread_cond_destroy(&(q->cond_full));
		pthread_mutex_destroy(&(q->mutex));
		free(q->buffer);
		free(q);
		return NULL;
	}

	return q;
}
/* Release a queue created by pthread_queue_create().
 * Accepts NULL as a no-op.
 * NOTE(review): mutex/condvars are not destroyed here, mirroring the
 * original behavior -- presumably acceptable for process-lifetime
 * queues; confirm before reusing elsewhere. */
void pthread_queue_delete(pthread_queue_t * q) {

	if (q != NULL) {
		free(q->buffer);
		free(q);
	}
}
/* Copy one item from `value` into the queue, blocking for at most
 * `timeout` milliseconds while the queue is full.
 *
 * Returns PTHREAD_QUEUE_OK on success or PTHREAD_QUEUE_FULL if the
 * wait failed (e.g. timed out).
 *
 * Fix: the nanosecond carry test used '>' instead of '>=', so when
 * tv_nsec + nsec landed exactly on 1e9 the second was dropped and the
 * deadline expired one second early. */
int pthread_queue_enqueue(pthread_queue_t * queue, void * value, uint32_t timeout) {

	int ret;

	/* Calculate absolute deadline from the Mach calendar (wall) clock,
	 * which is what pthread_cond_timedwait compares against */
	struct timespec ts;
	clock_serv_t cclock;
	mach_timespec_t mts;
	host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
	clock_get_time(cclock, &mts);
	mach_port_deallocate(mach_task_self(), cclock);
	ts.tv_sec = mts.tv_sec;
	ts.tv_nsec = mts.tv_nsec;
	uint32_t sec = timeout / 1000;
	uint32_t nsec = (timeout - 1000 * sec) * 1000000;
	ts.tv_sec += sec;
	if (ts.tv_nsec + nsec >= 1000000000)	/* was '>': lost a second at the exact boundary */
		ts.tv_sec++;
	ts.tv_nsec = (ts.tv_nsec + nsec) % 1000000000;

	/* Get queue lock */
	pthread_mutex_lock(&(queue->mutex));
	while (queue->items == queue->size) {
		ret = pthread_cond_timedwait(&(queue->cond_full), &(queue->mutex), &ts);
		if (ret != 0) {
			pthread_mutex_unlock(&(queue->mutex));
			return PTHREAD_QUEUE_FULL;
		}
	}

	/* Copy object from input buffer */
	memcpy(queue->buffer+(queue->in * queue->item_size), value, queue->item_size);
	queue->items++;
	queue->in = (queue->in + 1) % queue->size;
	pthread_mutex_unlock(&(queue->mutex));

	/* Notify blocked threads */
	pthread_cond_broadcast(&(queue->cond_empty));

	return PTHREAD_QUEUE_OK;
}
/* Copy one item from the queue into `buf`, blocking for at most
 * `timeout` milliseconds while the queue is empty.
 *
 * Returns PTHREAD_QUEUE_OK on success or PTHREAD_QUEUE_EMPTY if the
 * wait failed (e.g. timed out).
 *
 * Fix: same nanosecond-carry off-by-one as in pthread_queue_enqueue
 * ('>' changed to '>=' so an exact 1e9 sum does not lose a second). */
int pthread_queue_dequeue(pthread_queue_t * queue, void * buf, uint32_t timeout) {

	int ret;

	/* Calculate absolute deadline from the Mach calendar (wall) clock */
	struct timespec ts;
	clock_serv_t cclock;
	mach_timespec_t mts;
	host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
	clock_get_time(cclock, &mts);
	mach_port_deallocate(mach_task_self(), cclock);
	ts.tv_sec = mts.tv_sec;
	ts.tv_nsec = mts.tv_nsec;
	uint32_t sec = timeout / 1000;
	uint32_t nsec = (timeout - 1000 * sec) * 1000000;
	ts.tv_sec += sec;
	if (ts.tv_nsec + nsec >= 1000000000)	/* was '>': lost a second at the exact boundary */
		ts.tv_sec++;
	ts.tv_nsec = (ts.tv_nsec + nsec) % 1000000000;

	/* Get queue lock */
	pthread_mutex_lock(&(queue->mutex));
	while (queue->items == 0) {
		ret = pthread_cond_timedwait(&(queue->cond_empty), &(queue->mutex), &ts);
		if (ret != 0) {
			pthread_mutex_unlock(&(queue->mutex));
			return PTHREAD_QUEUE_EMPTY;
		}
	}

	/* Copy object to output buffer */
	memcpy(buf, queue->buffer+(queue->out * queue->item_size), queue->item_size);
	queue->items--;
	queue->out = (queue->out + 1) % queue->size;
	pthread_mutex_unlock(&(queue->mutex));

	/* Notify blocked threads */
	pthread_cond_broadcast(&(queue->cond_full));

	return PTHREAD_QUEUE_OK;
}
/* Return the number of items currently stored in the queue.
 * Takes the queue mutex, so the value is a consistent snapshot
 * (though it may be stale by the time the caller uses it). */
int pthread_queue_items(pthread_queue_t * queue) {

	pthread_mutex_lock(&(queue->mutex));
	const int count = queue->items;
	pthread_mutex_unlock(&(queue->mutex));

	return count;
}
| marshall/libcsp | src/arch/macosx/pthread_queue.c | C | lgpl-2.1 | 4,452 |
#include "clar_libgit2.h"
#include "git2/merge.h"
#include "buffer.h"
#include "merge.h"
#include "../merge_helpers.h"
#include "posix.h"
#define TEST_REPO_PATH "merge-resolve"
#define MERGE_BRANCH_OID "7cb63eed597130ba4abb87b3e544b85021905520"
#define AUTOMERGEABLE_MERGED_FILE \
"this file is changed in master\n" \
"this file is automergeable\n" \
"this file is automergeable\n" \
"this file is automergeable\n" \
"this file is automergeable\n" \
"this file is automergeable\n" \
"this file is automergeable\n" \
"this file is automergeable\n" \
"this file is changed in branch\n"
#define CHANGED_IN_BRANCH_FILE \
"changed in branch\n"
/* Sandbox repository and its index, (re)created for every test. */
static git_repository *repo;
static git_index *repo_index;

/* Files NOT touched by merging MERGE_BRANCH_OID into HEAD: dirtying any
 * combination of these in the workdir must not block the merge.
 * Each row is a NULL-terminated filename list; the table ends with { NULL }. */
static char *unaffected[][4] = {
	{ "added-in-master.txt", NULL },
	{ "changed-in-master.txt", NULL },
	{ "unchanged.txt", NULL },
	{ "added-in-master.txt", "changed-in-master.txt", NULL },
	{ "added-in-master.txt", "unchanged.txt", NULL },
	{ "changed-in-master.txt", "unchanged.txt", NULL },
	{ "added-in-master.txt", "changed-in-master.txt", "unchanged.txt", NULL },
	{ "new_file.txt", NULL },
	{ "new_file.txt", "unchanged.txt", NULL },
	{ NULL },
};

/* Files that ARE rewritten by the merge: local modifications to any of
 * these must make the merge fail instead of clobbering them. */
static char *affected[][5] = {
	{ "automergeable.txt", NULL },
	{ "changed-in-branch.txt", NULL },
	{ "conflicting.txt", NULL },
	{ "removed-in-branch.txt", NULL },
	{ "automergeable.txt", "changed-in-branch.txt", NULL },
	{ "automergeable.txt", "conflicting.txt", NULL },
	{ "automergeable.txt", "removed-in-branch.txt", NULL },
	{ "changed-in-branch.txt", "conflicting.txt", NULL },
	{ "changed-in-branch.txt", "removed-in-branch.txt", NULL },
	{ "conflicting.txt", "removed-in-branch.txt", NULL },
	{ "automergeable.txt", "changed-in-branch.txt", "conflicting.txt", NULL },
	{ "automergeable.txt", "changed-in-branch.txt", "removed-in-branch.txt", NULL },
	{ "automergeable.txt", "conflicting.txt", "removed-in-branch.txt", NULL },
	{ "changed-in-branch.txt", "conflicting.txt", "removed-in-branch.txt", NULL },
	{ "automergeable.txt", "changed-in-branch.txt", "conflicting.txt", "removed-in-branch.txt", NULL },
	{ NULL },
};

/* (filename, expected post-merge content) pairs, NULL-terminated: staging
 * exactly this content should be indistinguishable from the merge result
 * and therefore allowed. */
static char *result_contents[4][6] = {
	{ "automergeable.txt", AUTOMERGEABLE_MERGED_FILE, NULL, NULL },
	{ "changed-in-branch.txt", CHANGED_IN_BRANCH_FILE, NULL, NULL },
	{ "automergeable.txt", AUTOMERGEABLE_MERGED_FILE, "changed-in-branch.txt", CHANGED_IN_BRANCH_FILE, NULL, NULL },
	{ NULL }
};
/* Per-test setup: create a fresh "merge-resolve" sandbox and grab its index.
 * NOTE(review): the git_repository_index return value is not checked here,
 * unlike most calls in this file -- presumably deliberate; confirm. */
void test_merge_workdir_dirty__initialize(void)
{
	repo = cl_git_sandbox_init(TEST_REPO_PATH);
	git_repository_index(&repo_index, repo);
}

/* Per-test teardown: release the index and remove the sandbox. */
void test_merge_workdir_dirty__cleanup(void)
{
	git_index_free(repo_index);
	cl_git_sandbox_cleanup();
}

/* Set core.autocrlf in the repository's configuration. */
static void set_core_autocrlf_to(git_repository *repo, bool value)
{
	git_config *cfg;

	cl_git_pass(git_repository_config(&cfg, repo));
	cl_git_pass(git_config_set_bool(cfg, "core.autocrlf", value));

	git_config_free(cfg);
}
/* Merge commit MERGE_BRANCH_OID into the current HEAD with a safe checkout.
 * Returns the git_merge error code so callers can assert either success or
 * failure (the lookup steps themselves are asserted with cl_git_pass). */
static int merge_branch(void)
{
	git_oid their_oids[1];
	git_annotated_commit *their_head;
	git_merge_options merge_opts = GIT_MERGE_OPTIONS_INIT;
	git_checkout_options checkout_opts = GIT_CHECKOUT_OPTIONS_INIT;
	int error;

	cl_git_pass(git_oid_fromstr(&their_oids[0], MERGE_BRANCH_OID));
	cl_git_pass(git_annotated_commit_lookup(&their_head, repo, &their_oids[0]));

	/* GIT_CHECKOUT_SAFE refuses to overwrite workdir modifications */
	checkout_opts.checkout_strategy = GIT_CHECKOUT_SAFE;
	error = git_merge(repo, (const git_annotated_commit **)&their_head, 1, &merge_opts, &checkout_opts);

	git_annotated_commit_free(their_head);

	return error;
}
static void write_files(char *files[])
{
char *filename;
git_buf path = GIT_BUF_INIT, content = GIT_BUF_INIT;
size_t i;
for (i = 0, filename = files[i]; filename; filename = files[++i]) {
git_buf_clear(&path);
git_buf_clear(&content);
git_buf_printf(&path, "%s/%s", TEST_REPO_PATH, filename);
git_buf_printf(&content, "This is a dirty file in the working directory!\n\n"
"It will not be staged! Its filename is %s.\n", filename);
cl_git_mkfile(path.ptr, content.ptr);
}
git_buf_free(&path);
git_buf_free(&content);
}
/* Rewrite the cached stat data of each listed index entry so it matches
 * the file currently on disk, while keeping the recorded object id. */
static void hack_index(char *files[])
{
	char *filename;
	struct stat statbuf;
	git_buf path = GIT_BUF_INIT;
	git_index_entry *entry;
	size_t i;

	/* Update the index to suggest that checkout placed these files on
	 * disk, keeping the object id but updating the cache, which will
	 * emulate a Git implementation's different filter.
	 */
	for (i = 0, filename = files[i]; filename; filename = files[++i]) {
		git_buf_clear(&path);

		/* entries are mutated in place through the const-cast pointer */
		cl_assert(entry = (git_index_entry *)
			git_index_get_bypath(repo_index, filename, 0));

		cl_git_pass(git_buf_printf(&path, "%s/%s", TEST_REPO_PATH, filename));
		cl_git_pass(p_stat(path.ptr, &statbuf));

		/* copy the on-disk stat data into the entry's stat cache;
		 * nanosecond fields are zeroed since p_stat may not fill them */
		entry->ctime.seconds = (git_time_t)statbuf.st_ctime;
		entry->ctime.nanoseconds = 0;
		entry->mtime.seconds = (git_time_t)statbuf.st_mtime;
		entry->mtime.nanoseconds = 0;
		entry->dev = statbuf.st_dev;
		entry->ino = statbuf.st_ino;
		entry->uid = statbuf.st_uid;
		entry->gid = statbuf.st_gid;
		entry->file_size = statbuf.st_size;
	}

	git_buf_free(&path);
}
static void stage_random_files(char *files[])
{
char *filename;
size_t i;
write_files(files);
for (i = 0, filename = files[i]; filename; filename = files[++i])
cl_git_pass(git_index_add_bypath(repo_index, filename));
}
/* Hard-reset the workdir to HEAD, then write and stage the given
 * (filename, content) pairs.  `content` is a flat NULL-terminated array
 * alternating filenames and file bodies. */
static void stage_content(char *content[])
{
	git_reference *head;
	git_object *head_object;
	git_buf path = GIT_BUF_INIT;
	char *filename, *text;
	size_t i;

	cl_git_pass(git_repository_head(&head, repo));
	cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT));
	cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL));

	/* consume the array two elements at a time: i advances twice per
	 * iteration (once for the filename, once for the body) */
	for (i = 0, filename = content[i], text = content[++i];
		filename && text;
		filename = content[++i], text = content[++i]) {

		git_buf_clear(&path);

		cl_git_pass(git_buf_printf(&path, "%s/%s", TEST_REPO_PATH, filename));
		cl_git_mkfile(path.ptr, text);
		cl_git_pass(git_index_add_bypath(repo_index, filename));
	}

	git_object_free(head_object);
	git_reference_free(head);
	git_buf_free(&path);
}
/* Reset to HEAD, dirty the given files in the workdir (without staging),
 * and attempt the merge.  Returns the merge error code. */
static int merge_dirty_files(char *dirty_files[])
{
	git_reference *head;
	git_object *head_object;
	int error;

	cl_git_pass(git_repository_head(&head, repo));
	cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT));
	cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL));

	write_files(dirty_files);

	error = merge_branch();

	git_object_free(head_object);
	git_reference_free(head);

	return error;
}
/* Reset to HEAD, rewrite the files on disk, then doctor the index stat
 * cache (hack_index) so the files look checkout-clean despite differing
 * content -- emulating a different filter implementation.  Returns the
 * merge error code. */
static int merge_differently_filtered_files(char *files[])
{
	git_reference *head;
	git_object *head_object;
	int error;

	cl_git_pass(git_repository_head(&head, repo));
	cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT));
	cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL));

	write_files(files);
	hack_index(files);

	cl_git_pass(git_index_write(repo_index));

	error = merge_branch();

	git_object_free(head_object);
	git_reference_free(head);

	return error;
}
/* Stage the given files with dirty content, then attempt the merge.
 * Returns the merge error code. */
static int merge_staged_files(char *staged_files[])
{
	stage_random_files(staged_files);
	return merge_branch();
}
/* Dirtying files the merge does not touch must not block it. */
void test_merge_workdir_dirty__unaffected_dirty_files_allowed(void)
{
	char **files;
	size_t i;

	for (i = 0, files = unaffected[i]; files[0]; files = unaffected[++i])
		cl_git_pass(merge_dirty_files(files));
}

/* An unstaged deletion of an untouched file must survive the merge. */
void test_merge_workdir_dirty__unstaged_deletes_maintained(void)
{
	git_reference *head;
	git_object *head_object;

	cl_git_pass(git_repository_head(&head, repo));
	cl_git_pass(git_reference_peel(&head_object, head, GIT_OBJ_COMMIT));
	cl_git_pass(git_reset(repo, head_object, GIT_RESET_HARD, NULL));

	cl_git_pass(p_unlink("merge-resolve/unchanged.txt"));

	cl_git_pass(merge_branch());

	git_object_free(head_object);
	git_reference_free(head);
}

/* Dirtying files the merge would rewrite must make it fail. */
void test_merge_workdir_dirty__affected_dirty_files_disallowed(void)
{
	char **files;
	size_t i;

	for (i = 0, files = affected[i]; files[0]; files = affected[++i])
		cl_git_fail(merge_dirty_files(files));
}

/* Any staged-but-uncommitted change (affected or not) must block the merge. */
void test_merge_workdir_dirty__staged_files_in_index_disallowed(void)
{
	char **files;
	size_t i;

	for (i = 0, files = unaffected[i]; files[0]; files = unaffected[++i])
		cl_git_fail(merge_staged_files(files));

	for (i = 0, files = affected[i]; files[0]; files = affected[++i])
		cl_git_fail(merge_staged_files(files));
}
/* Staged content identical to the merge result must be allowed.
 * autocrlf is disabled so on-disk bytes match the expected contents.
 *
 * Fix: the git_index_write return value was silently discarded; every
 * other call site in this file (e.g. merge_differently_filtered_files)
 * wraps it in cl_git_pass, so a failed index write would previously let
 * the test continue against a stale index. */
void test_merge_workdir_dirty__identical_staged_files_allowed(void)
{
	char **content;
	size_t i;

	set_core_autocrlf_to(repo, false);

	for (i = 0, content = result_contents[i]; content[0]; content = result_contents[++i]) {
		stage_content(content);

		cl_git_pass(git_index_write(repo_index));
		cl_git_pass(merge_branch());
	}
}
/* Files whose index stat cache says they are clean (hack_index) must be
 * trusted, even for merge-affected paths. */
void test_merge_workdir_dirty__honors_cache(void)
{
	char **files;
	size_t i;

	for (i = 0, files = affected[i]; files[0]; files = affected[++i])
		cl_git_pass(merge_differently_filtered_files(files));
}
| rcorre/libgit2 | tests/merge/workdir/dirty.c | C | lgpl-2.1 | 8,749 |
// @(#)root/cont:$Id$
// Author: Rene Brun 28/09/2001
/*************************************************************************
* Copyright (C) 1995-2000, Rene Brun and Fons Rademakers. *
* All rights reserved. *
* *
* For the licensing terms see $ROOTSYS/LICENSE. *
* For the list of contributors see $ROOTSYS/README/CREDITS. *
*************************************************************************/
#ifndef ROOT_TProcessID
#define ROOT_TProcessID
//////////////////////////////////////////////////////////////////////////
// //
// TProcessID //
// //
// Process Identifier object //
// //
//////////////////////////////////////////////////////////////////////////
#ifndef ROOT_TNamed
#include "TNamed.h"
#endif
#ifndef ROOT_TObjArray
#include "TObjArray.h"
#endif
#include <atomic>
#include <type_traits>
class TExMap;
namespace ROOT {
namespace Internal {
/**
* \class ROOT::Internal::TAtomicPointer
* \brief Helper class to manage atomic pointers.
* \tparam T Pointer type to be made atomic
*
* Helper class to manage atomic pointers. The class enforces that the templated type
* is a pointer.
*/
template <typename T> class TAtomicPointer {
private:
   std::atomic<T> fAtomic; ///< Atomically accessed raw pointer

public:
   /// Default-construct holding a null pointer; only pointer types compile.
   TAtomicPointer() : fAtomic(nullptr)
   {
      static_assert(std::is_pointer<T>::value, "Only pointer types supported");
   }

   /// Deletes the pointee: this class owns the object it points to.
   /// NOTE(review): copy/assignment are not disabled despite the owning
   /// destructor -- presumably instances are never copied; verify, as a
   /// copy would lead to a double delete.
   ~TAtomicPointer() { delete fAtomic.load(); }

   /// Forward member access to the held pointer.
   T operator->() const { return fAtomic; }

   /// Implicit conversion to the raw pointer value.
   operator T() const { return fAtomic; }

   /// Store t and return it.  The previously held pointee is NOT deleted
   /// here; callers are responsible for the old object's lifetime.
   T operator=(const T& t)
   {
      fAtomic = t;
      return t;
   }
};
} // End of namespace Internal
} // End of namespace ROOT
class TProcessID : public TNamed {

private:
   TProcessID(const TProcessID &ref);            // TProcessID are not copiable.
   TProcessID& operator=(const TProcessID &ref); // TProcessID are not copiable.

protected:
   std::atomic_int fCount;  //!Reference count to this object (from TFile)
   ROOT::Internal::TAtomicPointer<TObjArray*> fObjects; //!Array pointing to the referenced objects
   std::atomic_flag fLock;  //!Spin lock for initialization of fObjects

   static TProcessID *fgPID;    //Pointer to current session ProcessID
   static TObjArray  *fgPIDs;   //Table of ProcessIDs
   static TExMap     *fgObjPIDs;//Table pointer to pids
   static UInt_t      fgNumber; //Referenced objects count

public:
   TProcessID();
   virtual ~TProcessID();
   // Lazily create fObjects; guarded by fLock (see implementation).
   void             CheckInit();
   virtual void     Clear(Option_t *option="");
   // Reference-count manipulation; returns the updated count.
   Int_t            DecrementCount();
   Int_t            IncrementCount();
   Int_t            GetCount() const {return fCount;}
   TObjArray       *GetObjects() const {return fObjects;}
   // Map a unique object id to/from the registered TObject.
   TObject         *GetObjectWithID(UInt_t uid);
   void             PutObjectWithID(TObject *obj, UInt_t uid=0);
   virtual void     RecursiveRemove(TObject *obj);

   // Static session-wide ProcessID bookkeeping.
   static TProcessID  *AddProcessID();
   static UInt_t       AssignID(TObject *obj);
   static void         Cleanup();
   static UInt_t       GetNProcessIDs();
   static TProcessID  *GetPID();
   static TObjArray   *GetPIDs();
   static TProcessID  *GetProcessID(UShort_t pid);
   static TProcessID  *GetProcessWithUID(const TObject *obj);
   static TProcessID  *GetProcessWithUID(UInt_t uid,const void *obj);
   static TProcessID  *GetSessionProcessID();
   static  UInt_t      GetObjectCount();
   static  Bool_t      IsValid(TProcessID *pid);
   static  void        SetObjectCount(UInt_t number);

   ClassDef(TProcessID,1)  //Process Unique Identifier in time and space
};
#endif
| gbitzes/root | core/base/inc/TProcessID.h | C | lgpl-2.1 | 4,235 |
/****************************************************************/
/* DO NOT MODIFY THIS HEADER */
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* (c) 2010 Battelle Energy Alliance, LLC */
/* ALL RIGHTS RESERVED */
/* */
/* Prepared by Battelle Energy Alliance, LLC */
/* Under Contract No. DE-AC07-05ID14517 */
/* With the U. S. Department of Energy */
/* */
/* See COPYRIGHT for full restrictions */
/****************************************************************/
#ifndef ADDBOUNDSVECTORSACTION_H
#define ADDBOUNDSVECTORSACTION_H
#include "Action.h"
// Forward declaration required by the validParams specialization below.
class AddBoundsVectorsAction;

template <>
InputParameters validParams<AddBoundsVectorsAction>();

/**
 * MOOSE Action whose act() adds the bounds vectors used by the [Bounds]
 * system -- presumably to the nonlinear system; confirm against the
 * implementation in AddBoundsVectorsAction.C.
 */
class AddBoundsVectorsAction : public Action
{
public:
  AddBoundsVectorsAction(InputParameters params);

  virtual void act() override;
};
#endif // ADDBOUNDSVECTORSACTION_H
| Chuban/moose | framework/include/actions/AddBoundsVectorsAction.h | C | lgpl-2.1 | 1,235 |
package org.jboss.hal.testsuite.test.configuration.undertow;
import org.apache.commons.lang.RandomStringUtils;
import org.jboss.arquillian.graphene.page.Page;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.hal.testsuite.category.Shared;
import org.jboss.hal.testsuite.page.config.UndertowServletPage;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.wildfly.extras.creaper.core.online.operations.Address;
import org.wildfly.extras.creaper.core.online.operations.OperationException;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
/**
 * UI tests for the Undertow servlet-container configuration page.
 * A throw-away servlet container with a random name is created once for
 * the whole class; each test edits one attribute through the web console
 * and verifies the change against the management model.
 */
@RunWith(Arquillian.class)
@Category(Shared.class)
public class ServletContainerTestCase extends UndertowTestCaseAbstract {

    @Page
    private UndertowServletPage page;

    // identifiers: attribute names of the servlet-container resource
    private static final String ALLOW_NON_STANDARD_WRAPPERS = "allow-non-standard-wrappers";
    private static final String DEFAULT_BUFFER_CACHE = "default-buffer-cache";
    private static final String DEFAULT_ENCODING = "default-encoding";
    private static final String DEFAULT_SESSION_TIMEOUT = "default-session-timeout";
    private static final String DIRECTORY_LISTING = "directory-listing";
    private static final String DISABLE_CACHING_FOR_SECURED_PAGES = "disable-caching-for-secured-pages";
    private static final String EAGER_FILTER_INITIALIZATION = "eager-filter-initialization";
    private static final String IGNORE_FLUSH = "ignore-flush";
    private static final String STACK_TRACE_ON_ERROR = "stack-trace-on-error";
    private static final String USE_LISTENER_ENCODING = "use-listener-encoding";

    // values used in the edit tests
    private static final String STACK_TRACE_ON_ERROR_VALUE = "all";

    // randomized name so parallel/shared runs do not collide
    private static final String SERVLET_CONTAINER = "servlet-container_" + RandomStringUtils.randomAlphanumeric(5);
    private static final Address SERVLET_CONTAINER_ADDRESS = UNDERTOW_ADDRESS.and("servlet-container", SERVLET_CONTAINER);

    /** Create the servlet-container resource under test. */
    @BeforeClass
    public static void setUp() throws InterruptedException, IOException, TimeoutException {
        operations.add(SERVLET_CONTAINER_ADDRESS);
    }

    /** Navigate to the page and select the container before every test. */
    @Before
    public void before() {
        page.navigate();
        page.selectServletContainer(SERVLET_CONTAINER);
    }

    /** Remove the servlet-container resource created in setUp. */
    @AfterClass
    public static void tearDown() throws InterruptedException, IOException, TimeoutException, OperationException {
        operations.remove(SERVLET_CONTAINER_ADDRESS);
    }

    @Test
    public void setAllowNonStandardWrappersToTrue() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, ALLOW_NON_STANDARD_WRAPPERS, true);
    }

    @Test
    public void setAllowNonStandardWrappersToFalse() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, ALLOW_NON_STANDARD_WRAPPERS, false);
    }

    @Test
    public void editDefaultBufferCache() throws Exception {
        editTextAndVerify(SERVLET_CONTAINER_ADDRESS, DEFAULT_BUFFER_CACHE, undertowOps.createBufferCache());
    }

    @Test
    public void editDefaultEncoding() throws Exception {
        editTextAndVerify(SERVLET_CONTAINER_ADDRESS, DEFAULT_ENCODING);
    }

    @Test
    public void editDefaultSessionTimeout() throws Exception {
        editTextAndVerify(SERVLET_CONTAINER_ADDRESS, DEFAULT_SESSION_TIMEOUT, 42);
    }

    /** A non-numeric timeout must be rejected by form validation. */
    @Test
    public void editDefaultSessionTimeoutInvalid() throws Exception {
        verifyIfErrorAppears(DEFAULT_SESSION_TIMEOUT, "54sdfg");
    }

    @Test
    public void setDirectoryListingToTrue() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, DIRECTORY_LISTING, true);
    }

    @Test
    public void setDirectoryListingToFalse() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, DIRECTORY_LISTING, false);
    }

    @Test
    public void setDisableCachingForSecuredPagesToTrue() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, DISABLE_CACHING_FOR_SECURED_PAGES, true);
    }

    @Test
    public void setDisableCachingForSecuredPagesToFalse() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, DISABLE_CACHING_FOR_SECURED_PAGES, false);
    }

    @Test
    public void setIgnoreFlushToTrue() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, IGNORE_FLUSH, true);
    }

    @Test
    public void setIgnoreFlushToFalse() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, IGNORE_FLUSH, false);
    }

    @Test
    public void setEagerFilterInitializationToTrue() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, EAGER_FILTER_INITIALIZATION, true);
    }

    @Test
    public void setEagerFilterInitializationToFalse() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, EAGER_FILTER_INITIALIZATION, false);
    }

    @Test
    public void selectStackTraceOnError() throws Exception {
        selectOptionAndVerify(SERVLET_CONTAINER_ADDRESS, STACK_TRACE_ON_ERROR, STACK_TRACE_ON_ERROR_VALUE);
    }

    @Test
    public void setUseListenerEncodingToTrue() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, USE_LISTENER_ENCODING, true);
    }

    @Test
    public void setUseListenerEncodingToFalse() throws Exception {
        editCheckboxAndVerify(SERVLET_CONTAINER_ADDRESS, USE_LISTENER_ENCODING, false);
    }
}
| hpehl/testsuite | basic/src/test/java/org/jboss/hal/testsuite/test/configuration/undertow/ServletContainerTestCase.java | Java | lgpl-2.1 | 5,479 |
/**********************************************************************
*
* GEOS - Geometry Engine Open Source
* http://geos.osgeo.org
*
* Copyright (C) 2011 Sandro Santilli <strk@keybit.net>
* Copyright (C) 2005-2006 Refractions Research Inc.
* Copyright (C) 2001-2002 Vivid Solutions Inc.
*
* This is free software; you can redistribute and/or modify it under
* the terms of the GNU Lesser General Public Licence as published
* by the Free Software Foundation.
* See the COPYING file for more information.
*
**********************************************************************
*
* Last port: geom/LineString.java r320 (JTS-1.12)
*
**********************************************************************/
#include <geos/util/IllegalArgumentException.h>
#include <geos/algorithm/CGAlgorithms.h>
#include <geos/geom/Coordinate.h>
#include <geos/geom/CoordinateSequenceFactory.h>
#include <geos/geom/CoordinateSequence.h>
#include <geos/geom/CoordinateSequenceFilter.h>
#include <geos/geom/CoordinateFilter.h>
#include <geos/geom/Dimension.h>
#include <geos/geom/GeometryFilter.h>
#include <geos/geom/GeometryComponentFilter.h>
#include <geos/geom/GeometryFactory.h>
#include <geos/geom/LineString.h>
#include <geos/geom/Point.h>
#include <geos/geom/MultiPoint.h> // for getBoundary
#include <geos/geom/Envelope.h>
#include <algorithm>
#include <typeinfo>
#include <memory>
#include <cassert>
using namespace std;
using namespace geos::algorithm;
namespace geos {
namespace geom { // geos::geom
/*protected*/
// Copy constructor: deep-copies the coordinate sequence.
LineString::LineString(const LineString &ls)
	:
	Geometry(ls),
	points(ls.points->clone())
{
	//points=ls.points->clone();
}

// Return a new LineString with the vertex order reversed.
// Caller owns the returned geometry.
Geometry*
LineString::reverse() const
{
	assert(points.get());
	CoordinateSequence* seq = points->clone();
	CoordinateSequence::reverse(seq);
	assert(getFactory());
	return getFactory()->createLineString(seq);
}

/*private*/
// Enforce the class invariant: 0 or >1 points. A null sequence is
// replaced by an empty one; a 1-point sequence is rejected.
void
LineString::validateConstruction()
{
	if (points.get()==NULL)
	{
		points.reset(getFactory()->getCoordinateSequenceFactory()->create());
		return;
	}

	if (points->size()==1)
	{
		throw util::IllegalArgumentException("point array must contain 0 or >1 elements\n");
	}
}

/*protected*/
// Takes ownership of newCoords (may be NULL => empty line).
LineString::LineString(CoordinateSequence *newCoords,
		const GeometryFactory *factory)
	:
	Geometry(factory),
	points(newCoords)
{
	validateConstruction();
}

/*public*/
// Ownership-transferring constructor (auto_ptr flavour).
LineString::LineString(CoordinateSequence::AutoPtr newCoords,
		const GeometryFactory *factory)
	:
	Geometry(factory),
	points(newCoords)
{
	validateConstruction();
}

// points is owned via smart pointer, nothing to do here.
LineString::~LineString()
{
	//delete points;
}
// Return a COPY of the coordinate sequence; caller owns it.
CoordinateSequence*
LineString::getCoordinates() const
{
	assert(points.get());
	return points->clone();
	//return points;
}

// Read-only access to the internal coordinate sequence (no copy).
const CoordinateSequence*
LineString::getCoordinatesRO() const
{
	assert(0 != points.get());
	return points.get();
}

// n-th vertex; no bounds checking beyond the sequence's own.
const Coordinate&
LineString::getCoordinateN(int n) const
{
	assert(points.get());
	return points->getAt(n);
}

Dimension::DimensionType
LineString::getDimension() const
{
	return Dimension::L; // line
}

// 2 or 3, as stored in the coordinate sequence.
int
LineString::getCoordinateDimension() const
{
	return (int) points->getDimension();
}

// Boundary of a closed line is empty (Dimension::False), otherwise
// the two endpoints (dimension 0).
int
LineString::getBoundaryDimension() const
{
	if (isClosed()) {
		return Dimension::False;
	}
	return 0;
}

bool
LineString::isEmpty() const
{
	assert(points.get());
	return points->isEmpty();
}

size_t
LineString::getNumPoints() const
{
	assert(points.get());
	return points->getSize();
}

// Return the n-th vertex as a newly allocated Point; caller owns it.
Point*
LineString::getPointN(size_t n) const
{
	assert(getFactory());
	assert(points.get());
	return getFactory()->createPoint(points->getAt(n));
}
// First vertex as a new Point, or NULL for an empty line. Caller owns it.
Point*
LineString::getStartPoint() const
{
	if (isEmpty()) {
		return NULL;
		//return new Point(NULL,NULL);
	}
	return getPointN(0);
}

// Last vertex as a new Point, or NULL for an empty line. Caller owns it.
Point*
LineString::getEndPoint() const
{
	if (isEmpty()) {
		return NULL;
		//return new Point(NULL,NULL);
	}
	return getPointN(getNumPoints() - 1);
}

// Closed = non-empty and first vertex equals last vertex (2D comparison).
bool
LineString::isClosed() const
{
	if (isEmpty()) {
		return false;
	}
	return getCoordinateN(0).equals2D(getCoordinateN(getNumPoints()-1));
}

// A ring is a closed, simple (non-self-intersecting) line.
bool
LineString::isRing() const
{
	return isClosed() && isSimple();
}

string
LineString::getGeometryType() const
{
	return "LineString";
}
// Boundary per OGC SFS Mod-2 rule: empty for empty or closed lines,
// otherwise a MultiPoint of the two endpoints. Caller owns the result.
Geometry*
LineString::getBoundary() const
{
	if (isEmpty()) {
		return getFactory()->createMultiPoint();
	}

	// using the default OGC_SFS MOD2 rule, the boundary of a
	// closed LineString is empty
	if (isClosed()) {
		return getFactory()->createMultiPoint();
	}
	vector<Geometry*> *pts=new vector<Geometry*>();
	pts->push_back(getStartPoint());
	pts->push_back(getEndPoint());
	MultiPoint *mp = getFactory()->createMultiPoint(pts);
	return mp;
}

// Linear scan for an exact coordinate match (operator==).
bool
LineString::isCoordinate(Coordinate& pt) const
{
	assert(points.get());
	std::size_t npts=points->getSize();
	for (std::size_t i = 0; i<npts; i++) {
		if (points->getAt(i)==pt) {
			return true;
		}
	}
	return false;
}
/*protected*/
// Compute the bounding box of all vertices. Always returns a non-null
// Envelope: an explicitly EMPTY one for an empty line (NULL would mean
// "unknown" to callers).
//
// Fix: the loop's Coordinate reference previously shadowed the outer
// variable `c` used for seeding the extremes; the seed is now named
// `first` so no shadowing occurs (behavior unchanged).
Envelope::AutoPtr
LineString::computeEnvelopeInternal() const
{
	if (isEmpty()) {
		// We don't return NULL here
		// as it would indicate "unknown"
		// envelope. In this case we
		// *know* the envelope is EMPTY.
		return Envelope::AutoPtr(new Envelope());
	}

	assert(points.get());

	// Seed the extremes with the first vertex, then expand over the rest.
	const Coordinate& first = points->getAt(0);
	double minx = first.x;
	double miny = first.y;
	double maxx = first.x;
	double maxy = first.y;

	std::size_t npts = points->getSize();
	for (std::size_t i = 1; i < npts; i++) {
		const Coordinate& c = points->getAt(i);
		minx = minx < c.x ? minx : c.x;
		maxx = maxx > c.x ? maxx : c.x;
		miny = miny < c.y ? miny : c.y;
		maxy = maxy > c.y ? maxy : c.y;
	}

	// caller expects a newly allocated Envelope.
	// this function won't be called twice, unless
	// cached Envelope is invalidated (set to NULL)
	return Envelope::AutoPtr(new Envelope(minx, maxx, miny, maxy));
}
// Exact equality: same concrete class, same vertex count, and every
// vertex pair within `tolerance` (via Geometry::equal).
bool
LineString::equalsExact(const Geometry *other, double tolerance) const
{
	if (!isEquivalentClass(other)) {
		return false;
	}

	const LineString *otherLineString=dynamic_cast<const LineString*>(other);
	assert(otherLineString);

	size_t npts=points->getSize();
	if (npts!=otherLineString->points->getSize()) {
		return false;
	}

	for (size_t i=0; i<npts; ++i) {
		if (!equal(points->getAt(i),otherLineString->points->getAt(i),tolerance)) {
			return false;
		}
	}
	return true;
}
// Apply a mutating coordinate filter to every vertex.
void
LineString::apply_rw(const CoordinateFilter *filter)
{
	assert(points.get());
	points->apply_rw(filter);
}

// Apply a read-only coordinate filter to every vertex.
void
LineString::apply_ro(CoordinateFilter *filter) const
{
	assert(points.get());
	points->apply_ro(filter);
}

// Geometry filters visit this line as a single atomic geometry.
void LineString::apply_rw(GeometryFilter *filter)
{
	assert(filter);
	filter->filter_rw(this);
}

void LineString::apply_ro(GeometryFilter *filter) const
{
	assert(filter);
	filter->filter_ro(this);
}
/*public*/
// Normalize vertex order: compare the sequence against its mirror image
// from both ends; if the first differing pair is "backwards", reverse
// the whole sequence in place. Palindromic sequences are left untouched.
void
LineString::normalize()
{
	assert(points.get());
	std::size_t npts=points->getSize();
	std::size_t n=npts/2;
	for (std::size_t i=0; i<n; i++) {
		std::size_t j = npts - 1 - i;
		if (!(points->getAt(i)==points->getAt(j))) {
			if (points->getAt(i).compareTo(points->getAt(j)) > 0) {
				CoordinateSequence::reverse(points.get());
			}
			return;
		}
	}
}

// Ordering for geometry sorting: shorter sequence first, then
// lexicographic comparison of coordinates. (MD - optimized implementation)
int
LineString::compareToSameClass(const Geometry *ls) const
{
	const LineString *line=dynamic_cast<const LineString*>(ls);
	assert(line);

	// MD - optimized implementation
	std::size_t mynpts=points->getSize();
	std::size_t othnpts=line->points->getSize();
	if ( mynpts > othnpts ) return 1;
	if ( mynpts < othnpts ) return -1;
	for (std::size_t i=0; i<mynpts; i++)
	{
		int cmp=points->getAt(i).compareTo(line->points->getAt(i));
		if (cmp) return cmp;
	}
	return 0;
}
// First vertex, or NULL when empty (pointer into the internal sequence).
const Coordinate*
LineString::getCoordinate() const
{
	if (isEmpty()) return NULL;
	return &(points->getAt(0));
}

// Cartesian length of the whole line.
double
LineString::getLength() const
{
	return CGAlgorithms::length(points.get());
}

// Component filters also see this line as a single component.
void
LineString::apply_rw(GeometryComponentFilter *filter)
{
	assert(filter);
	filter->filter_rw(this);
}

void
LineString::apply_ro(GeometryComponentFilter *filter) const
{
	assert(filter);
	filter->filter_ro(this);
}

// Sequence filter (mutating): visit vertices until the filter reports
// done; notify the geometry if the filter changed anything.
void
LineString::apply_rw(CoordinateSequenceFilter& filter)
{
	size_t npts=points->size();
	if (!npts) return;
	for (size_t i = 0; i<npts; ++i)
	{
		filter.filter_rw(*points, i);
		if (filter.isDone()) break;
	}
	if (filter.isGeometryChanged()) geometryChanged();
}

// Sequence filter (read-only): same traversal, no change notification.
void
LineString::apply_ro(CoordinateSequenceFilter& filter) const
{
	size_t npts=points->size();
	if (!npts) return;
	for (size_t i = 0; i<npts; ++i)
	{
		filter.filter_ro(*points, i);
		if (filter.isDone()) break;
	}
	//if (filter.isGeometryChanged()) geometryChanged();
}

GeometryTypeId
LineString::getGeometryTypeId() const
{
	return GEOS_LINESTRING;
}
} // namespace geos::geom
} // namespace geos
| manisandro/libgeos | src/geom/LineString.cpp | C++ | lgpl-2.1 | 8,533 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIlmbase(AutotoolsPackage):
"""The PyIlmBase libraries provides python bindings for the IlmBase libraries."""
homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase"
url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz"
version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425')
depends_on('ilmbase')
depends_on('boost+python')
# https://github.com/AcademySoftwareFoundation/openexr/issues/336
parallel = False
def configure_args(self):
spec = self.spec
args = [
'--with-boost-python-libname=boost_python{0}'.format(
spec['python'].version.up_to(2).joined)
]
return args
| LLNL/spack | var/spack/repos/builtin/packages/py-ilmbase/package.py | Python | lgpl-2.1 | 1,026 |
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------
// check SparseMatrix::matrix_norm_square
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <iostream>
#include <vector>
// Fill a dense n x n Trilinos sparse matrix with m(i,j) = i + 2j and the
// vectors v(i) = i, w(i) = i+1; then check that m.residual(x, v, w)
// stores x = w - M*v and returns ||x||_2.
void test (TrilinosWrappers::MPI::Vector &v,
           TrilinosWrappers::MPI::Vector &w,
           TrilinosWrappers::MPI::Vector &x)
{
  TrilinosWrappers::SparseMatrix m(v.size(),v.size(),v.size());
  for (unsigned int i=0; i<m.m(); ++i)
    for (unsigned int j=0; j<m.m(); ++j)
      m.set (i,j, i+2*j);

  for (unsigned int i=0; i<v.size(); ++i)
    {
      v(i) = i;
      w(i) = i+1;
    }

  m.compress (VectorOperation::insert);
  v.compress (VectorOperation::insert);
  w.compress (VectorOperation::insert);

  // x=w-Mv
  const double s = m.residual (x, v, w);

  // make sure we get the expected result: x(i) = (i+1) - sum_j (i+2j)*j
  for (unsigned int i=0; i<v.size(); ++i)
    {
      AssertThrow (v(i) == i, ExcInternalError());
      AssertThrow (w(i) == i+1, ExcInternalError());

      double result = i+1;
      for (unsigned int j=0; j<m.m(); ++j)
        result -= (i+2*j)*j;

      AssertThrow (x(i) == result, ExcInternalError());
    }

  // the returned value must equal the l2 norm of the residual vector
  AssertThrow (s == x.l2_norm(), ExcInternalError());

  deallog << "OK" << std::endl;
}
// Driver: initialize MPI, build three distributed vectors of size 100 with a
// complete (non-partitioned) index set, and run the residual test above.
int main (int argc, char **argv)
{
initlog();
// MPI must be initialized before any Trilinos vector is created
Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
try
{
{
TrilinosWrappers::MPI::Vector v;
v.reinit(complete_index_set(100), MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector w;
w.reinit(complete_index_set(100), MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector x;
x.reinit(complete_index_set(100), MPI_COMM_WORLD);
test (v,w,x);
}
}
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Exception on processing: " << std::endl
<< exc.what() << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
catch (...)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Unknown exception!" << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
};
}
| kalj/dealii | tests/trilinos/sparse_matrix_vector_07.cc | C++ | lgpl-2.1 | 3,251 |
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html
#include "AdaptivityAction.h"
#ifdef LIBMESH_ENABLE_AMR
#include "FEProblem.h"
#include "NonlinearSystemBase.h"
#include "Adaptivity.h"
#include "Executioner.h"
#include "MooseEnum.h"
#include "MooseVariableFE.h"
#include "RelationshipManager.h"
// libMesh includes
#include "libmesh/transient_system.h"
#include "libmesh/system_norm.h"
#include "libmesh/enum_norm_type.h"
registerMooseAction("MooseApp", AdaptivityAction, "setup_adaptivity");
registerMooseAction("MooseApp", AdaptivityAction, "add_geometric_rm");
registerMooseAction("MooseApp", AdaptivityAction, "add_algebraic_rm");
defineLegacyParams(AdaptivityAction);
// Declares every input parameter understood by the [Adaptivity] block.
// Fix: user-facing description of "interval" contained the typo "betweeen".
InputParameters
AdaptivityAction::validParams()
{
  InputParameters params = Action::validParams();

  // Available libMesh error estimators; Kelly is the default.
  MooseEnum estimators("KellyErrorEstimator LaplacianErrorEstimator PatchRecoveryErrorEstimator",
                       "KellyErrorEstimator");

  params.addParam<unsigned int>(
      "steps", 0, "The number of adaptivity steps to perform at any one time for steady state");
  params.addRangeCheckedParam<unsigned int>(
      "interval", 1, "interval>0", "The number of time steps between each adaptivity phase");
  params.addParam<unsigned int>(
      "initial_adaptivity",
      0,
      "The number of adaptivity steps to perform using the initial conditions");
  params.addParam<Real>("refine_fraction",
                        0.0,
                        "The fraction of elements or error to refine. Should be between 0 and 1.");
  params.addParam<Real>("coarsen_fraction",
                        0.0,
                        "The fraction of elements or error to coarsen. Should be between 0 and 1.");
  params.addParam<unsigned int>(
      "max_h_level",
      0,
      "Maximum number of times a single element can be refined. If 0 then infinite.");
  params.addParam<MooseEnum>(
      "error_estimator", estimators, "The class name of the error estimator you want to use.");
  params.addDeprecatedParam<bool>(
      "print_changed_info",
      false,
      "Determines whether information about the mesh is printed when adaptivity occurs",
      "Use the Console output parameter 'print_mesh_changed_info'");
  params.addParam<Real>("start_time",
                        -std::numeric_limits<Real>::max(),
                        "The time that adaptivity will be active after.");
  params.addParam<Real>("stop_time",
                        std::numeric_limits<Real>::max(),
                        "The time after which adaptivity will no longer be active.");
  params.addParam<std::vector<std::string>>(
      "weight_names", "List of names of variables that will be associated with weight_values");
  params.addParam<std::vector<Real>>(
      "weight_values",
      "List of values between 0 and 1 to weight the associated weight_names error by");
  params.addParam<unsigned int>("cycles_per_step", 1, "The number of adaptivity cycles per step");
  params.addParam<bool>(
      "show_initial_progress", true, "Show the progress of the initial adaptivity");
  params.addParam<bool>(
      "recompute_markers_during_cycles", false, "Recompute markers during adaptivity cycles");
  return params;
}
AdaptivityAction::AdaptivityAction(InputParameters params) : Action(params) {}
// Dispatches on the current task:
//  - "add_algebraic_rm" / "add_geometric_rm": install the relationship
//    managers (ghosting functors) adaptivity needs;
//  - "setup_adaptivity": configure the Adaptivity object from input params.
void
AdaptivityAction::act()
{
// Here we are going to mostly mimic the default ghosting in libmesh
// By default libmesh adds:
// 1) GhostPointNeighbors on the mesh
// 2) DefaultCoupling with 1 layer as an algebraic ghosting functor on the dof_map, which also
// gets added to the mesh at the time a new System is added
// 3) DefaultCoupling with 0 layers as a coupling functor on the dof_map, which also gets added to
// the mesh at the time a new System is added
//
// What we will do differently is:
// - The 3rd ghosting functor adds nothing so we will not add it at all
if (_current_task == "add_algebraic_rm")
{
auto rm_params = _factory.getValidParams("ElementSideNeighborLayers");
rm_params.set<std::string>("for_whom") = "Adaptivity";
rm_params.set<MooseMesh *>("mesh") = _mesh.get();
rm_params.set<Moose::RelationshipManagerType>("rm_type") =
Moose::RelationshipManagerType::ALGEBRAIC;
if (rm_params.areAllRequiredParamsValid())
{
auto rm_obj = _factory.create<RelationshipManager>(
"ElementSideNeighborLayers", "adaptivity_algebraic_ghosting", rm_params);
// Delete the resources created on behalf of the RM if it ends up not being added to the
// App.
if (!_app.addRelationshipManager(rm_obj))
_factory.releaseSharedObjects(*rm_obj);
}
else
mooseError("Invalid initialization of ElementSideNeighborLayers");
}
else if (_current_task == "add_geometric_rm")
{
auto rm_params = _factory.getValidParams("MooseGhostPointNeighbors");
rm_params.set<std::string>("for_whom") = "Adaptivity";
rm_params.set<MooseMesh *>("mesh") = _mesh.get();
rm_params.set<Moose::RelationshipManagerType>("rm_type") =
Moose::RelationshipManagerType::GEOMETRIC;
if (rm_params.areAllRequiredParamsValid())
{
auto rm_obj = _factory.create<RelationshipManager>(
"MooseGhostPointNeighbors", "adaptivity_geometric_ghosting", rm_params);
// Delete the resources created on behalf of the RM if it ends up not being added to the
// App.
if (!_app.addRelationshipManager(rm_obj))
_factory.releaseSharedObjects(*rm_obj);
}
else
mooseError("Invalid initialization of MooseGhostPointNeighbors");
}
else if (_current_task == "setup_adaptivity")
{
NonlinearSystemBase & system = _problem->getNonlinearSystemBase();
Adaptivity & adapt = _problem->adaptivity();
// we don't need to run mesh modifiers *again* after they ran already during the mesh
// splitting process. Adaptivity::init must be called for any adaptivity to work, however, so we
// can't just skip it for the useSplit case.
if (_app.isUseSplit())
adapt.init(0, 0);
else
adapt.init(getParam<unsigned int>("steps"), getParam<unsigned int>("initial_adaptivity"));
// Forward the remaining user parameters to the Adaptivity object.
adapt.setErrorEstimator(getParam<MooseEnum>("error_estimator"));
adapt.setParam("cycles_per_step", getParam<unsigned int>("cycles_per_step"));
adapt.setParam("refine fraction", getParam<Real>("refine_fraction"));
adapt.setParam("coarsen fraction", getParam<Real>("coarsen_fraction"));
adapt.setParam("max h-level", getParam<unsigned int>("max_h_level"));
adapt.setParam("recompute_markers_during_cycles",
getParam<bool>("recompute_markers_during_cycles"));
adapt.setPrintMeshChanged(getParam<bool>("print_changed_info"));
const std::vector<std::string> & weight_names =
getParam<std::vector<std::string>>("weight_names");
const std::vector<Real> & weight_values = getParam<std::vector<Real>>("weight_values");
auto num_weight_names = weight_names.size();
auto num_weight_values = weight_values.size();
if (num_weight_names)
{
if (num_weight_names != num_weight_values)
mooseError("Number of weight_names must be equal to number of weight_values in "
"Execution/Adaptivity");
// If weights have been specified then set the default weight to zero
std::vector<Real> weights(system.nVariables(), 0);
for (MooseIndex(num_weight_names) i = 0; i < num_weight_names; i++)
{
std::string name = weight_names[i];
auto value = weight_values[i];
weights[system.getVariable(0, name).number()] = value;
}
// Build a weighted H1-seminorm over all variables for the estimator.
std::vector<FEMNormType> norms(system.nVariables(), H1_SEMINORM);
SystemNorm sys_norm(norms, weights);
adapt.setErrorNorm(sys_norm);
}
adapt.setTimeActive(getParam<Real>("start_time"), getParam<Real>("stop_time"));
adapt.setInterval(getParam<unsigned int>("interval"));
}
}
#endif // LIBMESH_ENABLE_AMR
| nuclear-wizard/moose | framework/src/actions/AdaptivityAction.C | C++ | lgpl-2.1 | 8,214 |
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html
#include "FullSolveMultiApp.h"
#include "LayeredSideFluxAverage.h"
#include "Executioner.h"
// libMesh
#include "libmesh/mesh_tools.h"
registerMooseObject("MooseApp", FullSolveMultiApp);
defineLegacyParams(FullSolveMultiApp);
// Input parameters: inherits everything from MultiApp and adds two flags
// controlling backup/restore and output-history retention.
InputParameters
FullSolveMultiApp::validParams()
{
InputParameters params = MultiApp::validParams();
params.addClassDescription("Performs a complete simulation during each execution.");
params.addParam<bool>(
"no_backup_and_restore",
false,
"True to turn off backup/restore for this multiapp. This is useful when doing steady-state "
"Picard iterations where we want to use the solution of previous Picard iteration as the "
"initial guess of the current Picard iteration");
params.addParam<bool>(
"keep_full_output_history",
false,
"Whether or not to keep the full output history when this multiapp has multiple entries");
return params;
}
FullSolveMultiApp::FullSolveMultiApp(const InputParameters & parameters) : MultiApp(parameters) {}
// Persist sub-app state via the base class unless the user explicitly
// disabled backup/restore with "no_backup_and_restore".
void
FullSolveMultiApp::backup()
{
  const bool disabled = getParam<bool>("no_backup_and_restore");
  if (!disabled)
    MultiApp::backup();
}
// Restore sub-app state via the base class unless the user explicitly
// disabled backup/restore with "no_backup_and_restore".
void
FullSolveMultiApp::restore()
{
  const bool disabled = getParam<bool>("no_backup_and_restore");
  if (!disabled)
    MultiApp::restore();
}
// After the base-class setup, cache (and init) one Executioner per local
// sub-app so solveStep() can run them directly.
void
FullSolveMultiApp::initialSetup()
{
MultiApp::initialSetup();
if (_has_an_app)
{
// Swap in the sub-apps' communicator for the duration of this scope.
Moose::ScopedCommSwapper swapper(_my_comm);
_executioners.resize(_my_num_apps);
// Grab Executioner from each app
for (unsigned int i = 0; i < _my_num_apps; i++)
{
auto & app = _apps[i];
Executioner * ex = app->getExecutioner();
if (!ex)
mooseError("Executioner does not exist!");
ex->init();
_executioners[i] = ex;
}
}
}
// Runs each local sub-app to completion and reports whether every solve
// converged.
//
// Fix: the previous implementation queried MPI_Comm_rank into a local
// `rank` that was never used afterwards; the dead query (and its error
// check) has been removed.
bool
FullSolveMultiApp::solveStep(Real /*dt*/, Real /*target_time*/, bool auto_advance)
{
  // This MultiApp always performs complete simulations, so a
  // non-auto-advancing (sub-steppable) solve cannot be honored.
  if (!auto_advance)
    mooseError("FullSolveMultiApp is not compatible with auto_advance=false");

  // Ranks that own no sub-app trivially "converge".
  if (!_has_an_app)
    return true;

  // Make the sub-apps' communicator active while solving.
  Moose::ScopedCommSwapper swapper(_my_comm);

  bool last_solve_converged = true;
  for (unsigned int i = 0; i < _my_num_apps; i++)
  {
    // reset output system if desired
    if (!getParam<bool>("keep_full_output_history"))
      _apps[i]->getOutputWarehouse().reset();

    Executioner * ex = _executioners[i];
    ex->execute();
    if (!ex->lastSolveConverged())
      last_solve_converged = false;
  }

  return last_solve_converged;
}
| nuclear-wizard/moose | framework/src/multiapps/FullSolveMultiApp.C | C++ | lgpl-2.1 | 2,866 |
# Copyright (c) 2005 Ruby-GNOME2 Project Team
# This program is licenced under the same licence as Ruby-GNOME2.
#
# $Id: cairo-self-intersect.rb,v 1.1 2005/10/12 05:38:30 ktou Exp $
=begin
= cairo/Self Intersect
This demo shows how to use GDK and cairo to show cross.
From http://cairographics.org/samples/xxx_self_intersect.html.
=end
require 'common'
module Demo
  # Demo window that strokes a self-intersecting path with a wide,
  # round-capped, round-joined pen (see the cairo "self intersect" sample).
  class CairoSelfIntersect < CairoWindow
    def initialize
      super('cairo self intersect')
    end

    # Draws the path on the given cairo context +cr+ (coordinates are in
    # the unit square used by CairoWindow demos).
    def draw(cr)
      cr.move_to(0.3, 0.3)
      cr.line_to(0.7, 0.3)
      # doubling back over the horizontal segment makes the path self-intersect
      cr.line_to(0.5, 0.3)
      cr.line_to(0.5, 0.7)
      cr.set_line_width(0.22)
      cr.set_line_cap(Cairo::LINE_CAP_ROUND)
      cr.set_line_join(Cairo::LINE_JOIN_ROUND)
      cr.stroke
    end
  end
end
| benolee/ruby-gnome2 | gtk2/sample/gtk-demo/cairo-self-intersect.rb | Ruby | lgpl-2.1 | 767 |
/****************************************************************************
**
** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of the Qt Assistant of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.
**
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include "bookmarkfiltermodel.h"
#include "bookmarkitem.h"
#include "bookmarkmodel.h"
// Starts with no source model; by default the proxy shows folders only
// (hideBookmarks == true).
BookmarkFilterModel::BookmarkFilterModel(QObject *parent)
: QAbstractProxyModel(parent)
, hideBookmarks(true)
, sourceModel(0)
{
}
/*
    Installs a new source model: disconnects the tracking slots from the
    previous model (if any), reconnects them to the new one and rebuilds the
    cache of source indexes that pass the current filter.

    Fixes two defects in the previous implementation:
    - QAbstractProxyModel::setSourceModel() was called with the *old*
      member (it was only reassigned afterwards), so the base class never
      saw the new model;
    - the connect() calls were made even when the qobject_cast to
      BookmarkModel failed, i.e. with a null sender.
*/
void BookmarkFilterModel::setSourceModel(QAbstractItemModel *_sourceModel)
{
    beginResetModel();

    if (sourceModel) {
        disconnect(sourceModel, SIGNAL(dataChanged(QModelIndex, QModelIndex)),
            this, SLOT(changed(QModelIndex, QModelIndex)));
        disconnect(sourceModel, SIGNAL(rowsInserted(QModelIndex, int, int)),
            this, SLOT(rowsInserted(QModelIndex, int, int)));
        disconnect(sourceModel,
            SIGNAL(rowsAboutToBeRemoved(QModelIndex, int, int)), this,
            SLOT(rowsAboutToBeRemoved(QModelIndex, int, int)));
        disconnect(sourceModel, SIGNAL(rowsRemoved(QModelIndex, int, int)),
            this, SLOT(rowsRemoved(QModelIndex, int, int)));
        disconnect(sourceModel, SIGNAL(layoutAboutToBeChanged()), this,
            SLOT(layoutAboutToBeChanged()));
        disconnect(sourceModel, SIGNAL(layoutChanged()), this,
            SLOT(layoutChanged()));
        disconnect(sourceModel, SIGNAL(modelAboutToBeReset()), this,
            SLOT(modelAboutToBeReset()));
        disconnect(sourceModel, SIGNAL(modelReset()), this, SLOT(modelReset()));
    }

    sourceModel = qobject_cast<BookmarkModel*> (_sourceModel);
    QAbstractProxyModel::setSourceModel(sourceModel);

    if (sourceModel) {
        connect(sourceModel, SIGNAL(dataChanged(QModelIndex, QModelIndex)), this,
            SLOT(changed(QModelIndex, QModelIndex)));
        connect(sourceModel, SIGNAL(rowsInserted(QModelIndex, int, int)),
            this, SLOT(rowsInserted(QModelIndex, int, int)));
        connect(sourceModel, SIGNAL(rowsAboutToBeRemoved(QModelIndex, int, int)),
            this, SLOT(rowsAboutToBeRemoved(QModelIndex, int, int)));
        connect(sourceModel, SIGNAL(rowsRemoved(QModelIndex, int, int)), this,
            SLOT(rowsRemoved(QModelIndex, int, int)));
        connect(sourceModel, SIGNAL(layoutAboutToBeChanged()), this,
            SLOT(layoutAboutToBeChanged()));
        connect(sourceModel, SIGNAL(layoutChanged()), this,
            SLOT(layoutChanged()));
        connect(sourceModel, SIGNAL(modelAboutToBeReset()), this,
            SLOT(modelAboutToBeReset()));
        connect(sourceModel, SIGNAL(modelReset()), this, SLOT(modelReset()));
        setupCache(sourceModel->index(0, 0, QModelIndex()).parent());
    }

    endResetModel();
}
// The proxy flattens the tree: every cached source index becomes one row.
int BookmarkFilterModel::rowCount(const QModelIndex &index) const
{
Q_UNUSED(index)
return cache.count();
}
// Column count is delegated to the source model (0 if none is set).
int BookmarkFilterModel::columnCount(const QModelIndex &index) const
{
Q_UNUSED(index)
if (sourceModel)
return sourceModel->columnCount();
return 0;
}
// Proxy row -> cached source index; invalid index for out-of-range rows.
QModelIndex BookmarkFilterModel::mapToSource(const QModelIndex &proxyIndex) const
{
const int row = proxyIndex.row();
if (proxyIndex.isValid() && row >= 0 && row < cache.count())
return cache[row];
return QModelIndex();
}
// Source index -> proxy row via its position in the cache (-1 if absent,
// which index() turns into an invalid index).
QModelIndex BookmarkFilterModel::mapFromSource(const QModelIndex &sourceIndex) const
{
return index(cache.indexOf(sourceIndex), 0, QModelIndex());
}
// The proxy is flat, so no item has a parent.
QModelIndex BookmarkFilterModel::parent(const QModelIndex &child) const
{
Q_UNUSED(child)
return QModelIndex();
}
// Creates a flat (parentless) index after bounds-checking row/column
// against the cache and the source model's column count.
QModelIndex BookmarkFilterModel::index(int row, int column,
const QModelIndex &index) const
{
Q_UNUSED(index)
if (row < 0 || column < 0 || cache.count() <= row
|| !sourceModel || sourceModel->columnCount() <= column) {
return QModelIndex();
}
return createIndex(row, 0);
}
// Drop actions, flags, data and setData are all forwarded to the source
// model (with index mapping where needed); safe defaults when unset.
Qt::DropActions BookmarkFilterModel::supportedDropActions () const
{
if (sourceModel)
return sourceModel->supportedDropActions();
return Qt::IgnoreAction;
}
Qt::ItemFlags BookmarkFilterModel::flags(const QModelIndex &index) const
{
if (sourceModel)
return sourceModel->flags(index);
return Qt::NoItemFlags;
}
QVariant BookmarkFilterModel::data(const QModelIndex &index, int role) const
{
if (sourceModel)
return sourceModel->data(mapToSource(index), role);
return QVariant();
}
bool BookmarkFilterModel::setData(const QModelIndex &index, const QVariant &value,
int role)
{
if (sourceModel)
return sourceModel->setData(mapToSource(index), value, role);
return false;
}
// Switch the filter to "bookmarks hidden" (folders only) and rebuild the cache.
void BookmarkFilterModel::filterBookmarks()
{
if (sourceModel) {
beginResetModel();
hideBookmarks = true;
setupCache(sourceModel->index(0, 0, QModelIndex()).parent());
endResetModel();
}
}
// Switch the filter to "folders hidden" (bookmarks only) and rebuild the cache.
void BookmarkFilterModel::filterBookmarkFolders()
{
if (sourceModel) {
beginResetModel();
hideBookmarks = false;
setupCache(sourceModel->index(0, 0, QModelIndex()).parent());
endResetModel();
}
}
// Forward dataChanged from the source with the range mapped to proxy indexes.
void BookmarkFilterModel::changed(const QModelIndex &topLeft,
const QModelIndex &bottomRight)
{
emit dataChanged(mapFromSource(topLeft), mapFromSource(bottomRight));
}
// Keep the cache in sync when a row is inserted into the source: find the
// previous folder sibling to determine the cache insertion point, then
// insert the new index if it passes the current filter.
void BookmarkFilterModel::rowsInserted(const QModelIndex &parent, int start,
int end)
{
if (!sourceModel)
return;
QModelIndex cachePrevious = parent;
if (BookmarkItem *parentItem = sourceModel->itemFromIndex(parent)) {
BookmarkItem *newItem = parentItem->child(start);
// iterate over tree hirarchie to find the previous folder
for (int i = 0; i < parentItem->childCount(); ++i) {
if (BookmarkItem *child = parentItem->child(i)) {
const QModelIndex &tmp = sourceModel->indexFromItem(child);
if (tmp.data(UserRoleFolder).toBool() && child != newItem)
cachePrevious = tmp;
}
}
const QModelIndex &newIndex = sourceModel->indexFromItem(newItem);
const bool isFolder = newIndex.data(UserRoleFolder).toBool();
// only mirror the insertion if the new item is visible under the filter
if ((isFolder && hideBookmarks) || (!isFolder && !hideBookmarks)) {
beginInsertRows(mapFromSource(parent), start, end);
const int index = cache.indexOf(cachePrevious) + 1;
// guard against inserting a duplicate at the computed position
if (cache.value(index, QPersistentModelIndex()) != newIndex)
cache.insert(index, newIndex);
endInsertRows();
}
}
}
// Remember which source index is about to go away; rowsRemoved() finishes
// the removal once the source has actually deleted the row.
void BookmarkFilterModel::rowsAboutToBeRemoved(const QModelIndex &parent,
int start, int end)
{
if (!sourceModel)
return;
if (BookmarkItem *parentItem = sourceModel->itemFromIndex(parent)) {
if (BookmarkItem *child = parentItem->child(start)) {
indexToRemove = sourceModel->indexFromItem(child);
if (cache.contains(indexToRemove))
beginRemoveRows(mapFromSource(parent), start, end);
}
}
}
// Complete a removal started in rowsAboutToBeRemoved().
void BookmarkFilterModel::rowsRemoved(const QModelIndex &/*parent*/, int, int)
{
if (cache.contains(indexToRemove)) {
cache.removeAll(indexToRemove);
endRemoveRows();
}
}
void BookmarkFilterModel::layoutAboutToBeChanged()
{
// TODO: ???
}
void BookmarkFilterModel::layoutChanged()
{
// TODO: ???
}
// Mirror a full reset of the source model, rebuilding the cache afterwards.
void BookmarkFilterModel::modelAboutToBeReset()
{
beginResetModel();
}
void BookmarkFilterModel::modelReset()
{
if (sourceModel)
setupCache(sourceModel->index(0, 0, QModelIndex()).parent());
endResetModel();
}
// Rebuilds the flat cache by walking the whole source subtree under `parent`.
// NOTE(review): assumes sourceModel is non-null — callers guard for this.
void BookmarkFilterModel::setupCache(const QModelIndex &parent)
{
cache.clear();
for (int i = 0; i < sourceModel->rowCount(parent); ++i)
collectItems(sourceModel->index(i, 0, parent));
}
// Depth-first traversal: appends every index that passes the current filter
// (folders when hideBookmarks, bookmarks otherwise), then recurses.
void BookmarkFilterModel::collectItems(const QModelIndex &parent)
{
if (parent.isValid()) {
bool isFolder = sourceModel->data(parent, UserRoleFolder).toBool();
if ((isFolder && hideBookmarks) || (!isFolder && !hideBookmarks))
cache.append(parent);
if (sourceModel->hasChildren(parent)) {
for (int i = 0; i < sourceModel->rowCount(parent); ++i)
collectItems(sourceModel->index(i, 0, parent));
}
}
}
// -- BookmarkTreeModel
// -- BookmarkTreeModel
BookmarkTreeModel::BookmarkTreeModel(QObject *parent)
: QSortFilterProxyModel(parent)
{
}
// Expose at most one column of the underlying model.
int BookmarkTreeModel::columnCount(const QModelIndex &parent) const
{
return qMin(1, QSortFilterProxyModel::columnCount(parent));
}
// Accept only rows that are bookmark folders (UserRoleFolder == true).
// NOTE(review): the qobject_cast result is used unchecked — this proxy is
// presumably only ever installed on a BookmarkModel; verify at call sites.
bool BookmarkTreeModel::filterAcceptsRow(int row, const QModelIndex &parent) const
{
Q_UNUSED(row)
BookmarkModel *model = qobject_cast<BookmarkModel*> (sourceModel());
if (model->rowCount(parent) > 0
&& model->data(model->index(row, 0, parent), UserRoleFolder).toBool())
return true;
return false;
}
| mer-qt/qttools | src/assistant/assistant/bookmarkfiltermodel.cpp | C++ | lgpl-2.1 | 10,492 |
//
// chat_client.cpp
// ~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2010 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <cstdlib>
#include <deque>
#include <iostream>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include "chat_message.hpp"
using boost::asio::ip::tcp;
typedef std::deque<chat_message> chat_message_queue;
// Asynchronous chat client: connects to the first reachable endpoint, then
// runs a read loop (header -> body -> header ...) on the io_service thread.
// write()/close() are thread-safe because they post onto the io_service
// instead of touching the socket directly.
class chat_client
{
public:
chat_client(boost::asio::io_service& io_service,
tcp::resolver::iterator endpoint_iterator)
: io_service_(io_service),
socket_(io_service)
{
// kick off an async connect to the first resolved endpoint; on failure
// handle_connect() retries with the next one
tcp::endpoint endpoint = *endpoint_iterator;
socket_.async_connect(endpoint,
boost::bind(&chat_client::handle_connect, this,
boost::asio::placeholders::error, ++endpoint_iterator));
}
// Queue a message for sending (safe to call from any thread).
void write(const chat_message& msg)
{
io_service_.post(boost::bind(&chat_client::do_write, this, msg));
}
// Close the socket from the io_service thread (safe to call from any thread).
void close()
{
io_service_.post(boost::bind(&chat_client::do_close, this));
}
private:
// On success: start the header-read loop. On failure: try the next endpoint.
void handle_connect(const boost::system::error_code& error,
tcp::resolver::iterator endpoint_iterator)
{
if (!error)
{
boost::asio::async_read(socket_,
boost::asio::buffer(read_msg_.data(), chat_message::header_length),
boost::bind(&chat_client::handle_read_header, this,
boost::asio::placeholders::error));
}
else if (endpoint_iterator != tcp::resolver::iterator())
{
socket_.close();
tcp::endpoint endpoint = *endpoint_iterator;
socket_.async_connect(endpoint,
boost::bind(&chat_client::handle_connect, this,
boost::asio::placeholders::error, ++endpoint_iterator));
}
}
// Decode the fixed-size header, then read the body it announces.
void handle_read_header(const boost::system::error_code& error)
{
if (!error && read_msg_.decode_header())
{
boost::asio::async_read(socket_,
boost::asio::buffer(read_msg_.body(), read_msg_.body_length()),
boost::bind(&chat_client::handle_read_body, this,
boost::asio::placeholders::error));
}
else
{
do_close();
}
}
// Print the received body, then loop back to reading the next header.
void handle_read_body(const boost::system::error_code& error)
{
if (!error)
{
std::cout.write(read_msg_.body(), read_msg_.body_length());
std::cout << "\n";
boost::asio::async_read(socket_,
boost::asio::buffer(read_msg_.data(), chat_message::header_length),
boost::bind(&chat_client::handle_read_header, this,
boost::asio::placeholders::error));
}
else
{
do_close();
}
}
// Runs on the io_service thread; msg is passed by value so the cross-thread
// post owns its own copy. Only start an async_write if one is not already
// in flight (the queue preserves ordering).
void do_write(chat_message msg)
{
bool write_in_progress = !write_msgs_.empty();
write_msgs_.push_back(msg);
if (!write_in_progress)
{
boost::asio::async_write(socket_,
boost::asio::buffer(write_msgs_.front().data(),
write_msgs_.front().length()),
boost::bind(&chat_client::handle_write, this,
boost::asio::placeholders::error));
}
}
// Pop the sent message and continue draining the queue, if non-empty.
void handle_write(const boost::system::error_code& error)
{
if (!error)
{
write_msgs_.pop_front();
if (!write_msgs_.empty())
{
boost::asio::async_write(socket_,
boost::asio::buffer(write_msgs_.front().data(),
write_msgs_.front().length()),
boost::bind(&chat_client::handle_write, this,
boost::asio::placeholders::error));
}
}
else
{
do_close();
}
}
void do_close()
{
socket_.close();
}
private:
boost::asio::io_service& io_service_;
tcp::socket socket_;
chat_message read_msg_;     // staging buffer for the message being received
chat_message_queue write_msgs_; // FIFO of messages awaiting transmission
};
// Entry point: resolve <host> <port>, run the io_service on a background
// thread, and feed lines from stdin to the client until EOF.
int main(int argc, char* argv[])
{
try
{
if (argc != 3)
{
std::cerr << "Usage: chat_client <host> <port>\n";
return 1;
}
boost::asio::io_service io_service;
tcp::resolver resolver(io_service);
tcp::resolver::query query(argv[1], argv[2]);
tcp::resolver::iterator iterator = resolver.resolve(query);
chat_client c(io_service, iterator);
// the io_service (and all socket I/O) runs on this background thread
boost::thread t(boost::bind(&boost::asio::io_service::run, &io_service));
char line[chat_message::max_body_length + 1];
while (std::cin.getline(line, chat_message::max_body_length + 1))
{
using namespace std; // For strlen and memcpy.
chat_message msg;
msg.body_length(strlen(line));
memcpy(msg.body(), line, msg.body_length());
msg.encode_header();
c.write(msg);
}
// EOF on stdin: close the connection and wait for the I/O thread to finish
c.close();
t.join();
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
| airsim/tvlsim | test/boost/asio/chat/chat_client.cpp | C++ | lgpl-2.1 | 4,699 |
// ---------------------------------------------------------------------
//
// Copyright (C) 2016 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------
// Test Legendre expansion in 2D and 3D for a function given using Legendre
// coefficients.
#include "../tests.h"
#include <iostream>
#include <deal.II/base/function.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/fe/fe_series.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/numerics/vector_tools.h>
#include <gsl/gsl_sf_legendre.h>
using namespace dealii;
// Scalar function defined by a table of Legendre coefficients; value() sums
// coefficient * basis over all table entries (specialized below for 2d/3d).
template <int dim>
class LegendreFunction : public Function<dim>
{
public:
LegendreFunction(const Table<dim,double> &coefficients)
:
dealii::Function<dim>(1),
coefficients(coefficients)
{
}
virtual double value(const Point<dim> &point,
const unsigned int component = 0 ) const;
// Read access to the exact coefficients, used by test() for comparison.
const Table<dim,double> &get_coefficients() const
{
return coefficients;
}
private:
const Table<dim,double> coefficients;
};
// copy-paste from fe_series.cc
template <int dim>
double Lh(const Point<dim> &x_q,
const TableIndices<dim> &indices)
{
double res = 1.0;
for (unsigned int d = 0; d < dim; d++)
{
const double x = 2.0*(x_q[d]-0.5);
Assert ( (x_q[d] <= 1.0) && (x_q[d] >= 0.),
ExcMessage("x_q is not in [0,1]" +
Utilities::to_string(x_q[d])));
const int ind = indices[d];
res *= sqrt(2.0) * gsl_sf_legendre_Pl (ind, x);
}
return res;
}
// 2d evaluation: sum of coefficient(i,j) times the (i,j) Legendre basis.
template <>
double LegendreFunction<2>::value(const dealii::Point<2> &point,
                                  const unsigned int) const
{
  double sum = 0.0;
  for (unsigned int i = 0; i < coefficients.size(0); i++)
    for (unsigned int j = 0; j < coefficients.size(1); j++)
      sum += coefficients(i, j) * Lh(point, TableIndices<2>(i, j));
  return sum;
}
// 3d evaluation: sum of coefficient(i,j,k) times the (i,j,k) Legendre basis.
template <>
double LegendreFunction<3>::value(const dealii::Point<3> &point,
                                  const unsigned int) const
{
  double sum = 0.0;
  for (unsigned int i = 0; i < coefficients.size(0); i++)
    for (unsigned int j = 0; j < coefficients.size(1); j++)
      for (unsigned int k = 0; k < coefficients.size(2); k++)
        sum += coefficients(i, j, k) * Lh(point, TableIndices<3>(i, j, k));
  return sum;
}
// Log all 2d coefficients on a single deallog line, row-major order.
void print(const Table<2,double> &coeff)
{
for (unsigned int i = 0; i < coeff.size(0); i++)
for (unsigned int j = 0; j < coeff.size(1); j++)
deallog << coeff(i,j) << " ";
deallog << std::endl;
}
// Log all 3d coefficients on a single deallog line, row-major order.
void print(const Table<3,double> &coeff)
{
for (unsigned int i = 0; i < coeff.size(0); i++)
for (unsigned int j = 0; j < coeff.size(1); j++)
for (unsigned int k = 0; k < coeff.size(2); k++)
deallog << coeff(i,j,k) << " ";
deallog << std::endl;
}
// Resize a coefficient table to N entries per dimension (2d overload).
void resize(Table<2,double> &coeff, const unsigned int N)
{
coeff.reinit(N,N);
}
// Resize a coefficient table to N entries per dimension (3d overload).
void resize(Table<3,double> &coeff, const unsigned int N)
{
TableIndices<3> size;
for (unsigned int d=0; d<3; d++)
size[d] = N;
coeff.reinit(size);
}
// Interpolates `func` with FE_Q of degree `poly_degree` on a single reference
// cell, runs FESeries::Legendre on the resulting DoF values, and logs the
// calculated coefficients next to the exact ones for comparison.
template <int dim>
void test(const LegendreFunction<dim> &func,
const unsigned int poly_degree)
{
const unsigned int max_poly = poly_degree+3;
deallog <<"-----------------------------------"<<std::endl;
deallog << dim <<"d, p="<<poly_degree<<", max_p="<<max_poly<<std::endl;
deallog <<"-----------------------------------"<<std::endl;
Triangulation<dim> triangulation;
hp::DoFHandler<dim> dof_handler(triangulation);
hp::FECollection<dim> fe_collection;
hp::QCollection<dim> quadrature_formula;
// add some extra FEs in fe_collection
for (unsigned int p = 1; p <= max_poly; p++)
{
fe_collection.push_back(FE_Q<dim>(p));
quadrature_formula.push_back(QGauss<dim>(p+1+5));
}
GridGenerator::hyper_cube (triangulation,0.0,1.0); // reference cell
// pick the collection entry matching the requested polynomial degree
const unsigned int fe_index = poly_degree-1;
dof_handler.begin_active()->set_active_fe_index(fe_index);
dof_handler.distribute_dofs (fe_collection);
Vector<double> values(dof_handler.n_dofs());
VectorTools::interpolate (dof_handler,func,values);
// expansion with N = p+1 coefficients per dimension
const unsigned int N = poly_degree+1;
FESeries::Legendre<dim> legendre(N,
fe_collection,
quadrature_formula);
const Table<dim,double> &coeff_in = func.get_coefficients();
Table<dim,double> coeff_out;
resize(coeff_out,N);
Vector<double> local_dof_values;
typename hp::DoFHandler<dim>::active_cell_iterator
cell = dof_handler.begin_active();
{
// extract the single cell's DoF values and expand them
const unsigned int cell_n_dofs = cell->get_fe().dofs_per_cell;
const unsigned int cell_active_fe_index = cell->active_fe_index();
local_dof_values.reinit (cell_n_dofs);
cell->get_dof_values (values, local_dof_values);
legendre.calculate(local_dof_values,
cell_active_fe_index,
coeff_out);
}
deallog << "calculated:" << std::endl;
print(coeff_out);
deallog <<"exact:"<<std::endl;
print(coeff_in);
dof_handler.clear();
}
// Runs the expansion test for four cases: 2d/3d, degree p=1 (2 coefficients
// per dimension) and p=2 (3 coefficients per dimension), with coefficient
// tables filled with 1,2,3,... in row-major order.
int main ()
{
std::ofstream logfile("output");
dealii::deallog.attach(logfile,/*do not print job id*/false);
dealii::deallog.depth_console(0);
{
const unsigned int dim = 2;
const unsigned int coeff_1d = 2;
const unsigned int p = 1;
Table<dim,double> coeff_in(coeff_1d,coeff_1d);
unsigned int ind = 0;
for (unsigned int i = 0; i < coeff_1d; i++)
for (unsigned int j = 0; j < coeff_1d; j++)
coeff_in(i,j) = 1.0 + ind++;
LegendreFunction<dim> function(coeff_in);
test(function,p);
}
{
const unsigned int dim = 2;
const unsigned int coeff_1d = 3;
const unsigned int p = 2;
Table<dim,double> coeff_in(coeff_1d,coeff_1d);
unsigned int ind = 0;
for (unsigned int i = 0; i < coeff_1d; i++)
for (unsigned int j = 0; j < coeff_1d; j++)
coeff_in(i,j) = 1.0 + ind++;
LegendreFunction<dim> function(coeff_in);
test(function,p);
}
{
const unsigned int dim = 3;
const unsigned int coeff_1d = 2;
const unsigned int p = 1;
Table<dim,double> coeff_in(coeff_1d,coeff_1d,coeff_1d);
unsigned int ind = 0;
for (unsigned int i = 0; i < coeff_1d; i++)
for (unsigned int j = 0; j < coeff_1d; j++)
for (unsigned int k = 0; k < coeff_1d; k++)
coeff_in(i,j,k) = 1.0 + ind++;
LegendreFunction<dim> function(coeff_in);
test(function,p);
}
{
const unsigned int dim = 3;
const unsigned int coeff_1d = 3;
const unsigned int p = 2;
Table<dim,double> coeff_in(coeff_1d,coeff_1d,coeff_1d);
unsigned int ind = 0;
for (unsigned int i = 0; i < coeff_1d; i++)
for (unsigned int j = 0; j < coeff_1d; j++)
for (unsigned int k = 0; k < coeff_1d; k++)
coeff_in(i,j,k) = 1.0 + ind++;
LegendreFunction<dim> function(coeff_in);
test(function,p);
}
dealii::deallog << "Ok"<<std::endl;
}
| kalj/dealii | tests/fe/fe_series_05.cc | C++ | lgpl-2.1 | 7,546 |
/*
* eXist Open Source Native XML Database
* Copyright (C) 2001-07 The eXist Project
* http://exist-db.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* $Id$
*/
package org.exist.management;
import org.exist.management.impl.PerInstanceMBean;
import org.exist.storage.BrokerPool;
import org.exist.util.DatabaseConfigurationException;
/**
* A dummy agent which will be used if JMX is disabled. It just acts as an empty
* placeholder.
*/
public class DummyAgent implements Agent {

    /** No-op: JMX is disabled, so no MBean server is set up for the instance. */
    @Override
    public void initDBInstance(final BrokerPool instance) {
        // do nothing
    }

    /** No-op: nothing was registered, so nothing needs to be torn down. */
    @Override
    public void closeDBInstance(final BrokerPool instance) {
        // nothing to do
    }

    /** No-op: MBeans are silently discarded when JMX is disabled. */
    @Override
    public void addMBean(final PerInstanceMBean mbean) throws DatabaseConfigurationException {
        // just do nothing
    }

    /** No-op: status changes are not reported anywhere. */
    @Override
    public void changeStatus(final BrokerPool instance, final TaskStatus actualStatus) {
        // nothing to do
    }

    /** No-op: progress updates are not reported anywhere. */
    @Override
    public void updateStatus(final BrokerPool instance, final int percentage) {
        // nothing to do
    }
}
| ljo/exist | src/org/exist/management/DummyAgent.java | Java | lgpl-2.1 | 1,815 |
{% if level_1 == 'ubuntu' %}{% endif %}
{% if level_1 == 'business' %}{% endif %}
{% if level_1 == 'devices' %}{% endif %}
{% if level_1 == 'cloud' %}{% endif %}
{% if level_1 == 'download' %}{% endif %}
{% if level_1 == 'support' %}{% endif %}
{% if level_1 == 'project' %}{% endif %}
{% if level_1 == 'community' %}{% endif %}
{% if level_1 == 'partners' %}{% endif %} | ubuntudesign/www.ubuntu.com | templates/templates/meta.html | HTML | lgpl-3.0 | 370 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.management.subsystems;
import java.io.IOException;
/**
* @author Andy
*
*/
public class LuceneChildApplicationContextFactory extends ChildApplicationContextFactory
{
    /**
     * Creates the initial state for this subsystem, starting its child
     * application context immediately (the {@code true} flag).
     *
     * @return a freshly started {@link PropertyBackedBeanState}
     * @throws IOException if reading the subsystem configuration fails
     */
    @Override
    protected PropertyBackedBeanState createInitialState() throws IOException
    {
        return new ApplicationContextState(true);
    }

    /**
     * Destroys the child application context and then immediately
     * re-initialises it.
     * <p>
     * NOTE(review): re-initialisation happens even when {@code isPermanent}
     * is {@code true}; confirm that a permanent destroy is really meant to
     * restart the context.
     *
     * @param isPermanent whether the destruction is permanent
     */
    @Override
    protected void destroy(boolean isPermanent)
    {
        super.destroy(isPermanent);
        doInit();
    }
}
| Alfresco/alfresco-repository | src/main/java/org/alfresco/repo/management/subsystems/LuceneChildApplicationContextFactory.java | Java | lgpl-3.0 | 1,669 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.template;
import java.io.StringReader;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.model.ContentModel;
import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.NodeRef;
import org.dom4j.Document;
import org.dom4j.Element;
import org.dom4j.io.SAXReader;
/**
* Provides functionality to execute a Lucene search string and return TemplateNode objects.
*
* @author Kevin Roast
*/
public class LuceneSearchResultsMap extends BaseSearchResultsMap
{
    /**
     * Constructor
     *
     * @param parent   The parent TemplateNode to execute searches from
     * @param services The ServiceRegistry to use
     */
    public LuceneSearchResultsMap(TemplateNode parent, ServiceRegistry services)
    {
        super(parent, services);
    }

    /**
     * Executes the map key as a Lucene search string via the inherited
     * {@code query} helper and returns its results.
     * <p>
     * NOTE(review): a {@code null} key would throw a NullPointerException
     * here; presumably template usage guarantees a non-null key.
     *
     * @see org.alfresco.repo.template.BaseTemplateMap#get(java.lang.Object)
     */
    public Object get(Object key)
    {
        // execute the search
        return query(key.toString());
    }
}
| Alfresco/alfresco-repository | src/main/java/org/alfresco/repo/template/LuceneSearchResultsMap.java | Java | lgpl-3.0 | 2,177 |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Arabic language implementations of Integer and Digits classes
============================================================================
"""
from ..base.integer_internal import (MapIntBuilder, CollectionIntBuilder,
MagnitudeIntBuilder, IntegerContentBase)
from ..base.digits_internal import DigitsContentBase
#---------------------------------------------------------------------------
# Builders mapping spoken Arabic number words onto integer values.
# Fix: the magnitude builders below previously referenced an undefined name
# ``int_و_1_99`` (NameError at import time); the collection is actually
# defined as ``int_and_1_99``.
int_0 = MapIntBuilder({
    "صفر": 0,
})
int_1_9 = MapIntBuilder({
    "واحد": 1,
    "اثنان": 2,
    "ثلاثة": 3,
    "اربعة": 4,
    "خمسة": 5,
    "ستة": 6,
    "سبعة": 7,
    "ثمانية": 8,
    "تسعة": 9,
})
int_10_19 = MapIntBuilder({
    "عشرة": 10,
    "احدى عشر": 11,
    "اثنا عشر": 12,
    "ثلاثة عشر": 13,
    "اربعة عشر": 14,
    "خمسة عشر": 15,
    "ستة عشر": 16,
    "سبعة عشر": 17,
    "ثمانية عشر": 18,
    "تسعة عشر": 19,
})
# Tens words map to their multiplier of 10 (e.g. "عشرون" -> 2, i.e. 2 * 10).
int_20_90_10 = MapIntBuilder({
    "عشرون": 2,
    "ثلاثون": 3,
    "اربعون": 4,
    "خمسون": 5,
    "ستون": 6,
    "سبعون": 7,
    "ثمانون": 8,
    "تسعون": 9,
})
int_20_99 = MagnitudeIntBuilder(
    factor = 10,
    spec = "<multiplier> [<remainder>]",
    multipliers = [int_20_90_10],
    remainders = [int_1_9],
)
# 1-99 optionally preceded by the conjunction "و" ("and").
int_and_1_99 = CollectionIntBuilder(
    spec = "[و] <element>",
    set = [int_1_9, int_10_19, int_20_99],
)
# NOTE(review): the specs below still use the English magnitude words
# "hundred"/"thousand"/"million"; they are runtime recognition strings, so
# they are left unchanged here — confirm whether Arabic equivalents were
# intended.
int_100s = MagnitudeIntBuilder(
    factor = 100,
    spec = "[<multiplier>] hundred [<remainder>]",
    multipliers = [int_1_9],
    remainders = [int_and_1_99],
)
int_100big = MagnitudeIntBuilder(
    factor = 100,
    spec = "[<multiplier>] hundred [<remainder>]",
    multipliers = [int_10_19, int_20_99],
    remainders = [int_and_1_99]
)
int_1000s = MagnitudeIntBuilder(
    factor = 1000,
    spec = "[<multiplier>] thousand [<remainder>]",
    multipliers = [int_1_9, int_10_19, int_20_99, int_100s],
    remainders = [int_and_1_99, int_100s]
)
int_1000000s = MagnitudeIntBuilder(
    factor = 1000000,
    spec = "[<multiplier>] million [<remainder>]",
    multipliers = [int_1_9, int_10_19, int_20_99, int_100s, int_1000s],
    remainders = [int_and_1_99, int_100s, int_1000s],
)
#---------------------------------------------------------------------------
class IntegerContent(IntegerContentBase):
    # Ordered collection of builders the framework uses to parse spoken
    # Arabic integers, from zero up through the millions.
    builders = [int_0, int_1_9, int_10_19, int_20_99,
                int_100s, int_100big, int_1000s, int_1000000s]
class DigitsContent(DigitsContentBase):
    # Spoken forms for the digits 0-9; the first entry is a tuple of two
    # accepted pronunciations for zero.
    digits = [("صفر", "اووه"), "واحد", "اثنان", "ثلاثة", "اربعة",
              "خمسة", "ستة", "سبعة", "ثمانية", "تسعة"]
/** @defgroup crc_file CRC
@ingroup STM32F4xx
@brief <b>libopencm3 STM32F4xx CRC</b>
@version 1.0.0
@date 15 October 2012
LGPL License Terms @ref lgpl_license
*/
/*
* This file is part of the libopencm3 project.
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
#include <libopencm3/stm32/crc.h>
#include <libopencm3/stm32/common/crc_common_all.h>
| mrnuke/libopencm3 | lib/stm32/f4/crc.c | C | lgpl-3.0 | 982 |
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<link rel="stylesheet" type="text/css" href="manual.css">
</head>
<body>
<h1>Event</h1>
<h2>Request</h2>
<h3>Path</h3>
<table>
<tr>
<td>/events</td>
</tr>
</table>
<h3>Parameters</h3>
<table>
<tr>
<td>nothing</td>
</tr>
</table>
<h2>Response</h2>
<table>
<tr>
<th>Key</th>
<th>Value type</th>
<th>Brief</th>
<th>Reply</th>
</tr>
<tr>
<td>result</td>
<td>Boolean</td>
<td>True on success. Otherwise False.</td>
<td>A</td>
</tr>
<tr>
<td>message</td>
<td>String</td>
<td>Error message. This key is reply only when result is False.</td>
<td>F</td>
</tr>
<tr>
<td>numberOfEvents</td>
<td>Number</td>
<td>The number of events.</td>
<td>T</td>
</tr>
<tr>
<td>events</td>
<td>Array</td>
<td>The array of event object.</td>
<td>T</td>
</tr>
</table>
A: always, T: only when result is True, F: only when result is False.
<h3>Event Object</h3>
<table>
<tr>
<th>Key</th>
<th>Value type</th>
<th>Brief</th>
</tr>
<tr>
<td>serverId</td>
<td>Number</td>
<td>A unique server ID.</td>
</tr>
<tr>
<td>time</td>
<td>Number</td>
<td></td>
</tr>
<tr>
<td>eventValue</td>
<td>Number</td>
<td></td>
</tr>
<tr>
<td>triggerId</td>
<td>Number</td>
<td></td>
</tr>
<tr>
<td>status</td>
<td>Number</td>
<td></td>
</tr>
<tr>
<td>severity</td>
<td>Number</td>
<td></td>
</tr>
<tr>
<td>lastChangeTime</td>
<td>Number</td>
<td></td>
</tr>
<tr>
<td>hostId</td>
<td>String</td>
<td></td>
</tr>
<tr>
<td>hostName</td>
<td>String</td>
<td></td>
</tr>
<tr>
<td>brief</td>
<td>String</td>
<td>Brief of the event(trigger).</td>
</tr>
</table>
</body>
</html>
| project-hatohol/hatohol-14.09 | doc/server/manual/rest_event.html | HTML | lgpl-3.0 | 1,711 |
// RUN: %clang_cc1 -triple armv7-apple-darwin9 -emit-llvm -o - %s | FileCheck %s
// This isn't really testing anything ARM-specific; it's just a convenient
// 32-bit platform.
#define SWIFTCALL __attribute__((swiftcall))
#define OUT __attribute__((swift_indirect_result))
#define ERROR __attribute__((swift_error_result))
#define CONTEXT __attribute__((swift_context))
/*****************************************************************************/
/****************************** PARAMETER ABIS *******************************/
/*****************************************************************************/
// Two swift_indirect_result parameters with a void return: the expected-IR
// line below shows the first becoming an sret pointer, the second a plain
// noalias pointer.
SWIFTCALL void indirect_result_1(OUT int *arg0, OUT float *arg1) {}
// CHECK-LABEL: define {{.*}} void @indirect_result_1(i32* noalias sret align 4 dereferenceable(4){{.*}}, float* noalias align 4 dereferenceable(4){{.*}})
// TODO: maybe this shouldn't suppress sret.
// Same as above but with a direct int return, which suppresses sret on the
// first indirect-result pointer.
SWIFTCALL int indirect_result_2(OUT int *arg0, OUT float *arg1) { __builtin_unreachable(); }
// CHECK-LABEL: define {{.*}} i32 @indirect_result_2(i32* noalias align 4 dereferenceable(4){{.*}}, float* noalias align 4 dereferenceable(4){{.*}})
// A struct too large to return directly, so the return itself also goes
// through an sret pointer alongside the two indirect-result parameters.
typedef struct { char array[1024]; } struct_reallybig;
SWIFTCALL struct_reallybig indirect_result_3(OUT int *arg0, OUT float *arg1) { __builtin_unreachable(); }
// CHECK-LABEL: define {{.*}} void @indirect_result_3({{.*}}* noalias sret {{.*}}, i32* noalias align 4 dereferenceable(4){{.*}}, float* noalias align 4 dereferenceable(4){{.*}})
// A lone swift_context parameter lowers to the swiftself attribute.
SWIFTCALL void context_1(CONTEXT void *self) {}
// CHECK-LABEL: define {{.*}} void @context_1(i8* swiftself
// The context parameter keeps swiftself even when it is not the first
// argument.
SWIFTCALL void context_2(void *arg0, CONTEXT void *self) {}
// CHECK-LABEL: define {{.*}} void @context_2(i8*{{.*}}, i8* swiftself
// Context plus swift_error_result: the error out-parameter lowers to
// swifterror, and the callee body spills it through a local temporary, as
// the expected-IR lines below show.
SWIFTCALL void context_error_1(CONTEXT int *self, ERROR float **error) {}
// CHECK-LABEL: define {{.*}} void @context_error_1(i32* swiftself{{.*}}, float** swifterror)
// CHECK: [[TEMP:%.*]] = alloca float*, align 4
// CHECK: [[T0:%.*]] = load float*, float** [[ERRORARG:%.*]], align 4
// CHECK: store float* [[T0]], float** [[TEMP]], align 4
// CHECK: [[T0:%.*]] = load float*, float** [[TEMP]], align 4
// CHECK: store float* [[T0]], float** [[ERRORARG]], align 4
// Caller side of the swifterror convention: the caller must also allocate
// a swifterror temporary, copy the error pointer in before the call and
// back out afterwards.
void test_context_error_1() {
    int x;
    float *error;
    context_error_1(&x, &error);
}
// CHECK-LABEL: define void @test_context_error_1()
// CHECK: [[X:%.*]] = alloca i32, align 4
// CHECK: [[ERROR:%.*]] = alloca float*, align 4
// CHECK: [[TEMP:%.*]] = alloca swifterror float*, align 4
// CHECK: [[T0:%.*]] = load float*, float** [[ERROR]], align 4
// CHECK: store float* [[T0]], float** [[TEMP]], align 4
// CHECK: call [[SWIFTCC:swiftcc]] void @context_error_1(i32* swiftself [[X]], float** swifterror [[TEMP]])
// CHECK: [[T0:%.*]] = load float*, float** [[TEMP]], align 4
// CHECK: store float* [[T0]], float** [[ERROR]], align 4
// Context and error attributes combined with an ordinary leading parameter.
SWIFTCALL void context_error_2(short s, CONTEXT int *self, ERROR float **error) {}
// CHECK-LABEL: define {{.*}} void @context_error_2(i16{{.*}}, i32* swiftself{{.*}}, float** swifterror)
/*****************************************************************************/
/********************************** LOWERING *********************************/
/*****************************************************************************/
// OpenCL-style extended vector typedefs used by the lowering tests below.
typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float8 __attribute__((ext_vector_type(8)));
typedef double double2 __attribute__((ext_vector_type(2)));
typedef double double4 __attribute__((ext_vector_type(4)));
typedef int int3 __attribute__((ext_vector_type(3)));
typedef int int4 __attribute__((ext_vector_type(4)));
typedef int int5 __attribute__((ext_vector_type(5)));
typedef int int8 __attribute__((ext_vector_type(8)));
// For a given TYPE, emits a swiftcall returner, a swiftcall taker, and a
// C-callable driver test_TYPE() that feeds one into the other; the
// expected-IR lines after each TEST(...) use all three symbols.
#define TEST(TYPE) \
  SWIFTCALL TYPE return_##TYPE(void) { \
    TYPE result = {}; \
    return result; \
  } \
  SWIFTCALL void take_##TYPE(TYPE v) { \
  } \
  void test_##TYPE() { \
    take_##TYPE(return_##TYPE()); \
  }
/*****************************************************************************/
/*********************************** STRUCTS *********************************/
/*****************************************************************************/
// An empty struct lowers to nothing at all (no parameters, void return).
typedef struct {
} struct_empty;
TEST(struct_empty);
// CHECK-LABEL: define {{.*}} @return_struct_empty()
// CHECK: ret void
// CHECK-LABEL: define {{.*}} @take_struct_empty()
// CHECK: ret void
// A small heterogeneous struct: per the expected IR it is exploded into the
// scalar sequence (i32, i16, float, float), with the two chars coalesced.
typedef struct {
  int x;
  char c0;
  char c1;
  float f0;
  float f1;
} struct_1;
TEST(struct_1);
// CHECK-LABEL: define {{.*}} @return_struct_1()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[REC]], align 4
// CHECK: @llvm.memset
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i16, \[2 x i8\], float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i16, i16* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i16, float, float }]] undef, i32 [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i16 [[SECOND]], 1
// CHECK: [[T2:%.*]] = insertvalue [[UAGG]] [[T1]], float [[THIRD]], 2
// CHECK: [[T3:%.*]] = insertvalue [[UAGG]] [[T2]], float [[FOURTH]], 3
// CHECK: ret [[UAGG]] [[T3]]
// CHECK-LABEL: define {{.*}} @take_struct_1(i32, i16, float, float)
// CHECK: [[V:%.*]] = alloca [[REC]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store i32 %0, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: store i16 %1, i16* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: store float %2, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: store float %3, float* [[T0]], align 4
// CHECK: ret void
// CHECK-LABEL: define void @test_struct_1()
// CHECK: [[TMP:%.*]] = alloca [[REC]], align 4
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_struct_1()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store i32 [[T1]], i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store i16 [[T1]], i16* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2
// CHECK: store float [[T1]], float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3
// CHECK: store float [[T1]], float* [[T0]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i16, i16* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align 4
// CHECK: call [[SWIFTCC]] void @take_struct_1(i32 [[FIRST]], i16 [[SECOND]], float [[THIRD]], float [[FOURTH]])
// CHECK: ret void
// Like struct_1 but with c1 aligned to 2; per the expected IR the chars and
// padding now coalesce into a full i32 slot: (i32, i32, float, float).
typedef struct {
  int x;
  char c0;
  __attribute__((aligned(2))) char c1;
  float f0;
  float f1;
} struct_2;
TEST(struct_2);
// CHECK-LABEL: define {{.*}} @return_struct_2()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[REC]], align 4
// CHECK: @llvm.memcpy
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i32, float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i32, float, float }]] undef, i32 [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1
// CHECK: [[T2:%.*]] = insertvalue [[UAGG]] [[T1]], float [[THIRD]], 2
// CHECK: [[T3:%.*]] = insertvalue [[UAGG]] [[T2]], float [[FOURTH]], 3
// CHECK: ret [[UAGG]] [[T3]]
// CHECK-LABEL: define {{.*}} @take_struct_2(i32, i32, float, float)
// CHECK: [[V:%.*]] = alloca [[REC]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store i32 %0, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: store i32 %1, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: store float %2, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: store float %3, float* [[T0]], align 4
// CHECK: ret void
// CHECK-LABEL: define void @test_struct_2()
// CHECK: [[TMP:%.*]] = alloca [[REC]], align 4
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_struct_2()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store i32 [[T1]], i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store i32 [[T1]], i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2
// CHECK: store float [[T1]], float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3
// CHECK: store float [[T1]], float* [[T0]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align 4
// CHECK: call [[SWIFTCC]] void @take_struct_2(i32 [[FIRST]], i32 [[SECOND]], float [[THIRD]], float [[FOURTH]])
// CHECK: ret void
// There's no way to put a field randomly in the middle of an otherwise
// empty storage unit in C, so that case has to be tested in C++, which
// can use empty structs to introduce arbitrary padding. (In C, they end up
// with size 0 and so don't affect layout.)
// Misaligned data rule.
// Misaligned data rule: the packed float straddles a natural boundary, so
// per the expected IR the struct is passed as raw chunks (i32, i8) instead
// of as a float.
typedef struct {
  char c0;
  __attribute__((packed)) float f;
} struct_misaligned_1;
TEST(struct_misaligned_1)
// CHECK-LABEL: define {{.*}} @return_struct_misaligned_1()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align
// CHECK: [[VAR:%.*]] = alloca [[REC]], align
// CHECK: @llvm.memset
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i8 }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i8, i8* [[T0]], align
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i8 }]] undef, i32 [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i8 [[SECOND]], 1
// CHECK: ret [[UAGG]] [[T1]]
// CHECK-LABEL: define {{.*}} @take_struct_misaligned_1(i32, i8)
// CHECK: [[V:%.*]] = alloca [[REC]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store i32 %0, i32* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: store i8 %1, i8* [[T0]], align
// CHECK: ret void
// Too many scalars.
// Too many scalars to pass directly: per the expected IR the struct goes
// indirect — sret for the return, a plain pointer (not byval) as argument.
typedef struct {
  int x[5];
} struct_big_1;
TEST(struct_big_1)
// CHECK-LABEL: define {{.*}} void @return_struct_big_1({{.*}} noalias sret
// Should not be byval.
// CHECK-LABEL: define {{.*}} void @take_struct_big_1({{.*}}*{{( %.*)?}})
/*****************************************************************************/
/********************************* TYPE MERGING ******************************/
/*****************************************************************************/
// Heterogeneous float/double union: per the expected IR the overlapping
// storage is passed as two opaque i32 chunks rather than as FP values.
typedef union {
  float f;
  double d;
} union_het_fp;
TEST(union_het_fp)
// CHECK-LABEL: define {{.*}} @return_union_het_fp()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: [[VAR:%.*]] = alloca [[REC]], align 4
// CHECK: @llvm.memcpy
// CHECK: @llvm.memcpy
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, i32 }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i32 }]] undef, i32 [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1
// CHECK: ret [[UAGG]] [[T1]]
// CHECK-LABEL: define {{.*}} @take_union_het_fp(i32, i32)
// CHECK: [[V:%.*]] = alloca [[REC]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store i32 %0, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: store i32 %1, i32* [[T0]], align 4
// CHECK: ret void
// CHECK-LABEL: define void @test_union_het_fp()
// CHECK: [[TMP:%.*]] = alloca [[REC]], align 4
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_union_het_fp()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store i32 [[T1]], i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store i32 [[T1]], i32* [[T0]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: call [[SWIFTCC]] void @take_union_het_fp(i32 [[FIRST]], i32 [[SECOND]])
// CHECK: ret void
// Homogeneous union of two floats: members fully overlap, so per the
// expected IR it merges to a single float.
typedef union {
  float f1;
  float f2;
} union_hom_fp;
TEST(union_hom_fp)
// CHECK-LABEL: define void @test_union_hom_fp()
// CHECK: [[TMP:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] float @return_union_hom_fp()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store float [[CALL]], float* [[T0]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load float, float* [[T0]], align 4
// CHECK: call [[SWIFTCC]] void @take_union_hom_fp(float [[FIRST]])
// CHECK: ret void
// A float overlapping the front of a float4: per the expected IR the union
// is expanded to four scalar floats.
typedef union {
  float f1;
  float4 fv2;
} union_hom_fp_partial;
TEST(union_hom_fp_partial)
// CHECK-LABEL: define void @test_union_hom_fp_partial()
// CHECK: [[TMP:%.*]] = alloca [[REC:%.*]], align 16
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG:{ float, float, float, float }]] @return_union_hom_fp_partial()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ float, float, float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store float [[T1]], float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store float [[T1]], float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2
// CHECK: store float [[T1]], float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3
// CHECK: store float [[T1]], float* [[T0]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align
// CHECK: call [[SWIFTCC]] void @take_union_hom_fp_partial(float [[FIRST]], float [[SECOND]], float [[THIRD]], float [[FOURTH]])
// CHECK: ret void
// Ints overlapping the front of a float4: per the expected IR the first two
// lanes become i32 (where the ints live) and the rest stay float.
typedef union {
  struct { int x, y; } f1;
  float4 fv2;
} union_het_fpv_partial;
TEST(union_het_fpv_partial)
// CHECK-LABEL: define void @test_union_het_fpv_partial()
// CHECK: [[TMP:%.*]] = alloca [[REC:%.*]], align 16
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG:{ i32, i32, float, float }]] @return_union_het_fpv_partial()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ i32, i32, float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store i32 [[T1]], i32* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store i32 [[T1]], i32* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2
// CHECK: store float [[T1]], float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3
// CHECK: store float [[T1]], float* [[T0]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 3
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align
// CHECK: call [[SWIFTCC]] void @take_union_het_fpv_partial(i32 [[FIRST]], i32 [[SECOND]], float [[THIRD]], float [[FOURTH]])
// CHECK: ret void
/*****************************************************************************/
/****************************** VECTOR LEGALIZATION **************************/
/*****************************************************************************/
// A single <4 x i32> vector is legal for the target and is passed/returned
// directly (no legalization), per the expectations that follow.
TEST(int4)
// CHECK-LABEL: define {{.*}} <4 x i32> @return_int4()
// CHECK-LABEL: define {{.*}} @take_int4(<4 x i32>
// An 8 x i32 vector is too wide for the target; the expectations below show
// it legalized into two <4 x i32> halves ({ <4 x i32>, <4 x i32> }).
TEST(int8)
// CHECK-LABEL: define {{.*}} @return_int8()
// CHECK: [[RET:%.*]] = alloca [[REC:<8 x i32>]], align 32
// CHECK: [[VAR:%.*]] = alloca [[REC]], align
// CHECK: store
// CHECK: load
// CHECK: store
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ <4 x i32>, <4 x i32> }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ <4 x i32>, <4 x i32> }]] undef, <4 x i32> [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], <4 x i32> [[SECOND]], 1
// CHECK: ret [[UAGG]] [[T1]]
// CHECK-LABEL: define {{.*}} @take_int8(<4 x i32>, <4 x i32>)
// CHECK: [[V:%.*]] = alloca [[REC]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store <4 x i32> %0, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: store <4 x i32> %1, <4 x i32>* [[T0]], align
// CHECK: ret void
// CHECK-LABEL: define void @test_int8()
// CHECK: [[TMP1:%.*]] = alloca [[REC]], align
// CHECK: [[TMP2:%.*]] = alloca [[REC]], align
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_int8()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP1]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store <4 x i32> [[T1]], <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store <4 x i32> [[T1]], <4 x i32>* [[T0]], align
// CHECK: [[V:%.*]] = load [[REC]], [[REC]]* [[TMP1]], align
// CHECK: store [[REC]] [[V]], [[REC]]* [[TMP2]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP2]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
// CHECK: call [[SWIFTCC]] void @take_int8(<4 x i32> [[FIRST]], <4 x i32> [[SECOND]])
// CHECK: ret void
// A 5 x i32 vector legalizes to one full <4 x i32> plus a trailing scalar
// i32 ({ <4 x i32>, i32 }), per the expectations that follow.
TEST(int5)
// CHECK-LABEL: define {{.*}} @return_int5()
// CHECK: [[RET:%.*]] = alloca [[REC:<5 x i32>]], align 32
// CHECK: [[VAR:%.*]] = alloca [[REC]], align
// CHECK: store
// CHECK: load
// CHECK: store
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ <4 x i32>, i32 }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ <4 x i32>, i32 }]] undef, <4 x i32> [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1
// CHECK: ret [[UAGG]] [[T1]]
// CHECK-LABEL: define {{.*}} @take_int5(<4 x i32>, i32)
// CHECK: [[V:%.*]] = alloca [[REC]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store <4 x i32> %0, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: store i32 %1, i32* [[T0]], align
// CHECK: ret void
// CHECK-LABEL: define void @test_int5()
// CHECK: [[TMP1:%.*]] = alloca [[REC]], align
// CHECK: [[TMP2:%.*]] = alloca [[REC]], align
// CHECK: [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_int5()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP1]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store <4 x i32> [[T1]], <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store i32 [[T1]], i32* [[T0]], align
// CHECK: [[V:%.*]] = load [[REC]], [[REC]]* [[TMP1]], align
// CHECK: store [[REC]] [[V]], [[REC]]* [[TMP2]], align
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP2]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
// CHECK: [[SECOND:%.*]] = load i32, i32* [[T0]], align
// CHECK: call [[SWIFTCC]] void @take_int5(<4 x i32> [[FIRST]], i32 [[SECOND]])
// CHECK: ret void
// The packed attribute under-aligns the int3 member; the expectation on the
// next line shows the struct is passed as four individual i32 values rather
// than as a vector.
typedef struct {
int x;
int3 v __attribute__((packed));
} misaligned_int3;
TEST(misaligned_int3)
// CHECK-LABEL: define {{.*}} @take_misaligned_int3(i32, i32, i32, i32)
| cd80/UtilizedLLVM | tools/clang/test/CodeGen/arm-swiftcall.c | C | unlicense | 28,014 |
"""
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import svm
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# # fit the model
clf = svm.SVC()
clf.fit(X, Y, sample_weight=sample_weight)
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
pl.contourf(xx, yy, Z, alpha=0.75, cmap=pl.cm.bone)
pl.scatter(X[:, 0], X[:, 1], c=Y, s=sample_weight, alpha=0.9, cmap=pl.cm.bone)
pl.axis('off')
pl.show()
| seckcoder/lang-learn | python/sklearn/examples/svm/plot_weighted_samples.py | Python | unlicense | 999 |
import urllib
import urlparse
def get_path(url):
    """Return the path component of ``url``."""
    return urlparse.urlsplit(url).path
def get_host(url):
    """Return the host (network location) component of ``url``."""
    return urlparse.urlsplit(url).netloc
def add_path(url, new_path):
    """Given a url and path, return a new url that combines the two,
    inserting exactly one slash between the existing path and ``new_path``.
    """
    parts = urlparse.urlsplit(url)
    suffix = new_path.lstrip('/')
    base = parts.path if parts.path.endswith('/') else parts.path + '/'
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, base + suffix, parts.query, parts.fragment))
def _query_param(key, value):
    """Return ``(key, value)`` with the value coerced to a UTF-8 encoded
    byte string (decoding/str()-ing it first as needed).
    """
    if isinstance(value, str):
        text = value.decode('utf-8')
    elif isinstance(value, unicode):
        text = value
    else:
        text = unicode(value)
    return key, text.encode('utf-8')
def _make_query_tuples(params):
    """Normalise ``params`` (a mapping, or a single (key, value) pair) into
    a list of UTF-8 encoded ``(key, value)`` tuples.
    """
    if hasattr(params, 'items'):
        return [_query_param(k, v) for (k, v) in params.items()]
    return [_query_param(*params)]
def add_query_params(url, params):
    """Append ``params`` to the url's query string, keeping any existing
    parameters (delegates to update_query_params with updating disabled).
    """
    return update_query_params(url, params, False)
def update_query_params(url, params, update=True):
    """Given a url and a tuple or dict of parameters, return a url whose
    query string includes them, properly url-encoded.

    When ``update`` is true, existing keys are overwritten with the new
    values; otherwise the new pairs are appended alongside the old ones.
    """
    scheme, host, path, query, fragment = urlparse.urlsplit(url)
    # parse_qsl yields url-decoded byte-string pairs; they are re-encoded
    # by urlencode below
    existing = urlparse.parse_qsl(query)
    incoming = _make_query_tuples(params)
    if update:
        merged = dict(existing)
        merged.update(incoming)
    else:
        merged = existing + incoming
    return urlparse.urlunsplit(
        (scheme, host, path, urllib.urlencode(merged), fragment))
| c-oreills/pyFaceGraph | src/facegraph/url_operations.py | Python | unlicense | 2,148 |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
from getpass import getuser
import ctypes
from ctypes.util import find_library
from ctypes import c_void_p, c_uint32, POINTER, c_bool, byref
from .core_foundation import CoreFoundation, unicode_to_cfstring, cfstring_to_unicode
from .._types import str_cls, type_name
# Load OS X's OpenDirectory.framework via ctypes; everything below binds the
# handful of functions and constants used by get_user_login_shell().
od_path = find_library('OpenDirectory')
OpenDirectory = ctypes.CDLL(od_path, use_errno=True)
# Aliases mirroring the framework's C typedefs: attribute/record types are
# CFStringRefs, the *Ref handles are opaque pointers.
ODAttributeType = CoreFoundation.CFStringRef
ODMatchType = c_uint32
ODRecordType = CoreFoundation.CFStringRef
ODSessionRef = c_void_p
ODNodeRef = c_void_p
ODQueryRef = c_void_p
ODRecordRef = c_void_p
# Declare argtypes/restype for each call so ctypes marshals the pointer
# arguments correctly (particularly on 64-bit builds).
OpenDirectory.ODSessionCreate.argtypes = [
CoreFoundation.CFAllocatorRef,
CoreFoundation.CFDictionaryRef,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODSessionCreate.restype = ODSessionRef
OpenDirectory.ODNodeCreateWithName.argtypes = [
CoreFoundation.CFAllocatorRef,
ODSessionRef,
CoreFoundation.CFStringRef,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODNodeCreateWithName.restype = ODNodeRef
OpenDirectory.ODQueryCreateWithNode.argtypes = [
CoreFoundation.CFAllocatorRef,
ODNodeRef,
CoreFoundation.CFTypeRef,
ODAttributeType,
ODMatchType,
CoreFoundation.CFTypeRef,
CoreFoundation.CFTypeRef,
CoreFoundation.CFIndex,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODQueryCreateWithNode.restype = ODQueryRef
OpenDirectory.ODQueryCopyResults.argtypes = [
ODQueryRef,
c_bool,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODQueryCopyResults.restype = CoreFoundation.CFArrayRef
OpenDirectory.ODRecordCopyValues.argtypes = [
ODRecordRef,
ODAttributeType,
POINTER(CoreFoundation.CFErrorRef)
]
OpenDirectory.ODRecordCopyValues.restype = CoreFoundation.CFArrayRef
# Match-type constant for an equality match (value 0x2001 as used below);
# the kOD* globals are exported CFStringRef constants from the framework.
kODMatchEqualTo = ODMatchType(0x2001)
kODRecordTypeUsers = ODRecordType.in_dll(OpenDirectory, 'kODRecordTypeUsers')
kODAttributeTypeRecordName = ODAttributeType.in_dll(OpenDirectory, 'kODAttributeTypeRecordName')
kODAttributeTypeUserShell = ODAttributeType.in_dll(OpenDirectory, 'kODAttributeTypeUserShell')
# Cache of username -> login shell so each user is only looked up once.
_login_shells = {}
def get_user_login_shell(username=None):
    """
    Uses OS X's OpenDirectory.framework to get the user's login shell

    :param username:
        A unicode string of the user to get the shell for - None for the
        current user

    :raises:
        TypeError - when username is not a unicode string
        OSError - when an OpenDirectory call reports an error

    :return:
        A unicode string of the user's login shell, or None when the user
        is not found in the local directory node
    """

    if username is None:
        username = getuser()
    if not isinstance(username, str_cls):
        username = username.decode('utf-8')
    if not isinstance(username, str_cls):
        raise TypeError('username must be a unicode string, not %s' % type_name(username))

    if username not in _login_shells:
        error_ref = CoreFoundation.CFErrorRef()

        session = OpenDirectory.ODSessionCreate(
            CoreFoundation.kCFAllocatorDefault,
            None,
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        # "/Local/Default" is the node holding locally-defined users
        node = OpenDirectory.ODNodeCreateWithName(
            CoreFoundation.kCFAllocatorDefault,
            session,
            unicode_to_cfstring("/Local/Default"),
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        # Equality-match on the record name, fetching only the UserShell
        # attribute, limited to a single result
        query = OpenDirectory.ODQueryCreateWithNode(
            CoreFoundation.kCFAllocatorDefault,
            node,
            kODRecordTypeUsers,
            kODAttributeTypeRecordName,
            kODMatchEqualTo,
            unicode_to_cfstring(username),
            kODAttributeTypeUserShell,
            1,
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        results = OpenDirectory.ODQueryCopyResults(
            query,
            False,
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        login_shell = None

        num_results = CoreFoundation.CFArrayGetCount(results)
        if num_results == 1:
            od_record = CoreFoundation.CFArrayGetValueAtIndex(results, 0)
            attributes = OpenDirectory.ODRecordCopyValues(od_record, kODAttributeTypeUserShell, byref(error_ref))
            if bool(error_ref):
                raise OSError('Error!')

            # BUG FIX: this previously counted the `results` array (already
            # known to hold exactly one element) instead of `attributes`, so
            # an empty attribute array would have been indexed at 0 below.
            num_attributes = CoreFoundation.CFArrayGetCount(attributes)
            if num_attributes == 1:
                string_ref = CoreFoundation.CFArrayGetValueAtIndex(attributes, 0)
                login_shell = cfstring_to_unicode(string_ref)
        # Negative results (login_shell is None) are cached as well
        _login_shells[username] = login_shell

    return _login_shells.get(username)
| EnTeQuAk/dotfiles | sublime-text-3/Packages/shellenv/all/shellenv/_osx/open_directory.py | Python | unlicense | 4,675 |
/**
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.pnc.common.util;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Stream;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Author: Michal Szynkiewicz, michal.l.szynkiewicz@gmail.com
* Date: 9/15/16
* Time: 1:37 PM
*/
public class StreamCollectorsTest {
@Test
public void shouldFlattenTwoLists() {
    // Every element of both source lists must appear in the flat result.
    List<String> first = Arrays.asList("one-1", "one-2", "one-3");
    List<String> second = Arrays.asList("two-1", "two-2");

    List<String> flattened = Stream.of(first, second).collect(StreamCollectors.toFlatList());

    List<String> expected = new ArrayList<>(first);
    expected.addAll(second);
    assertThat(flattened).hasSameElementsAs(expected);
}
@Test
public void shouldFlattenOneList() {
    // A single source list flattens to its own elements.
    List<String> only = Arrays.asList("one-1", "one-2", "one-3");

    List<String> flattened = Stream.of(only).collect(StreamCollectors.toFlatList());

    assertThat(flattened).hasSameElementsAs(only);
}
@Test
public void shouldFlattenNoList() {
    // An empty stream of lists must collect to an empty (non-null) list.
    List<String> flattened = Stream.<List<String>>of().collect(StreamCollectors.toFlatList());

    assertThat(flattened).isNotNull().isEmpty();
}
} | ruhan1/pnc | common/src/test/java/org/jboss/pnc/common/util/StreamCollectorsTest.java | Java | apache-2.0 | 1,999 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.itest.karaf;
import org.junit.Test;
import org.junit.Ignore;
import org.junit.runner.RunWith;
import org.ops4j.pax.exam.junit.PaxExam;
// Karaf integration test for the camel-chronicle component: installs the
// feature in a Pax Exam container and verifies the component resolves.
// NOTE(review): @Ignore disables this test entirely - confirm whether it
// should be re-enabled.
@Ignore
@RunWith(PaxExam.class)
public class CamelChronicleTest extends BaseKarafTest {
// Component/feature name handed to the shared BaseKarafTest helper.
public static final String COMPONENT = "chronicle";
@Test
public void test() throws Exception {
testComponent(COMPONENT);
}
}
| jarst/camel | tests/camel-itest-karaf/src/test/java/org/apache/camel/itest/karaf/CamelChronicleTest.java | Java | apache-2.0 | 1,206 |
/*
* Copyright (c) 2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.identity.mgt.util;
import org.apache.axiom.om.util.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.neethi.Policy;
import org.apache.neethi.PolicyEngine;
import org.wso2.carbon.CarbonConstants;
import org.wso2.carbon.context.PrivilegedCarbonContext;
import org.wso2.carbon.identity.base.IdentityException;
import org.wso2.carbon.identity.mgt.IdentityMgtConfig;
import org.wso2.carbon.identity.mgt.constants.IdentityMgtConstants;
import org.wso2.carbon.identity.mgt.dto.UserDTO;
import org.wso2.carbon.identity.mgt.internal.IdentityMgtServiceComponent;
import org.wso2.carbon.registry.core.RegistryConstants;
import org.wso2.carbon.registry.core.Resource;
import org.wso2.carbon.registry.core.exceptions.RegistryException;
import org.wso2.carbon.registry.core.session.UserRegistry;
import org.wso2.carbon.user.api.Tenant;
import org.wso2.carbon.user.api.UserStoreException;
import org.wso2.carbon.user.api.UserStoreManager;
import org.wso2.carbon.user.core.UserCoreConstants;
import org.wso2.carbon.user.core.service.RealmService;
import org.wso2.carbon.user.core.tenant.TenantManager;
import org.wso2.carbon.user.core.util.UserCoreUtil;
import org.wso2.carbon.utils.multitenancy.MultitenantConstants;
import org.wso2.carbon.utils.multitenancy.MultitenantUtils;
import java.io.ByteArrayInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;
/**
* Static utility methods shared by the identity-management component:
* user id / tenant resolution, claim reads and writes against the user
* store, password updates, hashing, and the WS-Security transport policy.
*/
public class Utils {
private static final Log log = LogFactory.getLog(Utils.class);
// Static-only helper class; never instantiated.
private Utils() {
}
/**
* Wraps the given user id in a {@link UserDTO}, validating its tenant
* part (unless SaaS mode is enabled) and resolving the tenant id.
*
* @param userId user id, optionally qualified with a tenant domain
* @return populated UserDTO
* @throws IdentityException if the id is blank or tenant validation fails
*/
public static UserDTO processUserId(String userId) throws IdentityException {
if (userId == null || userId.trim().length() < 1) {
throw IdentityException.error("Can not proceed with out a user id");
}
UserDTO userDTO = new UserDTO(userId);
if (!IdentityMgtConfig.getInstance().isSaasEnabled()) {
validateTenant(userDTO);
}
userDTO.setTenantId(getTenantId(userDTO.getTenantDomain()));
return userDTO;
}
/**
* Ensures the DTO's tenant domain (when present) matches the tenant of
* the current thread-local carbon context, then stores the resolved
* tenant id on the DTO.
*
* @param user DTO whose tenant domain is checked; domain may be unset
* @throws IdentityException if the domain belongs to another tenant
*/
public static void validateTenant(UserDTO user) throws IdentityException {
if (user.getTenantDomain() != null && !user.getTenantDomain().isEmpty()) {
if (!user.getTenantDomain().equals(
PrivilegedCarbonContext.getThreadLocalCarbonContext()
.getTenantDomain())) {
throw IdentityException.error(
"Failed access to unauthorized tenant domain");
}
user.setTenantId(getTenantId(user.getTenantDomain()));
}
}
/**
* gets no of verified user challenges
*
* @param userDTO bean class that contains user and tenant Information
* @return no of verified challenges
* @throws IdentityException if fails
*/
public static int getVerifiedChallenges(UserDTO userDTO) throws IdentityException {
int noOfChallenges = 0;
try {
UserRegistry registry = IdentityMgtServiceComponent.getRegistryService().
getConfigSystemRegistry(MultitenantConstants.SUPER_TENANT_ID);
// NOTE(review): the user id is appended twice here - verify this
// matches the path layout used where the challenge data is written.
String identityKeyMgtPath = IdentityMgtConstants.IDENTITY_MANAGEMENT_CHALLENGES +
RegistryConstants.PATH_SEPARATOR + userDTO.getUserId() +
RegistryConstants.PATH_SEPARATOR + userDTO.getUserId();
Resource resource;
if (registry.resourceExists(identityKeyMgtPath)) {
resource = registry.get(identityKeyMgtPath);
String property = resource.getProperty(IdentityMgtConstants.VERIFIED_CHALLENGES);
if (property != null) {
return Integer.parseInt(property);
}
}
} catch (RegistryException e) {
// Deliberately swallowed: registry trouble degrades to "0 challenges".
log.error("Error while processing userKey", e);
}
return noOfChallenges;
}
/**
* gets the tenant id from the tenant domain
*
* @param domain - tenant domain name
* @return tenantId
* @throws IdentityException if fails or tenant doesn't exist
*/
public static int getTenantId(String domain) throws IdentityException {
int tenantId;
TenantManager tenantManager = IdentityMgtServiceComponent.getRealmService().getTenantManager();
if (MultitenantConstants.SUPER_TENANT_DOMAIN_NAME.equals(domain)) {
tenantId = MultitenantConstants.SUPER_TENANT_ID;
if (log.isDebugEnabled()) {
String msg = "Domain is not defined implicitly. So it is Super Tenant domain.";
log.debug(msg);
}
} else {
try {
tenantId = tenantManager.getTenantId(domain);
// Any id below 1 (other than the super-tenant id) means no such tenant.
if (tenantId < 1 && tenantId != MultitenantConstants.SUPER_TENANT_ID) {
String msg = "This action can not be performed by the users in non-existing domains.";
log.error(msg);
throw IdentityException.error(msg);
}
} catch (org.wso2.carbon.user.api.UserStoreException e) {
String msg = "Error in retrieving tenant id of tenant domain: " + domain + ".";
log.error(msg, e);
throw IdentityException.error(msg, e);
}
}
return tenantId;
}
/**
* Get the claims from the user store manager
*
* @param userName user name
* @param tenantId tenantId
* @param claim claim name
* @return claim value
* @throws IdentityException if fails
*/
public static String getClaimFromUserStoreManager(String userName, int tenantId, String claim)
throws IdentityException {
org.wso2.carbon.user.core.UserStoreManager userStoreManager = null;
RealmService realmService = IdentityMgtServiceComponent.getRealmService();
// Default is empty string (not null) when the realm/manager is unavailable.
String claimValue = "";
try {
if (realmService.getTenantUserRealm(tenantId) != null) {
userStoreManager = (org.wso2.carbon.user.core.UserStoreManager) realmService.getTenantUserRealm(tenantId).
getUserStoreManager();
}
} catch (Exception e) {
String msg = "Error retrieving the user store manager for tenant id : " + tenantId;
log.error(msg, e);
throw IdentityException.error(msg, e);
}
try {
if (userStoreManager != null) {
Map<String, String> claimsMap = userStoreManager
.getUserClaimValues(userName, new String[]{claim}, UserCoreConstants.DEFAULT_PROFILE);
if (claimsMap != null && !claimsMap.isEmpty()) {
claimValue = claimsMap.get(claim);
}
}
return claimValue;
} catch (Exception e) {
String msg = "Unable to retrieve the claim for user : " + userName;
log.error(msg, e);
throw IdentityException.error(msg, e);
}
}
/**
* Reads several claims for a user from the user store in one call.
*
* @param userName user name
* @param tenantId tenant id
* @param claims claim URIs to read
* @return map of claim URI to value (empty when the realm is unavailable)
* @throws IdentityException if the user store cannot be reached or read
*/
public static Map<String,String> getClaimsFromUserStoreManager(String userName, int tenantId, String[] claims)
throws IdentityException {
Map<String, String> claimValues = new HashMap<>();
org.wso2.carbon.user.core.UserStoreManager userStoreManager = null;
RealmService realmService = IdentityMgtServiceComponent.getRealmService();
try {
if (realmService.getTenantUserRealm(tenantId) != null) {
userStoreManager = (org.wso2.carbon.user.core.UserStoreManager) realmService.getTenantUserRealm(tenantId).
getUserStoreManager();
}
} catch (UserStoreException e) {
throw IdentityException.error("Error retrieving the user store manager for tenant id : " + tenantId, e);
}
try {
if (userStoreManager != null) {
claimValues = userStoreManager.getUserClaimValues(userName, claims, UserCoreConstants.DEFAULT_PROFILE);
}
} catch (Exception e) {
throw IdentityException.error("Unable to retrieve the claim for user : " + userName, e);
}
return claimValues;
}
/**
* get email address from user store
*
* @param userName user name
* @param tenantId tenant id
* @return email address, or null when none could be determined
*/
public static String getEmailAddressForUser(String userName, int tenantId) {
String email = null;
try {
if (log.isDebugEnabled()) {
log.debug("Retrieving email address from user profile.");
}
// For the tenant admin, prefer the email registered with the tenant itself.
Tenant tenant = IdentityMgtServiceComponent.getRealmService().
getTenantManager().getTenant(tenantId);
if (tenant != null && tenant.getAdminName().equals(userName)) {
email = tenant.getEmail();
}
// Otherwise fall back to the email-address claim in the user profile.
if (email == null || email.trim().length() < 1) {
email = getClaimFromUserStoreManager(userName, tenantId,
UserCoreConstants.ClaimTypeURIs.EMAIL_ADDRESS);
}
// Last resort: when usernames are email addresses, use the name itself.
if ((email == null || email.trim().length() < 1) && MultitenantUtils.isEmailUserName()) {
email = UserCoreUtil.removeDomainFromName(userName);
}
} catch (Exception e) {
String msg = "Unable to retrieve an email address associated with the given user : " + userName;
log.warn(msg, e); // It is common to have users with no email address defined.
}
return email;
}
/**
* Update Password with the user input
*
* @return true - if password was successfully reset
* @throws IdentityException
*/
public static boolean updatePassword(String userId, int tenantId, String password) throws IdentityException {
// NOTE(review): tenantDomain is never assigned, so the catch block below
// always logs "domain: null" - consider resolving the domain or dropping it.
String tenantDomain = null;
if (userId == null || userId.trim().length() < 1 ||
password == null || password.trim().length() < 1) {
String msg = "Unable to find the required information for updating password";
log.error(msg);
throw IdentityException.error(msg);
}
try {
UserStoreManager userStoreManager = IdentityMgtServiceComponent.
getRealmService().getTenantUserRealm(tenantId).getUserStoreManager();
userStoreManager.updateCredentialByAdmin(userId, password);
if (log.isDebugEnabled()) {
String msg = "Password is updated for user: " + userId;
log.debug(msg);
}
return true;
} catch (UserStoreException e) {
String msg = "Error in changing the password, user name: " + userId + " domain: " +
tenantDomain + ".";
log.error(msg, e);
throw IdentityException.error(msg, e);
}
}
/**
* Computes the SHA-256 digest of the given string and Base64-encodes it.
*
* @param value text to hash
* @return Base64 encoded SHA-256 digest of value
* @throws UserStoreException if the digest algorithm is unavailable
*/
public static String doHash(String value) throws UserStoreException {
try {
String digsestFunction = "SHA-256";
MessageDigest dgst = MessageDigest.getInstance(digsestFunction);
// NOTE(review): getBytes() uses the platform default charset; output can
// differ across JVMs unless an explicit charset (e.g. UTF-8) is used.
byte[] byteValue = dgst.digest(value.getBytes());
return Base64.encode(byteValue);
} catch (NoSuchAlgorithmException e) {
log.error(e.getMessage(), e);
throw new UserStoreException(e.getMessage(), e);
}
}
/**
* Set claim to user store manager
*
* @param userName user name
* @param tenantId tenant id
* @param claim claim uri
* @param value claim value
* @throws IdentityException if fails
*/
public static void setClaimInUserStoreManager(String userName, int tenantId, String claim,
String value) throws IdentityException {
org.wso2.carbon.user.core.UserStoreManager userStoreManager = null;
RealmService realmService = IdentityMgtServiceComponent.getRealmService();
try {
if (realmService.getTenantUserRealm(tenantId) != null) {
userStoreManager = (org.wso2.carbon.user.core.UserStoreManager) realmService.getTenantUserRealm(tenantId).
getUserStoreManager();
}
} catch (Exception e) {
String msg = "Error retrieving the user store manager for the tenant";
log.error(msg, e);
throw IdentityException.error(msg, e);
}
try {
if (userStoreManager != null) {
// Only write when the stored value actually differs (or is absent).
String oldValue = userStoreManager.getUserClaimValue(userName, claim, null);
if (oldValue == null || !oldValue.equals(value)) {
Map<String,String> claimMap = new HashMap<String,String>();
claimMap.put(claim, value);
userStoreManager.setUserClaimValues(userName, claimMap, UserCoreConstants.DEFAULT_PROFILE);
}
}
} catch (Exception e) {
String msg = "Unable to set the claim for user : " + userName;
log.error(msg, e);
throw IdentityException.error(msg, e);
}
}
/**
* Extracts the user-store domain prefix from a (possibly domain-qualified)
* username; falls back to the primary default domain when none is present.
*
* @param userName username, optionally prefixed with "DOMAIN" + separator
* @return the user store domain name
*/
public static String getUserStoreDomainName(String userName) {
int index;
String userDomain;
if ((index = userName.indexOf(CarbonConstants.DOMAIN_SEPARATOR)) >= 0) {
// remove domain name if exist
userDomain = userName.substring(0, index);
} else {
userDomain = UserCoreConstants.PRIMARY_DEFAULT_DOMAIN_NAME;
}
return userDomain;
}
/**
* Returns the default challenge-question URIs (currently hard-coded).
*
* @return array of the two default challenge question URIs
*/
public static String[] getChallengeUris() {
//TODO
return new String[]{IdentityMgtConstants.DEFAULT_CHALLENGE_QUESTION_URI01,
IdentityMgtConstants.DEFAULT_CHALLENGE_QUESTION_URI02};
}
/**
* Parses and returns the inline WS-SecurityPolicy "UTOverTransport": a
* transport binding over HTTPS requiring a client certificate, Basic256
* algorithm suite, lax layout and a timestamp.
*
* @return the parsed {@link Policy}
*/
public static Policy getSecurityPolicy() {
String policyString = " <wsp:Policy wsu:Id=\"UTOverTransport\" xmlns:wsp=\"http://schemas.xmlsoap.org/ws/2004/09/policy\"\n" +
" xmlns:wsu=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd\">\n" +
" <wsp:ExactlyOne>\n" +
" <wsp:All>\n" +
" <sp:TransportBinding xmlns:sp=\"http://schemas.xmlsoap.org/ws/2005/07/securitypolicy\">\n" +
" <wsp:Policy>\n" +
" <sp:TransportToken>\n" +
" <wsp:Policy>\n" +
" <sp:HttpsToken RequireClientCertificate=\"true\"/>\n" +
" </wsp:Policy>\n" +
" </sp:TransportToken>\n" +
" <sp:AlgorithmSuite>\n" +
" <wsp:Policy>\n" +
" <sp:Basic256/>\n" +
" </wsp:Policy>\n" +
" </sp:AlgorithmSuite>\n" +
" <sp:Layout>\n" +
" <wsp:Policy>\n" +
" <sp:Lax/>\n" +
" </wsp:Policy>\n" +
" </sp:Layout>\n" +
" <sp:IncludeTimestamp/>\n" +
" </wsp:Policy>\n" +
" </sp:TransportBinding>\n" +
" </wsp:All>\n" +
" </wsp:ExactlyOne>\n" +
" </wsp:Policy>";
return PolicyEngine.getPolicy(new ByteArrayInputStream(policyString.getBytes()));
}
}
| wso2/carbon-identity-framework | components/identity-mgt/org.wso2.carbon.identity.mgt/src/main/java/org/wso2/carbon/identity/mgt/util/Utils.java | Java | apache-2.0 | 16,553 |
import { expect } from 'chai';
import { spec } from 'modules/yieldoneBidAdapter.js';
import { newBidder } from 'src/adapters/bidderFactory.js';
import { deepClone } from 'src/utils.js';
const ENDPOINT = 'https://y.one.impact-ad.jp/h_bid';
const USER_SYNC_URL = 'https://y.one.impact-ad.jp/push_sync';
const VIDEO_PLAYER_URL = 'https://img.ak.impact-ad.jp/ic/pone/ivt/firstview/js/dac-video-prebid.min.js';
const DEFAULT_VIDEO_SIZE = {w: 640, h: 360};
describe('yieldoneBidAdapter', function() {
const adapter = newBidder(spec);
describe('isBidRequestValid', function () {
  // Baseline valid bid; the cases below mutate this object or a copy of it.
  let bid = {
    'bidder': 'yieldone',
    'params': {
      placementId: '36891'
    },
    'adUnitCode': 'adunit-code',
    'sizes': [[300, 250], [336, 280]],
    'bidId': '23beaa6af6cdde',
    'bidderRequestId': '19c0c1efdf37e7',
    'auctionId': '61466567-d482-4a16-96f0-fe5f25ffbdf1',
  };

  it('should return true when required params found', function () {
    expect(spec.isBidRequestValid(bid)).to.equal(true);
  });

  it('should return false when placementId not passed correctly', function () {
    bid.params.placementId = '';
    expect(spec.isBidRequestValid(bid)).to.equal(false);
  });

  it('should return false when require params are not passed', function () {
    // BUG FIX: the original `let bid = Object.assign({}, bid)` shadowed the
    // outer `bid` and read the new binding inside its own temporal dead
    // zone, throwing a ReferenceError before the assertion could run.
    let invalidBid = Object.assign({}, bid);
    invalidBid.params = {};
    expect(spec.isBidRequestValid(invalidBid)).to.equal(false);
  });
});
describe('buildRequests', function () {
const bidderRequest = {
refererInfo: {
numIframes: 0,
reachedTop: true,
referer: 'http://example.com',
stack: ['http://example.com']
}
};
describe('Basic', function () {
const bidRequests = [
{
'bidder': 'yieldone',
'params': {placementId: '36891'},
'adUnitCode': 'adunit-code1',
'bidId': '23beaa6af6cdde',
'bidderRequestId': '19c0c1efdf37e7',
'auctionId': '61466567-d482-4a16-96f0-fe5f25ffbdf1',
},
{
'bidder': 'yieldone',
'params': {placementId: '47919'},
'adUnitCode': 'adunit-code2',
'bidId': '382091349b149f"',
'bidderRequestId': '"1f9c98192de251"',
'auctionId': '61466567-d482-4a16-96f0-fe5f25ffbdf1',
}
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('sends bid request to our endpoint via GET', function () {
expect(request[0].method).to.equal('GET');
expect(request[1].method).to.equal('GET');
});
it('attaches source and version to endpoint URL as query params', function () {
expect(request[0].url).to.equal(ENDPOINT);
expect(request[1].url).to.equal(ENDPOINT);
});
it('adUnitCode should be sent as uc parameters on any requests', function () {
expect(request[0].data.uc).to.equal('adunit-code1');
expect(request[1].data.uc).to.equal('adunit-code2');
});
});
describe('Old Format', function () {
const bidRequests = [
{
params: {placementId: '0'},
mediaType: 'banner',
sizes: [[300, 250], [336, 280]],
},
{
params: {placementId: '1'},
mediaType: 'banner',
sizes: [[336, 280]],
},
{
// It doesn't actually exist.
params: {placementId: '2'},
},
{
params: {placementId: '3'},
mediaType: 'video',
sizes: [[1280, 720], [1920, 1080]],
},
{
params: {placementId: '4'},
mediaType: 'video',
sizes: [[1920, 1080]],
},
{
params: {placementId: '5'},
mediaType: 'video',
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data.sz).to.equal('336x280');
expect(request[2].data.sz).to.equal('');
expect(request[3].data).to.not.have.property('sz');
expect(request[4].data).to.not.have.property('sz');
expect(request[5].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data).to.not.have.property('w');
expect(request[2].data).to.not.have.property('w');
expect(request[3].data.w).to.equal(1280);
expect(request[3].data.h).to.equal(720);
expect(request[4].data.w).to.equal(1920);
expect(request[4].data.h).to.equal(1080);
expect(request[5].data.w).to.equal(DEFAULT_VIDEO_SIZE.w);
expect(request[5].data.h).to.equal(DEFAULT_VIDEO_SIZE.h);
});
});
describe('New Format', function () {
const bidRequests = [
{
params: {placementId: '0'},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
},
},
{
params: {placementId: '1'},
mediaTypes: {
banner: {
sizes: [[336, 280]],
},
},
},
{
// It doesn't actually exist.
params: {placementId: '2'},
mediaTypes: {
banner: {
},
},
},
{
params: {placementId: '3'},
mediaTypes: {
video: {
context: 'outstream',
playerSize: [[1280, 720], [1920, 1080]],
},
},
},
{
params: {placementId: '4'},
mediaTypes: {
video: {
context: 'outstream',
playerSize: [1920, 1080],
},
},
},
{
params: {placementId: '5'},
mediaTypes: {
video: {
context: 'outstream',
},
},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data.sz).to.equal('336x280');
expect(request[2].data.sz).to.equal('');
expect(request[3].data).to.not.have.property('sz');
expect(request[4].data).to.not.have.property('sz');
expect(request[5].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data).to.not.have.property('w');
expect(request[2].data).to.not.have.property('w');
expect(request[3].data.w).to.equal(1280);
expect(request[3].data.h).to.equal(720);
expect(request[4].data.w).to.equal(1920);
expect(request[4].data.h).to.equal(1080);
expect(request[5].data.w).to.equal(DEFAULT_VIDEO_SIZE.w);
expect(request[5].data.h).to.equal(DEFAULT_VIDEO_SIZE.h);
});
});
describe('Multiple Format', function () {
const bidRequests = [
{
// It will be treated as a banner.
params: {
placementId: '0',
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [1920, 1080],
},
},
},
{
// It will be treated as a video.
params: {
placementId: '1',
playerParams: {},
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [1920, 1080],
},
},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data.w).to.equal(1920);
expect(request[1].data.h).to.equal(1080);
});
});
describe('FLUX Format', function () {
const bidRequests = [
{
// It will be treated as a banner.
params: {
placementId: '0',
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [[1, 1]],
},
},
},
{
// It will be treated as a video.
params: {
placementId: '1',
playerParams: {},
playerSize: [1920, 1080],
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [[1, 1]],
},
},
},
{
// It will be treated as a video.
params: {
placementId: '2',
playerParams: {},
},
mediaTypes: {
banner: {
sizes: [[300, 250], [336, 280]],
},
video: {
context: 'outstream',
playerSize: [[1, 1]],
},
},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
it('parameter sz has more than one size on banner requests', function () {
expect(request[0].data.sz).to.equal('300x250,336x280');
expect(request[1].data).to.not.have.property('sz');
expect(request[2].data).to.not.have.property('sz');
});
it('width and height should be set as separate parameters on outstream requests', function () {
expect(request[0].data).to.not.have.property('w');
expect(request[1].data.w).to.equal(1920);
expect(request[1].data.h).to.equal(1080);
expect(request[2].data.w).to.equal(DEFAULT_VIDEO_SIZE.w);
expect(request[2].data.h).to.equal(DEFAULT_VIDEO_SIZE.h);
});
});
describe('LiveRampID', function () {
it('dont send LiveRampID if undefined', function () {
const bidRequests = [
{
params: {placementId: '0'},
},
{
params: {placementId: '1'},
userId: {},
},
{
params: {placementId: '2'},
userId: undefined,
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data).to.not.have.property('lr_env');
expect(request[1].data).to.not.have.property('lr_env');
expect(request[2].data).to.not.have.property('lr_env');
});
it('should send LiveRampID if available', function () {
const bidRequests = [
{
params: {placementId: '0'},
userId: {idl_env: 'idl_env_sample'},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data.lr_env).to.equal('idl_env_sample');
});
});
describe('IMID', function () {
it('dont send IMID if undefined', function () {
const bidRequests = [
{
params: {placementId: '0'},
},
{
params: {placementId: '1'},
userId: {},
},
{
params: {placementId: '2'},
userId: undefined,
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data).to.not.have.property('imuid');
expect(request[1].data).to.not.have.property('imuid');
expect(request[2].data).to.not.have.property('imuid');
});
it('should send IMID if available', function () {
const bidRequests = [
{
params: {placementId: '0'},
userId: {imuid: 'imuid_sample'},
},
];
const request = spec.buildRequests(bidRequests, bidderRequest);
expect(request[0].data.imuid).to.equal('imuid_sample');
});
});
});
describe('interpretResponse', function () {
let bidRequestBanner = [
{
'method': 'GET',
'url': 'https://y.one.impact-ad.jp/h_bid',
'data': {
'v': 'hb1',
'p': '36891',
'sz': '300x250,336x280',
'cb': 12892917383,
'r': 'http%3A%2F%2Flocalhost%3A9876%2F%3Fid%3D74552836',
'uid': '23beaa6af6cdde',
't': 'i'
}
}
];
let serverResponseBanner = {
body: {
'adTag': '<!-- adtag -->',
'uid': '23beaa6af6cdde',
'height': 250,
'width': 300,
'cpm': 0.0536616,
'crid': '2494768',
'currency': 'JPY',
'statusMessage': 'Bid available',
'dealId': 'P1-FIX-7800-DSP-MON',
'admoain': [
'www.example.com'
]
}
};
it('should get the correct bid response for banner', function () {
let expectedResponse = [{
'requestId': '23beaa6af6cdde',
'cpm': 53.6616,
'width': 300,
'height': 250,
'creativeId': '2494768',
'dealId': 'P1-FIX-7800-DSP-MON',
'currency': 'JPY',
'netRevenue': true,
'ttl': 3000,
'referrer': '',
'meta': {
'advertiserDomains': [
'www.example.com'
]
},
'mediaType': 'banner',
'ad': '<!-- adtag -->'
}];
let result = spec.interpretResponse(serverResponseBanner, bidRequestBanner[0]);
expect(Object.keys(result[0])).to.deep.equal(Object.keys(expectedResponse[0]));
expect(result[0].mediaType).to.equal(expectedResponse[0].mediaType);
});
let serverResponseVideo = {
body: {
'uid': '23beaa6af6cdde',
'height': 360,
'width': 640,
'cpm': 0.0536616,
'dealId': 'P1-FIX-766-DSP-MON',
'crid': '2494768',
'currency': 'JPY',
'statusMessage': 'Bid available',
'adm': '<!-- vast -->'
}
};
let bidRequestVideo = [
{
'method': 'GET',
'url': 'https://y.one.impact-ad.jp/h_bid',
'data': {
'v': 'hb1',
'p': '41993',
'w': '640',
'h': '360',
'cb': 12892917383,
'r': 'http%3A%2F%2Flocalhost%3A9876%2F%3Fid%3D74552836',
'uid': '23beaa6af6cdde',
't': 'i'
}
}
];
it('should get the correct bid response for video', function () {
let expectedResponse = [{
'requestId': '23beaa6af6cdde',
'cpm': 53.6616,
'width': 640,
'height': 360,
'creativeId': '2494768',
'dealId': 'P1-FIX-7800-DSP-MON',
'currency': 'JPY',
'netRevenue': true,
'ttl': 3000,
'referrer': '',
'meta': {
'advertiserDomains': []
},
'mediaType': 'video',
'vastXml': '<!-- vast -->',
'renderer': {
id: '23beaa6af6cdde',
url: VIDEO_PLAYER_URL
}
}];
let result = spec.interpretResponse(serverResponseVideo, bidRequestVideo[0]);
expect(Object.keys(result[0])).to.deep.equal(Object.keys(expectedResponse[0]));
expect(result[0].mediaType).to.equal(expectedResponse[0].mediaType);
expect(result[0].renderer.id).to.equal(expectedResponse[0].renderer.id);
expect(result[0].renderer.url).to.equal(expectedResponse[0].renderer.url);
});
it('handles empty bid response', function () {
let response = {
body: {
'uid': '2c0b634db95a01',
'height': 0,
'crid': '',
'statusMessage': 'Bid returned empty or error response',
'width': 0,
'cpm': 0
}
};
let result = spec.interpretResponse(response, bidRequestBanner[0]);
expect(result.length).to.equal(0);
});
});
describe('getUserSyncs', function () {
it('handles empty sync options', function () {
expect(spec.getUserSyncs({})).to.be.undefined;
});
it('should return a sync url if iframe syncs are enabled', function () {
expect(spec.getUserSyncs({
'iframeEnabled': true
})).to.deep.equal([{
type: 'iframe', url: USER_SYNC_URL
}]);
});
});
});
| PubWise/Prebid.js | test/spec/modules/yieldoneBidAdapter_spec.js | JavaScript | apache-2.0 | 16,949 |
# Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
from oslo_log import log as logging
import six
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
class FJDXFCDriver(driver.FibreChannelDriver):
    """FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.

    Thin wrapper that delegates all array-side work to
    ``eternus_dx_common.FJDXCommon`` and adds the FC-specific pieces
    (zoning maps, FC connection info).
    """

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Fujitsu_ETERNUS_CI"
    VERSION = eternus_dx_common.FJDXCommon.VERSION

    def __init__(self, *args, **kwargs):
        super(FJDXFCDriver, self).__init__(*args, **kwargs)
        self.common = eternus_dx_common.FJDXCommon(
            'fc',
            configuration=self.configuration)
        self.VERSION = self.common.VERSION

    def check_for_setup_error(self):
        # All setup validation happens lazily inside FJDXCommon.
        pass

    def create_volume(self, volume):
        """Create volume and return provider location plus metadata."""
        LOG.debug('create_volume, '
                  'volume id: %s, enter method.', volume['id'])

        location, metadata = self.common.create_volume(volume)

        v_metadata = self._get_metadata(volume)
        metadata.update(v_metadata)

        LOG.debug('create_volume, info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        LOG.debug('create_volume_from_snapshot, '
                  'volume id: %(vid)s, snap id: %(sid)s, enter method.',
                  {'vid': volume['id'], 'sid': snapshot['id']})

        location, metadata = (
            self.common.create_volume_from_snapshot(volume, snapshot))

        v_metadata = self._get_metadata(volume)
        metadata.update(v_metadata)

        LOG.debug('create_volume_from_snapshot, '
                  'info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}

    def create_cloned_volume(self, volume, src_vref):
        """Create cloned volume."""
        LOG.debug('create_cloned_volume, '
                  'target volume id: %(tid)s, '
                  'source volume id: %(sid)s, enter method.',
                  {'tid': volume['id'], 'sid': src_vref['id']})

        location, metadata = (
            self.common.create_cloned_volume(volume, src_vref))

        v_metadata = self._get_metadata(volume)
        metadata.update(v_metadata)

        LOG.debug('create_cloned_volume, '
                  'info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location),
                'metadata': metadata}

    def delete_volume(self, volume):
        """Delete volume on ETERNUS."""
        LOG.debug('delete_volume, '
                  'volume id: %s, enter method.', volume['id'])

        vol_exist = self.common.delete_volume(volume)

        LOG.debug('delete_volume, '
                  'delete: %s, exit method.', vol_exist)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        LOG.debug('create_snapshot, '
                  'snap id: %(sid)s, volume id: %(vid)s, enter method.',
                  {'sid': snapshot['id'], 'vid': snapshot['volume_id']})

        location, metadata = self.common.create_snapshot(snapshot)

        LOG.debug('create_snapshot, info: %s, exit method.', metadata)
        return {'provider_location': six.text_type(location)}

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        LOG.debug('delete_snapshot, '
                  'snap id: %(sid)s, volume id: %(vid)s, enter method.',
                  {'sid': snapshot['id'], 'vid': snapshot['volume_id']})

        vol_exist = self.common.delete_snapshot(snapshot)

        LOG.debug('delete_snapshot, '
                  'delete: %s, exit method.', vol_exist)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        return

    def create_export(self, context, volume, connector):
        """Driver entry point to get the export info for a new volume."""
        return

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        return

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        LOG.debug('initialize_connection, volume id: %(vid)s, '
                  'wwpns: %(wwpns)s, enter method.',
                  {'vid': volume['id'], 'wwpns': connector['wwpns']})

        info = self.common.initialize_connection(volume, connector)

        # Attach the initiator->target zoning map the FC zone manager needs.
        data = info['data']
        init_tgt_map = (
            self.common.build_fc_init_tgt_map(connector, data['target_wwn']))
        data['initiator_target_map'] = init_tgt_map

        info['data'] = data

        LOG.debug('initialize_connection, '
                  'info: %s, exit method.', info)
        return info

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        LOG.debug('terminate_connection, volume id: %(vid)s, '
                  'wwpns: %(wwpns)s, enter method.',
                  {'vid': volume['id'], 'wwpns': connector['wwpns']})

        map_exist = self.common.terminate_connection(volume, connector)
        attached = self.common.check_attached_volume_in_zone(connector)

        info = {'driver_volume_type': 'fibre_channel',
                'data': {}}
        if not attached:
            # No more volumes attached to the host
            init_tgt_map = self.common.build_fc_init_tgt_map(connector)
            info['data'] = {'initiator_target_map': init_tgt_map}

        LOG.debug('terminate_connection, unmap: %(unmap)s, '
                  'connection info: %(info)s, exit method',
                  {'unmap': map_exist, 'info': info})
        return info

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        When ``refresh`` is truthy the stats are re-read from the backend;
        otherwise the cached value from the previous refresh is returned.
        """
        LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh)

        pool_name = None
        # Accept any truthy ``refresh`` value. The original ``refresh is
        # True`` identity check silently skipped the update for truthy
        # non-bool arguments (e.g. 1), which could leave self._stats unset.
        if refresh:
            data, pool_name = self.common.update_volume_stats()
            backend_name = self.configuration.safe_get('volume_backend_name')
            data['volume_backend_name'] = backend_name or 'FJDXFCDriver'
            data['storage_protocol'] = 'FC'
            self._stats = data

        LOG.debug('get_volume_stats, '
                  'pool name: %s, exit method.', pool_name)
        return self._stats

    def extend_volume(self, volume, new_size):
        """Extend volume."""
        LOG.debug('extend_volume, '
                  'volume id: %s, enter method.', volume['id'])

        used_pool_name = self.common.extend_volume(volume, new_size)

        LOG.debug('extend_volume, '
                  'used pool name: %s, exit method.', used_pool_name)

    def _get_metadata(self, volume):
        # Prefer the list-of-rows form ('volume_metadata'); fall back to the
        # plain dict form ('metadata') used by some callers.
        v_metadata = volume.get('volume_metadata')
        if v_metadata:
            ret = {data['key']: data['value'] for data in v_metadata}
        else:
            ret = volume.get('metadata', {})
        return ret
| Hybrid-Cloud/cinder | cinder/volume/drivers/fujitsu/eternus_dx_fc.py | Python | apache-2.0 | 8,064 |
---
license: >
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
---
# Positionsfehler
Ein `PositionError`-Objekt wird an den `geolocationError`-Callback übergeben, wenn ein Fehler auftritt.
## Eigenschaften
* **Code**: einer der vordefinierten Fehlercodes aufgeführt.
* **Nachricht**: Fehlermeldung, die die Informationen über den aufgetretenen Fehler beschreibt.
## Konstanten
* `PositionError.PERMISSION_DENIED`
* `PositionError.POSITION_UNAVAILABLE`
* `PositionError.TIMEOUT`
## Beschreibung
Das `PositionError`-Objekt wird an die `geolocationError`-Callback-Funktion übergeben, wenn bei der Positionsbestimmung (Geolocation) ein Fehler auftritt.
### `PositionError.PERMISSION_DENIED`
Zurückgegeben, wenn der Benutzer die Anwendung zum Abrufen von Positionsinformationen nicht zulässt. Dies ist abhängig von der Plattform.
### `PositionError.POSITION_UNAVAILABLE`
Zurückgegeben, wenn das Gerät nicht in der Lage, eine Position abzurufen ist. Im Allgemeinen bedeutet dies, das Gerät hat keine Netzwerkkonnektivität und/oder kann kein Satelliten-Update erhalten.
### `PositionError.TIMEOUT`
Wird zurückgegeben, wenn das Gerät innerhalb des in der `timeout`-Eigenschaft der `geolocationOptions` angegebenen Zeitraums keine Position abrufen konnte. Bei Verwendung mit `geolocation.watchPosition` kann dieser Fehler alle `timeout` Millisekunden an den `geolocationError`-Callback übergeben werden.
#include "vocabulary.h"
namespace extractor {
Vocabulary::~Vocabulary() {}

// Returns the id for `word`, interning it (appending to `words` and
// recording it in `dictionary`) on first sight. The OpenMP critical
// section keeps the two containers consistent under concurrent callers.
int Vocabulary::GetTerminalIndex(const string& word) {
  int word_id = -1;
  #pragma omp critical (vocabulary)
  {
    auto it = dictionary.find(word);
    if (it != dictionary.end()) {
      word_id = it->second;
    } else {
      word_id = words.size();
      dictionary[word] = word_id;
      words.push_back(word);
    }
  }
  return word_id;
}

// Nonterminals are encoded as the negation of their position.
int Vocabulary::GetNonterminalIndex(int position) {
  return -position;
}

// Terminal symbols are non-negative; nonterminals (see above) are negative.
bool Vocabulary::IsTerminal(int symbol) {
  return symbol >= 0;
}

// Looks up the surface form of a terminal symbol, synchronized with the
// same critical section used when interning words.
string Vocabulary::GetTerminalValue(int symbol) {
  string word;
  #pragma omp critical (vocabulary)
  word = words[symbol];
  return word;
}

bool Vocabulary::operator==(const Vocabulary& other) const {
  return words == other.words && dictionary == other.dictionary;
}
} // namespace extractor
| veer66/cdec | extractor/vocabulary.cc | C++ | apache-2.0 | 868 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.twitter.search;
import org.apache.camel.Consumer;
import org.apache.camel.Processor;
import org.apache.camel.Producer;
import org.apache.camel.component.twitter.AbstractTwitterEndpoint;
import org.apache.camel.component.twitter.TwitterConfiguration;
import org.apache.camel.component.twitter.TwitterHelper;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.UriEndpoint;
import org.apache.camel.spi.UriPath;
import org.apache.camel.util.ObjectHelper;
/**
* The Twitter Search component consumes search results.
*/
@UriEndpoint(firstVersion = "2.10.0", scheme = "twitter-search", title = "Twitter Search", syntax = "twitter-search:keywords",
    consumerClass = SearchConsumerHandler.class, label = "api,social")
public class TwitterSearchEndpoint extends AbstractTwitterEndpoint {

    @UriPath(description = "The search keywords. Multiple values can be separated with comma.")
    @Metadata(required = "true")
    private String keywords;

    public TwitterSearchEndpoint(String uri, String remaining, TwitterSearchComponent component, TwitterConfiguration properties) {
        super(uri, component, properties);
        // The path part after "twitter-search:" carries the search keywords.
        this.keywords = remaining;
    }

    /**
     * Creates a producer that performs a one-shot search for the configured
     * keywords when an exchange is sent to this endpoint.
     */
    @Override
    public Producer createProducer() throws Exception {
        return new SearchProducer(this, keywords);
    }

    /**
     * Creates a polling consumer that repeatedly searches for the configured
     * keywords and feeds the results to the given processor.
     */
    @Override
    public Consumer createConsumer(Processor processor) throws Exception {
        return TwitterHelper.createConsumer(processor, this, new SearchConsumerHandler(this, keywords));
    }
}
| yuruki/camel | components/camel-twitter/src/main/java/org/apache/camel/component/twitter/search/TwitterSearchEndpoint.java | Java | apache-2.0 | 2,347 |
/*------------------------------------------------------------------------------
* Copyright (C) 2003-2006 Ben van Klinken and the CLucene Team
*
* Distributable under the terms of either the Apache License (Version 2.0) or
* the GNU Lesser General Public License, as specified in the COPYING file.
------------------------------------------------------------------------------*/
#ifndef _lucene_search_Scorer_
#define _lucene_search_Scorer_
CL_CLASS_DEF(search,Similarity)
CL_CLASS_DEF(search,HitCollector)
CL_CLASS_DEF(search,Explanation)
CL_NS_DEF(search)
/**
* Expert: Common scoring functionality for different types of queries.
*
* <p>
* A <code>Scorer</code> either iterates over documents matching a
* query in increasing order of doc Id, or provides an explanation of
* the score for a query for a given document.
* </p>
* <p>
* Document scores are computed using a given <code>Similarity</code>
* implementation.
* </p>
* @see BooleanQuery#setAllowDocsOutOfOrder
*/
class CLUCENE_EXPORT Scorer {
private:
	// Similarity implementation used to compute document scores; set once
	// in the constructor and exposed via getSimilarity(). Not owned.
	Similarity* similarity;
protected:
	/** Constructs a Scorer.
	* @param similarity The <code>Similarity</code> implementation used by this scorer.
	*/
	Scorer(Similarity* _similarity);

public:
	virtual ~Scorer();

	/** Returns the Similarity implementation used by this scorer. */
	Similarity* getSimilarity() const;

	/** Scores and collects all matching documents.
	* @param hc The collector to which all matching documents are passed through
	* {@link HitCollector#collect(int, float)}.
	* <br>When this method is used the {@link #explain(int)} method should not be used.
	*/
	virtual void score(HitCollector* hc) ;

	/** Expert: Collects matching documents in a range. Hook for optimization.
	* Note that {@link #next()} must be called once before this method is called
	* for the first time.
	* @param hc The collector to which all matching documents are passed through
	* {@link HitCollector#collect(int, float)}.
	* @param max Do not score documents past this.
	* @return true if more matching documents may remain.
	*/
	virtual bool score( HitCollector* results, const int32_t maxDoc );

	/**
	* Advances to the document matching this Scorer with the lowest doc Id
	* greater than the current value of {@link #doc()} (or to the matching
	* document with the lowest doc Id if next has never been called on
	* this Scorer).
	*
	* <p>
	* When this method is used the {@link #explain(int)} method should not
	* be used.
	* </p>
	*
	* @return true iff there is another document matching the query.
	* @see BooleanQuery#setAllowDocsOutOfOrder
	*/
	virtual bool next() = 0;

	/** Returns the current document number matching the query.
	* Initially invalid, until {@link #next()} is called the first time.
	*/
	virtual int32_t doc() const = 0;

	/** Returns the score of the current document matching the query.
	* Initially invalid, until {@link #next()} or {@link #skipTo(int)}
	* is called the first time.
	*/
	virtual float_t score() = 0;

	/**
	* Skips to the document matching this Scorer with the lowest doc Id
	* greater than or equal to a given target.
	*
	* <p>
	* The behavior of this method is undefined if the target specified is
	* less than or equal to the current value of {@link #doc()}.
	* <p>
	* Behaves as if written:
	* <pre>
	* boolean skipTo(int target) {
	*   do {
	*     if (!next())
	* 	     return false;
	*   } while (target > doc());
	*   return true;
	* }
	* </pre>
	* Most implementations are considerably more efficient than that.
	* </p>
	*
	* <p>
	* When this method is used the {@link #explain(int)} method should not
	* be used.
	* </p>
	*
	* @param target The target document number.
	* @return true iff there is such a match.
	* @see BooleanQuery#setAllowDocsOutOfOrder
	*/
	virtual bool skipTo(int32_t target) = 0;

	/** Returns an explanation of the score for a document.
	* <br>When this method is used, the {@link #next()}, {@link #skipTo(int)} and
	* {@link #score(HitCollector)} methods should not be used.
	* @param doc The document number for the explanation.
	*/
	virtual Explanation* explain(int32_t doc) = 0;

	/** Returns a string which explains the object.
	* Caller semantics (ownership of the returned buffer) follow the
	* CLucene convention for TCHAR* returns; see concrete subclasses. */
	virtual TCHAR* toString() = 0;

	/** Comparator used when ordering scorers (e.g. within a boolean query). */
	static bool sort(const Scorer* elem1, const Scorer* elem2);
};
CL_NS_END
#endif
| privet56/qDesktopSearch | clucene/CLucene/search/Scorer.h | C | apache-2.0 | 4,260 |
' Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Imports System.Composition
Imports Microsoft.ApiDesignGuidelines.Analyzers
Imports Microsoft.CodeAnalysis.CodeFixes
Imports Microsoft.CodeAnalysis
Imports Microsoft.CodeAnalysis.VisualBasic
Namespace Microsoft.ApiDesignGuidelines.VisualBasic.Analyzers
' <summary>
' CA1008: Enums should have zero value
' </summary>
<ExportCodeFixProvider(LanguageNames.VisualBasic), [Shared]>
Public Class BasicEnumsShouldHaveZeroValueFixer
Inherits EnumsShouldHaveZeroValueFixer
Protected Overrides Function GetParentNodeOrSelfToFix(nodeToFix As SyntaxNode) As SyntaxNode
If nodeToFix.IsKind(SyntaxKind.EnumStatement) And nodeToFix.Parent IsNot Nothing Then
Return nodeToFix.Parent
End If
Return nodeToFix
End Function
End Class
End Namespace
| natidea/roslyn-analyzers | src/Microsoft.CodeQuality.Analyzers/VisualBasic/ApiDesignGuidelines/BasicEnumsShouldHaveZeroValue.Fixer.vb | Visual Basic | apache-2.0 | 995 |
"""Provides device automations for ZHA devices that emit events."""
import voluptuous as vol
import homeassistant.components.automation.event as event
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from . import DOMAIN
from .core.helpers import async_get_zha_device
CONF_SUBTYPE = "subtype"
DEVICE = "device"
DEVICE_IEEE = "device_ieee"
ZHA_EVENT = "zha_event"
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{vol.Required(CONF_TYPE): str, vol.Required(CONF_SUBTYPE): str}
)
async def async_validate_trigger_config(hass, config):
    """Validate config.

    Beyond the voluptuous schema, verify (when the zha integration is
    loaded) that the referenced device exists and actually declares the
    (type, subtype) trigger pair.
    """
    config = TRIGGER_SCHEMA(config)
    if "zha" in hass.config.components:
        trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
        try:
            zha_device = await async_get_zha_device(hass, config[CONF_DEVICE_ID])
        except (KeyError, AttributeError):
            # Unknown device id, or registry entry without a ZHA device.
            raise InvalidDeviceAutomationConfig
        if (
            zha_device.device_automation_triggers is None
            or trigger not in zha_device.device_automation_triggers
        ):
            raise InvalidDeviceAutomationConfig

    return config
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration.

    Translates the device trigger into an event trigger on ``zha_event``
    filtered by the device IEEE plus the device-declared event data, then
    delegates to the generic event-trigger implementation.
    """
    trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
    try:
        zha_device = await async_get_zha_device(hass, config[CONF_DEVICE_ID])
    except (KeyError, AttributeError):
        return None

    if trigger not in zha_device.device_automation_triggers:
        return None

    # Device-specific event payload registered for this (type, subtype).
    trigger = zha_device.device_automation_triggers[trigger]

    event_config = {
        event.CONF_PLATFORM: "event",
        event.CONF_EVENT_TYPE: ZHA_EVENT,
        event.CONF_EVENT_DATA: {DEVICE_IEEE: str(zha_device.ieee), **trigger},
    }

    event_config = event.TRIGGER_SCHEMA(event_config)
    return await event.async_attach_trigger(
        hass, event_config, action, automation_info, platform_type="device"
    )
async def async_get_triggers(hass, device_id):
    """List device triggers.

    Make sure the device supports device automations and
    if it does return the trigger list.
    """
    zha_device = await async_get_zha_device(hass, device_id)

    if not zha_device.device_automation_triggers:
        return

    # Each registered (type, subtype) pair becomes one trigger descriptor.
    return [
        {
            CONF_DEVICE_ID: device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_PLATFORM: DEVICE,
            CONF_TYPE: trigger_type,
            CONF_SUBTYPE: trigger_subtype,
        }
        for trigger_type, trigger_subtype in zha_device.device_automation_triggers
    ]
| pschmitt/home-assistant | homeassistant/components/zha/device_trigger.py | Python | apache-2.0 | 2,852 |
// On DOM ready, translate each .role_change element's English role value
// into its Chinese label and append the label to the element's <span>.
$(document).ready(function () {
  $('.role_change').each(function () {
    var $el = $(this);
    var englishRole = $el.find('input').val();
    var roleIndex = getRoleIndex(englishRole, 'EN');
    var chineseRole = indexToRole(roleIndex, 'CN');
    $el.find('span').append(chineseRole);
  });
});
| tianfengjingjing/ZhuoHuaCMMOracle11g | WebRoot/js/admin/staff/viewStaffInfo.js | JavaScript | apache-2.0 | 367 |
/*
* Copyright 2018 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.bpmn.definition;
import java.util.Objects;
import javax.validation.Valid;
import org.jboss.errai.common.client.api.annotations.MapsTo;
import org.jboss.errai.common.client.api.annotations.Portable;
import org.jboss.errai.databinding.client.api.Bindable;
import org.kie.workbench.common.forms.adf.definitions.annotations.FieldParam;
import org.kie.workbench.common.forms.adf.definitions.annotations.FormDefinition;
import org.kie.workbench.common.forms.adf.definitions.annotations.FormField;
import org.kie.workbench.common.forms.adf.definitions.settings.FieldPolicy;
import org.kie.workbench.common.stunner.bpmn.definition.property.background.BackgroundSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dimensions.CircleDimensionSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.dimensions.Radius;
import org.kie.workbench.common.stunner.bpmn.definition.property.event.compensation.CompensationEventExecutionSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.font.FontSet;
import org.kie.workbench.common.stunner.bpmn.definition.property.general.BPMNGeneralSet;
import org.kie.workbench.common.stunner.core.definition.annotation.Definition;
import org.kie.workbench.common.stunner.core.definition.annotation.Property;
import org.kie.workbench.common.stunner.core.definition.annotation.morph.Morph;
import org.kie.workbench.common.stunner.core.util.HashUtil;
import static org.kie.workbench.common.forms.adf.engine.shared.formGeneration.processing.fields.fieldInitializers.nestedForms.AbstractEmbeddedFormsInitializer.COLLAPSIBLE_CONTAINER;
import static org.kie.workbench.common.forms.adf.engine.shared.formGeneration.processing.fields.fieldInitializers.nestedForms.AbstractEmbeddedFormsInitializer.FIELD_CONTAINER_PARAM;
/**
 * BPMN "End Compensation" event node definition.
 *
 * Extends {@link BaseEndEvent} with a compensation-specific execution
 * set. The Errai annotations make the class portable/bindable for the
 * client, and {@code @Morph} allows it to be morphed into the other
 * end-event subtypes sharing {@link BaseEndEvent}.
 */
@Portable
@Bindable
@Definition
@Morph(base = BaseEndEvent.class)
@FormDefinition(
        startElement = "general",
        policy = FieldPolicy.ONLY_MARKED,
        defaultFieldSettings = {@FieldParam(name = FIELD_CONTAINER_PARAM, value = COLLAPSIBLE_CONTAINER)}
)
public class EndCompensationEvent extends BaseEndEvent {

    // Compensation-specific properties, rendered after the "general"
    // section in generated forms.
    @Property
    @FormField(afterElement = "general")
    @Valid
    private CompensationEventExecutionSet executionSet;

    /** Default constructor required by Errai; delegates with default property sets. */
    public EndCompensationEvent() {
        this(new BPMNGeneralSet(""),
             new BackgroundSet(),
             new FontSet(),
             new CircleDimensionSet(new Radius()),
             new CompensationEventExecutionSet());
    }

    /** Full constructor used by Errai marshalling ({@code @MapsTo} bindings). */
    public EndCompensationEvent(final @MapsTo("general") BPMNGeneralSet general,
                                final @MapsTo("backgroundSet") BackgroundSet backgroundSet,
                                final @MapsTo("fontSet") FontSet fontSet,
                                final @MapsTo("dimensionsSet") CircleDimensionSet dimensionsSet,
                                final @MapsTo("executionSet") CompensationEventExecutionSet executionSet) {
        super(general,
              backgroundSet,
              fontSet,
              dimensionsSet);
        this.executionSet = executionSet;
    }

    public CompensationEventExecutionSet getExecutionSet() {
        return executionSet;
    }

    public void setExecutionSet(CompensationEventExecutionSet executionSet) {
        this.executionSet = executionSet;
    }

    // hashCode/equals combine the superclass state with executionSet so
    // two events are equal only if all property sets match.
    @Override
    public int hashCode() {
        return HashUtil.combineHashCodes(super.hashCode(),
                                         Objects.hashCode(executionSet));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o instanceof EndCompensationEvent) {
            EndCompensationEvent other = (EndCompensationEvent) o;
            return super.equals(other) &&
                    Objects.equals(executionSet,
                                   other.executionSet);
        }
        return false;
    }
}
| jomarko/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-api/src/main/java/org/kie/workbench/common/stunner/bpmn/definition/EndCompensationEvent.java | Java | apache-2.0 | 4,567 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.rest;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.zeppelin.interpreter.InterpreterSetting;
import org.apache.zeppelin.notebook.Note;
import org.apache.zeppelin.notebook.Paragraph;
import org.apache.zeppelin.scheduler.Job.Status;
import org.apache.zeppelin.server.ZeppelinServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.gson.Gson;
/**
 * Integration tests that run Zeppelin notes against a real Spark cluster.
 * The cluster is started by the CI server using testing/startSparkCluster.sh.
 */
public class ZeppelinSparkClusterTest extends AbstractTestRestApi {
    Gson gson = new Gson();

    @BeforeClass
    public static void init() throws Exception {
        AbstractTestRestApi.startUp();
    }

    @AfterClass
    public static void destroy() throws Exception {
        AbstractTestRestApi.shutDown();
    }

    // Polls every 100 ms until the paragraph reaches a terminal state
    // (FINISHED, ERROR or ABORT).
    // NOTE(review): there is no timeout -- a hung paragraph blocks the
    // test suite forever.
    private void waitForFinish(Paragraph p) {
        while (p.getStatus() != Status.FINISHED
                && p.getStatus() != Status.ERROR
                && p.getStatus() != Status.ABORT) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    // Runs a simple Scala reduce over 1..10 and expects "55".
    @Test
    public void basicRDDTransformationAndActionTest() throws IOException {
        // create new note
        Note note = ZeppelinServer.notebook.createNote();

        // run markdown paragraph, again
        Paragraph p = note.addParagraph();
        Map config = p.getConfig();
        config.put("enabled", true);
        p.setConfig(config);
        p.setText("%spark print(sc.parallelize(1 to 10).reduce(_ + _))");
        note.run(p.getId());
        waitForFinish(p);
        assertEquals(Status.FINISHED, p.getStatus());
        assertEquals("55", p.getResult().message());
        ZeppelinServer.notebook.removeNote(note.id());
    }

    // Same reduce via pyspark; only runs when pyspark is available and
    // Spark >= 1.2 (pyspark supported from 1.2.1).
    @Test
    public void pySparkTest() throws IOException {
        // create new note
        Note note = ZeppelinServer.notebook.createNote();
        int sparkVersion = getSparkVersionNumber(note);

        if (isPyspark() && sparkVersion >= 12) { // pyspark supported from 1.2.1
            // run markdown paragraph, again
            Paragraph p = note.addParagraph();
            Map config = p.getConfig();
            config.put("enabled", true);
            p.setConfig(config);
            p.setText("%pyspark print(sc.parallelize(range(1, 11)).reduce(lambda a, b: a + b))");
            note.run(p.getId());
            waitForFinish(p);
            assertEquals(Status.FINISHED, p.getStatus());
            assertEquals("55\n", p.getResult().message());
        }
        ZeppelinServer.notebook.removeNote(note.id());
    }

    // Exercises pyspark's auto_convert (DataFrame interop), available
    // from Spark 1.4.
    @Test
    public void pySparkAutoConvertOptionTest() throws IOException {
        // create new note
        Note note = ZeppelinServer.notebook.createNote();
        int sparkVersion = getSparkVersionNumber(note);

        if (isPyspark() && sparkVersion >= 14) { // auto_convert enabled from spark 1.4
            // run markdown paragraph, again
            Paragraph p = note.addParagraph();
            Map config = p.getConfig();
            config.put("enabled", true);
            p.setConfig(config);
            p.setText("%pyspark\nfrom pyspark.sql.functions import *\n"
                + "print(sqlContext.range(0, 10).withColumn('uniform', rand(seed=10) * 3.14).count())");
            note.run(p.getId());
            waitForFinish(p);
            assertEquals(Status.FINISHED, p.getStatus());
            assertEquals("10\n", p.getResult().message());
        }
        ZeppelinServer.notebook.removeNote(note.id());
    }

    // Verifies z.run(<index>) executes another paragraph: p0 triggers p1
    // (which defines 'a'), then p2 prints it.
    @Test
    public void zRunTest() throws IOException {
        // create new note
        Note note = ZeppelinServer.notebook.createNote();
        Paragraph p0 = note.addParagraph();
        Map config0 = p0.getConfig();
        config0.put("enabled", true);
        p0.setConfig(config0);
        p0.setText("%spark z.run(1)");
        Paragraph p1 = note.addParagraph();
        Map config1 = p1.getConfig();
        config1.put("enabled", true);
        p1.setConfig(config1);
        p1.setText("%spark val a=10");
        Paragraph p2 = note.addParagraph();
        Map config2 = p2.getConfig();
        config2.put("enabled", true);
        p2.setConfig(config2);
        p2.setText("%spark print(a)");

        note.run(p0.getId());
        waitForFinish(p0);
        assertEquals(Status.FINISHED, p0.getStatus());

        note.run(p2.getId());
        waitForFinish(p2);
        assertEquals(Status.FINISHED, p2.getStatus());
        assertEquals("10", p2.getResult().message());

        ZeppelinServer.notebook.removeNote(note.id());
    }

    // Loads spark-csv through %dep after restarting the spark
    // interpreter, then reads a temp CSV from pyspark.
    // NOTE(review): unlike the other tests, this one never calls
    // removeNote(), and the temp CSV file is never deleted.
    @Test
    public void pySparkDepLoaderTest() throws IOException {
        // create new note
        Note note = ZeppelinServer.notebook.createNote();

        if (isPyspark() && getSparkVersionNumber(note) >= 14) {
            // restart spark interpreter
            List<InterpreterSetting> settings =
                ZeppelinServer.notebook.getBindedInterpreterSettings(note.id());

            for (InterpreterSetting setting : settings) {
                if (setting.getGroup().equals("spark")) {
                    ZeppelinServer.notebook.getInterpreterFactory().restart(setting.id());
                    break;
                }
            }

            // load dep
            Paragraph p0 = note.addParagraph();
            Map config = p0.getConfig();
            config.put("enabled", true);
            p0.setConfig(config);
            p0.setText("%dep z.load(\"com.databricks:spark-csv_2.11:1.2.0\")");
            note.run(p0.getId());
            waitForFinish(p0);
            assertEquals(Status.FINISHED, p0.getStatus());

            // write test csv file
            File tmpFile = File.createTempFile("test", "csv");
            FileUtils.write(tmpFile, "a,b\n1,2");

            // load data using libraries from dep loader
            Paragraph p1 = note.addParagraph();
            p1.setConfig(config);
            p1.setText("%pyspark\n" +
                "from pyspark.sql import SQLContext\n" +
                "print(sqlContext.read.format('com.databricks.spark.csv')" +
                ".load('"+ tmpFile.getAbsolutePath() +"').count())");
            note.run(p1.getId());
            waitForFinish(p1);
            assertEquals(Status.FINISHED, p1.getStatus());
            assertEquals("2\n", p1.getResult().message());
        }
    }

    /**
     * Get spark version number as a numerical value.
     * eg. 1.1.x => 11, 1.2.x => 12, 1.3.x => 13 ...
     */
    private int getSparkVersionNumber(Note note) {
        Paragraph p = note.addParagraph();
        Map config = p.getConfig();
        config.put("enabled", true);
        p.setConfig(config);
        p.setText("%spark print(sc.version)");
        note.run(p.getId());
        waitForFinish(p);
        assertEquals(Status.FINISHED, p.getStatus());
        String sparkVersion = p.getResult().message();
        System.out.println("Spark version detected " + sparkVersion);
        // major * 10 + minor, e.g. "1.4.1" -> 14.
        String[] split = sparkVersion.split("\\.");
        int version = Integer.parseInt(split[0]) * 10 + Integer.parseInt(split[1]);
        return version;
    }
}
| issaclee/silkroad | zeppelin-server/src/testpjava/org/apache/zeppelin/rest/ZeppelinSparkClusterTest.java | Java | apache-2.0 | 7,524 |
// Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"net/url"
"strings"
"golang.org/x/net/context"
"gopkg.in/olivere/elastic.v5/uritemplates"
)
// IndicesDeleteService allows to delete existing indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
// for details.
type IndicesDeleteService struct {
	client        *Client  // Elasticsearch client used to perform the request
	pretty        bool     // pretty-print the JSON response
	index         []string // indices to delete; `_all` or `*` deletes all
	timeout       string   // explicit operation timeout
	masterTimeout string   // timeout for connecting to the master node
}
// NewIndicesDeleteService creates and initializes a new IndicesDeleteService.
func NewIndicesDeleteService(client *Client) *IndicesDeleteService {
	svc := &IndicesDeleteService{client: client}
	// Start with a non-nil (empty) slice, matching the original behavior.
	svc.index = make([]string, 0)
	return svc
}
// Index adds the list of indices to delete.
// Use `_all` or `*` string to delete all indices.
// Returns the receiver to allow call chaining.
func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService {
	s.index = index
	return s
}

// Timeout is an explicit operation timeout.
// Returns the receiver to allow call chaining.
func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
// Returns the receiver to allow call chaining.
func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService {
	s.masterTimeout = masterTimeout
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
// Returns the receiver to allow call chaining.
func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService {
	s.pretty = pretty
	return s
}
// buildURL builds the URL for the operation.
func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
	// Build URL: the comma-joined index list fills the {index} path segment.
	path, err := uritemplates.Expand("/{index}", map[string]string{
		"index": strings.Join(s.index, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters; empty options are omitted entirely.
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	return path, params, nil
}
// Validate checks if the operation is valid.
// An error is returned when no index has been specified.
func (s *IndicesDeleteService) Validate() error {
	if len(s.index) == 0 {
		// Same message format as the original accumulator-based version:
		// a slice printed with %v, e.g. "missing required fields: [Index]".
		return fmt.Errorf("missing required fields: %v", []string{"Index"})
	}
	return nil
}
// Do executes the operation.
// It validates the service, issues a DELETE request for the configured
// indices and decodes the JSON body into an IndicesDeleteResponse.
func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesDeleteResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
// -- Result of a delete index request.

// IndicesDeleteResponse is the response of IndicesDeleteService.Do.
type IndicesDeleteResponse struct {
	// Acknowledged reports whether the cluster acknowledged the deletion.
	Acknowledged bool `json:"acknowledged"`
}
| dutchcoders/ares | vendor/gopkg.in/olivere/elastic.v5/indices_delete.go | GO | apache-2.0 | 3,255 |
// Copyright 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: wan@google.com (Zhanyong Wan)
// Google Mock - a framework for writing C++ mock classes.
//
// This file defines some utilities useful for implementing Google
// Mock. They are subject to change without notice, so please DO NOT
// USE THEM IN USER CODE.
#include "gmock/internal/gmock-internal-utils.h"
#include <ctype.h>
#include <ostream> // NOLINT
#include <string>
#include "gmock/gmock.h"
#include "gmock/internal/gmock-port.h"
#include "gtest/gtest.h"
namespace testing {
namespace internal {
// Joins a vector of strings as if they are fields of a tuple; returns
// the joined string. An empty vector yields "", a single field is
// returned as-is, and two or more fields become "(f0, f1, ...)".
GTEST_API_ std::string JoinAsTuple(const Strings& fields) {
  if (fields.empty()) {
    return "";
  }
  if (fields.size() == 1) {
    return fields[0];
  }
  std::string joined = "(" + fields[0];
  for (size_t i = 1; i < fields.size(); i++) {
    joined += ", ";
    joined += fields[i];
  }
  joined += ")";
  return joined;
}
// Converts an identifier name to a space-separated list of lower-case
// words.  Each maximum substring of the form [A-Za-z][a-z]*|\d+ is
// treated as one word.  For example, both "FooBar123" and
// "foo_bar_123" are converted to "foo bar 123".
GTEST_API_ std::string ConvertIdentifierNameToWords(const char* id_name) {
  std::string result;
  char prev_char = '\0';
  for (const char* p = id_name; *p != '\0'; prev_char = *(p++)) {
    // We don't care about the current locale as the input is
    // guaranteed to be a valid C++ identifier name.
    // A new word starts at an upper-case letter, at a letter following
    // a non-letter, or at a digit following a non-digit.
    const bool starts_new_word = IsUpper(*p) ||
        (!IsAlpha(prev_char) && IsLower(*p)) ||
        (!IsDigit(prev_char) && IsDigit(*p));

    if (IsAlNum(*p)) {
      // Non-alphanumeric characters (e.g. '_') are dropped; they only
      // act as word separators via prev_char.
      if (starts_new_word && result != "")
        result += ' ';
      result += ToLower(*p);
    }
  }
  return result;
}
// This class reports Google Mock failures as Google Test failures.  A
// user can define another class in a similar fashion if they intend to
// use Google Mock with a testing framework other than Google Test.
class GoogleTestFailureReporter : public FailureReporterInterface {
 public:
  // Translates the failure into a Google Test fatal/non-fatal test part
  // result; a kFatal failure additionally aborts the process.
  virtual void ReportFailure(FailureType type, const char* file, int line,
                             const std::string& message) {
    AssertHelper(type == kFatal ?
                 TestPartResult::kFatalFailure :
                 TestPartResult::kNonFatalFailure,
                 file,
                 line,
                 message.c_str()) = Message();
    if (type == kFatal) {
      // Abort here instead of relying on the exception path so that a
      // fatal failure stops execution even without exceptions enabled.
      posix::Abort();
    }
  }
};
// Returns the global failure reporter.  Will create a
// GoogleTestFailureReporter and return it the first time called.
GTEST_API_ FailureReporterInterface* GetFailureReporter() {
  // Points to the global failure reporter used by Google Mock.  gcc
  // guarantees that the following use of failure_reporter is
  // thread-safe.  We may need to add additional synchronization to
  // protect failure_reporter if we port Google Mock to other
  // compilers.
  // The instance is intentionally leaked: it must outlive all tests.
  static FailureReporterInterface* const failure_reporter =
      new GoogleTestFailureReporter();
  return failure_reporter;
}
// Protects global resources (stdout in particular) used by Log().
static GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex);

// Returns true iff a log with the given severity is visible according
// to the --gmock_verbose flag.
GTEST_API_ bool LogIsVisible(LogSeverity severity) {
  if (GMOCK_FLAG(verbose) == kInfoVerbosity) {
    // --gmock_verbose=info: always show the log.
    return true;
  }
  if (GMOCK_FLAG(verbose) == kErrorVerbosity) {
    // --gmock_verbose=error: always hide it.
    return false;
  }
  // Any other value of --gmock_verbose is treated as "warning"
  // (its default value).
  return severity == kWarning;
}
// Prints the given message to stdout iff 'severity' >= the level
// specified by the --gmock_verbose flag.  If stack_frames_to_skip >=
// 0, also prints the stack trace excluding the top
// stack_frames_to_skip frames.  In opt mode, any positive
// stack_frames_to_skip is treated as 0, since we don't know which
// function calls will be inlined by the compiler and need to be
// conservative.
GTEST_API_ void Log(LogSeverity severity, const std::string& message,
                    int stack_frames_to_skip) {
  if (!LogIsVisible(severity))
    return;

  // Ensures that logs from different threads don't interleave.
  MutexLock l(&g_log_mutex);

  // "using ::std::cout;" doesn't work with Symbian's STLport, where cout is a
  // macro.

  if (severity == kWarning) {
    // Prints a GMOCK WARNING marker to make the warnings easily searchable.
    std::cout << "\nGMOCK WARNING:";
  }
  // Pre-pends a new-line to message if it doesn't start with one.
  if (message.empty() || message[0] != '\n') {
    std::cout << "\n";
  }
  std::cout << message;
  if (stack_frames_to_skip >= 0) {
#ifdef NDEBUG
    // In opt mode, we have to be conservative and skip no stack frame.
    const int actual_to_skip = 0;
#else
    // In dbg mode, we can do what the caller tell us to do (plus one
    // for skipping this function's stack frame).
    const int actual_to_skip = stack_frames_to_skip + 1;
#endif  // NDEBUG

    // Appends a new-line to message if it doesn't end with one.
    if (!message.empty() && *message.rbegin() != '\n') {
      std::cout << "\n";
    }
    std::cout << "Stack trace:\n"
         << ::testing::internal::GetCurrentOsStackTraceExceptTop(
             ::testing::UnitTest::GetInstance(), actual_to_skip);
  }
  // Flush so interleaved test output appears in order.
  std::cout << ::std::flush;
}
// Returns a default-constructed WithoutMatchers instance (used as an
// overload-disambiguation tag).
GTEST_API_ WithoutMatchers GetWithoutMatchers() { return WithoutMatchers(); }

// Reports an illegal use of DoDefault() inside a composite action;
// 'file' and 'line' locate the offending call site.
GTEST_API_ void IllegalDoDefault(const char* file, int line) {
  internal::Assert(
      false, file, line,
      "You are using DoDefault() inside a composite action like "
      "DoAll() or WithArgs(). This is not supported for technical "
      "reasons. Please instead spell out the default action, or "
      "assign the default action to an Action variable and use "
      "the variable in various places.");
}
} // namespace internal
} // namespace testing
| mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/translation/pytorch/cutlass/tools/external/googletest/googlemock/src/gmock-internal-utils.cc | C++ | apache-2.0 | 7,665 |
<div class="l-wrapper">
<div class="l-btn-group">
<button *ngFor="let tab of tabList" [class.active]="isActive(tab.key)" (click)="onClickTab(tab.key)">
{{tab.display}}
</button>
<button (click)="openDetailView()">Mixed View <i class="fas fa-external-link-square-alt" aria-hidden="true"></i></button>
<button class="l-log-info" (click)="openLogView()" [class.disabled]="!hasInfo()" *ngIf="hasLogView()">{{transactionDetailInfo.logButtonName}} <i [ngClass]="getLogIcon()" aria-hidden="true"></i></button>
</div>
<span class="l-transaction-state" [hidden]="!hasState()" [ngClass]="getStateClass()"><i class="fas fa-th-list"></i> {{transactionDetailInfo?.completeState}}</span>
</div>
| naver/pinpoint | web/src/main/angular/src/app/core/components/transaction-detail-menu/transaction-detail-menu.component.html | HTML | apache-2.0 | 737 |
<!doctype html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:foo="http://www.w3.org/2000/svg">
<head>
<title>Halogen Example - Todo list</title>
<style>
body {
font-family: sans-serif;
max-width: 570px;
margin: auto;
}
ul {
list-style: none;
padding: 0;
}
li {
margin-bottom: 5px;
}
input, button {
font-family: sans-serif;
font-size: 14px;
}
input[type=text] {
width: 500px;
margin: 0px 4px;
}
</style>
</head>
<body>
<script src="example.js"></script>
</body>
</html>
| nwolverson/purescript-halogen | examples/todo/dist/index.html | HTML | apache-2.0 | 638 |
# Copyright 1999-2010 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use Globus::Core::Paths;
package Globus::Core::Config;
# Constructor: parses a Globus config file at the given path and returns
# a blessed hash of attribute => value pairs, or undef when the file does
# not exist or cannot be opened.
sub new
{
    my $proto = shift;
    my $class = ref($proto) || $proto;
    my $self = {};
    my $path = shift;
    my $fh;
    my $line;

    $path = Globus::Core::Paths::eval_path($path);
    if (! -f $path)
    {
        return undef;
    }
    # Three-argument open avoids interpreting metacharacters in $path as
    # part of the mode, and the result is now checked instead of silently
    # reading from an unopened handle.
    open($fh, '<', $path) || return undef;

    # Odd parsing algorithm lifted from C code. See globus_common_paths.c
    while ($line = <$fh>)
    {
        # Remove leading whitespace
        $line =~ s/^[ \t]*//;

        # Process anything that's an attr=.* line
        if ($line =~ m/([^=]*)=(.*)/)
        {
            my $attr = $1;
            my $value = $2;

            # Remove single leading double quote if present
            $value =~ s/^"//;
            # Remove all trailing space, tab, newline and quotes
            $value =~ s/[ \t"]*$//;

            $self->{$attr} = $value;
        }
    }
    # Close the handle explicitly instead of leaking it until GC.
    close($fh);

    bless $self, $class;

    return $self;
}
# Returns the value parsed for the named attribute, or undef when the
# attribute was not present in the config file.
sub get_attribute
{
    my ($self, $attribute) = @_;

    return exists($self->{$attribute}) ? $self->{$attribute} : undef;
}
1;
| gridcf/gct | common/source/scripts/Config.pm | Perl | apache-2.0 | 1,744 |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
#
"""VPN Utilities and helper functions."""
from neutronclient.common import exceptions
from neutronclient.i18n import _
dpd_supported_actions = ['hold', 'clear', 'restart',
'restart-by-peer', 'disabled']
dpd_supported_keys = ['action', 'interval', 'timeout']
lifetime_keys = ['units', 'value']
lifetime_units = ['seconds']
def validate_dpd_dict(dpd_dict):
    """Validate a Dead Peer Detection dict in place.

    Unknown keys and unknown actions raise CommandError; 'interval' and
    'timeout' must be positive integers and are coerced to int.
    """
    for key, value in dpd_dict.items():
        if key not in dpd_supported_keys:
            raise exceptions.CommandError(_(
                "DPD Dictionary KeyError: "
                "Reason-Invalid DPD key : "
                "'%(key)s' not in %(supported_key)s ") % {
                    'key': key, 'supported_key': dpd_supported_keys})
        if key == 'action' and value not in dpd_supported_actions:
            raise exceptions.CommandError(_(
                "DPD Dictionary ValueError: "
                "Reason-Invalid DPD action : "
                "'%(key_value)s' not in %(supported_action)s ") % {
                    'key_value': value,
                    'supported_action': dpd_supported_actions})
        if key in ('interval', 'timeout'):
            converted = None
            try:
                converted = int(value)
            except ValueError:
                # Non-numeric strings fall through to the error below;
                # other types (e.g. None) raise TypeError, as before.
                pass
            if converted is None or converted <= 0:
                raise exceptions.CommandError(_(
                    "DPD Dictionary ValueError: "
                    "Reason-Invalid positive integer value: "
                    "'%(key)s' = %(value)s ") % {
                        'key': key, 'value': value})
            dpd_dict[key] = converted
    return
def validate_lifetime_dict(lifetime_dict):
    """Validate a lifetime dict in place.

    Unknown keys and unknown units raise CommandError; 'value' must be
    an integer of at least 60 and is coerced to int.
    """
    for key, value in lifetime_dict.items():
        if key not in lifetime_keys:
            raise exceptions.CommandError(_(
                "Lifetime Dictionary KeyError: "
                "Reason-Invalid unit key : "
                "'%(key)s' not in %(supported_key)s ") % {
                    'key': key, 'supported_key': lifetime_keys})
        if key == 'units' and value not in lifetime_units:
            raise exceptions.CommandError(_(
                "Lifetime Dictionary ValueError: "
                "Reason-Invalid units : "
                "'%(key_value)s' not in %(supported_units)s ") % {
                    'key_value': key, 'supported_units': lifetime_units})
        if key == 'value':
            converted = None
            try:
                converted = int(value)
            except ValueError:
                # Non-numeric strings fall through to the error below;
                # other types (e.g. None) raise TypeError, as before.
                pass
            if converted is None or converted < 60:
                raise exceptions.CommandError(_(
                    "Lifetime Dictionary ValueError: "
                    "Reason-Invalid value should be at least 60:"
                    "'%(key_value)s' = %(value)s ") % {
                        'key_value': key, 'value': value})
            lifetime_dict['value'] = converted
    return
def lifetime_help(policy):
    """Return CLI help text describing <policy> lifetime attributes."""
    lifetime = _("%s lifetime attributes. "
                 "'units'-seconds, default:seconds. "
                 "'value'-non negative integer, default:3600.") % policy
    return lifetime


def dpd_help(policy):
    """Return CLI help text describing <policy> Dead Peer Detection attributes."""
    dpd = _(" %s Dead Peer Detection attributes."
            " 'action'-hold,clear,disabled,restart,restart-by-peer."
            " 'interval' and 'timeout' are non negative integers. "
            " 'interval' should be less than 'timeout' value. "
            " 'action', default:hold 'interval', default:30, "
            " 'timeout', default:120.") % policy.capitalize()
    return dpd
| varunarya10/python-neutronclient | neutronclient/neutron/v2_0/vpn/utils.py | Python | apache-2.0 | 4,400 |
<?php
class attachment {
var $contentid;
var $module;
var $catid;
var $attachments;
var $field;
var $imageexts = array('gif', 'jpg', 'jpeg', 'png', 'bmp');
var $uploadedfiles = array();
var $downloadedfiles = array();
var $error;
var $upload_root;
var $siteid;
var $site = array();
/**
 * Set up the attachment helper.
 *
 * @param string $module     owning module name; defaults to 'content'
 * @param int    $catid      category id
 * @param int    $siteid     site id; 0 falls back to the default site (1)
 * @param string $upload_dir extra sub-directory under the upload root
 */
function __construct($module='', $catid = 0,$siteid = 0,$upload_dir = '') {
    $this->catid = intval($catid);
    // A site id of 0 is treated as the default site (1).
    $this->siteid = intval($siteid)== 0 ? 1 : intval($siteid);
    $this->module = $module ? $module : 'content';
    pc_base::load_sys_func('dir');
    // Load (but do not instantiate) the image helper class.
    pc_base::load_sys_class('image','','0');
    $this->upload_root = pc_base::load_config('system','upload_path');
    // NOTE(review): plain copy() is used instead of move_uploaded_file()
    // -- confirm this is intentional for this framework.
    $this->upload_func = 'copy';
    $this->upload_dir = $upload_dir;
}
/**
 * Attachment upload handler.
 *
 * @param string $field            name of the $_FILES field to read (single file or array of files)
 * @param string $alowexts         allowed extensions (pipe-separated); falls back to the site setting when empty
 * @param int    $maxsize          maximum file size in bytes (0 = unlimited)
 * @param int    $overwrite        whether to overwrite an existing file with the same name
 * @param array  $thumb_setting    array(width, height) thumbnail settings; thumbnails made when either is > 0
 * @param int    $watermark_enable whether to stamp a watermark on the saved image
 * @return array|false list of attachment ids on success; false on any error (code in $this->error)
 */
function upload($field, $alowexts = '', $maxsize = 0, $overwrite = 0,$thumb_setting = array(), $watermark_enable = 1) {
    if(!isset($_FILES[$field])) {
        $this->error = UPLOAD_ERR_OK;
        return false;
    }
    // Fall back to the site-wide allowed-extension list when none given.
    if(empty($alowexts) || $alowexts == '') {
        $site_setting = $this->_get_site_setting($this->siteid);
        $alowexts = $site_setting['upload_allowext'];
    }
    // CKEditor callback number; presumably echoed back to the editor -- TODO confirm caller sanitizes it.
    $fn = $_GET['CKEditorFuncNum'] ? $_GET['CKEditorFuncNum'] : '1';
    $this->field = $field;
    // Files are stored under upload_root/upload_dir/Y/md/.
    $this->savepath = $this->upload_root.$this->upload_dir.date('Y/md/');
    $this->alowexts = $alowexts;
    $this->maxsize = $maxsize;
    $this->overwrite = $overwrite;
    $uploadfiles = array();
    $description = isset($GLOBALS[$field.'_description']) ? $GLOBALS[$field.'_description'] : array();
    // Normalize both the multi-file and single-file $_FILES layouts into
    // one list of per-file arrays.
    if(is_array($_FILES[$field]['error'])) {
        $this->uploads = count($_FILES[$field]['error']);
        foreach($_FILES[$field]['error'] as $key => $error) {
            if($error === UPLOAD_ERR_NO_FILE) continue;
            if($error !== UPLOAD_ERR_OK) {
                $this->error = $error;
                return false;
            }
            $uploadfiles[$key] = array('tmp_name' => $_FILES[$field]['tmp_name'][$key], 'name' => $_FILES[$field]['name'][$key], 'type' => $_FILES[$field]['type'][$key], 'size' => $_FILES[$field]['size'][$key], 'error' => $_FILES[$field]['error'][$key], 'description'=>$description[$key],'fn'=>$fn);
        }
    } else {
        $this->uploads = 1;
        if(!$description) $description = '';
        $uploadfiles[0] = array('tmp_name' => $_FILES[$field]['tmp_name'], 'name' => $_FILES[$field]['name'], 'type' => $_FILES[$field]['type'], 'size' => $_FILES[$field]['size'], 'error' => $_FILES[$field]['error'], 'description'=>$description,'fn'=>$fn);
    }
    // Error codes 8/9: save directory missing or not writable.
    if(!dir_create($this->savepath)) {
        $this->error = '8';
        return false;
    }
    if(!is_dir($this->savepath)) {
        $this->error = '8';
        return false;
    }
    @chmod($this->savepath, 0777);
    if(!is_writeable($this->savepath)) {
        $this->error = '9';
        return false;
    }
    // Error code 13: upload quota / permission check failed.
    if(!$this->is_allow_upload()) {
        $this->error = '13';
        return false;
    }
    $aids = array();
    foreach($uploadfiles as $k=>$file) {
        $fileext = fileext($file['name']);
        if($file['error'] != 0) {
            $this->error = $file['error'];
            return false;
        }
        // Error 10: extension not in whitelist; 11: too large;
        // 12: not an actual uploaded file (spoofed tmp_name).
        if(!preg_match("/^(".$this->alowexts.")$/", $fileext)) {
            $this->error = '10';
            return false;
        }
        if($this->maxsize && $file['size'] > $this->maxsize) {
            $this->error = '11';
            return false;
        }
        if(!$this->isuploadedfile($file['tmp_name'])) {
            $this->error = '12';
            return false;
        }
        $temp_filename = $this->getname($fileext);
        $savefile = $this->savepath.$temp_filename;
        // Defense-in-depth: neutralize executable extensions in the
        // final path even if they slipped past the whitelist.
        $savefile = preg_replace("/(php|phtml|php3|php4|jsp|exe|dll|asp|cer|asa|shtml|shtm|aspx|asax|cgi|fcgi|pl)(\.|$)/i", "_\\1\\2", $savefile);
        $filepath = preg_replace(new_addslashes("|^".$this->upload_root."|"), "", $savefile);
        if(!$this->overwrite && file_exists($savefile)) continue;
        $upload_func = $this->upload_func;
        if(@$upload_func($file['tmp_name'], $savefile)) {
            $this->uploadeds++;
            @chmod($savefile, 0644);
            @unlink($file['tmp_name']);
            // Convert the client file name from UTF-8 to the site charset
            // and strip unsafe characters before storing.
            $file['name'] = iconv("utf-8",CHARSET,$file['name']);
            $file['name'] = safe_replace($file['name']);
            $uploadedfile = array('filename'=>$file['name'], 'filepath'=>$filepath, 'filesize'=>$file['size'], 'fileext'=>$fileext, 'fn'=>$file['fn']);
            // Thumbnail only when a non-zero width or height was requested.
            $thumb_enable = is_array($thumb_setting) && ($thumb_setting[0] > 0 || $thumb_setting[1] > 0 ) ? 1 : 0;
            $image = new image($thumb_enable,$this->siteid);
            if($thumb_enable) {
                $image->thumb($savefile,'',$thumb_setting[0],$thumb_setting[1]);
            }
            if($watermark_enable) {
                $image->watermark($savefile, $savefile);
            }
            // Record the attachment row and collect its id for the caller.
            $aids[] = $this->add($uploadedfile);
        }
    }
    return $aids;
}
/**
 * Download remote files referenced in an HTML fragment (附件下载).
 *
 * Scans $value for href/src attributes whose target ends in one of the
 * extensions in $ext, fetches each remote file into today's local upload
 * directory, registers it as an attachment row, and returns $value with
 * the remote URLs rewritten to the local upload URLs.
 *
 * @param $field     reserved field name (kept for interface compatibility)
 * @param $value     HTML fragment to scan
 * @param $watermark truthy to watermark each downloaded file
 * @param $ext       pipe-separated list of downloadable extensions
 * @param $absurl    absolute base forwarded to fillurl() for relative URLs
 * @param $basehref  page base href, forwarded to fillurl()
 * @return string    $value with remote URLs replaced by local ones
 */
function download($field, $value,$watermark = '0',$ext = 'gif|jpg|jpeg|bmp|png', $absurl = '', $basehref = '')
{
$this->att_db = pc_base::load_model('attachment_model');
$upload_url = pc_base::load_config('system','upload_url');
$this->field = $field;
$dir = date('Y/md/');
$uploadpath = $upload_url.$dir;
$uploaddir = $this->upload_root.$dir;
$string = new_stripslashes($value);
// Nothing to do when no matching href/src attribute is present.
if(!preg_match_all("/(href|src)=([\"|']?)([^ \"'>]+\.($ext))\\2/i", $string, $matches)) return $value;
$remotefileurls = array();
foreach($matches[3] as $matche) {
// Only scheme-qualified URLs are remote candidates.
if(strpos($matche, '://') === false) continue;
dir_create($uploaddir);
$remotefileurls[$matche] = $this->fillurl($matche, $absurl, $basehref);
}
unset($matches, $string);
$remotefileurls = array_unique($remotefileurls);
$oldpath = $newpath = array();
foreach($remotefileurls as $k=>$file) {
// Skip URLs that are not remote, or that already live under our upload URL.
if(strpos($file, '://') === false || strpos($file, $upload_url) !== false) continue;
// Generate a fresh local name carrying the remote file's extension.
// (The original also computed an unused basename() into $file_name and
// declared an unused "global $image_d"; both removed.)
$filename = $this->getname(fileext($file));
$newfile = $uploaddir.$filename;
$upload_func = $this->upload_func;
if($upload_func($file, $newfile)) {
$oldpath[] = $k;
$GLOBALS['downloadfiles'][] = $newpath[] = $uploadpath.$filename;
@chmod($newfile, 0777);
$fileext = fileext($filename);
if($watermark){
watermark($newfile, $newfile,$this->siteid);
}
$filepath = $dir.$filename;
$downloadedfile = array('filename'=>$filename, 'filepath'=>$filepath, 'filesize'=>filesize($newfile), 'fileext'=>$fileext);
$aid = $this->add($downloadedfile);
$this->downloadedfiles[$aid] = $filepath;
}
}
return str_replace($oldpath, $newpath, $value);
}
/**
 * Delete attachments matching $where (附件删除方法): unlink each file and
 * any thumbnails sitting beside it, then remove the matching rows.
 * @param $where SQL condition used for both the lookup and the delete
 * @return mixed result of the attachment-model delete call
 */
function delete($where) {
$this->att_db = pc_base::load_model('attachment_model');
$rows = $this->att_db->select($where);
foreach($rows as $row) {
$path = $this->upload_root.$row['filepath'];
@unlink($path);
// Thumbnails live in the same directory and end with the original's basename.
$thumbs = glob(dirname($path).'/*'.basename($path));
if($thumbs) {
foreach($thumbs as $thumb) {
@unlink($thumb);
}
}
}
return $this->att_db->delete($where);
}
/**
 * Insert one uploaded/downloaded file into the attachment table (附件添加入数据库).
 * Stamps module/site/user bookkeeping fields before the insert and records
 * the row in $this->uploadedfiles.
 * @param $uploadedfile attachment info: filename, filepath, filesize, fileext, ...
 * @return mixed new attachment id returned by the model's api_add()
 */
function add($uploadedfile) {
$this->att_db = pc_base::load_model('attachment_model');
$uploadedfile['module'] = $this->module;
$uploadedfile['catid'] = $this->catid;
$uploadedfile['siteid'] = $this->siteid;
$uploadedfile['userid'] = $this->userid;
$uploadedfile['uploadtime'] = SYS_TIME;
$uploadedfile['uploadip'] = ip();
// When moderation (attachment_stat) is enabled new files start unapproved (0).
if(pc_base::load_config('system','attachment_stat')) {
$uploadedfile['status'] = 0;
} else {
$uploadedfile['status'] = 1;
}
$uploadedfile['authcode'] = md5($uploadedfile['filepath']);
// Over-long client-supplied names are replaced with a generated one
// (presumably a DB column length limit — confirm against the schema).
if(strlen($uploadedfile['filename']) > 49) {
$uploadedfile['filename'] = $this->getname($uploadedfile['fileext']);
}
$uploadedfile['isimage'] = in_array($uploadedfile['fileext'], $this->imageexts) ? 1 : 0;
$aid = $this->att_db->api_add($uploadedfile);
$this->uploadedfiles[] = $uploadedfile;
return $aid;
}
/**
 * Record the user id that subsequent add() calls will attribute
 * attachments to.
 * @param $userid owner user id
 */
function set_userid($userid) {
$this->userid = $userid;
}
/**
 * Derive the thumbnail path for an image (获取缩略图地址).
 *
 * Inserts "_thumb" before the final extension dot only. The original
 * str_replace('.', '_thumb.', ...) replaced EVERY dot, corrupting paths
 * with more than one (e.g. "a.b/img.jpg" -> "a_thumb.b/img_thumb.jpg").
 *
 * @param $image image path
 * @return string thumbnail path, or $image unchanged when it has no dot
 */
function get_thumb($image){
$pos = strrpos($image, '.');
if($pos === false) return $image;
return substr($image, 0, $pos).'_thumb'.substr($image, $pos);
}
/**
 * Generate a unique-ish attachment file name (获取附件名称):
 * 14-digit timestamp + 3 random digits + extension.
 *
 * Uses 'YmdHis' (24-hour clock). The original used 'Ymdhis' — lowercase
 * %h is the 12-hour hour, so names generated 12 hours apart could collide.
 *
 * @param $fileext extension to append (without the dot)
 * @return string e.g. "20140102153045123.jpg"
 */
function getname($fileext){
return date('YmdHis').rand(100, 999).'.'.$fileext;
}
/**
 * Format a byte count as a human-readable size (返回附件大小).
 * Values are rounded to two decimal places with a GB/MB/KB unit;
 * anything below 1 KB is reported in plain bytes.
 * @param $filesize size in bytes
 * @return string formatted size, e.g. "1.5 KB"
 */
function size($filesize) {
$units = array(1073741824 => ' GB', 1048576 => ' MB', 1024 => ' KB');
foreach($units as $limit => $suffix) {
if($filesize >= $limit) {
return round($filesize / $limit * 100) / 100 . $suffix;
}
}
return $filesize . ' Bytes';
}
/**
 * Whether $file arrived via HTTP POST upload (is_uploaded_file).
 * Retries with doubled backslashes collapsed — some Windows setups
 * report the temp path with escaped separators.
 * @param string $file file path to check
 * @return bool true when PHP recognises the path as an uploaded file
 */
function isuploadedfile($file) {
if(is_uploaded_file($file)) {
return true;
}
return is_uploaded_file(str_replace('\\\\', '\\', $file));
}
/**
 * Resolve a possibly-relative URL into an absolute one (补全网址).
 *
 * Fixes over the original:
 *  - scheme detection compared a fixed 6-char substr() against 7/8-char
 *    literals ('http://', 'rtsp://', 'emule://'), which could never match,
 *    so already-absolute URLs were wrongly re-prefixed;
 *  - the "./path" branch tested $surl[0]=='/' inside the $surl[0]=='.'
 *    case (always false), so "./" URLs fell through to "../" handling.
 *
 * @param string $surl     source URL (absolute, root-relative or relative)
 * @param string $absurl   kept for interface compatibility (unused here)
 * @param string $basehref when non-empty, relative URLs resolve against it
 *                         and absolute URLs are returned untouched
 * @return string resolved absolute URL, or '' when it cannot be resolved
 */
function fillurl($surl, $absurl, $basehref = '') {
$surl = trim($surl);
// True when the URL already carries a recognised scheme.
$has_scheme = preg_match('/^(http|https|ftp|mms|rtsp|thunder|emule|ed2k):\/\//i', $surl) ? 1 : 0;
if($basehref != '') {
return $has_scheme ? $surl : $basehref.'/'.$surl;
}
if($surl == '') return '';
$urls = @parse_url(SITE_URL);
$HomeUrl = $urls['host'];
$BaseUrlPath = $HomeUrl.$urls['path'];
// Strip any trailing filename and trailing slash from the base path.
$BaseUrlPath = preg_replace("/\/([^\/]*)\.(.*)$/",'/',$BaseUrlPath);
$BaseUrlPath = preg_replace("/\/$/",'',$BaseUrlPath);
// Drop a fragment, if any.
$pos = strpos($surl,'#');
if($pos > 0) $surl = substr($surl,0,$pos);
if($surl[0] == '/') {
// Root-relative: resolve against the host alone.
$okurl = 'http://'.$HomeUrl.'/'.$surl;
} elseif($surl[0] == '.') {
if(strlen($surl) <= 2) return '';
elseif($surl[1] == '/') {
// "./path" relative to the base directory.
$okurl = 'http://'.$BaseUrlPath.'/'.substr($surl,2,strlen($surl)-2);
} else {
// "../path": count the parent steps, keep the remainder in $dstr.
$i = 0;
$dstr = '';
$pathStep = 0;
$parts = explode('/',$surl);
foreach($parts as $u) {
if($u == "..") $pathStep++;
else if($i < count($parts)-1) $dstr .= $parts[$i].'/';
else $dstr .= $parts[$i];
$i++;
}
$urls = explode('/', $BaseUrlPath);
if(count($urls) <= $pathStep) return '';
$pstr = 'http://';
for($i=0;$i<count($urls)-$pathStep;$i++) {
$pstr .= $urls[$i].'/';
}
$okurl = $pstr.$dstr;
}
} else {
// Bare relative path, or already absolute with a scheme.
$okurl = $has_scheme ? $surl : 'http://'.$BaseUrlPath.'/'.$surl;
}
// Non-http schemes are returned untouched; http URLs get duplicate
// slashes collapsed and a single canonical "http://" prefix.
if(preg_match('/^(https|ftp|mms|rtsp|thunder|emule|ed2k):\/\//i', $okurl)) {
return $okurl;
}
$okurl = preg_replace('/^(http:\/\/)/i','',$okurl);
$okurl = preg_replace('/\/{1,}/i','/',$okurl);
return 'http://'.$okurl;
}
/**
 * Whether the current user may upload (是否允许上传).
 *
 * Group 1 bypasses the limit; everyone else is checked against the site's
 * upload_maxsize setting. The original read the undefined locals $_groupid
 * and $uploads, so the bypass never fired and the count always compared as
 * 0; this version reads the object state instead ($this->uploads is set by
 * upload() — presumably the per-request file count; confirm semantics of
 * upload_maxsize against the site settings UI).
 *
 * @return bool true when the upload is allowed
 */
function is_allow_upload() {
$groupid = isset($this->groupid) ? $this->groupid : 0;
if($groupid == 1) return true;
$site_setting = $this->_get_site_setting($this->siteid);
$uploads = isset($this->uploads) ? $this->uploads : 0;
return ($uploads < $site_setting['upload_maxsize']);
}
/**
 * Map the numeric code stored in $this->error to a localised message,
 * converted from the site CHARSET to UTF-8 (返回错误信息).
 * Codes 0-7 mirror PHP's UPLOAD_ERR_* constants; 8-13 are raised by this
 * class itself (directory, permission, extension, size, source and quota
 * failures set in upload()/is_allow_upload()).
 * @return string UTF-8 error message
 */
function error() {
$UPLOAD_ERROR = array(
0 => L('att_upload_succ'),
1 => L('att_upload_limit_ini'),
2 => L('att_upload_limit_filesize'),
3 => L('att_upload_limit_part'),
4 => L('att_upload_nofile'),
5 => '', // no UPLOAD_ERR_* constant uses code 5
6 => L('att_upload_notemp'),
7 => L('att_upload_temp_w_f'),
8 => L('att_upload_create_dir_f'),
9 => L('att_upload_dir_permissions'),
10 => L('att_upload_limit_ext'),
11 => L('att_upload_limit_setsize'),
12 => L('att_upload_not_allow'),
13 => L('att_upload_limit_time'),
);
return iconv(CHARSET,"utf-8",$UPLOAD_ERROR[$this->error]);
}
/**
 * Emit the CKEditor upload-callback snippet and terminate the request
 * (ck编辑器返回).
 * NOTE(review): $fn, $fileurl and $message are interpolated into inline
 * JavaScript unescaped — callers must pass trusted values, otherwise this
 * is an XSS vector.
 * @param $fn      CKEditor callback function number
 * @param $fileurl uploaded file URL handed to the editor
 * @param $message message the editor displays
 */
function mkhtml($fn,$fileurl,$message) {
$str = '<script type="text/javascript">window.parent.CKEDITOR.tools.callFunction('
.$fn.', \''.$fileurl.'\', \''.$message.'\');</script>';
exit($str);
}
/**
 * Flash-upload debug helper (flash上传调试方法): dump the raw error id to
 * xxx.txt under the install root so it can be inspected when the SWF
 * uploader gives no usable feedback.
 * @param $id error id reported by the uploader
 */
function uploaderror($id = 0) {
$debugfile = PHPCMS_PATH.'xxx.txt';
file_put_contents($debugfile, $id);
}
/**
 * Fetch one site's unserialised settings from the 'sitelist' cache
 * (获取站点配置信息).
 * @param $siteid site id
 * @return array settings decoded by string2array()
 */
private function _get_site_setting($siteid) {
$sites = getcache('sitelist', 'commons');
return string2array($sites[$siteid]['setting']);
}
}
?>
/*
* Copyright (c) 2014, Freescale Semiconductor, Inc.
* All rights reserved.
*
* THIS SOFTWARE IS PROVIDED BY FREESCALE "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL FREESCALE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*/
/*
* WARNING! DO NOT EDIT THIS FILE DIRECTLY!
*
* This file was generated automatically and any changes may be lost.
*/
#ifndef __HW_CMP_REGISTERS_H__
#define __HW_CMP_REGISTERS_H__
#include "regs.h"
/*
* MK70F12 CMP
*
* High-Speed Comparator (CMP), Voltage Reference (VREF) Digital-to-Analog Converter (DAC), and Analog Mux (ANMUX)
*
* Registers defined in this header file:
* - HW_CMP_CR0 - CMP Control Register 0
* - HW_CMP_CR1 - CMP Control Register 1
* - HW_CMP_FPR - CMP Filter Period Register
* - HW_CMP_SCR - CMP Status and Control Register
* - HW_CMP_DACCR - DAC Control Register
* - HW_CMP_MUXCR - MUX Control Register
*
* - hw_cmp_t - Struct containing all module registers.
*/
//! @name Module base addresses
//@{
#ifndef REGS_CMP_BASE
#define HW_CMP_INSTANCE_COUNT (4U) //!< Number of instances of the CMP module.
#define HW_CMP0 (0U) //!< Instance number for CMP0.
#define HW_CMP1 (1U) //!< Instance number for CMP1.
#define HW_CMP2 (2U) //!< Instance number for CMP2.
#define HW_CMP3 (3U) //!< Instance number for CMP3.
#define REGS_CMP0_BASE (0x40073000U) //!< Base address for CMP0.
#define REGS_CMP1_BASE (0x40073008U) //!< Base address for CMP1.
#define REGS_CMP2_BASE (0x40073010U) //!< Base address for CMP2.
#define REGS_CMP3_BASE (0x40073018U) //!< Base address for CMP3.
//! @brief Table of base addresses for CMP instances.
//! Indexed by instance number (HW_CMPn); each CMP register block is 8 bytes.
static const uint32_t __g_regs_CMP_base_addresses[] = {
REGS_CMP0_BASE,
REGS_CMP1_BASE,
REGS_CMP2_BASE,
REGS_CMP3_BASE,
};
//! @brief Get the base address of CMP by instance number.
//! @param x CMP instance number, from 0 through 3.
//! @note No bounds check is performed; x > 3 reads past the table.
#define REGS_CMP_BASE(x) (__g_regs_CMP_base_addresses[(x)])
//! @brief Get the instance number given a base address.
//! @param b Base address for an instance of CMP.
//! @note An address that is not a CMP base silently maps to HW_CMP0 (0).
#define REGS_CMP_INSTANCE(b) ((b) == REGS_CMP0_BASE ? HW_CMP0 : (b) == REGS_CMP1_BASE ? HW_CMP1 : (b) == REGS_CMP2_BASE ? HW_CMP2 : (b) == REGS_CMP3_BASE ? HW_CMP3 : 0)
#endif
//@}
//-------------------------------------------------------------------------------------------
// HW_CMP_CR0 - CMP Control Register 0
//-------------------------------------------------------------------------------------------
#ifndef __LANGUAGE_ASM__
/*!
 * @brief HW_CMP_CR0 - CMP Control Register 0 (RW)
 *
 * Reset value: 0x00U
 *
 * NOTE(review): bit-field allocation order within a byte is
 * compiler-defined; this layout assumes LSB-first packing as used by the
 * Kinetis toolchains this header targets — confirm before porting.
 */
typedef union _hw_cmp_cr0
{
uint8_t U; //!< Byte-wide access to the whole register.
struct _hw_cmp_cr0_bitfields
{
uint8_t HYSTCTR : 2; //!< [1:0] Comparator hard block hysteresis
//! control
uint8_t RESERVED0 : 2; //!< [3:2]
uint8_t FILTER_CNT : 3; //!< [6:4] Filter Sample Count
uint8_t RESERVED1 : 1; //!< [7]
} B; //!< Bit-field access to individual fields.
} hw_cmp_cr0_t;
#endif
/*!
* @name Constants and macros for entire CMP_CR0 register
*/
//@{
#define HW_CMP_CR0_ADDR(x) (REGS_CMP_BASE(x) + 0x0U)
#ifndef __LANGUAGE_ASM__
#define HW_CMP_CR0(x) (*(__IO hw_cmp_cr0_t *) HW_CMP_CR0_ADDR(x))
#define HW_CMP_CR0_RD(x) (HW_CMP_CR0(x).U)
#define HW_CMP_CR0_WR(x, v) (HW_CMP_CR0(x).U = (v))
#define HW_CMP_CR0_SET(x, v) (HW_CMP_CR0_WR(x, HW_CMP_CR0_RD(x) | (v)))
#define HW_CMP_CR0_CLR(x, v) (HW_CMP_CR0_WR(x, HW_CMP_CR0_RD(x) & ~(v)))
#define HW_CMP_CR0_TOG(x, v) (HW_CMP_CR0_WR(x, HW_CMP_CR0_RD(x) ^ (v)))
#endif
//@}
/*
* Constants & macros for individual CMP_CR0 bitfields
*/
/*!
* @name Register CMP_CR0, field HYSTCTR[1:0] (RW)
*
* Defines the programmable hysteresis level. The hysteresis values associated
* with each level is device-specific. See the device's data sheet for the exact
* values.
*
* Values:
* - 00 - Level 0
* - 01 - Level 1
* - 10 - Level 2
* - 11 - Level 3
*/
//@{
#define BP_CMP_CR0_HYSTCTR (0U) //!< Bit position for CMP_CR0_HYSTCTR.
#define BM_CMP_CR0_HYSTCTR (0x03U) //!< Bit mask for CMP_CR0_HYSTCTR.
#define BS_CMP_CR0_HYSTCTR (2U) //!< Bit field size in bits for CMP_CR0_HYSTCTR.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR0_HYSTCTR field.
#define BR_CMP_CR0_HYSTCTR(x) (HW_CMP_CR0(x).B.HYSTCTR)
#endif
//! @brief Format value for bitfield CMP_CR0_HYSTCTR.
#define BF_CMP_CR0_HYSTCTR(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR0_HYSTCTR), uint8_t) & BM_CMP_CR0_HYSTCTR)
#ifndef __LANGUAGE_ASM__
//! @brief Set the HYSTCTR field to a new value.
#define BW_CMP_CR0_HYSTCTR(x, v) (HW_CMP_CR0_WR(x, (HW_CMP_CR0_RD(x) & ~BM_CMP_CR0_HYSTCTR) | BF_CMP_CR0_HYSTCTR(v)))
#endif
//@}
/*!
* @name Register CMP_CR0, field FILTER_CNT[6:4] (RW)
*
* These bits represent the number of consecutive samples that must agree prior
* to the comparator ouput filter accepting a new output state. For information
* regarding filter programming and latency reference the Functional Description.
*
* Values:
* - 000 - Filter is disabled. If SE = 1, then COUT is a logic zero (this is not
* a legal state, and is not recommended). If SE = 0, COUT = COUTA.
* - 001 - 1 consecutive sample must agree (comparator output is simply sampled).
* - 010 - 2 consecutive samples must agree.
* - 011 - 3 consecutive samples must agree.
* - 100 - 4 consecutive samples must agree.
* - 101 - 5 consecutive samples must agree.
* - 110 - 6 consecutive samples must agree.
* - 111 - 7 consecutive samples must agree.
*/
//@{
#define BP_CMP_CR0_FILTER_CNT (4U) //!< Bit position for CMP_CR0_FILTER_CNT.
#define BM_CMP_CR0_FILTER_CNT (0x70U) //!< Bit mask for CMP_CR0_FILTER_CNT.
#define BS_CMP_CR0_FILTER_CNT (3U) //!< Bit field size in bits for CMP_CR0_FILTER_CNT.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR0_FILTER_CNT field.
#define BR_CMP_CR0_FILTER_CNT(x) (HW_CMP_CR0(x).B.FILTER_CNT)
#endif
//! @brief Format value for bitfield CMP_CR0_FILTER_CNT.
#define BF_CMP_CR0_FILTER_CNT(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR0_FILTER_CNT), uint8_t) & BM_CMP_CR0_FILTER_CNT)
#ifndef __LANGUAGE_ASM__
//! @brief Set the FILTER_CNT field to a new value.
#define BW_CMP_CR0_FILTER_CNT(x, v) (HW_CMP_CR0_WR(x, (HW_CMP_CR0_RD(x) & ~BM_CMP_CR0_FILTER_CNT) | BF_CMP_CR0_FILTER_CNT(v)))
#endif
//@}
//-------------------------------------------------------------------------------------------
// HW_CMP_CR1 - CMP Control Register 1
//-------------------------------------------------------------------------------------------
#ifndef __LANGUAGE_ASM__
/*!
 * @brief HW_CMP_CR1 - CMP Control Register 1 (RW)
 *
 * Reset value: 0x00U
 *
 * NOTE(review): bit-field allocation order within a byte is
 * compiler-defined; this layout assumes LSB-first packing as used by the
 * Kinetis toolchains this header targets — confirm before porting.
 */
typedef union _hw_cmp_cr1
{
uint8_t U; //!< Byte-wide access to the whole register.
struct _hw_cmp_cr1_bitfields
{
uint8_t EN : 1; //!< [0] Comparator Module Enable
uint8_t OPE : 1; //!< [1] Comparator Output Pin Enable
uint8_t COS : 1; //!< [2] Comparator Output Select
uint8_t INV : 1; //!< [3] Comparator INVERT
uint8_t PMODE : 1; //!< [4] Power Mode Select
uint8_t RESERVED0 : 1; //!< [5]
uint8_t WE : 1; //!< [6] Windowing Enable
uint8_t SE : 1; //!< [7] Sample Enable
} B; //!< Bit-field access to individual fields.
} hw_cmp_cr1_t;
#endif
/*!
* @name Constants and macros for entire CMP_CR1 register
*/
//@{
#define HW_CMP_CR1_ADDR(x) (REGS_CMP_BASE(x) + 0x1U)
#ifndef __LANGUAGE_ASM__
#define HW_CMP_CR1(x) (*(__IO hw_cmp_cr1_t *) HW_CMP_CR1_ADDR(x))
#define HW_CMP_CR1_RD(x) (HW_CMP_CR1(x).U)
#define HW_CMP_CR1_WR(x, v) (HW_CMP_CR1(x).U = (v))
#define HW_CMP_CR1_SET(x, v) (HW_CMP_CR1_WR(x, HW_CMP_CR1_RD(x) | (v)))
#define HW_CMP_CR1_CLR(x, v) (HW_CMP_CR1_WR(x, HW_CMP_CR1_RD(x) & ~(v)))
#define HW_CMP_CR1_TOG(x, v) (HW_CMP_CR1_WR(x, HW_CMP_CR1_RD(x) ^ (v)))
#endif
//@}
/*
* Constants & macros for individual CMP_CR1 bitfields
*/
/*!
* @name Register CMP_CR1, field EN[0] (RW)
*
* The EN bit enables the Analog Comparator Module. When the module is not
* enabled, it remains in the off state, and consumes no power. When you select the
* same input from analog mux to the positive and negative port, the comparator is
* disabled automatically.
*
* Values:
* - 0 - Analog Comparator disabled.
* - 1 - Analog Comparator enabled.
*/
//@{
#define BP_CMP_CR1_EN (0U) //!< Bit position for CMP_CR1_EN.
#define BM_CMP_CR1_EN (0x01U) //!< Bit mask for CMP_CR1_EN.
#define BS_CMP_CR1_EN (1U) //!< Bit field size in bits for CMP_CR1_EN.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_EN field.
#define BR_CMP_CR1_EN(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_EN))
#endif
//! @brief Format value for bitfield CMP_CR1_EN.
#define BF_CMP_CR1_EN(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_EN), uint8_t) & BM_CMP_CR1_EN)
#ifndef __LANGUAGE_ASM__
//! @brief Set the EN field to a new value.
#define BW_CMP_CR1_EN(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_EN) = (v))
#endif
//@}
/*!
* @name Register CMP_CR1, field OPE[1] (RW)
*
* Values:
* - 0 - The comparator output (CMPO) is not available on the associated CMPO
* output pin.
* - 1 - The comparator output (CMPO) is available on the associated CMPO output
* pin.
*/
//@{
#define BP_CMP_CR1_OPE (1U) //!< Bit position for CMP_CR1_OPE.
#define BM_CMP_CR1_OPE (0x02U) //!< Bit mask for CMP_CR1_OPE.
#define BS_CMP_CR1_OPE (1U) //!< Bit field size in bits for CMP_CR1_OPE.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_OPE field.
#define BR_CMP_CR1_OPE(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_OPE))
#endif
//! @brief Format value for bitfield CMP_CR1_OPE.
#define BF_CMP_CR1_OPE(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_OPE), uint8_t) & BM_CMP_CR1_OPE)
#ifndef __LANGUAGE_ASM__
//! @brief Set the OPE field to a new value.
#define BW_CMP_CR1_OPE(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_OPE) = (v))
#endif
//@}
/*!
* @name Register CMP_CR1, field COS[2] (RW)
*
* Values:
* - 0 - Set CMPO to equal COUT (filtered comparator output).
* - 1 - Set CMPO to equal COUTA (unfiltered comparator output).
*/
//@{
#define BP_CMP_CR1_COS (2U) //!< Bit position for CMP_CR1_COS.
#define BM_CMP_CR1_COS (0x04U) //!< Bit mask for CMP_CR1_COS.
#define BS_CMP_CR1_COS (1U) //!< Bit field size in bits for CMP_CR1_COS.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_COS field.
#define BR_CMP_CR1_COS(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_COS))
#endif
//! @brief Format value for bitfield CMP_CR1_COS.
#define BF_CMP_CR1_COS(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_COS), uint8_t) & BM_CMP_CR1_COS)
#ifndef __LANGUAGE_ASM__
//! @brief Set the COS field to a new value.
#define BW_CMP_CR1_COS(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_COS) = (v))
#endif
//@}
/*!
* @name Register CMP_CR1, field INV[3] (RW)
*
* This bit allows you to select the polarity of the analog comparator function.
* It is also driven to the COUT output (on both the device pin and as
* SCR[COUT]) when CR1[OPE]=0.
*
* Values:
* - 0 - Does not invert the comparator output.
* - 1 - Inverts the comparator output.
*/
//@{
#define BP_CMP_CR1_INV (3U) //!< Bit position for CMP_CR1_INV.
#define BM_CMP_CR1_INV (0x08U) //!< Bit mask for CMP_CR1_INV.
#define BS_CMP_CR1_INV (1U) //!< Bit field size in bits for CMP_CR1_INV.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_INV field.
#define BR_CMP_CR1_INV(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_INV))
#endif
//! @brief Format value for bitfield CMP_CR1_INV.
#define BF_CMP_CR1_INV(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_INV), uint8_t) & BM_CMP_CR1_INV)
#ifndef __LANGUAGE_ASM__
//! @brief Set the INV field to a new value.
#define BW_CMP_CR1_INV(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_INV) = (v))
#endif
//@}
/*!
* @name Register CMP_CR1, field PMODE[4] (RW)
*
* Refer to the device data sheet's CMP electrical specifications table for
* details on the impact of the modes below.
*
* Values:
* - 0 - Low Speed (LS) comparison mode selected. In this mode, CMP has slower
* output propagation delay and lower current consumption.
* - 1 - High Speed (HS) comparison mode selected. In this mode, CMP has faster
* output propagation delay and higher current consumption.
*/
//@{
#define BP_CMP_CR1_PMODE (4U) //!< Bit position for CMP_CR1_PMODE.
#define BM_CMP_CR1_PMODE (0x10U) //!< Bit mask for CMP_CR1_PMODE.
#define BS_CMP_CR1_PMODE (1U) //!< Bit field size in bits for CMP_CR1_PMODE.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_PMODE field.
#define BR_CMP_CR1_PMODE(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_PMODE))
#endif
//! @brief Format value for bitfield CMP_CR1_PMODE.
#define BF_CMP_CR1_PMODE(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_PMODE), uint8_t) & BM_CMP_CR1_PMODE)
#ifndef __LANGUAGE_ASM__
//! @brief Set the PMODE field to a new value.
#define BW_CMP_CR1_PMODE(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_PMODE) = (v))
#endif
//@}
/*!
* @name Register CMP_CR1, field WE[6] (RW)
*
* At any given time, either SE or WE can be set. If a write to this register
* attempts to set both, then SE is set and WE is cleared. However, avoid writing
* ones to both bit locations because this "11" case is reserved and may change in
* future implementations.
*
* Values:
* - 0 - Windowing mode not selected.
* - 1 - Windowing mode selected.
*/
//@{
#define BP_CMP_CR1_WE (6U) //!< Bit position for CMP_CR1_WE.
#define BM_CMP_CR1_WE (0x40U) //!< Bit mask for CMP_CR1_WE.
#define BS_CMP_CR1_WE (1U) //!< Bit field size in bits for CMP_CR1_WE.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_WE field.
#define BR_CMP_CR1_WE(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_WE))
#endif
//! @brief Format value for bitfield CMP_CR1_WE.
#define BF_CMP_CR1_WE(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_WE), uint8_t) & BM_CMP_CR1_WE)
#ifndef __LANGUAGE_ASM__
//! @brief Set the WE field to a new value.
#define BW_CMP_CR1_WE(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_WE) = (v))
#endif
//@}
/*!
* @name Register CMP_CR1, field SE[7] (RW)
*
* At any given time, either SE or WE can be set. If a write to this register
* attempts to set both, then SE is set and WE is cleared. However, avoid writing
* ones to both bit locations because this "11" case is reserved and may change in
* future implementations.
*
* Values:
* - 0 - Sampling mode not selected.
* - 1 - Sampling mode selected.
*/
//@{
#define BP_CMP_CR1_SE (7U) //!< Bit position for CMP_CR1_SE.
#define BM_CMP_CR1_SE (0x80U) //!< Bit mask for CMP_CR1_SE.
#define BS_CMP_CR1_SE (1U) //!< Bit field size in bits for CMP_CR1_SE.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_CR1_SE field.
#define BR_CMP_CR1_SE(x) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_SE))
#endif
//! @brief Format value for bitfield CMP_CR1_SE.
#define BF_CMP_CR1_SE(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_CR1_SE), uint8_t) & BM_CMP_CR1_SE)
#ifndef __LANGUAGE_ASM__
//! @brief Set the SE field to a new value.
#define BW_CMP_CR1_SE(x, v) (BITBAND_ACCESS8(HW_CMP_CR1_ADDR(x), BP_CMP_CR1_SE) = (v))
#endif
//@}
//-------------------------------------------------------------------------------------------
// HW_CMP_FPR - CMP Filter Period Register
//-------------------------------------------------------------------------------------------
#ifndef __LANGUAGE_ASM__
/*!
 * @brief HW_CMP_FPR - CMP Filter Period Register (RW)
 *
 * Reset value: 0x00U
 *
 * The single 8-bit field spans the whole byte, so U and B.FILT_PER are
 * interchangeable here.
 */
typedef union _hw_cmp_fpr
{
uint8_t U; //!< Byte-wide access to the whole register.
struct _hw_cmp_fpr_bitfields
{
uint8_t FILT_PER : 8; //!< [7:0] Filter Sample Period
} B; //!< Bit-field access to individual fields.
} hw_cmp_fpr_t;
#endif
/*!
* @name Constants and macros for entire CMP_FPR register
*/
//@{
#define HW_CMP_FPR_ADDR(x) (REGS_CMP_BASE(x) + 0x2U)
#ifndef __LANGUAGE_ASM__
#define HW_CMP_FPR(x) (*(__IO hw_cmp_fpr_t *) HW_CMP_FPR_ADDR(x))
#define HW_CMP_FPR_RD(x) (HW_CMP_FPR(x).U)
#define HW_CMP_FPR_WR(x, v) (HW_CMP_FPR(x).U = (v))
#define HW_CMP_FPR_SET(x, v) (HW_CMP_FPR_WR(x, HW_CMP_FPR_RD(x) | (v)))
#define HW_CMP_FPR_CLR(x, v) (HW_CMP_FPR_WR(x, HW_CMP_FPR_RD(x) & ~(v)))
#define HW_CMP_FPR_TOG(x, v) (HW_CMP_FPR_WR(x, HW_CMP_FPR_RD(x) ^ (v)))
#endif
//@}
/*
* Constants & macros for individual CMP_FPR bitfields
*/
/*!
* @name Register CMP_FPR, field FILT_PER[7:0] (RW)
*
* When CR1[SE] is equal to zero, this field specifies the sampling period, in
* bus clock cycles, of the comparator output filter. Setting FILT_PER to 0x0
* disables the filter. Filter programming and latency details appear in the
* Functional Description. This field has no effect when CR1[SE] is equal to one. In that
* case, the external SAMPLE signal is used to determine the sampling period.
*/
//@{
#define BP_CMP_FPR_FILT_PER (0U) //!< Bit position for CMP_FPR_FILT_PER.
#define BM_CMP_FPR_FILT_PER (0xFFU) //!< Bit mask for CMP_FPR_FILT_PER.
#define BS_CMP_FPR_FILT_PER (8U) //!< Bit field size in bits for CMP_FPR_FILT_PER.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_FPR_FILT_PER field.
#define BR_CMP_FPR_FILT_PER(x) (HW_CMP_FPR(x).B.FILT_PER)
#endif
//! @brief Format value for bitfield CMP_FPR_FILT_PER.
#define BF_CMP_FPR_FILT_PER(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_FPR_FILT_PER), uint8_t) & BM_CMP_FPR_FILT_PER)
#ifndef __LANGUAGE_ASM__
//! @brief Set the FILT_PER field to a new value.
#define BW_CMP_FPR_FILT_PER(x, v) (HW_CMP_FPR_WR(x, (HW_CMP_FPR_RD(x) & ~BM_CMP_FPR_FILT_PER) | BF_CMP_FPR_FILT_PER(v)))
#endif
//@}
//-------------------------------------------------------------------------------------------
// HW_CMP_SCR - CMP Status and Control Register
//-------------------------------------------------------------------------------------------
#ifndef __LANGUAGE_ASM__
/*!
 * @brief HW_CMP_SCR - CMP Status and Control Register (RW)
 *
 * Reset value: 0x00U
 *
 * NOTE(review): CFF/CFR are write-1-to-clear per the field docs below, so
 * read-modify-write of this register via U can inadvertently clear pending
 * flags; bit-field allocation order is compiler-defined (assumed LSB-first).
 */
typedef union _hw_cmp_scr
{
uint8_t U; //!< Byte-wide access to the whole register.
struct _hw_cmp_scr_bitfields
{
uint8_t COUT : 1; //!< [0] Analog Comparator Output
uint8_t CFF : 1; //!< [1] Analog Comparator Flag Falling
uint8_t CFR : 1; //!< [2] Analog Comparator Flag Rising
uint8_t IEF : 1; //!< [3] Comparator Interrupt Enable Falling
uint8_t IER : 1; //!< [4] Comparator Interrupt Enable Rising
uint8_t RESERVED0 : 1; //!< [5]
uint8_t DMAEN : 1; //!< [6] DMA Enable Control
uint8_t RESERVED1 : 1; //!< [7]
} B; //!< Bit-field access to individual fields.
} hw_cmp_scr_t;
#endif
/*!
* @name Constants and macros for entire CMP_SCR register
*/
//@{
#define HW_CMP_SCR_ADDR(x) (REGS_CMP_BASE(x) + 0x3U)
#ifndef __LANGUAGE_ASM__
#define HW_CMP_SCR(x) (*(__IO hw_cmp_scr_t *) HW_CMP_SCR_ADDR(x))
#define HW_CMP_SCR_RD(x) (HW_CMP_SCR(x).U)
#define HW_CMP_SCR_WR(x, v) (HW_CMP_SCR(x).U = (v))
#define HW_CMP_SCR_SET(x, v) (HW_CMP_SCR_WR(x, HW_CMP_SCR_RD(x) | (v)))
#define HW_CMP_SCR_CLR(x, v) (HW_CMP_SCR_WR(x, HW_CMP_SCR_RD(x) & ~(v)))
#define HW_CMP_SCR_TOG(x, v) (HW_CMP_SCR_WR(x, HW_CMP_SCR_RD(x) ^ (v)))
#endif
//@}
/*
* Constants & macros for individual CMP_SCR bitfields
*/
/*!
* @name Register CMP_SCR, field COUT[0] (RO)
*
* Reading the COUT bit will return the current value of the analog comparator
* output. The register bit is reset to zero and will read as CR1[INV] when the
* Analog Comparator module is disabled (CR1[EN] = 0). Writes to this bit are
* ignored.
*/
//@{
#define BP_CMP_SCR_COUT (0U) //!< Bit position for CMP_SCR_COUT.
#define BM_CMP_SCR_COUT (0x01U) //!< Bit mask for CMP_SCR_COUT.
#define BS_CMP_SCR_COUT (1U) //!< Bit field size in bits for CMP_SCR_COUT.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_SCR_COUT field.
#define BR_CMP_SCR_COUT(x) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_COUT))
#endif
//@}
/*!
* @name Register CMP_SCR, field CFF[1] (W1C)
*
* During normal operation, the CFF bit is set when a falling edge on COUT has
* been detected. The CFF bit is cleared by writing a logic one to the bit. During
* Stop modes, CFF is level senstive.
*
* Values:
* - 0 - Falling edge on COUT has not been detected.
* - 1 - Falling edge on COUT has occurred.
*/
//@{
#define BP_CMP_SCR_CFF (1U) //!< Bit position for CMP_SCR_CFF.
#define BM_CMP_SCR_CFF (0x02U) //!< Bit mask for CMP_SCR_CFF.
#define BS_CMP_SCR_CFF (1U) //!< Bit field size in bits for CMP_SCR_CFF.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_SCR_CFF field.
#define BR_CMP_SCR_CFF(x) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_CFF))
#endif
//! @brief Format value for bitfield CMP_SCR_CFF.
#define BF_CMP_SCR_CFF(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_SCR_CFF), uint8_t) & BM_CMP_SCR_CFF)
#ifndef __LANGUAGE_ASM__
//! @brief Set the CFF field to a new value.
#define BW_CMP_SCR_CFF(x, v) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_CFF) = (v))
#endif
//@}
/*!
* @name Register CMP_SCR, field CFR[2] (W1C)
*
* During normal operation, the CFR bit is set when a rising edge on COUT has
* been detected. The CFR bit is cleared by writing a logic one to the bit. During
* Stop modes, CFR is level sensitive.
*
* Values:
* - 0 - Rising edge on COUT has not been detected.
* - 1 - Rising edge on COUT has occurred.
*/
//@{
#define BP_CMP_SCR_CFR (2U) //!< Bit position for CMP_SCR_CFR.
#define BM_CMP_SCR_CFR (0x04U) //!< Bit mask for CMP_SCR_CFR.
#define BS_CMP_SCR_CFR (1U) //!< Bit field size in bits for CMP_SCR_CFR.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_SCR_CFR field.
#define BR_CMP_SCR_CFR(x) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_CFR))
#endif
//! @brief Format value for bitfield CMP_SCR_CFR.
#define BF_CMP_SCR_CFR(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_SCR_CFR), uint8_t) & BM_CMP_SCR_CFR)
#ifndef __LANGUAGE_ASM__
//! @brief Set the CFR field to a new value.
// NOTE(review): BITBAND_ACCESS8 reaches the register through the Cortex-M
// bit-band alias region, so each BR_/BW_ macro below touches a single bit of
// CMP_SCR -- presumably as a single-bit atomic access; confirm against the
// device reference manual.
#define BW_CMP_SCR_CFR(x, v) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_CFR) = (v))
#endif
//@}
/*!
 * @name Register CMP_SCR, field IEF[3] (RW)
 *
 * The IEF bit enables the CFF interrupt from the CMP. When this bit is set, an
 * interrupt will be asserted when the CFF bit is set.
 *
 * Values:
 * - 0 - Interrupt disabled.
 * - 1 - Interrupt enabled.
 */
//@{
#define BP_CMP_SCR_IEF (3U) //!< Bit position for CMP_SCR_IEF.
#define BM_CMP_SCR_IEF (0x08U) //!< Bit mask for CMP_SCR_IEF.
#define BS_CMP_SCR_IEF (1U) //!< Bit field size in bits for CMP_SCR_IEF.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_SCR_IEF field.
#define BR_CMP_SCR_IEF(x) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_IEF))
#endif
//! @brief Format value for bitfield CMP_SCR_IEF.
#define BF_CMP_SCR_IEF(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_SCR_IEF), uint8_t) & BM_CMP_SCR_IEF)
#ifndef __LANGUAGE_ASM__
//! @brief Set the IEF field to a new value.
#define BW_CMP_SCR_IEF(x, v) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_IEF) = (v))
#endif
//@}
/*!
 * @name Register CMP_SCR, field IER[4] (RW)
 *
 * The IER bit enables the CFR interrupt from the CMP. When this bit is set, an
 * interrupt will be asserted when the CFR bit is set.
 *
 * Values:
 * - 0 - Interrupt disabled.
 * - 1 - Interrupt enabled.
 */
//@{
#define BP_CMP_SCR_IER (4U) //!< Bit position for CMP_SCR_IER.
#define BM_CMP_SCR_IER (0x10U) //!< Bit mask for CMP_SCR_IER.
#define BS_CMP_SCR_IER (1U) //!< Bit field size in bits for CMP_SCR_IER.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_SCR_IER field.
#define BR_CMP_SCR_IER(x) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_IER))
#endif
//! @brief Format value for bitfield CMP_SCR_IER.
#define BF_CMP_SCR_IER(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_SCR_IER), uint8_t) & BM_CMP_SCR_IER)
#ifndef __LANGUAGE_ASM__
//! @brief Set the IER field to a new value.
#define BW_CMP_SCR_IER(x, v) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_IER) = (v))
#endif
//@}
/*!
 * @name Register CMP_SCR, field DMAEN[6] (RW)
 *
 * The DMAEN bit enables the DMA transfer triggered from the CMP module. When
 * this bit is set, a DMA request is asserted when the CFR or CFF bit is set.
 *
 * Values:
 * - 0 - DMA disabled.
 * - 1 - DMA enabled.
 */
//@{
#define BP_CMP_SCR_DMAEN (6U) //!< Bit position for CMP_SCR_DMAEN.
#define BM_CMP_SCR_DMAEN (0x40U) //!< Bit mask for CMP_SCR_DMAEN.
#define BS_CMP_SCR_DMAEN (1U) //!< Bit field size in bits for CMP_SCR_DMAEN.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_SCR_DMAEN field.
#define BR_CMP_SCR_DMAEN(x) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_DMAEN))
#endif
//! @brief Format value for bitfield CMP_SCR_DMAEN.
#define BF_CMP_SCR_DMAEN(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_SCR_DMAEN), uint8_t) & BM_CMP_SCR_DMAEN)
#ifndef __LANGUAGE_ASM__
//! @brief Set the DMAEN field to a new value.
#define BW_CMP_SCR_DMAEN(x, v) (BITBAND_ACCESS8(HW_CMP_SCR_ADDR(x), BP_CMP_SCR_DMAEN) = (v))
#endif
//@}
//-------------------------------------------------------------------------------------------
// HW_CMP_DACCR - DAC Control Register
//-------------------------------------------------------------------------------------------
#ifndef __LANGUAGE_ASM__
/*!
 * @brief HW_CMP_DACCR - DAC Control Register (RW)
 *
 * Reset value: 0x00U
 */
typedef union _hw_cmp_daccr
{
    uint8_t U;
    struct _hw_cmp_daccr_bitfields
    {
        uint8_t VOSEL : 6; //!< [5:0] DAC Output Voltage Select
        uint8_t VRSEL : 1; //!< [6] Supply Voltage Reference Source Select
        uint8_t DACEN : 1; //!< [7] DAC Enable
    } B;
} hw_cmp_daccr_t;
#endif
/*!
 * @name Constants and macros for entire CMP_DACCR register
 */
//@{
#define HW_CMP_DACCR_ADDR(x) (REGS_CMP_BASE(x) + 0x4U)
#ifndef __LANGUAGE_ASM__
#define HW_CMP_DACCR(x) (*(__IO hw_cmp_daccr_t *) HW_CMP_DACCR_ADDR(x))
#define HW_CMP_DACCR_RD(x) (HW_CMP_DACCR(x).U)
#define HW_CMP_DACCR_WR(x, v) (HW_CMP_DACCR(x).U = (v))
// NOTE(review): SET/CLR/TOG expand to a separate read followed by a write
// (read-modify-write, not a single atomic access).
#define HW_CMP_DACCR_SET(x, v) (HW_CMP_DACCR_WR(x, HW_CMP_DACCR_RD(x) | (v)))
#define HW_CMP_DACCR_CLR(x, v) (HW_CMP_DACCR_WR(x, HW_CMP_DACCR_RD(x) & ~(v)))
#define HW_CMP_DACCR_TOG(x, v) (HW_CMP_DACCR_WR(x, HW_CMP_DACCR_RD(x) ^ (v)))
#endif
//@}
/*
 * Constants & macros for individual CMP_DACCR bitfields
 */
/*!
 * @name Register CMP_DACCR, field VOSEL[5:0] (RW)
 *
 * This field selects an output voltage from one of 64 distinct levels. DACO =
 * (Vin/64) * (VOSEL[5:0] + 1), so the DACO range is from Vin/64 to Vin.
 */
//@{
#define BP_CMP_DACCR_VOSEL (0U) //!< Bit position for CMP_DACCR_VOSEL.
#define BM_CMP_DACCR_VOSEL (0x3FU) //!< Bit mask for CMP_DACCR_VOSEL.
#define BS_CMP_DACCR_VOSEL (6U) //!< Bit field size in bits for CMP_DACCR_VOSEL.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_DACCR_VOSEL field.
#define BR_CMP_DACCR_VOSEL(x) (HW_CMP_DACCR(x).B.VOSEL)
#endif
//! @brief Format value for bitfield CMP_DACCR_VOSEL.
#define BF_CMP_DACCR_VOSEL(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_DACCR_VOSEL), uint8_t) & BM_CMP_DACCR_VOSEL)
#ifndef __LANGUAGE_ASM__
//! @brief Set the VOSEL field to a new value.
#define BW_CMP_DACCR_VOSEL(x, v) (HW_CMP_DACCR_WR(x, (HW_CMP_DACCR_RD(x) & ~BM_CMP_DACCR_VOSEL) | BF_CMP_DACCR_VOSEL(v)))
#endif
//@}
/*!
 * @name Register CMP_DACCR, field VRSEL[6] (RW)
 *
 * Values:
 * - 0 - Vin1 is selected as resistor ladder network supply reference Vin.
 * - 1 - Vin2 is selected as resistor ladder network supply reference Vin.
 */
//@{
#define BP_CMP_DACCR_VRSEL (6U) //!< Bit position for CMP_DACCR_VRSEL.
#define BM_CMP_DACCR_VRSEL (0x40U) //!< Bit mask for CMP_DACCR_VRSEL.
#define BS_CMP_DACCR_VRSEL (1U) //!< Bit field size in bits for CMP_DACCR_VRSEL.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_DACCR_VRSEL field.
#define BR_CMP_DACCR_VRSEL(x) (BITBAND_ACCESS8(HW_CMP_DACCR_ADDR(x), BP_CMP_DACCR_VRSEL))
#endif
//! @brief Format value for bitfield CMP_DACCR_VRSEL.
#define BF_CMP_DACCR_VRSEL(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_DACCR_VRSEL), uint8_t) & BM_CMP_DACCR_VRSEL)
#ifndef __LANGUAGE_ASM__
//! @brief Set the VRSEL field to a new value.
#define BW_CMP_DACCR_VRSEL(x, v) (BITBAND_ACCESS8(HW_CMP_DACCR_ADDR(x), BP_CMP_DACCR_VRSEL) = (v))
#endif
//@}
/*!
 * @name Register CMP_DACCR, field DACEN[7] (RW)
 *
 * This bit is used to enable the DAC. When the DAC is disabled, it is powered
 * down to conserve power.
 *
 * Values:
 * - 0 - DAC is disabled.
 * - 1 - DAC is enabled.
 */
//@{
#define BP_CMP_DACCR_DACEN (7U) //!< Bit position for CMP_DACCR_DACEN.
#define BM_CMP_DACCR_DACEN (0x80U) //!< Bit mask for CMP_DACCR_DACEN.
#define BS_CMP_DACCR_DACEN (1U) //!< Bit field size in bits for CMP_DACCR_DACEN.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_DACCR_DACEN field.
#define BR_CMP_DACCR_DACEN(x) (BITBAND_ACCESS8(HW_CMP_DACCR_ADDR(x), BP_CMP_DACCR_DACEN))
#endif
//! @brief Format value for bitfield CMP_DACCR_DACEN.
#define BF_CMP_DACCR_DACEN(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_DACCR_DACEN), uint8_t) & BM_CMP_DACCR_DACEN)
#ifndef __LANGUAGE_ASM__
//! @brief Set the DACEN field to a new value.
#define BW_CMP_DACCR_DACEN(x, v) (BITBAND_ACCESS8(HW_CMP_DACCR_ADDR(x), BP_CMP_DACCR_DACEN) = (v))
#endif
//@}
//-------------------------------------------------------------------------------------------
// HW_CMP_MUXCR - MUX Control Register
//-------------------------------------------------------------------------------------------
#ifndef __LANGUAGE_ASM__
/*!
 * @brief HW_CMP_MUXCR - MUX Control Register (RW)
 *
 * Reset value: 0x00U
 */
typedef union _hw_cmp_muxcr
{
    uint8_t U;
    struct _hw_cmp_muxcr_bitfields
    {
        uint8_t MSEL : 3; //!< [2:0] Minus Input MUX Control
        uint8_t PSEL : 3; //!< [5:3] Plus Input MUX Control
        uint8_t RESERVED0 : 2; //!< [7:6]
    } B;
} hw_cmp_muxcr_t;
#endif
/*!
 * @name Constants and macros for entire CMP_MUXCR register
 */
//@{
#define HW_CMP_MUXCR_ADDR(x) (REGS_CMP_BASE(x) + 0x5U)
#ifndef __LANGUAGE_ASM__
#define HW_CMP_MUXCR(x) (*(__IO hw_cmp_muxcr_t *) HW_CMP_MUXCR_ADDR(x))
#define HW_CMP_MUXCR_RD(x) (HW_CMP_MUXCR(x).U)
#define HW_CMP_MUXCR_WR(x, v) (HW_CMP_MUXCR(x).U = (v))
// NOTE(review): SET/CLR/TOG are read-modify-write sequences, not atomic.
#define HW_CMP_MUXCR_SET(x, v) (HW_CMP_MUXCR_WR(x, HW_CMP_MUXCR_RD(x) | (v)))
#define HW_CMP_MUXCR_CLR(x, v) (HW_CMP_MUXCR_WR(x, HW_CMP_MUXCR_RD(x) & ~(v)))
#define HW_CMP_MUXCR_TOG(x, v) (HW_CMP_MUXCR_WR(x, HW_CMP_MUXCR_RD(x) ^ (v)))
#endif
//@}
/*
 * Constants & macros for individual CMP_MUXCR bitfields
 */
/*!
 * @name Register CMP_MUXCR, field MSEL[2:0] (RW)
 *
 * Determines which input is selected for the minus input of the comparator. For
 * INx inputs, refer to CMP, DAC and ANMUX Blocks Diagram. When an inappropriate
 * operation selects the same input for both MUXes, the comparator automatically
 * shuts down to prevent itself from becoming a noise generator.
 *
 * Values:
 * - 000 - IN0
 * - 001 - IN1
 * - 010 - IN2
 * - 011 - IN3
 * - 100 - IN4
 * - 101 - IN5
 * - 110 - IN6
 * - 111 - IN7
 */
//@{
#define BP_CMP_MUXCR_MSEL (0U) //!< Bit position for CMP_MUXCR_MSEL.
#define BM_CMP_MUXCR_MSEL (0x07U) //!< Bit mask for CMP_MUXCR_MSEL.
#define BS_CMP_MUXCR_MSEL (3U) //!< Bit field size in bits for CMP_MUXCR_MSEL.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_MUXCR_MSEL field.
#define BR_CMP_MUXCR_MSEL(x) (HW_CMP_MUXCR(x).B.MSEL)
#endif
//! @brief Format value for bitfield CMP_MUXCR_MSEL.
#define BF_CMP_MUXCR_MSEL(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_MUXCR_MSEL), uint8_t) & BM_CMP_MUXCR_MSEL)
#ifndef __LANGUAGE_ASM__
//! @brief Set the MSEL field to a new value.
#define BW_CMP_MUXCR_MSEL(x, v) (HW_CMP_MUXCR_WR(x, (HW_CMP_MUXCR_RD(x) & ~BM_CMP_MUXCR_MSEL) | BF_CMP_MUXCR_MSEL(v)))
#endif
//@}
/*!
 * @name Register CMP_MUXCR, field PSEL[5:3] (RW)
 *
 * Determines which input is selected for the plus input of the comparator. For
 * INx inputs, refer to CMP, DAC and ANMUX Blocks Diagram. When an inappropriate
 * operation selects the same input for both MUXes, the comparator automatically
 * shuts down to prevent itself from becoming a noise generator.
 *
 * Values:
 * - 000 - IN0
 * - 001 - IN1
 * - 010 - IN2
 * - 011 - IN3
 * - 100 - IN4
 * - 101 - IN5
 * - 110 - IN6
 * - 111 - IN7
 */
//@{
#define BP_CMP_MUXCR_PSEL (3U) //!< Bit position for CMP_MUXCR_PSEL.
#define BM_CMP_MUXCR_PSEL (0x38U) //!< Bit mask for CMP_MUXCR_PSEL.
#define BS_CMP_MUXCR_PSEL (3U) //!< Bit field size in bits for CMP_MUXCR_PSEL.
#ifndef __LANGUAGE_ASM__
//! @brief Read current value of the CMP_MUXCR_PSEL field.
#define BR_CMP_MUXCR_PSEL(x) (HW_CMP_MUXCR(x).B.PSEL)
#endif
//! @brief Format value for bitfield CMP_MUXCR_PSEL.
#define BF_CMP_MUXCR_PSEL(v) (__REG_VALUE_TYPE((__REG_VALUE_TYPE((v), uint8_t) << BP_CMP_MUXCR_PSEL), uint8_t) & BM_CMP_MUXCR_PSEL)
#ifndef __LANGUAGE_ASM__
//! @brief Set the PSEL field to a new value.
#define BW_CMP_MUXCR_PSEL(x, v) (HW_CMP_MUXCR_WR(x, (HW_CMP_MUXCR_RD(x) & ~BM_CMP_MUXCR_PSEL) | BF_CMP_MUXCR_PSEL(v)))
#endif
//@}
//-------------------------------------------------------------------------------------------
// hw_cmp_t - module struct
//-------------------------------------------------------------------------------------------
/*!
 * @brief All CMP module registers.
 */
#ifndef __LANGUAGE_ASM__
// pack(1) guarantees the six byte-wide registers sit at consecutive offsets
// 0x0..0x5, matching the HW_CMP_*_ADDR(x) macros above.
#pragma pack(1)
typedef struct _hw_cmp
{
    __IO hw_cmp_cr0_t CR0; //!< [0x0] CMP Control Register 0
    __IO hw_cmp_cr1_t CR1; //!< [0x1] CMP Control Register 1
    __IO hw_cmp_fpr_t FPR; //!< [0x2] CMP Filter Period Register
    __IO hw_cmp_scr_t SCR; //!< [0x3] CMP Status and Control Register
    __IO hw_cmp_daccr_t DACCR; //!< [0x4] DAC Control Register
    __IO hw_cmp_muxcr_t MUXCR; //!< [0x5] MUX Control Register
} hw_cmp_t;
#pragma pack()
//! @brief Macro to access all CMP registers.
//! @param x CMP instance number.
//! @return Reference (not a pointer) to the registers struct. To get a pointer to the struct,
//! use the '&' operator, like <code>&HW_CMP(0)</code>.
#define HW_CMP(x) (*(hw_cmp_t *) REGS_CMP_BASE(x))
#endif
#endif // __HW_CMP_REGISTERS_H__
// v22/130726/0.9
// EOF
| theposey/CMSIS-DAP | shared/cmsis/TARGET_Freescale/include/device/MK70F12/MK70F12_cmp.h | C | apache-2.0 | 37,287 |
/* $Xorg: XrmI.h,v 1.4 2001/02/09 02:03:39 xorgcvs Exp $ */
/*
Copyright 1990, 1998 The Open Group
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall
not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization
from The Open Group.
*/
/*
* Macros to abstract out reading the file, and getting its size.
*
* You may need to redefine these for various other operating systems.
*/
#include <X11/Xos.h>
#include <sys/stat.h>
/*
 * GetSizeOfFile -- store the size in bytes of the file open on `fd` into
 * `size`, or -1 if fstat() fails.
 *
 * Wrapped in do { ... } while (0) so the macro expands to a single statement:
 * it is now safe inside an unbraced if/else, and the semicolon at the call
 * site parses naturally (the old bare-brace form expanded to `{...};`, which
 * breaks `if (c) GetSizeOfFile(fd, sz); else ...`). `size` is parenthesized
 * so expression arguments assign correctly.
 */
#define GetSizeOfFile(fd,size)                          \
    do {                                                \
        struct stat status_buffer;                      \
        if ( (fstat((fd), &status_buffer)) == -1 )      \
            (size) = -1;                                \
        else                                            \
            (size) = status_buffer.st_size;             \
    } while (0)
| easion/os_sdk | xlibs/X11/XrmI.h | C | apache-2.0 | 1,795 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.notification.email;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.watcher.crypto.CryptoService;
import org.elasticsearch.xpack.watcher.notification.NotificationService;
import javax.mail.MessagingException;
import java.util.Arrays;
import java.util.List;
/**
 * A component to store email credentials and handle sending email notifications.
 *
 * <p>Per-account settings live under the {@code xpack.notification.email.account.<name>.}
 * prefix and are all declared {@code Dynamic}, so accounts can be reconfigured at
 * runtime through a cluster settings update.
 */
public class EmailService extends NotificationService<Account> {

    // Name of the account to use when a caller does not specify one.
    private static final Setting<String> SETTING_DEFAULT_ACCOUNT =
        Setting.simpleString("xpack.notification.email.default_account", Property.Dynamic, Property.NodeScope);

    private static final Setting.AffixSetting<String> SETTING_PROFILE =
        Setting.affixKeySetting("xpack.notification.email.account.", "profile",
            (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Settings> SETTING_EMAIL_DEFAULTS =
        Setting.affixKeySetting("xpack.notification.email.account.", "email_defaults",
            (key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope));

    // settings that can be configured as smtp properties
    private static final Setting.AffixSetting<Boolean> SETTING_SMTP_AUTH =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.auth",
            (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Boolean> SETTING_SMTP_STARTTLS_ENABLE =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.starttls.enable",
            (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Boolean> SETTING_SMTP_STARTTLS_REQUIRED =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.starttls.required",
            (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<String> SETTING_SMTP_HOST =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.host",
            (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Integer> SETTING_SMTP_PORT =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.port",
            (key) -> Setting.intSetting(key, 587, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<String> SETTING_SMTP_USER =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.user",
            (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));

    // Filtered so the plain-text password never shows up in settings APIs.
    private static final Setting.AffixSetting<String> SETTING_SMTP_PASSWORD =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.password",
            (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));

    // Keystore-backed alternative to SETTING_SMTP_PASSWORD.
    private static final Setting.AffixSetting<SecureString> SETTING_SECURE_PASSWORD =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.secure_password",
            (key) -> SecureSetting.secureString(key, null));

    private static final Setting.AffixSetting<TimeValue> SETTING_SMTP_TIMEOUT =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.timeout",
            (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<TimeValue> SETTING_SMTP_CONNECTION_TIMEOUT =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.connection_timeout",
            (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<TimeValue> SETTING_SMTP_WRITE_TIMEOUT =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.write_timeout",
            (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<String> SETTING_SMTP_LOCAL_ADDRESS =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.local_address",
            (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<String> SETTING_SMTP_SSL_TRUST_ADDRESS =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.ssl.trust",
            (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Integer> SETTING_SMTP_LOCAL_PORT =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.local_port",
            (key) -> Setting.intSetting(key, 25, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Boolean> SETTING_SMTP_SEND_PARTIAL =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.send_partial",
            (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));

    private static final Setting.AffixSetting<Boolean> SETTING_SMTP_WAIT_ON_QUIT =
        Setting.affixKeySetting("xpack.notification.email.account.", "smtp.wait_on_quit",
            (key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope));

    private final CryptoService cryptoService;

    /**
     * @param settings        node settings used for the initial account load
     * @param cryptoService   passed through to each {@link Account} — presumably used to
     *                        handle encrypted/secure credential values; may be {@code null}
     * @param clusterSettings registry in which all dynamic account settings are registered
     *                        (the no-op consumers exist so that updates are accepted and
     *                        setting changes get logged)
     */
    public EmailService(Settings settings, @Nullable CryptoService cryptoService, ClusterSettings clusterSettings) {
        super(settings, "email", clusterSettings, EmailService.getSettings());
        this.cryptoService = cryptoService;
        // ensure logging of setting changes
        clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_EMAIL_DEFAULTS, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_AUTH, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_STARTTLS_ENABLE, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_STARTTLS_REQUIRED, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_HOST, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_PORT, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_USER, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_PASSWORD, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SECURE_PASSWORD, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_TIMEOUT, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_CONNECTION_TIMEOUT, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_WRITE_TIMEOUT, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_SSL_TRUST_ADDRESS, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_LOCAL_ADDRESS, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_LOCAL_PORT, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_SEND_PARTIAL, (s, o) -> {}, (s, o) -> {});
        clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_WAIT_ON_QUIT, (s, o) -> {}, (s, o) -> {});
        // do an initial load
        reload(settings);
    }

    /** Builds a new {@link Account} from its per-account settings (invoked on each (re)load). */
    @Override
    protected Account createAccount(String name, Settings accountSettings) {
        Account.Config config = new Account.Config(name, accountSettings);
        return new Account(config, cryptoService, logger);
    }

    /**
     * Sends {@code email} via the named account.
     *
     * @throws IllegalArgumentException if no account with {@code accountName} exists
     * @throws MessagingException       if the underlying account fails to deliver the email
     */
    public EmailSent send(Email email, Authentication auth, Profile profile, String accountName) throws MessagingException {
        Account account = getAccount(accountName);
        if (account == null) {
            throw new IllegalArgumentException("failed to send email with subject [" + email.subject() + "] via account [" + accountName
                    + "]. account does not exist");
        }
        return send(email, auth, profile, account);
    }

    // Delivers via the resolved account, wrapping any failure so the message names the account.
    private EmailSent send(Email email, Authentication auth, Profile profile, Account account) throws MessagingException {
        assert account != null;
        try {
            // the account may return a modified copy of the email (e.g. with defaults applied)
            email = account.send(email, auth, profile);
        } catch (MessagingException me) {
            throw new MessagingException("failed to send email with subject [" + email.subject() + "] via account [" + account.name() +
                    "]", me);
        }
        return new EmailSent(account.name(), email);
    }

    /** Result of a successful send: the account used and the email as it was actually sent. */
    public static class EmailSent {

        private final String account;
        private final Email email;

        public EmailSent(String account, Email email) {
            this.account = account;
            this.email = email;
        }

        /** @return name of the account that delivered the email */
        public String account() {
            return account;
        }

        /** @return the email as it was sent (possibly modified by the account) */
        public Email email() {
            return email;
        }
    }

    /** @return every setting declared by this service, for registration with the node. */
    public static List<Setting<?>> getSettings() {
        return Arrays.asList(SETTING_DEFAULT_ACCOUNT, SETTING_PROFILE, SETTING_EMAIL_DEFAULTS, SETTING_SMTP_AUTH, SETTING_SMTP_HOST,
                SETTING_SMTP_PASSWORD, SETTING_SMTP_PORT, SETTING_SMTP_STARTTLS_ENABLE, SETTING_SMTP_USER, SETTING_SMTP_STARTTLS_REQUIRED,
                SETTING_SMTP_TIMEOUT, SETTING_SMTP_CONNECTION_TIMEOUT, SETTING_SMTP_WRITE_TIMEOUT, SETTING_SMTP_LOCAL_ADDRESS,
                SETTING_SMTP_LOCAL_PORT, SETTING_SMTP_SEND_PARTIAL, SETTING_SMTP_WAIT_ON_QUIT, SETTING_SMTP_SSL_TRUST_ADDRESS,
                SETTING_SECURE_PASSWORD);
    }
}
| gfyoung/elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java | Java | apache-2.0 | 10,810 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Created by IntelliJ IDEA.
* User: max
* Date: Jan 26, 2002
* Time: 10:48:52 PM
* To change template for new class use
* Code Style | Class Templates options (Tools | IDE Options).
*/
package com.intellij.codeInspection.dataFlow.instructions;
import com.intellij.codeInspection.dataFlow.*;
import com.intellij.codeInspection.dataFlow.value.DfaValue;
import com.intellij.psi.*;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * Data-flow IR instruction representing either a real method/constructor call or a
 * boxing/unboxing/cast conversion (see {@link MethodType}).
 */
public class MethodCallInstruction extends Instruction {
    @Nullable private final PsiCall myCall;
    @Nullable private final PsiType myType;                // result type of the call/conversion, if known
    @NotNull private final PsiExpression[] myArgs;
    private final boolean myShouldFlushFields;             // whether the call may mutate fields tracked by the analysis
    @NotNull private final PsiElement myContext;
    @Nullable private final PsiMethod myTargetMethod;      // resolved target; null for conversions or unresolved calls
    private final List<MethodContract> myContracts;
    private final MethodType myMethodType;
    @Nullable private final DfaValue myPrecalculatedReturnValue;
    private final boolean myOfNullable;                    // true if this is a call to Optional.ofNullable
    private final boolean myVarArgCall;
    private final Map<PsiExpression, Nullness> myArgRequiredNullability;
    // Track whether the Optional.ofNullable argument was always null / always non-null
    // across all states, to later report a redundant ofNullable (see isOptionalAlways*Problem).
    private boolean myOnlyNullArgs = true;
    private boolean myOnlyNotNullArgs = true;

    public enum MethodType {
        BOXING, UNBOXING, REGULAR_METHOD_CALL, CAST
    }

    /**
     * Creates a pseudo-call for a boxing, unboxing or cast conversion of {@code context};
     * no target method, arguments or contracts are involved.
     */
    public MethodCallInstruction(@NotNull PsiExpression context, MethodType methodType, @Nullable PsiType resultType) {
        myContext = context;
        myContracts = Collections.emptyList();
        myMethodType = methodType;
        myCall = null;
        myArgs = PsiExpression.EMPTY_ARRAY;
        myType = resultType;
        myShouldFlushFields = false;
        myPrecalculatedReturnValue = null;
        myTargetMethod = null;
        myVarArgCall = false;
        myOfNullable = false;
        myArgRequiredNullability = Collections.emptyMap();
    }

    /**
     * Creates an instruction for a real method/constructor call, resolving the target
     * and precomputing vararg-ness and per-argument required nullability.
     *
     * @param precalculatedReturnValue value to use as the call result instead of computing one; may be null
     * @param contracts                contracts declared/inferred for the target method
     */
    public MethodCallInstruction(@NotNull PsiCall call, @Nullable DfaValue precalculatedReturnValue, List<MethodContract> contracts) {
        myContext = call;
        myContracts = contracts;
        myMethodType = MethodType.REGULAR_METHOD_CALL;
        myCall = call;
        final PsiExpressionList argList = call.getArgumentList();
        myArgs = argList != null ? argList.getExpressions() : PsiExpression.EMPTY_ARRAY;
        myType = myCall instanceof PsiCallExpression ? ((PsiCallExpression)myCall).getType() : null;
        JavaResolveResult result = call.resolveMethodGenerics();
        myTargetMethod = (PsiMethod)result.getElement();
        PsiSubstitutor substitutor = result.getSubstitutor();
        if (argList != null && myTargetMethod != null) {
            PsiParameter[] parameters = myTargetMethod.getParameterList().getParameters();
            myVarArgCall = isVarArgCall(myTargetMethod, substitutor, myArgs, parameters);
            myArgRequiredNullability = calcArgRequiredNullability(substitutor, parameters);
        } else {
            myVarArgCall = false;
            myArgRequiredNullability = Collections.emptyMap();
        }
        // array creation (new T[...]) and pure calls cannot change tracked field values
        myShouldFlushFields = !(call instanceof PsiNewExpression && myType != null && myType.getArrayDimensions() > 0) && !isPureCall();
        myPrecalculatedReturnValue = precalculatedReturnValue;
        myOfNullable = call instanceof PsiMethodCallExpression && DfaOptionalSupport.resolveOfNullable((PsiMethodCallExpression)call) != null;
    }

    // Maps each fixed (non-vararg) argument to the nullability required by its
    // substituted parameter type.
    private Map<PsiExpression, Nullness> calcArgRequiredNullability(PsiSubstitutor substitutor, PsiParameter[] parameters) {
        int checkedCount = Math.min(myArgs.length, parameters.length) - (myVarArgCall ? 1 : 0);
        Map<PsiExpression, Nullness> map = ContainerUtil.newHashMap();
        for (int i = 0; i < checkedCount; i++) {
            map.put(myArgs[i], DfaPsiUtil.getElementNullability(substitutor.substitute(parameters[i].getType()), parameters[i]));
        }
        return map;
    }

    /**
     * @return true if {@code method} is a varargs method invoked in vararg form: either more
     * arguments than parameters, or the same count but the last argument not assignable to
     * the (substituted) last parameter type (i.e. it is a vararg element, not the array itself).
     */
    public static boolean isVarArgCall(PsiMethod method, PsiSubstitutor substitutor, PsiExpression[] args, PsiParameter[] parameters) {
        if (!method.isVarArgs()) {
            return false;
        }
        int argCount = args.length;
        int paramCount = parameters.length;
        if (argCount > paramCount) {
            return true;
        }
        if (paramCount > 0 && argCount == paramCount) {
            PsiType lastArgType = args[argCount - 1].getType();
            if (lastArgType != null && !substitutor.substitute(parameters[paramCount - 1].getType()).isAssignableFrom(lastArgType)) {
                return true;
            }
        }
        return false;
    }

    private boolean isPureCall() {
        if (myTargetMethod == null) return false;
        return ControlFlowAnalyzer.isPure(myTargetMethod);
    }

    @Nullable
    public PsiType getResultType() {
        return myType;
    }

    @NotNull
    public PsiExpression[] getArgs() {
        return myArgs;
    }

    public MethodType getMethodType() {
        return myMethodType;
    }

    /** @return whether the analysis must forget known field values after this call. */
    public boolean shouldFlushFields() {
        return myShouldFlushFields;
    }

    @Nullable
    public PsiMethod getTargetMethod() {
        return myTargetMethod;
    }

    public boolean isVarArgCall() {
        return myVarArgCall;
    }

    /** @return nullability required for {@code arg} by its parameter type, or null if not tracked. */
    @Nullable
    public Nullness getArgRequiredNullability(@NotNull PsiExpression arg) {
        return myArgRequiredNullability.get(arg);
    }

    public List<MethodContract> getContracts() {
        return myContracts;
    }

    @Override
    public DfaInstructionState[] accept(DataFlowRunner runner, DfaMemoryState stateBefore, InstructionVisitor visitor) {
        return visitor.visitMethodCall(this, runner, stateBefore);
    }

    @Nullable
    public PsiCall getCallExpression() {
        return myCall;
    }

    @NotNull
    public PsiElement getContext() {
        return myContext;
    }

    @Nullable
    public DfaValue getPrecalculatedReturnValue() {
        return myPrecalculatedReturnValue;
    }

    public String toString() {
        return myMethodType == MethodType.UNBOXING
               ? "UNBOX"
               : myMethodType == MethodType.BOXING
                 ? "BOX" :
               "CALL_METHOD: " + (myCall == null ? "null" : myCall.getText());
    }

    /**
     * Records the observed nullness of the Optional.ofNullable argument in {@code memState};
     * no-op (returns false) unless this instruction is an ofNullable call.
     *
     * @return true if this call is Optional.ofNullable and the observation was recorded
     */
    public boolean updateOfNullable(DfaMemoryState memState, DfaValue arg) {
        if (!myOfNullable) return false;
        if (!memState.isNotNull(arg)) {
            myOnlyNotNullArgs = false;
        }
        if (!memState.isNull(arg)) {
            myOnlyNullArgs = false;
        }
        return true;
    }

    /** @return true if every observed ofNullable argument was known to be null. */
    public boolean isOptionalAlwaysNullProblem() {
        return myOfNullable && myOnlyNullArgs;
    }

    /** @return true if every observed ofNullable argument was known to be non-null. */
    public boolean isOptionalAlwaysNotNullProblem() {
        return myOfNullable && myOnlyNotNullArgs;
    }
}
| idea4bsd/idea4bsd | java/java-analysis-impl/src/com/intellij/codeInspection/dataFlow/instructions/MethodCallInstruction.java | Java | apache-2.0 | 7,013 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Synchronous channels/ports
///
/// This channel implementation differs significantly from the asynchronous
/// implementations found next to it (oneshot/stream/share). This is an
/// implementation of a synchronous, bounded buffer channel.
///
/// Each channel is created with some amount of backing buffer, and sends will
/// *block* until buffer space becomes available. A buffer size of 0 is valid,
/// which means that every successful send is paired with a successful recv.
///
/// This flavor of channels defines a new `send_opt` method for channels which
/// is the method by which a message is sent but the task does not panic if it
/// cannot be delivered.
///
/// Another major difference is that send() will *always* return back the data
/// if it couldn't be sent. This is because it is deterministically known when
/// the data is received and when it is not received.
///
/// Implementation-wise, it can all be summed up with "use a mutex plus some
/// logic". The mutex used here is an OS native mutex, meaning that no user code
/// is run inside of the mutex (to prevent context switching). This
/// implementation shares almost all code for the buffered and unbuffered cases
/// of a synchronous channel. There are a few branches for the unbuffered case,
/// but they're mostly just relevant to blocking senders.
use core::prelude::*;
pub use self::Failure::*;
use self::Blocker::*;
use vec::Vec;
use core::mem;
use core::ptr;
use sync::atomic::{Ordering, AtomicUsize};
use sync::mpsc::blocking::{self, WaitToken, SignalToken};
use sync::mpsc::select::StartResult::{self, Installed, Abort};
use sync::{Mutex, MutexGuard};
/// Shared state of one synchronous (bounded) channel; see the module comment.
pub struct Packet<T> {
    /// Only field outside of the mutex. Just done for kicks, but mainly because
    /// the other shared channel already had the code implemented
    channels: AtomicUsize,
    lock: Mutex<State<T>>,
}

// SAFETY(review): all access to the inner `State` goes through `lock` and
// `channels` is atomic, which is presumably why `T: Send` suffices here
// (no `T: Sync` needed) -- confirm.
unsafe impl<T: Send> Send for Packet<T> { }

unsafe impl<T: Send> Sync for Packet<T> { }
/// Everything protected by the channel's mutex.
struct State<T> {
    disconnected: bool, // Is the channel disconnected yet?
    queue: Queue,       // queue of senders waiting to send data
    blocker: Blocker,   // currently blocked task on this channel
    buf: Buffer<T>,     // storage for buffered messages
    cap: usize,         // capacity of this channel

    /// A curious flag used to indicate whether a sender failed or succeeded in
    /// blocking. This is used to transmit information back to the task that it
    /// must dequeue its message from the buffer because it was not received.
    /// This is only relevant in the 0-buffer case. This obviously cannot be
    /// safely constructed, but it's guaranteed to always have a valid pointer
    /// value.
    canceled: Option<&'static mut bool>,
}

unsafe impl<T: Send> Send for State<T> {}
/// Possible flavors of threads who can be blocked on this channel.
/// The `SignalToken` payload is the handle used to wake the parked thread.
enum Blocker {
    BlockedSender(SignalToken),
    BlockedReceiver(SignalToken),
    NoneBlocked
}
/// Simple queue for threading tasks together. Nodes are stack-allocated, so
/// this structure is not safe at all
struct Queue {
    head: *mut Node,
    tail: *mut Node,
}

/// One waiting sender; lives on that sender's stack (see `acquire_send_slot`),
/// so the raw `next` pointers are only valid while the owning frame is alive.
struct Node {
    token: Option<SignalToken>,
    next: *mut Node,
}

unsafe impl Send for Node {}
/// A simple ring-buffer
struct Buffer<T> {
    buf: Vec<Option<T>>,
    start: usize, // index of the oldest element -- presumably; enqueue/dequeue not visible here
    size: usize,  // number of occupied slots
}
/// Reasons a receive on this channel can fail.
#[derive(Debug)]
pub enum Failure {
    Empty,        // no message available (non-blocking receive)
    Disconnected, // all senders/receivers on the other side are gone
}
/// Atomically blocks the current thread, installing it into the state's
/// `blocker` slot (via `f`) while still holding `lock`, then unlocking the
/// mutex for the duration of the wait. Re-locks the mutex upon returning.
///
/// Panics (via `unreachable!`) if another thread is already parked in the
/// `blocker` slot -- the channel supports at most one blocked thread there.
fn wait<'a, 'b, T>(lock: &'a Mutex<State<T>>,
                   mut guard: MutexGuard<'b, State<T>>,
                   f: fn(SignalToken) -> Blocker)
                   -> MutexGuard<'a, State<T>>
{
    let (wait_token, signal_token) = blocking::tokens();
    match mem::replace(&mut guard.blocker, f(signal_token)) {
        NoneBlocked => {}
        _ => unreachable!(),
    }
    drop(guard);         // unlock
    wait_token.wait();   // block
    lock.lock().unwrap() // relock
}
/// Wakes up a thread, dropping the lock at the correct time
/// (consumes `guard` so the signal can never be sent while holding the mutex).
fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) {
    // We need to be careful to wake up the waiting task *outside* of the mutex
    // in case it incurs a context switch.
    drop(guard);
    token.signal();
}
impl<T> Packet<T> {
    /// Creates channel state with capacity `cap`. `cap == 0` is a rendezvous
    /// channel; the single buffer slot below is only a hand-off slot.
    pub fn new(cap: usize) -> Packet<T> {
        Packet {
            channels: AtomicUsize::new(1),
            lock: Mutex::new(State {
                disconnected: false,
                blocker: NoneBlocked,
                cap: cap,
                canceled: None,
                queue: Queue {
                    head: ptr::null_mut(),
                    tail: ptr::null_mut(),
                },
                buf: Buffer {
                    // A zero-capacity channel still needs one slot to shuttle
                    // the value between sender and receiver.
                    buf: (0..cap + if cap == 0 {1} else {0}).map(|_| None).collect(),
                    start: 0,
                    size: 0,
                },
            }),
        }
    }

    // wait until a send slot is available, returning locked access to
    // the channel state.
    //
    // Loops because a woken sender must re-check capacity: another sender may
    // have raced it to the freed slot.
    fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
        // Stack-allocated queue node; safe because we stay parked on
        // `wait_token` until the node is dequeued.
        let mut node = Node { token: None, next: ptr::null_mut() };
        loop {
            let mut guard = self.lock.lock().unwrap();
            // are we ready to go?
            if guard.disconnected || guard.buf.size() < guard.buf.cap() {
                return guard;
            }
            // no room; actually block
            let wait_token = guard.queue.enqueue(&mut node);
            drop(guard);
            wait_token.wait();
        }
    }

    /// Blocking send. Returns `Err(t)` with the value handed back if the
    /// receiver disappeared before (or, for `cap == 0`, while) it could be
    /// delivered.
    pub fn send(&self, t: T) -> Result<(), T> {
        let mut guard = self.acquire_send_slot();
        if guard.disconnected { return Err(t) }
        guard.buf.enqueue(t);
        match mem::replace(&mut guard.blocker, NoneBlocked) {
            // if our capacity is 0, then we need to wait for a receiver to be
            // available to take our data. After waiting, we check again to make
            // sure the port didn't go away in the meantime. If it did, we need
            // to hand back our data.
            NoneBlocked if guard.cap == 0 => {
                let mut canceled = false;
                assert!(guard.canceled.is_none());
                // Publish a pointer to our stack-local flag; `drop_port` sets
                // it to true if the receiver goes away while we are parked.
                guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
                let mut guard = wait(&self.lock, guard, BlockedSender);
                if canceled {Err(guard.buf.dequeue())} else {Ok(())}
            }
            // success, we buffered some data
            NoneBlocked => Ok(()),
            // success, someone's about to receive our buffered data.
            BlockedReceiver(token) => { wakeup(token, guard); Ok(()) }
            BlockedSender(..) => panic!("lolwut"),
        }
    }

    /// Non-blocking send. `Full` is returned both when the buffer is full and
    /// when `cap == 0` with no receiver currently parked.
    pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
        let mut guard = self.lock.lock().unwrap();
        if guard.disconnected {
            Err(super::TrySendError::Disconnected(t))
        } else if guard.buf.size() == guard.buf.cap() {
            Err(super::TrySendError::Full(t))
        } else if guard.cap == 0 {
            // With capacity 0, even though we have buffer space we can't
            // transfer the data unless there's a receiver waiting.
            match mem::replace(&mut guard.blocker, NoneBlocked) {
                NoneBlocked => Err(super::TrySendError::Full(t)),
                BlockedSender(..) => unreachable!(),
                BlockedReceiver(token) => {
                    guard.buf.enqueue(t);
                    wakeup(token, guard);
                    Ok(())
                }
            }
        } else {
            // If the buffer has some space and the capacity isn't 0, then we
            // just enqueue the data for later retrieval, ensuring to wake up
            // any blocked receiver if there is one.
            assert!(guard.buf.size() < guard.buf.cap());
            guard.buf.enqueue(t);
            match mem::replace(&mut guard.blocker, NoneBlocked) {
                BlockedReceiver(token) => wakeup(token, guard),
                NoneBlocked => {}
                BlockedSender(..) => unreachable!(),
            }
            Ok(())
        }
    }

    // Receives a message from this channel
    //
    // When reading this, remember that there can only ever be one receiver at
    // time.
    pub fn recv(&self) -> Result<T, ()> {
        let mut guard = self.lock.lock().unwrap();

        // Wait for the buffer to have something in it. No need for a while loop
        // because we're the only receiver.
        let mut waited = false;
        if !guard.disconnected && guard.buf.size() == 0 {
            guard = wait(&self.lock, guard, BlockedReceiver);
            waited = true;
        }
        if guard.disconnected && guard.buf.size() == 0 { return Err(()) }

        // Pick up the data, wake up our neighbors, and carry on
        assert!(guard.buf.size() > 0);
        let ret = guard.buf.dequeue();
        self.wakeup_senders(waited, guard);
        return Ok(ret);
    }

    /// Non-blocking receive; see `Failure` for the error cases.
    pub fn try_recv(&self) -> Result<T, Failure> {
        let mut guard = self.lock.lock().unwrap();

        // Easy cases first
        if guard.disconnected { return Err(Disconnected) }
        if guard.buf.size() == 0 { return Err(Empty) }

        // Be sure to wake up neighbors
        let ret = Ok(guard.buf.dequeue());
        self.wakeup_senders(false, guard);
        return ret;
    }

    // Wake up pending senders after some data has been received
    //
    // * `waited` - flag if the receiver blocked to receive some data, or if it
    //              just picked up some data on the way out
    // * `guard` - the lock guard that is held over this channel's lock
    fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<State<T>>) {
        let pending_sender1: Option<SignalToken> = guard.queue.dequeue();

        // If this is a no-buffer channel (cap == 0), then if we didn't wait we
        // need to ACK the sender. If we waited, then the sender waking us up
        // was already the ACK.
        let pending_sender2 = if guard.cap == 0 && !waited {
            match mem::replace(&mut guard.blocker, NoneBlocked) {
                NoneBlocked => None,
                BlockedReceiver(..) => unreachable!(),
                BlockedSender(token) => {
                    // The rendezvous succeeded, so the sender's cancellation
                    // flag is no longer needed.
                    guard.canceled.take();
                    Some(token)
                }
            }
        } else {
            None
        };
        mem::drop(guard);

        // only outside of the lock do we wake up the pending tasks
        pending_sender1.map(|t| t.signal());
        pending_sender2.map(|t| t.signal());
    }

    // Prepares this shared packet for a channel clone, essentially just bumping
    // a refcount.
    pub fn clone_chan(&self) {
        self.channels.fetch_add(1, Ordering::SeqCst);
    }

    /// Drops one sender handle; the last one to go flags the channel as
    /// disconnected and wakes a parked receiver, if any.
    pub fn drop_chan(&self) {
        // Only flag the channel as disconnected if we're the last channel
        match self.channels.fetch_sub(1, Ordering::SeqCst) {
            1 => {}
            _ => return
        }

        // Not much to do other than wake up a receiver if one's there
        let mut guard = self.lock.lock().unwrap();
        if guard.disconnected { return }
        guard.disconnected = true;
        match mem::replace(&mut guard.blocker, NoneBlocked) {
            NoneBlocked => {}
            BlockedSender(..) => unreachable!(),
            BlockedReceiver(token) => wakeup(token, guard),
        }
    }

    /// Drops the receiver: disconnects the channel, destroys buffered data
    /// (outside the lock), and wakes every queued or parked sender.
    pub fn drop_port(&self) {
        let mut guard = self.lock.lock().unwrap();

        if guard.disconnected { return }
        guard.disconnected = true;

        // If the capacity is 0, then the sender may want its data back after
        // we're disconnected. Otherwise it's now our responsibility to destroy
        // the buffered data. As with many other portions of this code, this
        // needs to be careful to destroy the data *outside* of the lock to
        // prevent deadlock.
        let _data = if guard.cap != 0 {
            mem::replace(&mut guard.buf.buf, Vec::new())
        } else {
            Vec::new()
        };
        let mut queue = mem::replace(&mut guard.queue, Queue {
            head: ptr::null_mut(),
            tail: ptr::null_mut(),
        });

        let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
            NoneBlocked => None,
            BlockedSender(token) => {
                // Tell the rendezvous sender (still parked in `send`) that its
                // value was not received, via its stack-local flag.
                *guard.canceled.take().unwrap() = true;
                Some(token)
            }
            BlockedReceiver(..) => unreachable!(),
        };
        mem::drop(guard);

        // Signal strictly after the lock is released.
        loop {
            match queue.dequeue() {
                Some(token) => { token.signal(); }
                None => break,
            }
        }
        waiter.map(|t| t.signal());
    }

    ////////////////////////////////////////////////////////////////////////////
    // select implementation
    ////////////////////////////////////////////////////////////////////////////

    // If Ok, the value is whether this port has data, if Err, then the upgraded
    // port needs to be checked instead of this one.
    pub fn can_recv(&self) -> bool {
        let guard = self.lock.lock().unwrap();
        guard.disconnected || guard.buf.size() > 0
    }

    // Attempts to start selection on this port. This can either succeed or fail
    // because there is data waiting.
    pub fn start_selection(&self, token: SignalToken) -> StartResult {
        let mut guard = self.lock.lock().unwrap();
        if guard.disconnected || guard.buf.size() > 0 {
            Abort
        } else {
            match mem::replace(&mut guard.blocker, BlockedReceiver(token)) {
                NoneBlocked => {}
                BlockedSender(..) => unreachable!(),
                BlockedReceiver(..) => unreachable!(),
            }
            Installed
        }
    }

    // Remove a previous selecting task from this port. This ensures that the
    // blocked task will no longer be visible to any other threads.
    //
    // The return value indicates whether there's data on this port.
    pub fn abort_selection(&self) -> bool {
        let mut guard = self.lock.lock().unwrap();
        match mem::replace(&mut guard.blocker, NoneBlocked) {
            NoneBlocked => true,
            BlockedSender(token) => {
                // A sender raced in after selection started; reinstall it.
                guard.blocker = BlockedSender(token);
                true
            }
            BlockedReceiver(token) => { drop(token); false }
        }
    }
}
#[unsafe_destructor]
impl<T> Drop for Packet<T> {
    fn drop(&mut self) {
        // By this point every sender handle must have called `drop_chan`,
        // no sender may still be queued, and no stack-local cancellation
        // pointer may be outstanding.
        assert_eq!(self.channels.load(Ordering::SeqCst), 0);
        let mut guard = self.lock.lock().unwrap();
        assert!(guard.queue.dequeue().is_none());
        assert!(guard.canceled.is_none());
    }
}
////////////////////////////////////////////////////////////////////////////////
// Buffer, a simple ring buffer backed by Vec<T>
////////////////////////////////////////////////////////////////////////////////
impl<T> Buffer<T> {
    /// Stores `t` in the next free slot. Panics (assert) if the slot was
    /// somehow occupied — callers guarantee `size() < cap()` beforehand.
    fn enqueue(&mut self, t: T) {
        let slot = (self.start + self.size) % self.buf.len();
        self.size += 1;
        let displaced = mem::replace(&mut self.buf[slot], Some(t));
        assert!(displaced.is_none());
    }

    /// Removes and returns the oldest element. Panics (unwrap) on an empty
    /// buffer — callers guarantee `size() > 0`.
    fn dequeue(&mut self) -> T {
        let slot = self.start;
        self.size -= 1;
        self.start = (self.start + 1) % self.buf.len();
        self.buf[slot].take().unwrap()
    }

    /// Number of occupied slots.
    fn size(&self) -> usize { self.size }

    /// Total number of slots (note: 1 even for zero-capacity channels).
    fn cap(&self) -> usize { self.buf.len() }
}
////////////////////////////////////////////////////////////////////////////////
// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
////////////////////////////////////////////////////////////////////////////////
impl Queue {
    /// Links the caller-owned, stack-allocated `node` onto the tail and
    /// returns the token the caller must park on. The node must outlive its
    /// time in the queue — guaranteed because the caller blocks until
    /// `dequeue` takes its token.
    fn enqueue(&mut self, node: &mut Node) -> WaitToken {
        let (wait_token, signal_token) = blocking::tokens();
        node.token = Some(signal_token);
        node.next = ptr::null_mut();

        if self.tail.is_null() {
            self.head = node as *mut Node;
            self.tail = node as *mut Node;
        } else {
            unsafe {
                (*self.tail).next = node as *mut Node;
                self.tail = node as *mut Node;
            }
        }

        wait_token
    }

    /// Unlinks the head node and hands back its token (used to signal the
    /// parked sender). Returns `None` on an empty queue.
    fn dequeue(&mut self) -> Option<SignalToken> {
        if self.head.is_null() {
            return None
        }
        let node = self.head;
        self.head = unsafe { (*node).next };
        if self.head.is_null() {
            self.tail = ptr::null_mut();
        }
        unsafe {
            // Clear the link before releasing the node back to its owner.
            (*node).next = ptr::null_mut();
            Some((*node).token.take().unwrap())
        }
    }
}
| zaeleus/rust | src/libstd/sync/mpsc/sync.rs | Rust | apache-2.0 | 17,232 |
(function () {
  // Uppercase/lowercase completion word lists derived from the Pig mime mode.
  //
  // BUG FIX: the original chained declaration
  //   var pigKeywordsU = pigKeywordsL = pigTypesU = ... = [];
  // made every list alias the SAME array (and leaked all but the first
  // variable as implicit globals), so the ":"-triggered type completion
  // actually offered every keyword/builtin as well. Each list must be its
  // own array.
  var pigKeywordsU = [];
  var pigKeywordsL = [];
  var pigTypesU = [];
  var pigTypesL = [];
  var pigBuiltinsU = [];
  var pigBuiltinsL = [];

  var mimeMode = CodeMirror.mimeModes['text/x-pig'];
  Object.keys(mimeMode.keywords).forEach(function (w) {
    pigKeywordsU.push(w.toUpperCase());
    pigKeywordsL.push(w.toLowerCase());
  });
  Object.keys(mimeMode.types).forEach(function (w) {
    pigTypesU.push(w.toUpperCase());
    pigTypesL.push(w.toLowerCase());
  });
  Object.keys(mimeMode.builtins).forEach(function (w) {
    pigBuiltinsU.push(w.toUpperCase());
    pigBuiltinsL.push(w.toLowerCase());
  });

  // Minimal iteration helper (predates reliable Array.prototype.forEach use
  // in this addon).
  function forEach(arr, f) {
    for (var i = 0, e = arr.length; i < e; ++i) {
      f(arr[i]);
    }
  }

  // indexOf-based membership test with a fallback for engines lacking
  // Array.prototype.indexOf (old IE).
  function arrayContains(arr, item) {
    if (!Array.prototype.indexOf) {
      var i = arr.length;
      while (i--) {
        if (arr[i] === item) {
          return true;
        }
      }
      return false;
    }
    return arr.indexOf(item) != -1;
  }

  // Builds the hint object ({list, from, to}) for the token under the cursor.
  function scriptHint(editor, keywords, getToken) {
    // Find the token at the cursor.
    var cur = editor.getCursor();
    var token = getToken(editor, cur);
    var tprop = token;

    // If it's not a 'word-style' token, replace it with an empty pseudo-token
    // anchored at the cursor; a ":" prefix switches completion to Pig types.
    if (!/^[\w$_]*$/.test(token.string)) {
      token = tprop = {
        start: cur.ch,
        end: cur.ch,
        string: "",
        state: token.state,
        type: token.string == ":" ? "pig-type" : null
      };
    }

    var context = [tprop]; // was built via `if (!context) var context = []`
    var completionList = getCompletions(token, context).sort();

    return {
      list: completionList,
      from: {line: cur.line, ch: token.start},
      to: {line: cur.line, ch: token.end}
    };
  }

  // Collects every known word that starts with the current token's text.
  function getCompletions(token, context) {
    var found = [], start = token.string;

    function maybeAdd(str) {
      if (str.indexOf(start) == 0 && !arrayContains(found, str)) found.push(str);
    }

    function gatherCompletions(obj) {
      if (obj == ":") {
        // After a ":" only Pig types make sense.
        forEach(pigTypesL, maybeAdd);
      } else {
        forEach(pigBuiltinsU, maybeAdd);
        forEach(pigBuiltinsL, maybeAdd);
        forEach(pigTypesU, maybeAdd);
        forEach(pigTypesL, maybeAdd);
        forEach(pigKeywordsU, maybeAdd);
        forEach(pigKeywordsL, maybeAdd);
      }
    }

    if (context) {
      // If this is a property, see if it belongs to some object we can
      // find in the current environment.
      var obj = context.pop(), base;
      if (obj.type == "pig-word")
        base = obj.string;
      else if (obj.type == "pig-type")
        base = ":" + obj.string;
      while (base != null && context.length)
        base = base[context.pop().string];
      if (base != null) gatherCompletions(base);
    }
    return found;
  }

  CodeMirror.registerHelper("hint", "pig", function (cm, options) {
    return scriptHint(cm, pigKeywordsU, function (e, cur) { return e.getTokenAt(cur); });
  });
})();
| radicalbit/ambari | contrib/views/pig/src/main/resources/ui/pig-web/vendor/pig-hint.js | JavaScript | apache-2.0 | 3,053 |
import {removeElement} from '#core/dom';
import {Layout_Enum, applyFillContent} from '#core/dom/layout';
import {Services} from '#service';
import {userAssert} from '#utils/log';
import {TAG as KEY_TAG} from './amp-embedly-key';
import {getIframe} from '../../../src/3p-frame';
import {listenFor} from '../../../src/iframe-helper';
/**
 * Component tag identifier.
 * @const {string}
 */
export const TAG = 'amp-embedly-card';

/**
 * Attribute name used to set api key with name
 * expected by embedly (set on the element before the 3p iframe is built).
 * @const {string}
 */
const API_KEY_ATTR_NAME = 'data-card-key';
/**
* Implementation of the amp-embedly-card component.
* See {@link ../amp-embedly-card.md} for the spec.
*/
export class AmpEmbedlyCard extends AMP.BaseElement {
  /** @param {!AmpElement} element */
  constructor(element) {
    super(element);

    /** @private {?HTMLIFrameElement} */
    this.iframe_ = null;

    /** @private {?string} */
    this.apiKey_ = null;
  }

  /** @override */
  buildCallback() {
    userAssert(
      this.element.getAttribute('data-url'),
      'The data-url attribute is required for <%s> %s',
      TAG,
      this.element
    );

    // An optional sibling <amp-embedly-key> supplies the paid API key.
    const keyElement = document.querySelector(KEY_TAG);
    if (keyElement) {
      this.apiKey_ = keyElement.getAttribute('value');
    }
  }

  /** @override */
  layoutCallback() {
    // A paid api key, when present, removes the embedly branding, so forward
    // it to the 3p frame via the attribute embedly expects.
    if (this.apiKey_) {
      this.element.setAttribute(API_KEY_ATTR_NAME, this.apiKey_);
    }

    const frame = getIframe(this.win, this.element, 'embedly');
    frame.title = this.element.title || 'Embedly card';

    // Resize the AMP element whenever the embed reports a new height.
    listenFor(
      frame,
      'embed-size',
      (data) => {
        this.forceChangeHeight(data['height']);
      },
      /* opt_is3P */ true
    );

    applyFillContent(frame);
    this.getVsync().mutate(() => {
      this.element.appendChild(frame);
    });

    this.iframe_ = frame;
    return this.loadPromise(frame);
  }

  /** @override */
  unlayoutCallback() {
    if (this.iframe_) {
      removeElement(this.iframe_);
      this.iframe_ = null;
    }
    return true;
  }

  /** @override */
  isLayoutSupported(layout) {
    return layout == Layout_Enum.RESPONSIVE;
  }

  /**
   * @param {boolean=} opt_onLayout
   * @override
   */
  preconnectCallback(opt_onLayout) {
    Services.preconnectFor(this.win).url(
      this.getAmpDoc(),
      'https://cdn.embedly.com',
      opt_onLayout
    );
  }
}
| media-net/amphtml | extensions/amp-embedly-card/0.1/amp-embedly-card-impl.js | JavaScript | apache-2.0 | 2,517 |
package com.google.api.ads.dfp.jaxws.v201408;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
/**
*
* A {@code LiveStreamEvent} encapsulates all the information necessary
* to enable DAI (Dynamic Ad Insertion) into a live video stream.
*
* <p>This includes information such as the start and expected end time of
* the event, the URL of the actual content for DFP to pull and insert ads into,
* as well as the metadata necessary to generate ad requests during the event.
*
*
* <p>Java class for LiveStreamEvent complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="LiveStreamEvent">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="id" type="{http://www.w3.org/2001/XMLSchema}long" minOccurs="0"/>
* <element name="name" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="description" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="status" type="{https://www.google.com/apis/ads/publisher/v201408}LiveStreamEventStatus" minOccurs="0"/>
* <element name="creationDateTime" type="{https://www.google.com/apis/ads/publisher/v201408}DateTime" minOccurs="0"/>
* <element name="lastModifiedDateTime" type="{https://www.google.com/apis/ads/publisher/v201408}DateTime" minOccurs="0"/>
* <element name="startDateTime" type="{https://www.google.com/apis/ads/publisher/v201408}DateTime" minOccurs="0"/>
* <element name="endDateTime" type="{https://www.google.com/apis/ads/publisher/v201408}DateTime" minOccurs="0"/>
* <element name="totalEstimatedConcurrentUsers" type="{http://www.w3.org/2001/XMLSchema}long" minOccurs="0"/>
* <element name="contentUrls" type="{http://www.w3.org/2001/XMLSchema}string" maxOccurs="unbounded" minOccurs="0"/>
* <element name="adTags" type="{http://www.w3.org/2001/XMLSchema}string" maxOccurs="unbounded" minOccurs="0"/>
* <element name="liveStreamEventCode" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
// NOTE(review): this looks like JAXB/XJC-generated code (javax.xml.bind
// annotations plus templated accessor Javadoc); keep edits mechanical so the
// class can be regenerated from the WSDL/schema — confirm before refactoring.
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "LiveStreamEvent", propOrder = {
    "id",
    "name",
    "description",
    "status",
    "creationDateTime",
    "lastModifiedDateTime",
    "startDateTime",
    "endDateTime",
    "totalEstimatedConcurrentUsers",
    "contentUrls",
    "adTags",
    "liveStreamEventCode"
})
public class LiveStreamEvent {

    protected Long id;
    protected String name;
    protected String description;
    @XmlSchemaType(name = "string")
    protected LiveStreamEventStatus status;
    protected DateTime creationDateTime;
    protected DateTime lastModifiedDateTime;
    protected DateTime startDateTime;
    protected DateTime endDateTime;
    protected Long totalEstimatedConcurrentUsers;
    protected List<String> contentUrls;
    protected List<String> adTags;
    protected String liveStreamEventCode;

    /**
     * Gets the value of the id property.
     * 
     * @return
     *     possible object is
     *     {@link Long }
     *     
     */
    public Long getId() {
        return id;
    }

    /**
     * Sets the value of the id property.
     * 
     * @param value
     *     allowed object is
     *     {@link Long }
     *     
     */
    public void setId(Long value) {
        this.id = value;
    }

    /**
     * Gets the value of the name property.
     * 
     * @return
     *     possible object is
     *     {@link String }
     *     
     */
    public String getName() {
        return name;
    }

    /**
     * Sets the value of the name property.
     * 
     * @param value
     *     allowed object is
     *     {@link String }
     *     
     */
    public void setName(String value) {
        this.name = value;
    }

    /**
     * Gets the value of the description property.
     * 
     * @return
     *     possible object is
     *     {@link String }
     *     
     */
    public String getDescription() {
        return description;
    }

    /**
     * Sets the value of the description property.
     * 
     * @param value
     *     allowed object is
     *     {@link String }
     *     
     */
    public void setDescription(String value) {
        this.description = value;
    }

    /**
     * Gets the value of the status property.
     * 
     * @return
     *     possible object is
     *     {@link LiveStreamEventStatus }
     *     
     */
    public LiveStreamEventStatus getStatus() {
        return status;
    }

    /**
     * Sets the value of the status property.
     * 
     * @param value
     *     allowed object is
     *     {@link LiveStreamEventStatus }
     *     
     */
    public void setStatus(LiveStreamEventStatus value) {
        this.status = value;
    }

    /**
     * Gets the value of the creationDateTime property.
     * 
     * @return
     *     possible object is
     *     {@link DateTime }
     *     
     */
    public DateTime getCreationDateTime() {
        return creationDateTime;
    }

    /**
     * Sets the value of the creationDateTime property.
     * 
     * @param value
     *     allowed object is
     *     {@link DateTime }
     *     
     */
    public void setCreationDateTime(DateTime value) {
        this.creationDateTime = value;
    }

    /**
     * Gets the value of the lastModifiedDateTime property.
     * 
     * @return
     *     possible object is
     *     {@link DateTime }
     *     
     */
    public DateTime getLastModifiedDateTime() {
        return lastModifiedDateTime;
    }

    /**
     * Sets the value of the lastModifiedDateTime property.
     * 
     * @param value
     *     allowed object is
     *     {@link DateTime }
     *     
     */
    public void setLastModifiedDateTime(DateTime value) {
        this.lastModifiedDateTime = value;
    }

    /**
     * Gets the value of the startDateTime property.
     * 
     * @return
     *     possible object is
     *     {@link DateTime }
     *     
     */
    public DateTime getStartDateTime() {
        return startDateTime;
    }

    /**
     * Sets the value of the startDateTime property.
     * 
     * @param value
     *     allowed object is
     *     {@link DateTime }
     *     
     */
    public void setStartDateTime(DateTime value) {
        this.startDateTime = value;
    }

    /**
     * Gets the value of the endDateTime property.
     * 
     * @return
     *     possible object is
     *     {@link DateTime }
     *     
     */
    public DateTime getEndDateTime() {
        return endDateTime;
    }

    /**
     * Sets the value of the endDateTime property.
     * 
     * @param value
     *     allowed object is
     *     {@link DateTime }
     *     
     */
    public void setEndDateTime(DateTime value) {
        this.endDateTime = value;
    }

    /**
     * Gets the value of the totalEstimatedConcurrentUsers property.
     * 
     * @return
     *     possible object is
     *     {@link Long }
     *     
     */
    public Long getTotalEstimatedConcurrentUsers() {
        return totalEstimatedConcurrentUsers;
    }

    /**
     * Sets the value of the totalEstimatedConcurrentUsers property.
     * 
     * @param value
     *     allowed object is
     *     {@link Long }
     *     
     */
    public void setTotalEstimatedConcurrentUsers(Long value) {
        this.totalEstimatedConcurrentUsers = value;
    }

    /**
     * Gets the value of the contentUrls property.
     * 
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the contentUrls property.
     * 
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getContentUrls().add(newItem);
     * </pre>
     * 
     * 
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link String }
     * 
     * 
     */
    public List<String> getContentUrls() {
        if (contentUrls == null) {
            contentUrls = new ArrayList<String>();
        }
        return this.contentUrls;
    }

    /**
     * Gets the value of the adTags property.
     * 
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the adTags property.
     * 
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getAdTags().add(newItem);
     * </pre>
     * 
     * 
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link String }
     * 
     * 
     */
    public List<String> getAdTags() {
        if (adTags == null) {
            adTags = new ArrayList<String>();
        }
        return this.adTags;
    }

    /**
     * Gets the value of the liveStreamEventCode property.
     * 
     * @return
     *     possible object is
     *     {@link String }
     *     
     */
    public String getLiveStreamEventCode() {
        return liveStreamEventCode;
    }

    /**
     * Sets the value of the liveStreamEventCode property.
     * 
     * @param value
     *     allowed object is
     *     {@link String }
     *     
     */
    public void setLiveStreamEventCode(String value) {
        this.liveStreamEventCode = value;
    }

}
| shyTNT/googleads-java-lib | modules/dfp_appengine/src/main/java/com/google/api/ads/dfp/jaxws/v201408/LiveStreamEvent.java | Java | apache-2.0 | 10,126 |
// @strict: true

// NOTE(review): this is a compiler conformance fixture — the "// @..." line
// above is a test-harness directive, and the code below intentionally
// exercises pathological constraint recursion; do not "fix" the types.

// Both of the following types trigger the recursion limiter in getImmediateBaseConstraint

type T1<B extends { [K in keyof B]: Extract<B[Exclude<keyof B, K>], { val: string }>["val"] }> = B;
type T2<B extends { [K in keyof B]: B[Exclude<keyof B, K>]["val"] }> = B;

// Repros from #22950

type AProp<T extends { a: string }> = T

declare function myBug<
  T extends { [K in keyof T]: T[K] extends AProp<infer U> ? U : never }
>(arg: T): T

const out = myBug({obj1: {a: "test"}})

type Value<V extends string = string> = Record<"val", V>;
declare function value<V extends string>(val: V): Value<V>;

declare function ensureNoDuplicates<
  T extends {
    [K in keyof T]: Extract<T[K], Value>["val"] extends Extract<T[Exclude<keyof T, K>], Value>["val"]
      ? never
      : any
  }
>(vals: T): void;

const noError = ensureNoDuplicates({main: value("test"), alternate: value("test2")});

// expected to produce a duplicate-detection error in the baseline
const shouldBeNoError = ensureNoDuplicates({main: value("test")});

const shouldBeError = ensureNoDuplicates({main: value("dup"), alternate: value("dup")});

// Repro from #26448

type Cond<T> = T extends number ? number : never;
declare function function1<T extends {[K in keyof T]: Cond<T[K]>}>(): T[keyof T]["foo"];
| weswigham/TypeScript | tests/cases/compiler/infiniteConstraints.ts | TypeScript | apache-2.0 | 1,264 |
#
# Author:: Dan Crosta (<dcrosta@late.am>)
# Copyright:: Copyright (c) 2012 OpsCode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'spec_helper'
# Specs for the Groupmod group provider, which shells out to the
# /usr/sbin/group and /usr/sbin/user utilities (presumably the *BSD group
# management tools — confirm against the provider implementation).
describe Chef::Provider::Group::Groupmod do
  before do
    @node = Chef::Node.new
    @events = Chef::EventDispatch::Dispatcher.new
    @run_context = Chef::RunContext.new(@node, {}, @events)
    @new_resource = Chef::Resource::Group.new("wheel")
    @new_resource.gid 123
    @new_resource.members %w{lobster rage fist}
    @new_resource.append false
    @provider = Chef::Provider::Group::Groupmod.new(@new_resource, @run_context)
  end

  describe "manage_group" do
    describe "when determining the current group state" do
      it "should raise an error if the required binary /usr/sbin/group doesn't exist" do
        File.should_receive(:exists?).with("/usr/sbin/group").and_return(false)
        lambda { @provider.load_current_resource }.should raise_error(Chef::Exceptions::Group)
      end
      it "should raise an error if the required binary /usr/sbin/user doesn't exist" do
        File.should_receive(:exists?).with("/usr/sbin/group").and_return(true)
        File.should_receive(:exists?).with("/usr/sbin/user").and_return(false)
        lambda { @provider.load_current_resource }.should raise_error(Chef::Exceptions::Group)
      end
      it "shouldn't raise an error if the required binaries exist" do
        File.stub(:exists?).and_return(true)
        lambda { @provider.load_current_resource }.should_not raise_error
      end
    end

    describe "after the group's current state is known" do
      before do
        @current_resource = @new_resource.dup
        @provider.current_resource = @current_resource
      end

      describe "when no group members are specified and append is not set" do
        before do
          @new_resource.append(false)
          @new_resource.members([])
        end
        # The provider empties a group by renaming it aside, re-creating it
        # with no members, and deleting the backup.
        it "logs a message and sets group's members to 'none', then removes existing group members" do
          Chef::Log.should_receive(:debug).with("group[wheel] setting group members to: none")
          @provider.should_receive(:shell_out!).with("group mod -n wheel_bak wheel")
          @provider.should_receive(:shell_out!).with("group add -g '123' -o wheel")
          @provider.should_receive(:shell_out!).with("group del wheel_bak")
          @provider.manage_group
        end
      end

      describe "when no group members are specified and append is set" do
        before do
          @new_resource.append(true)
          @new_resource.members([])
        end
        it "logs a message and does not modify group membership" do
          Chef::Log.should_receive(:debug).with("group[wheel] not changing group members, the group has no members to add")
          @provider.should_not_receive(:shell_out!)
          @provider.manage_group
        end
      end

      describe "when removing some group members" do
        before do
          @new_resource.append(false)
          @new_resource.members(%w{ lobster })
        end
        it "updates group membership correctly" do
          Chef::Log.stub(:debug)
          @provider.should_receive(:shell_out!).with("group mod -n wheel_bak wheel")
          @provider.should_receive(:shell_out!).with("user mod -G wheel lobster")
          @provider.should_receive(:shell_out!).with("group add -g '123' -o wheel")
          @provider.should_receive(:shell_out!).with("group del wheel_bak")
          @provider.manage_group
        end
      end
    end
  end

  describe "create_group" do
    describe "when creating a new group" do
      before do
        # Empty current resource models a group that does not yet exist.
        @current_resource = Chef::Resource::Group.new("wheel")
        @provider.current_resource = @current_resource
      end
      it "should run a group add command and some user mod commands" do
        @provider.should_receive(:shell_out!).with("group add -g '123' wheel")
        @provider.should_receive(:shell_out!).with("user mod -G wheel lobster")
        @provider.should_receive(:shell_out!).with("user mod -G wheel rage")
        @provider.should_receive(:shell_out!).with("user mod -G wheel fist")
        @provider.create_group
      end
    end
  end

  describe "remove_group" do
    describe "when removing an existing group" do
      before do
        @current_resource = @new_resource.dup
        @provider.current_resource = @current_resource
      end
      it "should run a group del command" do
        @provider.should_receive(:shell_out!).with("group del wheel")
        @provider.remove_group
      end
    end
  end
end
| sysbot/chef | spec/unit/provider/group/groupmod_spec.rb | Ruby | apache-2.0 | 5,123 |
/*
* Copyright 2017 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import static com.google.common.base.Predicates.not;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.javascript.rhino.jstype.FunctionType;
import com.google.javascript.rhino.jstype.JSType;
import com.google.javascript.rhino.jstype.NamedType;
import com.google.javascript.rhino.jstype.NoType;
import com.google.javascript.rhino.jstype.ObjectType;
import com.google.javascript.rhino.jstype.UnionType;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
* Tests exercising {@link CompilerOptions#assumeForwardDeclaredForMissingTypes} and {@link
* DiagnosticGroups#MISSING_SOURCES_WARNINGS}.
*/
@RunWith(JUnit4.class)
public class PartialCompilationTest {
private Compiler compiler;
  /**
   * Asserts that the given lines of code compile and only give errors matching the {@link
   * DiagnosticGroups#MISSING_SOURCES_WARNINGS} category.
   */
  private void assertPartialCompilationSucceeds(String... code) throws Exception {
    compiler = new Compiler();
    compiler.setErrorManager(
        new BasicErrorManager() {
          @Override
          public void report(CheckLevel level, JSError error) {
            // Escalate every diagnostic to ERROR so nothing is silently
            // downgraded to a warning.
            super.report(CheckLevel.ERROR, error);
          }

          @Override
          public void println(CheckLevel level, JSError error) {
            /* no-op */
          }

          @Override
          protected void printSummary() {
            /* no-op */
          }
        });
    CompilerOptions options = new CompilerOptions();
    // The option under test: unresolved types are treated as forward-declared.
    options.setAssumeForwardDeclaredForMissingTypes(true);
    options.setStrictModeInput(true);
    options.setPreserveDetailedSourceInfo(true);
    CompilationLevel.ADVANCED_OPTIMIZATIONS.setOptionsForCompilationLevel(options);
    compiler.init(
        ImmutableList.of(),
        Collections.singletonList(SourceFile.fromCode("input.js", Joiner.on('\n').join(code))),
        options);
    compiler.parse();
    compiler.check();
    // Everything except missing-sources diagnostics must be absent.
    ImmutableList<JSError> sourcesErrors =
        compiler.getErrors().stream()
            .filter(not(DiagnosticGroups.MISSING_SOURCES_WARNINGS::matches))
            .collect(toImmutableList());
    assertThat(sourcesErrors).isEmpty();
  }
@Test
public void testUsesMissingCode() throws Exception {
assertPartialCompilationSucceeds(
"goog.provide('missing_code_user');",
"goog.require('some.thing.Missing');",
"missing_code_user.fnUsesMissingNs = function() {",
" missing_code_user.missingNamespace.foo();",
" missingTopLevelNamespace.bar();",
"};");
}
@Test
public void testMissingType_variable() throws Exception {
assertPartialCompilationSucceeds("/** @type {!some.thing.Missing} */ var foo;");
}
@Test
public void testMissingType_assignment() throws Exception {
assertPartialCompilationSucceeds(
"/** @type {!some.thing.Missing} */ var foo;", // line break
"/** @type {number} */ var bar = foo;");
}
@Test
public void testMissingRequire() throws Exception {
assertPartialCompilationSucceeds(
"goog.provide('missing_extends');", // line break
"goog.require('some.thing.Missing');");
}
@Test
public void testMissingExtends() throws Exception {
assertPartialCompilationSucceeds(
"goog.provide('missing_extends');",
"/** @constructor @extends {some.thing.Missing} */",
"missing_extends.Extends = function() {}");
}
@Test
public void testMissingExtends_template() throws Exception {
assertPartialCompilationSucceeds(
"goog.provide('missing_extends');",
"/** @constructor @extends {some.thing.Missing<string>} x */",
"missing_extends.Extends = function() {}");
}
@Test
public void testMissingType_typedefAlias() throws Exception {
assertPartialCompilationSucceeds("/** @typedef {string} */ var typedef;");
}
@Test
public void testMissingType_typedefField() throws Exception {
assertPartialCompilationSucceeds("/** @typedef {some.thing.Missing} */ var typedef;");
}
@Test
public void testMissingEs6Externs() throws Exception {
assertPartialCompilationSucceeds("let foo = {a, b};");
}
@Test
public void testUnresolvedGenerics() throws Exception {
assertPartialCompilationSucceeds(
"/** @type {!some.thing.Missing<string, !AlsoMissing<!More>>} */", "var x;");
TypedVar x = compiler.getTopScope().getSlot("x");
assertWithMessage("type %s", x.getType()).that(x.getType().isNoResolvedType()).isTrue();
NoType templatizedType = (NoType) x.getType();
assertThat(templatizedType.getReferenceName()).isEqualTo("some.thing.Missing");
ImmutableList<JSType> templateTypes = templatizedType.getTemplateTypes();
assertThat(templateTypes.get(0).isString()).isTrue();
assertThat(templateTypes.get(1).isObject()).isTrue();
ObjectType alsoMissing = (ObjectType) templateTypes.get(1);
assertThat(alsoMissing.getReferenceName()).isEqualTo("AlsoMissing");
assertThat(alsoMissing.getTemplateTypes()).hasSize(1);
ObjectType more = (ObjectType) alsoMissing.getTemplateTypes().get(0);
assertThat(more.getReferenceName()).isEqualTo("More");
}
@Test
public void testUnresolvedUnions() throws Exception {
assertPartialCompilationSucceeds("/** @type {some.thing.Foo|some.thing.Bar} */", "var x;");
TypedVar x = compiler.getTopScope().getSlot("x");
assertWithMessage("type %s", x.getType()).that(x.getType().isUnionType()).isTrue();
UnionType unionType = (UnionType) x.getType();
Collection<JSType> alternatives = unionType.getAlternates();
assertThat(alternatives).hasSize(3);
int nullTypeCount = 0;
List<String> namedTypes = new ArrayList<>();
for (JSType alternative : alternatives) {
assertThat(alternative.isNamedType() || alternative.isNullType()).isTrue();
if (alternative.isNamedType()) {
assertThat(alternative.isNoResolvedType()).isTrue();
namedTypes.add(((NamedType) alternative).getReferenceName());
}
if (alternative.isNullType()) {
nullTypeCount++;
}
}
assertThat(nullTypeCount).isEqualTo(1);
assertThat(namedTypes).containsExactly("some.thing.Foo", "some.thing.Bar");
}
@Test
public void testUnresolvedGenerics_defined() throws Exception {
assertPartialCompilationSucceeds(
"/** @param {!some.thing.Missing<string>} x */",
"function useMissing(x) {}",
"/** @const {!some.thing.Missing<string>} */",
"var x;",
"/** @constructor @template T */",
"some.thing.Missing = function () {}",
"function missingInside() {",
" useMissing(new some.thing.Missing());",
"}");
}
@Test
public void testUnresolvedBaseClassDoesNotHideFields() throws Exception {
assertPartialCompilationSucceeds(
"/** @constructor @extends {MissingBase} */",
"var Klass = function () {",
" /** @type {string} */",
" this.foo;",
"};");
TypedVar x = compiler.getTopScope().getSlot("Klass");
JSType type = x.getType();
assertThat(type.isFunctionType()).isTrue();
FunctionType fType = (FunctionType) type;
assertThat(fType.getTypeOfThis().hasProperty("foo")).isTrue();
}
}
| GoogleChromeLabs/chromeos_smart_card_connector | third_party/closure-compiler/src/test/com/google/javascript/jscomp/PartialCompilationTest.java | Java | apache-2.0 | 8,229 |
/*
* Copyright (c) 2013-2021, Pelion and affiliates.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nsconfig.h"
#include <string.h>
#include <ns_types.h>
#include <ns_trace.h>
#include "eventOS_event.h"
#include "eventOS_scheduler.h"
#include "eventOS_event_timer.h"
#include "nsdynmemLIB.h"
#include "ns_list.h"
#include "randLIB.h"
#include "socket_api.h"
#include "net_interface.h"
#include "common_functions.h"
#include "libDHCPv6/libDHCPv6.h"
#include "NWK_INTERFACE/Include/protocol.h" // just for protocol_core_monotonic_time
#include "Common_Protocols/ip.h"
#include "dhcp_service_api.h"
#ifdef HAVE_DHCPV6
#define TRACE_GROUP "dhcp"
// Upper bound for service instance ids handed out by dhcp_service_init() (ids 1..MAX_SERVERS-1).
#define MAX_SERVERS 20
/* Fixed-point randomisation limits for randlib_randomise_base() - RFC 3315
 * says RAND is uniformly distributed between -0.1 and +0.1
 */
#define RAND1_LOW 0x7333 // 1 - 0.1; minimum for "1+RAND"
#define RAND1_HIGH 0x8CCD // 1 + 0.1; maximum for "1+RAND"

// One registered DHCPv6 service endpoint (server, client or relay agent).
typedef struct {
    dhcp_service_receive_req_cb *recv_req_cb; // request receiver; NULL when none registered
    uint16_t instance_id;                     // unique id allocated in dhcp_service_init()
    int8_t interface_id;
    dhcp_instance_type_e instance_type;
    ns_list_link_t link;
} server_instance_t;
typedef NS_LIST_HEAD(server_instance_t, link) server_instance_list_t;

// Relay-agent state, keyed by the same instance id as its server_instance_t entry.
typedef struct {
    uint16_t instance_id;
    int8_t interface_id;
    uint8_t server_address[16];   // server the relay forwards to (set on enable)
    bool relay_activated;         // false until dhcp_service_relay_instance_enable()
    bool add_interface_id_option; // append interface-id option to relay-forward frames
    ns_list_link_t link;
} relay_instance_t;
typedef NS_LIST_HEAD(relay_instance_t, link) relay_instance_list_t;

// In-flight DHCPv6 transaction. All timeout fields are in 1/10 s ticks
// (see the seconds-to-ticks conversion in dhcp_tr_set_retry_timers()).
typedef struct {
    ns_address_t addr;                          // peer address/port used for (re)transmission
    dhcp_service_receive_resp_cb *recv_resp_cb; // set only for client-side transactions
    uint16_t instance_id;
    int8_t interface_id;
    int8_t socket;
    uint8_t options;                // TX_OPT_* flags
    void *client_obj_ptr;           // opaque owner pointer handed back in callbacks
    uint32_t msg_tr_id;             // locally generated 24-bit transaction id
    uint32_t message_tr_id;         // transaction id copied from a received request
    uint32_t transmit_time;         // timestamp of last transmit (0 = never sent)
    uint32_t first_transmit_time;   // timestamp of first transmit; feeds elapsed-time option
    uint16_t delayed_tx;            // ticks to wait before first transmission (0 = send now)
    uint16_t timeout;               // ticks until next retransmission (0 = no retransmission)
    uint16_t timeout_init;          // previous RT (randomised), basis for the next RT
    uint16_t timeout_max;           // MRT in ticks; 0 = unlimited
    uint8_t retrans_max;            // MRC; 0 = unlimited retransmissions
    uint8_t retrans;
    uint8_t *msg_ptr;               // owned message buffer (freed by dhcp_tr_delete())
    uint16_t msg_len;
    uint8_t *relay_start;           // start of relay-forward envelope (server rx path only)
    uint8_t *opt_interface_id;      // interface-id option payload inside the relay envelope
    uint16_t opt_interface_id_length;
    ns_list_link_t link;
} msg_tr_t;
typedef NS_LIST_HEAD(msg_tr_t, link) tr_list_t;

// Per-interface neighbour-notification hook used by the server/relay rx paths.
typedef struct {
    dhcp_relay_neighbour_cb *recv_notify_cb;
    int8_t interface_id;
    ns_list_link_t link;
} relay_notify_t;
typedef NS_LIST_HEAD(relay_notify_t, link) relay_notify_list_t;

// Singleton service context holding all instances, transactions and shared sockets.
typedef struct {
    ns_address_t src_address;
    server_instance_list_t srv_list;
    relay_instance_list_t relay_list;
    relay_notify_list_t notify_list;
    tr_list_t tr_list;
    int8_t dhcp_server_socket;  // -1 when closed
    int8_t dhcp_client_socket;  // -1 when closed
    int8_t dhcp_relay_socket;   // -1 when closed
    int8_t dhcpv6_socket_service_tasklet;
} dhcp_service_class_t;

// Tasklet event types and the 100 ms periodic timer parameters.
#define DHCPV6_SOCKET_SERVICE_TASKLET_INIT 1
#define DHCPV6_SOCKET_SERVICE_TIMER 2
#define DHCPV6_SOCKET_SERVICE_TIMER_ID 1
#define DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS 100

dhcp_service_class_t *dhcp_service = NULL; // lazily created by dhcp_service_allocate()
static bool dhcpv6_socket_timeout_timer_active = false; // true while the periodic timer is armed
void dhcp_service_send_message(msg_tr_t *msg_tr_ptr);
/* Event handler tasklet for the DHCPv6 service: arms the periodic 100 ms
 * timer on creation and keeps re-arming it from each timer event for as long
 * as dhcp_service_timer_tick() reports active transactions. */
void DHCPv6_socket_service_tasklet(arm_event_s *event)
{
    switch (event->event_type) {
        case DHCPV6_SOCKET_SERVICE_TASKLET_INIT:
            // Start the periodic timer service when the tasklet is created.
            eventOS_event_timer_request(DHCPV6_SOCKET_SERVICE_TIMER_ID, DHCPV6_SOCKET_SERVICE_TIMER, dhcp_service->dhcpv6_socket_service_tasklet, DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS);
            dhcpv6_socket_timeout_timer_active = true;
            break;
        case DHCPV6_SOCKET_SERVICE_TIMER: {
            bool keep_running = dhcp_service_timer_tick(1);
            dhcpv6_socket_timeout_timer_active = keep_running;
            if (keep_running) {
                eventOS_event_timer_request(DHCPV6_SOCKET_SERVICE_TIMER_ID, DHCPV6_SOCKET_SERVICE_TIMER, dhcp_service->dhcpv6_socket_service_tasklet, DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS);
            }
            break;
        }
        default:
            // Unknown events are ignored.
            break;
    }
}
/* Lazily allocate the singleton dhcp_service context and its event tasklet.
 * Returns true when the context exists (already allocated, or created now);
 * false on allocation or tasklet-creation failure. */
bool dhcp_service_allocate(void)
{
    if (dhcp_service != NULL) {
        // Already initialised; nothing to do.
        return true;
    }
    dhcp_service = ns_dyn_mem_alloc(sizeof(dhcp_service_class_t));
    if (dhcp_service == NULL) {
        return false;
    }
    ns_list_init(&dhcp_service->srv_list);
    ns_list_init(&dhcp_service->relay_list);
    ns_list_init(&dhcp_service->notify_list);
    ns_list_init(&dhcp_service->tr_list);
    dhcp_service->dhcp_client_socket = -1;
    dhcp_service->dhcp_server_socket = -1;
    dhcp_service->dhcp_relay_socket = -1;
    dhcp_service->dhcpv6_socket_service_tasklet = eventOS_event_handler_create(DHCPv6_socket_service_tasklet, DHCPV6_SOCKET_SERVICE_TASKLET_INIT);
    if (dhcp_service->dhcpv6_socket_service_tasklet < 0) {
        // Tasklet creation failed: roll the allocation back.
        ns_dyn_mem_free(dhcp_service);
        dhcp_service = NULL;
        return false;
    }
    return true;
}
/*Subclass instances*/
/**
 * Look up an active transaction by its 24-bit transaction id.
 *
 * \param msg_tr_id transaction id to search for.
 * \return the matching transaction, or NULL if none exists.
 */
msg_tr_t *dhcp_tr_find(uint32_t msg_tr_id)
{
    ns_list_foreach(msg_tr_t, cur_ptr, &dhcp_service->tr_list) {
        if (cur_ptr->msg_tr_id == msg_tr_id) {
            // Ids are unique (dhcp_tr_create probes until unused), so the
            // first match is the only match - no need to scan further.
            return cur_ptr;
        }
    }
    return NULL;
}
/* Allocate a zeroed transaction, assign it a unique non-zero 24-bit
 * transaction id and link it into the service transaction list.
 * Returns NULL on allocation failure. */
msg_tr_t *dhcp_tr_create(void)
{
    msg_tr_t *tr = ns_dyn_mem_temporary_alloc(sizeof(msg_tr_t));
    if (tr == NULL) {
        return NULL;
    }
    memset(tr, 0, sizeof(msg_tr_t));
    tr->msg_ptr = NULL;
    tr->recv_resp_cb = NULL;
    /* Draw a 24-bit random id, then probe linearly until it is both
     * non-zero and not in use by another transaction. */
    uint32_t candidate = randLIB_get_32bit() & 0xffffff;
    while (candidate == 0 || dhcp_tr_find(candidate) != NULL) {
        candidate = (candidate + 1) & 0xffffff;
    }
    tr->msg_tr_id = candidate;
    ns_list_add_to_start(&dhcp_service->tr_list, tr);
    return tr;
}
/* Unlink a transaction and release both its message buffer and the
 * transaction record itself. Safe to call with NULL. */
void dhcp_tr_delete(msg_tr_t *msg_ptr)
{
    if (msg_ptr == NULL) {
        return;
    }
    ns_list_remove(&dhcp_service->tr_list, msg_ptr);
    ns_dyn_mem_free(msg_ptr->msg_ptr);
    ns_dyn_mem_free(msg_ptr);
}
/**
 * Initialise the retransmission parameters of a transaction from the DHCPv6
 * message type (RFC 3315 SOL/REN/REL constants, leasequery LQ_* constants).
 * Timeouts are converted from seconds to 1/10 s ticks and the first timeout
 * is randomised by +/-10 %. Also (re)arms the periodic service timer if it
 * is not already running. NULL msg_ptr is a no-op.
 */
void dhcp_tr_set_retry_timers(msg_tr_t *msg_ptr, uint8_t msg_type)
{
    if (msg_ptr != NULL) {
        if (msg_type == DHCPV6_SOLICATION_TYPE) {
            msg_ptr->timeout_init = SOL_TIMEOUT;
            msg_ptr->timeout_max = SOL_MAX_RT;
            msg_ptr->retrans_max = 0; // no retransmission count limit
        } else if (msg_type == DHCPV6_RENEW_TYPE) {
            msg_ptr->timeout_init = REN_TIMEOUT;
            msg_ptr->timeout_max = REN_MAX_RT;
            msg_ptr->retrans_max = 0; // no retransmission count limit
        } else if (msg_type == DHCPV6_LEASEQUERY_TYPE) {
            msg_ptr->timeout_init = LQ_TIMEOUT;
            msg_ptr->timeout_max = LQ_MAX_RT;
            msg_ptr->retrans_max = LQ_MAX_RC;
        } else {
            // All other message types use the release timings.
            msg_ptr->timeout_init = REL_TIMEOUT;
            msg_ptr->timeout_max = 0; // no upper bound on RT
            msg_ptr->retrans_max = REL_MAX_RC;
        }
        // Convert from seconds to 1/10s ticks, with initial randomisation factor
        msg_ptr->timeout_init = randLIB_randomise_base(msg_ptr->timeout_init * 10, RAND1_LOW, RAND1_HIGH);
        msg_ptr->timeout_max *= 10;
        msg_ptr->timeout = msg_ptr->timeout_init;
        // Make sure the periodic tick that drives retransmission is running.
        if (!dhcpv6_socket_timeout_timer_active) {
            eventOS_event_timer_request(DHCPV6_SOCKET_SERVICE_TIMER_ID, DHCPV6_SOCKET_SERVICE_TIMER, dhcp_service->dhcpv6_socket_service_tasklet, DHCPV6_SOCKET_TIMER_UPDATE_PERIOD_IN_MS);
            dhcpv6_socket_timeout_timer_active = true;
        }
    }
    return;
}
/**
 * Look up a service instance (server, client or relay) by its instance id.
 *
 * \param instance_id id returned by dhcp_service_init().
 * \return the matching instance, or NULL if none exists.
 */
server_instance_t *dhcp_service_client_find(uint16_t instance_id)
{
    ns_list_foreach(server_instance_t, cur_ptr, &dhcp_service->srv_list) {
        if (cur_ptr->instance_id == instance_id) {
            // Instance ids are unique (allocation loop in dhcp_service_init
            // skips ids already in use), so stop at the first match.
            return cur_ptr;
        }
    }
    return NULL;
}
static uint16_t dhcp_service_relay_interface_get(int8_t interface_id)
{
ns_list_foreach(server_instance_t, cur_ptr, &dhcp_service->srv_list) {
if (cur_ptr->interface_id == interface_id && cur_ptr->instance_type == DHCP_INTANCE_RELAY_AGENT) {
return cur_ptr->instance_id;
}
}
return 0;
}
static relay_notify_t *dhcp_service_notify_find(int8_t interface_id)
{
relay_notify_t *result = NULL;
ns_list_foreach(relay_notify_t, cur_ptr, &dhcp_service->notify_list) {
if (cur_ptr->interface_id == interface_id) {
result = cur_ptr;
}
}
return result;
}
static relay_instance_t *dhcp_service_relay_find(uint16_t instance_id)
{
relay_instance_t *result = NULL;
ns_list_foreach(relay_instance_t, cur_ptr, &dhcp_service->relay_list) {
if (cur_ptr->instance_id == instance_id) {
result = cur_ptr;
}
}
return result;
}
static relay_instance_t *dhcp_service_relay_interface(int8_t interface_id)
{
relay_instance_t *result = NULL;
ns_list_foreach(relay_instance_t, cur_ptr, &dhcp_service->relay_list) {
if (cur_ptr->interface_id == interface_id) {
result = cur_ptr;
}
}
return result;
}
/**
 * Socket callback for the DHCPv6 server socket.
 *
 * Reads the datagram, unwraps a relay-forward envelope if present, creates a
 * transaction for the request and offers the payload to every registered
 * instance on the receiving interface until one accepts it. If an instance
 * accepts (RET_MSG_ACCEPTED), ownership of the transaction passes to it and
 * only the rx buffer is freed here; otherwise both are cleaned up.
 */
void recv_dhcp_server_msg(void *cb_res)
{
    socket_callback_t *sckt_data;
    server_instance_t *srv_ptr = NULL;
    msg_tr_t *msg_tr_ptr;
    uint8_t *msg_ptr, *allocated_ptr;
    uint16_t msg_len;
    dhcpv6_relay_msg_t relay_msg;
    sckt_data = cb_res;
    // Need at least the 4-byte header (type + 24-bit transaction id).
    if (sckt_data->event_type != SOCKET_DATA || sckt_data->d_len < 4) {
        return;
    }
    relay_notify_t *neigh_notify = NULL;
    tr_debug("dhcp Server recv request");
    msg_tr_ptr = dhcp_tr_create();
    msg_ptr = ns_dyn_mem_temporary_alloc(sckt_data->d_len);
    // Keep the original pointer: msg_ptr may later be moved to the inner
    // relay payload, but the whole buffer must be freed.
    allocated_ptr = msg_ptr;
    if (msg_ptr == NULL || msg_tr_ptr == NULL) {
        // read actual message
        tr_error("Out of resources");
        goto cleanup;
    }
    msg_len = socket_read(sckt_data->socket_id, &msg_tr_ptr->addr, msg_ptr, sckt_data->d_len);
    uint8_t msg_type = *msg_ptr;
    if (msg_type == DHCPV6_RELAY_FORWARD) {
        if (!libdhcpv6_relay_msg_read(msg_ptr, msg_len, &relay_msg)) {
            tr_error("Relay forward not correct");
            goto cleanup;
        }
        //Update Source and data
        // Remember the envelope so the reply can be wrapped in a relay-reply
        // (see dhcp_service_send_message), and address the reply to the peer.
        msg_tr_ptr->relay_start = msg_ptr;
        msg_tr_ptr->opt_interface_id = relay_msg.relay_interface_id.msg_ptr;
        msg_tr_ptr->opt_interface_id_length = relay_msg.relay_interface_id.len;
        memcpy(msg_tr_ptr->addr.address, relay_msg.peer_address, 16);
        msg_ptr = relay_msg.relay_options.msg_ptr;
        msg_len = relay_msg.relay_options.len;
        msg_type = *msg_ptr;
    } else if (msg_type == DHCPV6_RELAY_REPLY) {
        // A relay-reply must never arrive at a server.
        tr_error("Relay reply drop at server");
        goto cleanup;
    } else {
        //Search only for direct messages here
        neigh_notify = dhcp_service_notify_find(sckt_data->interface_id);
    }
    //TODO use real function from lib also call validity check
    // Remember the client's transaction id so the response can echo it back.
    msg_tr_ptr->message_tr_id = common_read_24_bit(&msg_ptr[1]);
    if (0 != libdhcpv6_message_malformed_check(msg_ptr, msg_len)) {
        tr_error("Malformed packet");
        goto cleanup;
    }
    // Let the interface owner learn the (direct) neighbour's address.
    if (neigh_notify && neigh_notify->recv_notify_cb) {
        neigh_notify->recv_notify_cb(sckt_data->interface_id, msg_tr_ptr->addr.address);
    }
    msg_tr_ptr->socket = sckt_data->socket_id;
    // call all receivers until found.
    ns_list_foreach(server_instance_t, cur_ptr, &dhcp_service->srv_list) {
        if (cur_ptr->interface_id == sckt_data->interface_id && cur_ptr->recv_req_cb != NULL) {
            msg_tr_ptr->instance_id = cur_ptr->instance_id;
            msg_tr_ptr->interface_id = sckt_data->interface_id;
            if ((RET_MSG_ACCEPTED ==
                    cur_ptr->recv_req_cb(cur_ptr->instance_id, msg_tr_ptr->msg_tr_id, msg_type, msg_ptr + 4, msg_len - 4))) {
                // should not modify pointers but library requires.
                // Acceptance transfers transaction ownership: NULL it out so
                // cleanup below does not delete it.
                msg_tr_ptr = NULL;
                srv_ptr = cur_ptr;
                break;
            }
        }
    }
cleanup:
    dhcp_tr_delete(msg_tr_ptr);
    ns_dyn_mem_free(allocated_ptr);
    if (srv_ptr == NULL) {
        //no owner found
        tr_warn("No handler for this message found");
    }
    return;
}
/**
 * Socket callback for the DHCPv6 relay-agent socket.
 *
 * Two directions are handled:
 *  - relay-reply from the server: the inner payload is unwrapped and sent
 *    to the client taken from the envelope's peer-address field;
 *  - a direct client message: it is wrapped in a relay-forward envelope
 *    (optionally with an interface-id option) and sent to the configured
 *    server with DSCP CS6 marking.
 * Relay-forward frames arriving here are dropped.
 */
void recv_dhcp_relay_msg(void *cb_res)
{
    socket_callback_t *sckt_data;
    uint16_t msg_len;
    sckt_data = cb_res;
    // Need at least the 4-byte DHCPv6 header.
    if (sckt_data->event_type != SOCKET_DATA || sckt_data->d_len < 4) {
        return;
    }
    protocol_interface_info_entry_t *interface_ptr = protocol_stack_interface_info_get_by_id(sckt_data->interface_id);
    relay_instance_t *relay_srv = dhcp_service_relay_interface(sckt_data->interface_id);
    // Only process traffic for an activated relay on a known interface.
    if (!interface_ptr || !relay_srv || !relay_srv->relay_activated) {
        return;
    }
    ns_address_t src_address;
    //Relay vector added space for relay frame + Interface ID
    uint8_t relay_frame[DHCPV6_RELAY_LENGTH + 4 + 5];
    uint8_t *socket_data = ns_dyn_mem_temporary_alloc(sckt_data->d_len);
    if (socket_data == NULL) {
        // read actual message
        tr_error("Out of resources");
        goto cleanup;
    }
    // Scatter/gather read: sender address lands in src_address.
    ns_msghdr_t msghdr;
    ns_iovec_t msg_data;
    msg_data.iov_base = socket_data;
    msg_data.iov_len = sckt_data->d_len;
    //Set messages name buffer
    msghdr.msg_name = &src_address;
    msghdr.msg_namelen = sizeof(src_address);
    msghdr.msg_iov = &msg_data;
    msghdr.msg_iovlen = 1;
    msghdr.msg_control = NULL;
    msghdr.msg_controllen = 0;
    msg_len = socket_recvmsg(sckt_data->socket_id, &msghdr, NS_MSG_LEGACY0);
    tr_debug("dhcp Relay recv msg");
    //Parse type
    uint8_t msg_type = *socket_data;
    int16_t tc = 0; // traffic class for the outgoing datagram
    if (msg_type == DHCPV6_RELAY_FORWARD) {
        tr_error("Drop not supported DHCPv6 forward at Agent");
        goto cleanup;
    } else if (msg_type == DHCPV6_RELAY_REPLY) {
        //Parse and validate Relay
        dhcpv6_relay_msg_t relay_msg;
        if (!libdhcpv6_relay_msg_read(socket_data, msg_len, &relay_msg)) {
            tr_error("Not valid relay");
            goto cleanup;
        }
        if (0 != libdhcpv6_message_malformed_check(relay_msg.relay_options.msg_ptr, relay_msg.relay_options.len)) {
            tr_error("Malformed packet");
            goto cleanup;
        }
        //Copy DST address
        // The envelope's peer address is the client the reply belongs to.
        memcpy(src_address.address, relay_msg.peer_address, 16);
        src_address.type = ADDRESS_IPV6;
        src_address.identifier = DHCPV6_CLIENT_PORT;
        // Send only the inner payload onwards.
        msghdr.msg_iov = &msg_data;
        msghdr.msg_iovlen = 1;
        msg_data.iov_base = relay_msg.relay_options.msg_ptr;
        msg_data.iov_len = relay_msg.relay_options.len;
        tr_debug("Forward Original relay msg to client");
    } else {
        if (0 != libdhcpv6_message_malformed_check(socket_data, msg_len)) {
            tr_error("Malformed packet");
            goto cleanup;
        }
        uint8_t gp_address[16];
        //Get blobal address from interface
        if (addr_interface_select_source(interface_ptr, gp_address, relay_srv->server_address, 0) != 0) {
            // No global prefix available
            tr_error("No GP address");
            goto cleanup;
        }
        ns_iovec_t msg_iov[2];
        uint8_t *ptr = relay_frame;
        //Build
        //ADD relay frame vector front of original data
        msghdr.msg_iov = &msg_iov[0];
        msg_iov[0].iov_base = relay_frame;
        msghdr.msg_iovlen = 2;
        //SET Original Data
        msg_iov[1].iov_base = socket_data;
        msg_iov[1].iov_len = msg_len;
        // Relay-forward header: hop count 0, peer = client, link = our GP addr.
        ptr = libdhcpv6_dhcp_relay_msg_write(ptr, DHCPV6_RELAY_FORWARD, 0, src_address.address, gp_address);
        if (relay_srv->add_interface_id_option) {
            ptr = libdhcpv6_option_interface_id_write(ptr, sckt_data->interface_id);
        }
        ptr = libdhcpv6_dhcp_option_header_write(ptr, DHCPV6_OPTION_RELAY, msg_len);
        //Update length of relay vector
        msg_iov[0].iov_len = ptr - relay_frame;
        //Update Neighbour table if necessary
        relay_notify_t *neigh_notify = dhcp_service_notify_find(sckt_data->interface_id);
        if (neigh_notify && neigh_notify->recv_notify_cb) {
            neigh_notify->recv_notify_cb(sckt_data->interface_id, src_address.address);
        }
        //Copy DST address
        memcpy(src_address.address, relay_srv->server_address, 16);
        src_address.type = ADDRESS_IPV6;
        src_address.identifier = DHCPV6_SERVER_PORT;
        tr_debug("Forward Client msg to server");
        // Mark relay-to-server traffic as network control (CS6).
        tc = IP_DSCP_CS6 << IP_TCLASS_DSCP_SHIFT;
    }
    socket_setsockopt(sckt_data->socket_id, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_TCLASS, &tc, sizeof(tc));
    socket_sendmsg(sckt_data->socket_id, &msghdr, NS_MSG_LEGACY0);
cleanup:
    ns_dyn_mem_free(socket_data);
    return;
}
/**
 * Socket callback for the DHCPv6 client socket.
 *
 * Reads the response, matches it to a pending transaction via the 24-bit
 * transaction id and passes the payload (without the 4-byte header) to the
 * transaction's response callback. The transaction is deleted unless the
 * callback returns RET_MSG_WAIT_ANOTHER. A malformed packet is reported to
 * the callback as a failure (status 0, no payload).
 *
 * Fix: the original code invoked msg_tr_ptr->recv_resp_cb on the
 * malformed-packet path without checking it for NULL; a transaction without
 * a response receiver (e.g. a server-side one matched by tr id) would have
 * crashed. The receiver is now validated once, before any use.
 */
void recv_dhcp_client_msg(void *cb_res)
{
    ns_address_t address;
    socket_callback_t *sckt_data;
    msg_tr_t *msg_tr_ptr = NULL;
    uint8_t *msg_ptr = NULL;
    int16_t msg_len = 0;
    uint_fast24_t tr_id = 0;
    int retVal = RET_MSG_ACCEPTED;
    sckt_data = cb_res;
    // Need at least the 4-byte header (type + transaction id).
    if (sckt_data->event_type != SOCKET_DATA || sckt_data->d_len < 4) {
        return;
    }
    tr_debug("dhcp recv response message");
    // read actual message
    msg_ptr = ns_dyn_mem_temporary_alloc(sckt_data->d_len);
    if (msg_ptr == NULL) {
        tr_error("Out of memory");
        goto cleanup;
    }
    msg_len = socket_read(sckt_data->socket_id, &address, msg_ptr, sckt_data->d_len);
    tr_id = common_read_24_bit(&msg_ptr[1]);
    msg_tr_ptr = dhcp_tr_find(tr_id);
    if (msg_tr_ptr == NULL) {
        tr_error("invalid tr id");
        goto cleanup;
    }
    if (msg_tr_ptr->recv_resp_cb == NULL) {
        // Transaction exists but has no response receiver; nothing can
        // consume this message (guards the callback uses below).
        tr_error("no receiver for this message found");
        goto cleanup;
    }
    if (0 != libdhcpv6_message_malformed_check(msg_ptr, msg_len)) {
        // Report the failure to the owner before bailing out.
        msg_tr_ptr->recv_resp_cb(msg_tr_ptr->instance_id, msg_tr_ptr->client_obj_ptr, 0, NULL, 0);
        tr_error("Malformed packet");
        goto cleanup;
    }
    // call receive callback should not modify pointers but library requires
    retVal = msg_tr_ptr->recv_resp_cb(msg_tr_ptr->instance_id, msg_tr_ptr->client_obj_ptr, *msg_ptr, msg_ptr + 4, msg_len - 4);
cleanup:
    ns_dyn_mem_free(msg_ptr);
    if (retVal != RET_MSG_WAIT_ANOTHER) {
        //Transaction is not killed yet
        dhcp_tr_delete(dhcp_tr_find(tr_id));
    }
    return;
}
/**
 * Register a DHCPv6 service instance (server, client or relay agent) on an
 * interface, opening the shared UDP socket for that role if it is not open
 * yet. Returns the allocated instance id (1..MAX_SERVERS-1), an existing
 * relay instance id if the interface already has a relay agent, or 0 on
 * failure.
 */
uint16_t dhcp_service_init(int8_t interface_id, dhcp_instance_type_e instance_type, dhcp_service_receive_req_cb *receive_req_cb)
{
    uint16_t id = 1;
    server_instance_t *srv_ptr;
    if (!dhcp_service_allocate()) {
        tr_error("dhcp Sockets data base alloc fail");
        return 0;
    }
    // Server and relay both use DHCPV6_SERVER_PORT, so only one of the two
    // sockets can be open at a time; the conflict is logged below.
    if (instance_type == DHCP_INSTANCE_SERVER && dhcp_service->dhcp_server_socket < 0) {
        if (dhcp_service->dhcp_relay_socket >= 0) {
            tr_error("dhcp Server socket can't open because Agent open already");
        }
        dhcp_service->dhcp_server_socket = socket_open(SOCKET_UDP, DHCPV6_SERVER_PORT, recv_dhcp_server_msg);
    }
    if (instance_type == DHCP_INTANCE_RELAY_AGENT && dhcp_service->dhcp_relay_socket < 0) {
        if (dhcp_service->dhcp_server_socket >= 0) {
            tr_error("dhcp Relay agent can't open because server open already");
        }
        dhcp_service->dhcp_relay_socket = socket_open(SOCKET_UDP, DHCPV6_SERVER_PORT, recv_dhcp_relay_msg);
    }
    if (instance_type == DHCP_INSTANCE_CLIENT && dhcp_service->dhcp_client_socket < 0) {
        dhcp_service->dhcp_client_socket = socket_open(SOCKET_UDP, DHCPV6_CLIENT_PORT, recv_dhcp_client_msg);
    }
    if (instance_type == DHCP_INSTANCE_SERVER && dhcp_service->dhcp_server_socket < 0) {
        tr_error("No sockets available for DHCP server");
        return 0;
    }
    if (instance_type == DHCP_INSTANCE_CLIENT && dhcp_service->dhcp_client_socket < 0) {
        tr_error("No sockets available for DHCP client");
        return 0;
    }
    if (instance_type == DHCP_INTANCE_RELAY_AGENT) {
        // NOTE(review): unlike the server/client paths, a failed relay socket
        // only logs here and does not return 0 - looks inconsistent; confirm
        // whether registration without a socket is intended.
        if (dhcp_service->dhcp_relay_socket < 0) {
            tr_error("No sockets available for DHCP server");
        }
        // An interface can host only one relay agent; reuse it when present.
        uint16_t temp_id = dhcp_service_relay_interface_get(interface_id);
        if (temp_id) {
            return temp_id;
        }
    }
    // Allocate the lowest free instance id.
    for (; id < MAX_SERVERS; id++) {
        if (dhcp_service_client_find(id) == NULL) {
            break;
        }
    }
    srv_ptr = ns_dyn_mem_alloc(sizeof(server_instance_t));
    if (id == MAX_SERVERS || srv_ptr == NULL) {
        tr_error("Out of server instances");
        ns_dyn_mem_free(srv_ptr);
        return 0;
    }
    if (instance_type == DHCP_INTANCE_RELAY_AGENT) {
        //Allocate Realay Agent
        relay_instance_t *relay_srv = ns_dyn_mem_alloc(sizeof(relay_instance_t));
        if (!relay_srv) {
            tr_error("Out of realy instances");
            ns_dyn_mem_free(srv_ptr);
            return 0;
        }
        // Relay state shares the instance id; it starts deactivated until
        // dhcp_service_relay_instance_enable() is called.
        ns_list_add_to_start(&dhcp_service->relay_list, relay_srv);
        relay_srv->instance_id = id;
        relay_srv->interface_id = interface_id;
        relay_srv->relay_activated = false;
        relay_srv->add_interface_id_option = false;
    }
    ns_list_add_to_start(&dhcp_service->srv_list, srv_ptr);
    srv_ptr->instance_id = id;
    srv_ptr->instance_type = instance_type;
    srv_ptr->interface_id = interface_id;
    srv_ptr->recv_req_cb = receive_req_cb;
    return srv_ptr->instance_id;
}
/* Activate a relay instance and record the server address it forwards to.
 * Unknown instance ids are ignored. */
void dhcp_service_relay_instance_enable(uint16_t instance, uint8_t *server_address)
{
    relay_instance_t *relay = dhcp_service_relay_find(instance);
    if (relay == NULL) {
        return;
    }
    memcpy(relay->server_address, server_address, 16);
    relay->relay_activated = true;
}
/* Enable or disable the interface-id option in relay-forward frames.
 * Unknown instance ids are ignored. */
void dhcp_service_relay_interface_id_option_enable(uint16_t instance, bool enable)
{
    relay_instance_t *relay = dhcp_service_relay_find(instance);
    if (relay == NULL) {
        return;
    }
    relay->add_interface_id_option = enable;
}
/* Return the configured server address of an activated relay instance,
 * or NULL when the instance is unknown or not yet activated. */
uint8_t *dhcp_service_relay_global_addres_get(uint16_t instance)
{
    relay_instance_t *relay = dhcp_service_relay_find(instance);
    if (relay && relay->relay_activated) {
        return relay->server_address;
    }
    return NULL;
}
/**
 * Unregister a service instance: remove its registration (and relay state if
 * it was a relay agent), delete all of its pending transactions, and close
 * any shared socket that no remaining instance needs.
 */
void dhcp_service_delete(uint16_t instance)
{
    server_instance_t *srv_ptr;
    if (dhcp_service == NULL) {
        return;
    }
    srv_ptr = dhcp_service_client_find(instance);
    //TODO delete all transactions
    if (srv_ptr != NULL) {
        ns_list_remove(&dhcp_service->srv_list, srv_ptr);
        if (srv_ptr->instance_type == DHCP_INTANCE_RELAY_AGENT) {
            //Free relay service
            relay_instance_t *relay = dhcp_service_relay_find(instance);
            if (relay) {
                ns_list_remove(&dhcp_service->relay_list, relay);
                ns_dyn_mem_free(relay);
            }
        }
        ns_dyn_mem_free(srv_ptr);
    }
    // Drop every in-flight transaction owned by this instance.
    ns_list_foreach_safe(msg_tr_t, cur_ptr, &dhcp_service->tr_list) {
        if (cur_ptr->instance_id == instance) {
            dhcp_tr_delete(cur_ptr);
        }
    }
    // Count what is left to decide which shared sockets can be closed.
    int8_t server_instances = 0, client_instances = 0, relay_instances = 0;
    ns_list_foreach(server_instance_t, srv, &dhcp_service->srv_list) {
        if (srv->instance_type == DHCP_INSTANCE_SERVER) {
            ++server_instances;
        } else if (srv->instance_type == DHCP_INSTANCE_CLIENT) {
            ++client_instances;
        } else if (srv->instance_type == DHCP_INTANCE_RELAY_AGENT) {
            ++relay_instances;
        }
    }
    // NOTE(review): this closes dhcp_server_socket when neither servers nor
    // relays remain, but dhcp_relay_socket is never closed here - confirm
    // whether that is intentional.
    if ((server_instances == 0 && relay_instances == 0) && dhcp_service->dhcp_server_socket > -1) {
        socket_close(dhcp_service->dhcp_server_socket);
        dhcp_service->dhcp_server_socket = -1;
    }
    if (client_instances == 0 && dhcp_service->dhcp_client_socket > -1) {
        socket_close(dhcp_service->dhcp_client_socket);
        dhcp_service->dhcp_client_socket = -1;
    }
    return;
}
/**
 * Send a response for a previously received request transaction.
 *
 * The transaction id of the original request is written into the message,
 * the message is transmitted (wrapped in a relay-reply if the request came
 * through a relay), and the transaction is deleted. The caller keeps
 * ownership of msg_ptr: it is detached from the transaction before deletion
 * so dhcp_tr_delete() does not free it.
 *
 * \return 0 on success, -1 if the transaction or instance is unknown.
 */
int dhcp_service_send_resp(uint32_t msg_tr_id, uint8_t options, uint8_t *msg_ptr, uint16_t msg_len)
{
    tr_debug("Send DHCPv6 response");
    msg_tr_t *msg_tr_ptr;
    server_instance_t *srv_instance;
    msg_tr_ptr = dhcp_tr_find(msg_tr_id);
    if (msg_tr_ptr == NULL) {
        tr_error("msg_tr_id not found");
        return -1;
    }
    srv_instance = dhcp_service_client_find(msg_tr_ptr->instance_id);
    if (srv_instance == NULL) {
        tr_error("Srv Instance not found");
        return -1;
    }
    // Release any previously attached buffer before adopting the new one.
    ns_dyn_mem_free(msg_tr_ptr->msg_ptr);
    msg_tr_ptr->msg_ptr = msg_ptr;
    msg_tr_ptr->msg_len = msg_len;
    msg_tr_ptr->options = options;
    // set the received transaction id to message.
    common_write_24_bit(msg_tr_ptr->message_tr_id, &msg_tr_ptr->msg_ptr[1]);
    dhcp_service_send_message(msg_tr_ptr);
    msg_tr_ptr->msg_ptr = NULL; // pointer is the responsibility of client
    dhcp_tr_delete(msg_tr_ptr);
    return 0;
}
/**
 * Start a client request transaction: attach the message to a new
 * transaction, initialise RFC 3315 retransmission timers from the message
 * type, stamp the transaction id into the message and transmit (or schedule
 * after delay_tx ticks).
 *
 * On success the transaction takes ownership of msg_ptr and the 24-bit
 * transaction id is returned. On failure 0 is returned and msg_ptr remains
 * owned by the caller.
 *
 * Fix: on the combined failure check the transaction already created by
 * dhcp_tr_create() was leaked (left in tr_list forever). It is now deleted
 * (dhcp_tr_delete() is NULL-safe, and tr->msg_ptr is still NULL at that
 * point, so the caller's buffer is not freed).
 */
uint32_t dhcp_service_send_req(uint16_t instance_id, uint8_t options, void *ptr, const uint8_t addr[static 16], uint8_t *msg_ptr, uint16_t msg_len, dhcp_service_receive_resp_cb *receive_resp_cb, uint16_t delay_tx)
{
    tr_debug("Send DHCPv6 request");
    msg_tr_t *msg_tr_ptr;
    server_instance_t *srv_ptr;
    srv_ptr = dhcp_service_client_find(instance_id);
    msg_tr_ptr = dhcp_tr_create();
    // A message needs at least the 4-byte header plus one option byte.
    if (msg_tr_ptr == NULL || srv_ptr == NULL || msg_ptr == NULL || receive_resp_cb == NULL || msg_len < 5) {
        tr_error("Request sending failed");
        // Do not leak a transaction that was successfully created.
        dhcp_tr_delete(msg_tr_ptr);
        return 0;
    }
    msg_tr_ptr->msg_ptr = msg_ptr;
    msg_tr_ptr->msg_len = msg_len;
    msg_tr_ptr->options = options;
    msg_tr_ptr->client_obj_ptr = ptr;
    memcpy(msg_tr_ptr->addr.address, addr, 16);
    msg_tr_ptr->addr.identifier = DHCPV6_SERVER_PORT;
    msg_tr_ptr->addr.type = ADDRESS_IPV6;
    msg_tr_ptr->interface_id = srv_ptr->interface_id;
    msg_tr_ptr->instance_id = instance_id;
    msg_tr_ptr->socket = dhcp_service->dhcp_client_socket;
    msg_tr_ptr->recv_resp_cb = receive_resp_cb;
    msg_tr_ptr->delayed_tx = delay_tx;
    msg_tr_ptr->first_transmit_time = 0;
    msg_tr_ptr->transmit_time = 0;
    // Retransmission parameters depend on the DHCPv6 message type (byte 0).
    dhcp_tr_set_retry_timers(msg_tr_ptr, msg_tr_ptr->msg_ptr[0]);
    common_write_24_bit(msg_tr_ptr->msg_tr_id, &msg_tr_ptr->msg_ptr[1]);
    dhcp_service_send_message(msg_tr_ptr);
    return msg_tr_ptr->msg_tr_id;
}
/* Override a transaction's retransmission parameters. Inputs are in
 * seconds; they are stored in 1/10 s ticks, with RFC 3315 +/-10 %
 * randomisation applied to the initial timeout. Unknown ids are ignored. */
void dhcp_service_set_retry_timers(uint32_t msg_tr_id, uint16_t timeout_init, uint16_t timeout_max, uint8_t retrans_max)
{
    msg_tr_t *tr = dhcp_tr_find(msg_tr_id);
    if (tr == NULL) {
        return;
    }
    tr->timeout_init = randLIB_randomise_base(timeout_init * 10, RAND1_LOW, RAND1_HIGH);
    tr->timeout = tr->timeout_init;
    tr->timeout_max = timeout_max * 10;
    tr->retrans_max = retrans_max;
}
/* Redirect an in-flight transaction to a new server address.
 * Unknown ids are ignored. */
void dhcp_service_update_server_address(uint32_t msg_tr_id, uint8_t *server_address)
{
    msg_tr_t *tr = dhcp_tr_find(msg_tr_id);
    if (tr == NULL) {
        return;
    }
    memcpy(tr->addr.address, server_address, 16);
}
/* Time elapsed since the transaction's last transmission, in monotonic
 * ticks. Returns 0 for unknown ids or never-transmitted transactions. */
uint32_t dhcp_service_rtt_get(uint32_t msg_tr_id)
{
    msg_tr_t *tr = dhcp_tr_find(msg_tr_id);
    if (tr == NULL || tr->transmit_time == 0) {
        return 0;
    }
    return protocol_core_monotonic_time - tr->transmit_time;
}
/* Cancel a single transaction by id; harmless if the service or the
 * transaction does not exist. */
void dhcp_service_req_remove(uint32_t msg_tr_id)
{
    if (dhcp_service == NULL) {
        return;
    }
    dhcp_tr_delete(dhcp_tr_find(msg_tr_id));
}
void dhcp_service_req_remove_all(void *msg_class_ptr)
{
if (dhcp_service) {
ns_list_foreach_safe(msg_tr_t, cur_ptr, &dhcp_service->tr_list) {
if (cur_ptr->client_obj_ptr == msg_class_ptr) {
dhcp_tr_delete(cur_ptr);
}
}
}
}
/**
 * Transmit (or retransmit) a transaction's message.
 *
 * Updates the elapsed-time option from the first transmission timestamp,
 * applies per-transaction socket options (short-address preference,
 * multicast hop limit, interface select), and sends either a relay-reply
 * (when the transaction recorded a relay-forward envelope) via scatter/
 * gather, or a plain datagram. A transaction with delayed_tx pending is
 * not sent yet; the timer tick sends it later.
 */
void dhcp_service_send_message(msg_tr_t *msg_tr_ptr)
{
    int8_t retval;
    int16_t multicast_hop_limit = -1;
    const uint32_t address_pref = SOCKET_IPV6_PREFER_SRC_6LOWPAN_SHORT;
    dhcp_options_msg_t elapsed_time;
    // Refresh the elapsed-time option (hundredths of a second, saturating
    // at 0xffff) if the message carries one and has been sent before.
    if (msg_tr_ptr->first_transmit_time && libdhcpv6_message_option_discover((msg_tr_ptr->msg_ptr + 4), (msg_tr_ptr->msg_len - 4), DHCPV6_ELAPSED_TIME_OPTION, &elapsed_time) == 0 &&
            elapsed_time.len == 2) {
        uint32_t t = protocol_core_monotonic_time - msg_tr_ptr->first_transmit_time; // time in 1/10s ticks
        uint16_t cs;
        if (t > 0xffff / 10) {
            cs = 0xffff;
        } else {
            cs = (uint16_t) t * 10;
        }
        common_write_16_bit(cs, elapsed_time.msg_ptr);
    }
    if ((msg_tr_ptr->options & TX_OPT_USE_SHORT_ADDR) == TX_OPT_USE_SHORT_ADDR) {
        socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_ADDR_PREFERENCES, &address_pref, sizeof address_pref);
    }
    if ((msg_tr_ptr->options & TX_OPT_MULTICAST_HOP_LIMIT_64) == TX_OPT_MULTICAST_HOP_LIMIT_64) {
        multicast_hop_limit = 64;
    }
    socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_MULTICAST_HOPS, &multicast_hop_limit, sizeof multicast_hop_limit);
    socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_INTERFACE_SELECT, &msg_tr_ptr->interface_id, sizeof(int8_t));
    if (msg_tr_ptr->relay_start) {
        //Build Relay Reply only server do this
        // Mark relay traffic as network control (CS6).
        int16_t tc = IP_DSCP_CS6 << IP_TCLASS_DSCP_SHIFT;
        socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_TCLASS, &tc, sizeof(tc));
        ns_iovec_t data_vector[4];
        uint8_t relay_header[4];
        libdhcpv6_dhcp_option_header_write(relay_header, DHCPV6_OPTION_RELAY, msg_tr_ptr->msg_len);
        ns_msghdr_t msghdr;
        msghdr.msg_iovlen = 0;
        // Destination = peer address stored in the relay-forward envelope.
        memcpy(msg_tr_ptr->addr.address, msg_tr_ptr->relay_start + 2, 16);
        msg_tr_ptr->addr.identifier = DHCPV6_SERVER_PORT;
        //SET IOV vectors
        //Relay Reply
        data_vector[msghdr.msg_iovlen].iov_base = (void *) msg_tr_ptr->relay_start;
        data_vector[msghdr.msg_iovlen].iov_len = DHCPV6_RELAY_LENGTH;
        msghdr.msg_iovlen++;
        // Echo the interface-id option back if the request carried one
        // (pointer backs up 4 bytes to include the option header).
        if (msg_tr_ptr->opt_interface_id) {
            data_vector[msghdr.msg_iovlen].iov_base = (void *)(msg_tr_ptr->opt_interface_id - 4);
            data_vector[msghdr.msg_iovlen].iov_len = msg_tr_ptr->opt_interface_id_length + 4;
            msghdr.msg_iovlen++;
        }
        //Relay reply header
        data_vector[msghdr.msg_iovlen].iov_base = (void *) relay_header;
        data_vector[msghdr.msg_iovlen].iov_len = 4;
        msghdr.msg_iovlen++;
        //DHCPV normal message vector
        data_vector[msghdr.msg_iovlen].iov_base = (void *) msg_tr_ptr->msg_ptr;
        data_vector[msghdr.msg_iovlen].iov_len = msg_tr_ptr->msg_len;
        msghdr.msg_iovlen++;
        //Set message name
        msghdr.msg_name = (void *) &msg_tr_ptr->addr;
        msghdr.msg_namelen = sizeof(ns_address_t);
        msghdr.msg_iov = &data_vector[0];
        //No ancillary data
        msghdr.msg_control = NULL;
        msghdr.msg_controllen = 0;
        // Rewrite the reused envelope's type byte: forward -> reply.
        uint8_t *ptr = msg_tr_ptr->relay_start;
        *ptr = DHCPV6_RELAY_REPLY;
        if (msg_tr_ptr->delayed_tx) {
            // Delayed transmission pending; the timer tick will send it.
            retval = 0;
        } else {
            retval = socket_sendmsg(msg_tr_ptr->socket, &msghdr, NS_MSG_LEGACY0);
        }
    } else {
        if (msg_tr_ptr->delayed_tx) {
            // Delayed transmission pending; the timer tick will send it.
            retval = 0;
        } else {
            int16_t tc = 0;
            socket_setsockopt(msg_tr_ptr->socket, SOCKET_IPPROTO_IPV6, SOCKET_IPV6_TCLASS, &tc, sizeof(tc));
            retval = socket_sendto(msg_tr_ptr->socket, &msg_tr_ptr->addr, msg_tr_ptr->msg_ptr, msg_tr_ptr->msg_len);
            // Timestamps use 1 instead of 0 so 0 can mean "never sent".
            msg_tr_ptr->transmit_time = protocol_core_monotonic_time ? protocol_core_monotonic_time : 1;
            if (msg_tr_ptr->first_transmit_time == 0 && retval == 0) {
                //Mark first pushed message timestamp
                msg_tr_ptr->first_transmit_time = protocol_core_monotonic_time ? protocol_core_monotonic_time : 1;
            }
        }
    }
    if (retval != 0) {
        tr_warn("dhcp service socket_sendto fails: %i", retval);
    } else {
        tr_info("dhcp service socket_sendto %s", trace_ipv6(msg_tr_ptr->addr.address));
    }
}
/* Advance the timers of every queued DHCP transaction by the elapsed ticks.
 *
 * Two per-transaction timers are serviced:
 *  - delayed_tx: remaining delay before the *first* transmission; when it
 *    expires the message is sent.
 *  - timeout: retransmission timer; on expiry the message is re-sent and the
 *    timeout is grown per the RFC 3315 formula below, capped at the 16-bit
 *    maximum and (when configured) at timeout_max (MRT).
 *
 * ticks: number of timer ticks elapsed since the previous call.
 * Returns true while at least one transaction still needs the periodic timer.
 */
bool dhcp_service_timer_tick(uint16_t ticks)
{
    bool activeTimerNeed = false;
    /* _safe variant: dhcp_tr_delete() below may unlink the current entry. */
    ns_list_foreach_safe(msg_tr_t, cur_ptr, &dhcp_service->tr_list) {
        if (cur_ptr->delayed_tx) {
            /* Message is still waiting for its initial (delayed) transmission. */
            activeTimerNeed = true;
            if (cur_ptr->delayed_tx <= ticks) {
                cur_ptr->delayed_tx = 0;
                dhcp_service_send_message(cur_ptr);
            } else {
                cur_ptr->delayed_tx -= ticks;
            }
            continue;
        }
        if (cur_ptr->timeout == 0) {
            /* No retransmission timer armed for this transaction. */
            continue;
        }
        if (cur_ptr->timeout <= ticks) {
            activeTimerNeed = true;
            cur_ptr->retrans++;
            if (cur_ptr->retrans_max != 0 && cur_ptr->retrans >= cur_ptr->retrans_max) {
                // retransmission count exceeded: report failure to the owner
                // (NULL payload, zero length) and drop the transaction.
                cur_ptr->recv_resp_cb(cur_ptr->instance_id, cur_ptr->client_obj_ptr, 0, NULL, 0);
                dhcp_tr_delete(cur_ptr);
                continue;
            }
            dhcp_service_send_message(cur_ptr);
            // RFC 3315 says:
            // RT = 2*RTprev + RAND*RTprev,
            // We calculate this as
            // RT = RTprev + (1+RAND)*RTprev
            cur_ptr->timeout = cur_ptr->timeout_init + randLIB_randomise_base(cur_ptr->timeout_init, RAND1_LOW, RAND1_HIGH);
            // Catch 16-bit integer overflow
            if (cur_ptr->timeout < cur_ptr->timeout_init) {
                cur_ptr->timeout = 0xFFFF;
            }
            // Check against MRT
            if (cur_ptr->timeout_max != 0 && cur_ptr->timeout > cur_ptr->timeout_max) {
                cur_ptr->timeout = randLIB_randomise_base(cur_ptr->timeout_max, RAND1_LOW, RAND1_HIGH);
            }
            cur_ptr->timeout_init = cur_ptr->timeout;
        } else {
            cur_ptr->timeout -= ticks;
            activeTimerNeed = true;
        }
    }
    return activeTimerNeed;
}
/* Register (or replace) the link-local relay-neighbour notify callback for
 * an interface.
 *
 * interface_id: network interface the callback applies to.
 * notify_cb:    callback stored in the per-interface notify entry.
 * Returns 0 on success, -1 when the service is not initialised or when a new
 * notify entry cannot be allocated.
 */
int dhcp_service_link_local_rx_cb_set(int8_t interface_id, dhcp_relay_neighbour_cb *notify_cb)
{
    relay_notify_t *entry;

    if (!dhcp_service) {
        return -1;
    }

    entry = dhcp_service_notify_find(interface_id);
    if (entry == NULL) {
        /* First registration for this interface: allocate and link a new entry. */
        entry = ns_dyn_mem_alloc(sizeof(relay_notify_t));
        if (entry == NULL) {
            return -1;
        }
        entry->interface_id = interface_id;
        ns_list_add_to_start(&dhcp_service->notify_list, entry);
    }
    entry->recv_notify_cb = notify_cb;
    return 0;
}
#else
/* DHCPv6 service disabled by the build configuration (the matching #if is
 * above this excerpt): provide no-op stubs so callers still link.  Stubs
 * return the "failure"/"nothing to do" value of each API. */

/* Stub: service cannot be created; 0 means no instance was allocated. */
uint16_t dhcp_service_init(int8_t interface_id, dhcp_instance_type_e instance_type, dhcp_service_receive_req_cb *receive_req_cb)
{
    (void)interface_id;
    (void)instance_type;
    (void)receive_req_cb;
    return 0;
}

/* Stub: nothing to delete. */
void dhcp_service_delete(uint16_t instance)
{
    (void)instance;
}

/* Stub: relay mode cannot be enabled. */
void dhcp_service_relay_instance_enable(uint16_t instance, uint8_t *server_address)
{
    (void)instance;
    (void)server_address;
}

/* Stub: interface-id option cannot be enabled. */
void dhcp_service_relay_interface_id_option_enable(uint16_t instance, bool enable)
{
    (void)instance;
    (void)enable;
}

/* Stub: sending a response always fails. */
int dhcp_service_send_resp(uint32_t msg_tr_id, uint8_t options, uint8_t *msg_ptr, uint16_t msg_len)
{
    (void)msg_tr_id;
    (void)options;
    (void)msg_ptr;
    (void)msg_len;
    return -1;
}

/* Stub: no transaction is created; 0 means the request was not queued. */
uint32_t dhcp_service_send_req(uint16_t instance_id, uint8_t options, void *ptr, const uint8_t addr[static 16], uint8_t *msg_ptr, uint16_t msg_len, dhcp_service_receive_resp_cb *receive_resp_cb, uint16_t delay_tx)
{
    (void)instance_id;
    (void)options;
    (void)ptr;
    (void)addr;
    (void)msg_ptr;
    (void)msg_len;
    (void)receive_resp_cb;
    (void)delay_tx;
    return 0;
}

/* Stub: no retry timers to configure. */
void dhcp_service_set_retry_timers(uint32_t msg_tr_id, uint16_t timeout_init, uint16_t timeout_max, uint8_t retrans_max)
{
    (void)msg_tr_id;
    (void)timeout_init;
    (void)timeout_max;
    (void)retrans_max;
}

/* Stub: no pending request to remove. */
void dhcp_service_req_remove(uint32_t msg_tr_id)
{
    (void)msg_tr_id;
}

/* Stub: no timers are ever active. */
bool dhcp_service_timer_tick(uint16_t ticks)
{
    (void)ticks;
    return false;
}

/* Stub: no pending requests to remove. */
void dhcp_service_req_remove_all(void *msg_class_ptr)
{
    (void)msg_class_ptr;
}

/* Stub: callback registration always fails. */
int dhcp_service_link_local_rx_cb_set(int8_t interface_id, dhcp_relay_neighbour_cb *notify_cb)
{
    (void) interface_id;
    (void) notify_cb;
    return -1;
}
#endif
| adfernandes/mbed | connectivity/nanostack/sal-stack-nanostack/source/libDHCPv6/dhcp_service_api.c | C | apache-2.0 | 36,946 |
#
# Author:: Prajakta Purohit (prajakta@opscode.com)
# Copyright:: Copyright (c) 2012 Opscode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'spec_helper'
# Unit tests for Chef::Win32::Registry.  Win32 registry constants are faked in
# the before block so these examples also run on non-Windows platforms.
#
# Fixes applied in this revision:
# - several examples compared a result with `== true/false` and discarded the
#   value, asserting nothing; they now use `.should ==` so a regression fails.
# - the has_subkeys? "throws an exception" example exercised set_value
#   (copy-paste error); it now calls has_subkeys?.
# - the delete_value "raises an exception" example never asserted the raise;
#   it now uses the raise_error matcher.
describe Chef::Provider::RegistryKey do
  let(:value1) { { :name => "one", :type => :string, :data => "1" } }
  let(:key_path) { 'HKCU\Software\OpscodeNumbers' }
  let(:key) { 'Software\OpscodeNumbers' }
  let(:key_parent) { 'Software' }
  let(:key_to_delete) { 'OpscodeNumbers' }
  let(:sub_key) {'OpscodePrimes'}
  let(:missing_key_path) {'HKCU\Software'}

  before(:each) do
    Chef::Win32::Registry.any_instance.stub(:machine_architecture).and_return(:x86_64)
    @registry = Chef::Win32::Registry.new()

    # Making the values for registry constants available on unix
    Object.send(:remove_const, 'Win32') if defined?(Win32)
    Win32 = Module.new
    Win32::Registry = Class.new
    Win32::Registry::KEY_SET_VALUE = 0x0002
    Win32::Registry::KEY_QUERY_VALUE = 0x0001
    Win32::Registry::KEY_WRITE = 0x00020000 | 0x0002 | 0x0004
    Win32::Registry::KEY_READ = 0x00020000 | 0x0001 | 0x0008 | 0x0010
    Win32::Registry::Error = Class.new(RuntimeError)

    @hive_mock = double("::Win32::Registry::HKEY_CURRENT_USER")
    @reg_mock = double("reg")
  end

  describe "get_values" do
    it "gets all values for a key if the key exists" do
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:map)
      @registry.get_values(key_path)
    end

    it "throws an exception if key does not exist" do
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:key_exists!).with(key_path).and_raise(Chef::Exceptions::Win32RegKeyMissing)
      lambda{@registry.get_values(key_path)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end
  end

  describe "set_value" do
    it "does nothing if key and hive and value exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(true)
      @registry.should_receive(:data_exists?).with(key_path, value1).and_return(true)
      @registry.set_value(key_path, value1)
    end

    it "updates value if key and hive and value exist, but data is different" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(true)
      @registry.should_receive(:data_exists?).with(key_path, value1).and_return(false)
      @hive_mock.should_receive(:open).with(key, Win32::Registry::KEY_SET_VALUE | ::Win32::Registry::KEY_QUERY_VALUE | @registry.registry_system_architecture).and_yield(@reg_mock)
      @registry.should_receive(:get_type_from_name).with(:string).and_return(1)
      @reg_mock.should_receive(:write).with("one", 1, "1")
      @registry.set_value(key_path, value1)
    end

    it "creates value if the key exists and the value does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(false)
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_SET_VALUE | ::Win32::Registry::KEY_QUERY_VALUE | @registry.registry_system_architecture).and_yield(@reg_mock)
      @registry.should_receive(:get_type_from_name).with(:string).and_return(1)
      @reg_mock.should_receive(:write).with("one", 1, "1")
      @registry.set_value(key_path, value1)
    end

    it "should raise an exception if the key does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_raise(Chef::Exceptions::Win32RegKeyMissing)
      lambda {@registry.set_value(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end
  end

  describe "delete_value" do
    it "deletes value if value exists" do
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_SET_VALUE | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:delete_value).with("one").and_return(true)
      @registry.delete_value(key_path, value1)
    end

    it "raises an exception if the key does not exist" do
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_raise(Chef::Exceptions::Win32RegKeyMissing)
      # FIX: previously the call was made without asserting that it raises.
      lambda {@registry.delete_value(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end

    it "does nothing if the value does not exist" do
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(false)
      @registry.delete_value(key_path, value1)
    end
  end

  describe "create_key" do
    it "creates key if intermediate keys are missing and recursive is set to true" do
      @registry.should_receive(:keys_missing?).with(key_path).and_return(true)
      @registry.should_receive(:create_missing).with(key_path)
      @registry.should_receive(:key_exists?).with(key_path).and_return(false)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:create).with(key, ::Win32::Registry::KEY_WRITE | @registry.registry_system_architecture)
      @registry.create_key(key_path, true)
    end

    it "raises an exception if intermediate keys are missing and recursive is set to false" do
      @registry.should_receive(:keys_missing?).with(key_path).and_return(true)
      lambda{@registry.create_key(key_path, false)}.should raise_error(Chef::Exceptions::Win32RegNoRecursive)
    end

    it "does nothing if the key exists" do
      @registry.should_receive(:keys_missing?).with(key_path).and_return(true)
      @registry.should_receive(:create_missing).with(key_path)
      @registry.should_receive(:key_exists?).with(key_path).and_return(true)
      @registry.create_key(key_path, true)
    end

    it "create key if intermediate keys not missing and recursive is set to false" do
      @registry.should_receive(:keys_missing?).with(key_path).and_return(false)
      @registry.should_receive(:key_exists?).with(key_path).and_return(false)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:create).with(key, ::Win32::Registry::KEY_WRITE | @registry.registry_system_architecture)
      @registry.create_key(key_path, false)
    end

    it "create key if intermediate keys not missing and recursive is set to true" do
      @registry.should_receive(:keys_missing?).with(key_path).and_return(false)
      @registry.should_receive(:key_exists?).with(key_path).and_return(false)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:create).with(key, ::Win32::Registry::KEY_WRITE | @registry.registry_system_architecture)
      @registry.create_key(key_path, true)
    end
  end

  describe "delete_key", :windows_only do
    it "deletes key if it has subkeys and recursive is set to true" do
      @registry.should_receive(:key_exists?).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:has_subkeys?).with(key_path).and_return(true)
      @registry.should_receive(:get_subkeys).with(key_path).and_return([sub_key])
      @registry.should_receive(:key_exists?).with(key_path+"\\"+sub_key).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path+"\\"+sub_key).and_return([@hive_mock, key+"\\"+sub_key])
      @registry.should_receive(:has_subkeys?).with(key_path+"\\"+sub_key).and_return(false)
      @registry.should_receive(:delete_key_ex).twice
      @registry.delete_key(key_path, true)
    end

    it "raises an exception if it has subkeys but recursive is set to false" do
      @registry.should_receive(:key_exists?).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:has_subkeys?).with(key_path).and_return(true)
      lambda{@registry.delete_key(key_path, false)}.should raise_error(Chef::Exceptions::Win32RegNoRecursive)
    end

    it "deletes key if the key exists and has no subkeys" do
      @registry.should_receive(:key_exists?).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:has_subkeys?).with(key_path).and_return(false)
      @registry.should_receive(:delete_key_ex)
      @registry.delete_key(key_path, true)
    end
  end

  describe "key_exists?" do
    it "returns true if key_exists" do
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @registry.key_exists?(key_path).should == true
    end

    it "returns false if key does not exist" do
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_raise(::Win32::Registry::Error)
      @registry.key_exists?(key_path).should == false
    end
  end

  describe "key_exists!" do
    it "throws an exception if the key_parent does not exist" do
      @registry.should_receive(:key_exists?).with(key_path).and_return(false)
      lambda{@registry.key_exists!(key_path)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end
  end

  describe "hive_exists?" do
    it "returns true if the hive exists" do
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      # FIX: was a bare `== true` comparison whose result was discarded.
      @registry.hive_exists?(key_path).should == true
    end

    it "returns false if the hive does not exist" do
      @registry.should_receive(:get_hive_and_key).with(key_path).and_raise(Chef::Exceptions::Win32RegHiveMissing)
      # FIX: was a bare `== false` comparison whose result was discarded.
      @registry.hive_exists?(key_path).should == false
    end
  end

  describe "has_subkeys?" do
    it "returns true if the key has subkeys" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:each_key).and_yield(key)
      # FIX: was a bare `== true` comparison whose result was discarded.
      @registry.has_subkeys?(key_path).should == true
    end

    it "returns false if the key does not have subkeys" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:each_key).and_return(no_args())
      @registry.has_subkeys?(key_path).should == false
    end

    it "throws an exception if the key does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_raise(Chef::Exceptions::Win32RegKeyMissing)
      # FIX: previously exercised set_value (copy-paste from another group).
      lambda {@registry.has_subkeys?(key_path)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end
  end

  describe "get_subkeys" do
    it "returns the subkeys if they exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:each_key).and_yield(sub_key)
      @registry.get_subkeys(key_path)
    end
  end

  describe "value_exists?" do
    it "throws an exception if the key does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_raise(Chef::Exceptions::Win32RegKeyMissing)
      lambda {@registry.value_exists?(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end

    it "returns true if the value exists" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:any?).and_yield("one")
      # FIX: was a bare `== true` comparison whose result was discarded.
      @registry.value_exists?(key_path, value1).should == true
    end

    it "returns false if the value does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:any?).and_yield(no_args())
      # FIX: was a bare `== false` comparison whose result was discarded.
      @registry.value_exists?(key_path, value1).should == false
    end
  end

  describe "data_exists?" do
    it "throws an exception if the key does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_raise(Chef::Exceptions::Win32RegKeyMissing)
      lambda {@registry.data_exists?(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegKeyMissing)
    end

    it "returns true if the data exists" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @registry.should_receive(:get_type_from_name).with(:string).and_return(1)
      @reg_mock.should_receive(:each).with(no_args()).and_yield("one", 1, "1")
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @registry.data_exists?(key_path, value1).should == true
    end

    it "returns false if the data does not exist" do
      @registry.should_receive(:key_exists!).with(key_path).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @registry.should_receive(:get_type_from_name).with(:string).and_return(1)
      @reg_mock.should_receive(:each).with(no_args()).and_yield("one", 1, "2")
      @registry.data_exists?(key_path, value1).should == false
    end
  end

  describe "value_exists!" do
    it "does nothing if the value exists" do
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(true)
      @registry.value_exists!(key_path, value1)
    end

    it "throws an exception if the value does not exist" do
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(false)
      lambda{@registry.value_exists!(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegValueMissing)
    end
  end

  describe "data_exists!" do
    it "does nothing if the data exists" do
      @registry.should_receive(:data_exists?).with(key_path, value1).and_return(true)
      @registry.data_exists!(key_path, value1)
    end

    it "throws an exception if the data does not exist" do
      @registry.should_receive(:data_exists?).with(key_path, value1).and_return(false)
      lambda{@registry.data_exists!(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegDataMissing)
    end
  end

  describe "type_matches?" do
    it "returns true if type matches" do
      @registry.should_receive(:value_exists!).with(key_path, value1).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @registry.should_receive(:get_type_from_name).with(:string).and_return(1)
      @reg_mock.should_receive(:each).and_yield("one", 1)
      @registry.type_matches?(key_path, value1).should == true
    end

    it "returns false if type does not match" do
      @registry.should_receive(:value_exists!).with(key_path, value1).and_return(true)
      @registry.should_receive(:get_hive_and_key).with(key_path).and_return([@hive_mock, key])
      @hive_mock.should_receive(:open).with(key, ::Win32::Registry::KEY_READ | @registry.registry_system_architecture).and_yield(@reg_mock)
      @reg_mock.should_receive(:each).and_yield("two", 2)
      @registry.type_matches?(key_path, value1).should == false
    end

    it "throws an exception if value does not exist" do
      @registry.should_receive(:value_exists?).with(key_path, value1).and_return(false)
      lambda{@registry.type_matches?(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegValueMissing)
    end
  end

  describe "type_matches!" do
    it "does nothing if the type_matches" do
      @registry.should_receive(:type_matches?).with(key_path, value1).and_return(true)
      @registry.type_matches!(key_path, value1)
    end

    it "throws an exception if the type does not match" do
      @registry.should_receive(:type_matches?).with(key_path, value1).and_return(false)
      lambda{@registry.type_matches!(key_path, value1)}.should raise_error(Chef::Exceptions::Win32RegTypesMismatch)
    end
  end

  describe "keys_missing?" do
    it "returns true if the keys are missing" do
      @registry.should_receive(:key_exists?).with(missing_key_path).and_return(false)
      @registry.keys_missing?(key_path).should == true
    end

    it "returns false if no keys in the path are missing" do
      @registry.should_receive(:key_exists?).with(missing_key_path).and_return(true)
      @registry.keys_missing?(key_path).should == false
    end
  end
end
| ccope/chef | spec/unit/registry_helper_spec.rb | Ruby | apache-2.0 | 19,296 |
#!/usr/bin/env jruby
require 'jrubyfx'
##
# Example showing a tree_view and also how complicated you can tailor
# behavior of a tree_view. In this example we allow you to drag and drop
# tree_items around. There are three things you can see in d&d here:
# 1. drag onto other tree item will put it as a child in that tree item
# 2. drag and drop to rearrange items
# 3. drag and drop into another window...it should paste the contents there
#
# This example also allows you to edit your tree and change the text of
# existing contents. Just double click the item to edit it and hit escape
# to cancel or return to save the new name.
#
# A JavaFX TreeCell that supports editing (double-click, Enter/Escape) and
# drag-and-drop of tree items: dropping onto the middle of a cell re-parents
# the dragged item into it; dropping near the top/bottom edge rearranges
# siblings.
class DraggableTreeCell < Java::javafx::scene::control::TreeCell
  include JRubyFX::DSL

  # Fraction of the cell height (top and bottom bands) treated as the
  # "rearrange" zone; drops inside the middle band become a re-parent.
  SELECTION_PERCENT = 0.25

  class << self
    # Drag state shared by every cell of the tree:
    #   drag_item - the TreeItem currently being dragged
    #   drop_type - :drop_into (re-parent) or :rearrange, decided on drag-over
    attr_accessor :drag_item, :drop_type
  end

  def initialize()
    super
    # While hovering: decide re-parent vs rearrange, show an inner shadow as
    # feedback for re-parenting, and accept MOVE transfers.
    set_on_drag_over do |event|
      if !child_of_target? && !over_self?
        if drop_into_range? y_percentage(event)
          set_effect inner_shadow(offset_x: 1.0)
          self.class.drop_type = :drop_into
        else
          set_effect nil
          self.class.drop_type = :rearrange
        end
        event.accept_transfer_modes :move
      end
    end

    # Drag start: remember the dragged item and put its text on the dragboard
    # (so a drop into another window can paste the contents).
    set_on_drag_detected do |event|
      drag_item = tree_item
      if drag_item
        content = clipboard_content { put_string drag_item.value }
        tree_view.start_drag_and_drop(TransferMode::MOVE).set_content content
        self.class.drag_item = drag_item
      end
      event.consume
    end

    # Drop: apply whichever action was decided during drag-over, then clear
    # the shared drag state.
    set_on_drag_dropped do |event|
      if drag_item && tree_item
        drop_into if drop_type == :drop_into
        rearrange(event) if drop_type == :rearrange
        self.class.drag_item = nil
        event.drop_completed = true
      end
      event.consume
    end

    # Remove the drop-into highlight when the drag leaves this cell.
    set_on_drag_exited do |event|
      set_effect nil
    end
  end

  # Vertical position of the mouse within this cell, from 0.0 (top edge)
  # to 1.0 (bottom edge).
  def y_percentage(event)
    y = event.scene_y - local_to_scene(0, 0).y
    y == 0 ? 0 : y / height
  end

  # True when this cell's item is the dragged item itself or lies inside the
  # dragged item's subtree (walks up the ancestor chain); such a drop would
  # create a cycle and must be rejected.
  def child_of_target?(parent = tree_item)
    return true if drag_item == parent
    return false if !parent || !parent.parent
    child_of_target?(parent.parent)
  end

  # Re-parent: move the dragged item under this cell's item and expand it.
  def drop_into
    if !child_of_target? && !over_self?
      drag_item.parent.children.remove(drag_item)
      tree_item.children.add(drag_item)
      tree_item.expanded = true
    end
  end

  # True when the hover position falls in the middle band of the cell.
  def drop_into_range?(percent)
    percent >= SELECTION_PERCENT && percent <= (1-SELECTION_PERCENT)
  end

  # True when this cell's item is the dragged item's current parent
  # (dropping there would be a no-op).
  def over_self?
    drag_item.parent == tree_item
  end

  # JavaFX override: render text normally, or show the edit TextField while
  # the cell is being edited.
  def updateItem(item, empty)
    super(item, empty);
    if empty
      set_text nil
      set_graphic nil
    else
      if editing?
        @text_field.text = get_string if @text_field
        set_text nil
        set_graphic @text_field
      else
        set_text get_string
        set_graphic tree_item.graphic
      end
    end
  end

  # Reader for the class-level drag state (convenience inside instances).
  def drag_item
    self.class.drag_item
  end

  # Reader for the class-level drop type (convenience inside instances).
  def drop_type
    self.class.drop_type
  end

  # Sibling rearrange: remove the dragged item from its old parent and
  # re-insert it among this cell's siblings at the drop position.  When this
  # cell is the tree root, the item is inserted at index 0 of the root.
  def rearrange(event)
    parent = tree_item.parent
    unless parent # root of tree view
      parent = tree_item
      where = 0
    end
    drag_item.parent.children.remove(drag_item)
    # Snapshot the sibling list, then rebuild it from `where` onward with the
    # dragged item spliced in (set overwrites, so the saved tail is re-added).
    saved_items = parent.children.to_a
    unless where # where already deduced from root being view_item
      where = saved_items.find_index { |e| e == tree_item }
      # Dropping below the top band means "after this item".
      where += 1 if y_percentage(event) > SELECTION_PERCENT
    end
    if (where >= saved_items.size)
      parent.children.add(drag_item)
    else
      parent.children.set(where, drag_item)
      where.upto(saved_items.size - 2) do |i|
        parent.children.set(i+1, saved_items[i])
      end
      parent.children.add(saved_items[saved_items.size - 1])
    end
  end

  #### These methods are part of the code to make the tree editable

  # JavaFX override: swap in a TextField when editing begins.
  def startEdit
    super
    create_text_field unless @text_field
    set_text nil
    set_graphic @text_field
    @text_field.select_all
  end

  # JavaFX override: restore the plain text display when editing is cancelled.
  def cancelEdit
    super
    set_text get_item
    set_graphic tree_item.graphic
  end

  # The cell's value as a string ('' when the item is nil).
  def get_string
    get_item ? get_item.to_s : ''
  end

  # Lazily build the edit TextField: Enter commits the new name, Escape
  # cancels the edit.
  def create_text_field
    @text_field = TextField.new(get_string)
    @text_field.set_on_key_released do |event|
      if event.code == KeyCode::ENTER
        commitEdit(@text_field.text)
      elsif event.code == KeyCode::ESCAPE
        cancelEdit
      end
    end
  end
end
# Minimal JRubyFX application demonstrating DraggableTreeCell: a 300x300
# window containing an editable tree ("Root" with five "File N" children)
# whose cells come from the cell factory below.
class SimpleTreeView < JRubyFX::Application
  def start(stage)
    with(stage, width: 300, height: 300, title: 'Simple Tree View') do
      layout_scene(:blue) do
        stack_pane(padding: insets(30)) do
          # editable: enables double-click renaming; cell_factory wires each
          # cell to the drag-and-drop implementation above.
          tree_view(editable: true, cell_factory: proc { DraggableTreeCell.new}) do
            tree_item("Root") do
              5.times {|i| tree_item "File #{i}" }
            end
          end
        end
      end
      show
    end
  end
end
SimpleTreeView.launch
| monkstone/jrubyfx | samples/javafx/tree_view.rb | Ruby | apache-2.0 | 4,812 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.core.datastore.page.encoding.dimension.legacy;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.carbondata.core.datastore.columnar.BlockIndexerStorage;
import org.apache.carbondata.core.datastore.columnar.BlockIndexerStorageForNoInvertedIndexForShort;
import org.apache.carbondata.core.datastore.columnar.BlockIndexerStorageForShort;
import org.apache.carbondata.core.datastore.compression.Compressor;
import org.apache.carbondata.core.datastore.compression.CompressorFactory;
import org.apache.carbondata.core.datastore.page.ColumnPage;
import org.apache.carbondata.core.datastore.page.encoding.ColumnPageEncoder;
import org.apache.carbondata.core.util.ByteUtil;
import org.apache.carbondata.format.Encoding;
/**
 * Index-storage codec for high-cardinality dictionary dimension columns.
 * Builds an (optionally inverted) index over the page data, compresses the
 * flattened page, and reports the encodings that were applied.
 */
public class HighCardDictDimensionIndexCodec extends IndexStorageCodec {
  /**
   * whether this column is varchar data type(long string)
   */
  private final boolean isVarcharType;

  public HighCardDictDimensionIndexCodec(boolean isSort, boolean isInvertedIndex,
      boolean isVarcharType) {
    super(isSort, isInvertedIndex);
    this.isVarcharType = isVarcharType;
  }

  @Override
  public String getName() {
    return "HighCardDictDimensionIndexCodec";
  }

  @Override
  public ColumnPageEncoder createEncoder(Map<String, String> parameter) {
    return new IndexStorageEncoder() {

      @Override
      protected void encodeIndexStorage(ColumnPage input) {
        byte[][] pageData = input.getByteArrayPage();
        boolean usesLocalDictionary = input.isLocalDictGeneratedPage();
        // Inverted-index storage keeps row ids; otherwise a plain storage
        // without inverted index is used.
        BlockIndexerStorage<byte[][]> storage = isInvertedIndex
            ? new BlockIndexerStorageForShort(
                pageData, usesLocalDictionary, !usesLocalDictionary, isSort)
            : new BlockIndexerStorageForNoInvertedIndexForShort(pageData, usesLocalDictionary);
        Compressor compressor = CompressorFactory.getInstance().getCompressor(
            input.getColumnCompressorName());
        // Flatten the (possibly reordered) page and compress it.
        super.compressedDataPage = compressor.compressByte(
            ByteUtil.flatten(storage.getDataPage()));
        super.indexStorage = storage;
      }

      @Override
      protected List<Encoding> getEncodingList() {
        List<Encoding> encodings = new ArrayList<>();
        if (isVarcharType) {
          encodings.add(Encoding.DIRECT_COMPRESS_VARCHAR);
        } else if (indexStorage.getRowIdPageLengthInBytes() > 0) {
          encodings.add(Encoding.INVERTED_INDEX);
        }
        if (indexStorage.getDataRlePageLengthInBytes() > 0) {
          encodings.add(Encoding.RLE);
        }
        return encodings;
      }
    };
  }
}
| ravipesala/incubator-carbondata | core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/dimension/legacy/HighCardDictDimensionIndexCodec.java | Java | apache-2.0 | 3,503 |
package org.zstack.sdk;
import org.zstack.sdk.ImageInventory;
/**
 * Result payload for a backup-storage image migration call: carries the
 * inventory of the migrated image.
 */
public class BackupStorageMigrateImageResult {
    public ImageInventory inventory;

    public ImageInventory getInventory() {
        return inventory;
    }

    public void setInventory(ImageInventory inventory) {
        this.inventory = inventory;
    }
}
| zstackorg/zstack | sdk/src/main/java/org/zstack/sdk/BackupStorageMigrateImageResult.java | Java | apache-2.0 | 330 |
/*
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Rhino code, released
* May 6, 1999.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1997-1999
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Bob Jervis
* Google Inc.
*
* Alternatively, the contents of this file may be used under the terms of
* the GNU General Public License Version 2 or later (the "GPL"), in which
* case the provisions of the GPL are applicable instead of those above. If
* you wish to allow use of your version of this file only under the terms of
* the GPL and not to allow others to use your version of this file under the
* MPL, indicate your decision by deleting the provisions above and replacing
* them with the notice and other provisions required by the GPL. If you do
* not delete the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*
* ***** END LICENSE BLOCK ***** */
package com.google.javascript.rhino;
import java.util.ArrayList;
import java.util.List;
/**
* A simple {@link ErrorReporter} that collects warnings and errors and makes
* them accessible via {@link #errors()} and {@link #warnings()}.
*
*
*/
public class SimpleErrorReporter implements ErrorReporter {
    private List<String> warnings = null;
    private List<String> errors = null;

    /**
     * Records a warning; the list is created lazily on first use.
     */
    public void warning(String message, String sourceName, int line,
                        String lineSource, int lineOffset)
    {
        warnings = addMessage(warnings,
            formatDetailedMessage(message, sourceName, line, lineSource, lineOffset));
    }

    /**
     * Records an error; the list is created lazily on first use.
     */
    public void error(String message, String sourceName, int line,
                      String lineSource, int lineOffset)
    {
        errors = addMessage(errors,
            formatDetailedMessage(message, sourceName, line, lineSource, lineOffset));
    }

    /**
     * Builds (but does not throw) the exception representing a runtime error.
     */
    public EvaluatorException runtimeError(
        String message, String sourceName, int line, String lineSource,
        int lineOffset)
    {
        return new EvaluatorException(
            message, sourceName, line, lineSource, lineOffset);
    }

    /**
     * Returns the list of errors, or {@code null} if there were none.
     */
    public List<String> errors()
    {
        return errors;
    }

    /**
     * Returns the list of warnings, or {@code null} if there were none.
     */
    public List<String> warnings()
    {
        return warnings;
    }

    /**
     * Shared lazy-append helper for {@link #warning} and {@link #error}:
     * creates the list on first use, appends the message, and returns the
     * (possibly new) list.  Extracted to remove the duplicated null-check
     * logic that previously appeared in both methods.
     */
    private static List<String> addMessage(List<String> list, String message)
    {
        if (list == null) {
            list = new ArrayList<String>();
        }
        list.add(message);
        return list;
    }

    /**
     * Formats a message with source position details by routing it through
     * {@link RhinoException}, which knows how to render source name, line,
     * column and line text.  Fields are only set when meaningful (non-null
     * strings, positive numbers).
     */
    private String formatDetailedMessage(
        String message, String sourceName, int line, String lineSource,
        int lineOffset)
    {
        RhinoException e = new RhinoException(message);
        if (sourceName != null) {
            e.initSourceName(sourceName);
        }
        if (lineSource != null) {
            e.initLineSource(lineSource);
        }
        if (line > 0) {
            e.initLineNumber(line);
        }
        if (lineOffset > 0) {
            e.initColumnNumber(lineOffset);
        }
        return e.getMessage();
    }
}
| ehsan/js-symbolic-executor | closure-compiler/src/com/google/javascript/rhino/SimpleErrorReporter.java | Java | apache-2.0 | 3,775 |
<!DOCTYPE html>
<!-- CindyJS example page: renders a "TrTranslation" (translation transform)
     construction exported from Cinderella (build 1798). The geometry payload
     below is generated data and must not be edited by hand. -->
<html>
<head>
<meta charset="UTF-8">
<title>TrTranslation.cdy</title>
<!-- Page layout: full-bleed canvas with a console area pinned to the
     bottom of the viewport. -->
<style type="text/css">
* {
margin: 0px;
padding: 0px;
}
#CSConsole {
background-color: #FAFAFA;
border-top: 1px solid #333333;
bottom: 0px;
height: 200px;
overflow-y: scroll;
position: fixed;
width: 100%;
}
</style>
<!-- Load the CindyJS runtime from the local build tree. -->
<script type="text/javascript" src="../build/js/Cindy.js"></script>
<!-- CindyScript draw hook: draws the defining translation arrow P1->P1'
     and the O->X / O'->X' arrows on each repaint. -->
<script id="csdraw" type="text/x-cindyscript">
draw(P1,P1i,arrow->true,color->(1,1,1));
draw(O,X,arrow->true,color->(0,0,1));
draw(Oi,Xi,arrow->true,color->(0,0,1));
</script>
<!-- Construction: Tr0 is the translation taking P1 to P1'; Oi, Xi and C2
     are the images of O, X and circle C0 under Tr0. Positions use
     projective coordinates with complex components, as exported. -->
<script type="text/javascript">
CindyJS({
scripts: "cs*",
defaultAppearance: { fontFamily: "sans-serif", lineSize: 1, pointSize: 5.0 },
angleUnit: "°",
geometry: [
{ name: "P1", type: "Free", pos: [ 4.0, -4.0, -0.6666666666666666 ], color: [ 0.0, 0.0, 0.0 ], labeled: true, size: 3.0 },
{ name: "P1i", type: "Free", pos: [ 0.0, -4.0, -1.3333333333333333 ], color: [ 0.5019608, 0.5019608, 0.5019608 ], labeled: true, size: 3.0, printname: "P1'" },
{ name: "Tr0", type: "TrTranslation", color: [ 0.0, 0.0, 1.0 ], args: [ "P1", "P1i" ] },
{ name: "O", type: "Free", pos: [ 4.0, 3.3333333333333335, 0.6666666666666666 ], color: [ 1.0, 1.0, 1.0 ], labeled: true },
{ name: "C0", type: "CircleByRadius", color: [ 1.0, 1.0, 0.0 ], radius: 2.999999999999999, args: [ "O" ], printname: "$C_{0}$" },
{ name: "X", type: "PointOnCircle", pos: [ 4.0, { r: 2.2077737946188507, i: -1.3419095945542774E-16 }, { r: 0.4444531955041505, i: 1.6304775351968118E-19 } ], color: [ 0.0, 0.0, 1.0 ], args: [ "C0" ], labeled: true, size: 3.0 },
{ name: "Oi", type: "Transform", pos: [ 4.0, { r: 0.6666666666666673, i: -3.996802888650567E-17 }, { r: 0.3333333333333333, i: 2.0400348077487273E-17 } ], color: [ 1.0, 1.0, 1.0 ], args: [ "Tr0", "O" ], labeled: true, printname: "O'" },
{ name: "Xi", type: "Transform", pos: [ 4.0, { r: 0.5246443927888744, i: -1.4647937769530865E-16 }, { r: 0.2666698170233487, i: 2.095953600789266E-17 } ], color: [ 0.0, 0.0, 1.0 ], args: [ "Tr0", "X" ], labeled: true, size: 3.0, printname: "X'" },
{ name: "C2", type: "Transform", color: [ 1.0, 1.0, 0.0 ], args: [ "Tr0", "C0" ], printname: "$C_{2}$" } ],
ports: [
{ id: "CSCanvas", width: 680, height: 350, transform: [ { visibleRect: [ -9.06, 9.34, 18.14, -4.66 ] } ], background: "rgb(168,176,192)" } ],
cinderella: { build: 1798, version: [ 2, 9, 1798 ] } });
</script>
</head>
<body>
<!-- CindyJS renders the construction into this container (see "ports"). -->
<div id="CSCanvas"></div>
</body>
</html>
| gagern/CindyJS | examples/112_TrTranslation.html | HTML | apache-2.0 | 2,651 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.LogManager;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.mockito.Mockito;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
* This class tests the creation and validation of a checkpoint.
*/
@RunWith(Parameterized.class)
public class TestEditLog {
static {
GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.ALL);
}
/**
 * Parameter sets for the {@link Parameterized} runner: the whole suite is
 * executed twice, once with the synchronous edit log and once with the
 * async edit log enabled.
 */
@Parameters
public static Collection<Object[]> data() {
  List<Object[]> params = new ArrayList<Object[]>();
  for (Boolean async : new Boolean[] { Boolean.FALSE, Boolean.TRUE }) {
    params.add(new Object[] { async });
  }
  return params;
}
// Whether the current parameterized run uses the async edit log. Static so
// the static getConf() can read it.
private static boolean useAsyncEditLog;
// NOTE(review): the Parameterized runner creates a new instance per test
// method, and the constructor writes its parameter into a *static* field.
// This works because JUnit runs one parameter set at a time, but it is a
// deliberate (and fragile-looking) pattern — do not "fix" it casually.
public TestEditLog(Boolean async) {
useAsyncEditLog = async;
}
/**
 * Builds a fresh HDFS configuration with the async-edit-logging flag set
 * according to the current parameterized run.
 */
public static Configuration getConf() {
  final Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, useAsyncEditLog);
  return conf;
}
/**
 * A deliberately undecodable mkdir op, used for testing
 * {@link EditLogFileInputStream#scanEditLog(File, long, boolean)}.
 * It writes random bytes as its payload and refuses to be read back,
 * serialized to XML, or parsed from XML.
 */
public static class GarbageMkdirOp extends FSEditLogOp {
  public GarbageMkdirOp() {
    super(FSEditLogOpCodes.OP_MKDIR);
  }

  @Override
  void resetSubFields() {
    // nothing to reset — this op carries no decoded fields
  }

  @Override
  void readFields(DataInputStream in, int logVersion) throws IOException {
    throw new IOException("cannot decode GarbageMkdirOp");
  }

  @Override
  public void writeFields(DataOutputStream out) throws IOException {
    // Emit between 1 and 16 random bytes as garbage payload.
    Random random = new Random();
    int numBytes = random.nextInt(16) + 1;
    byte[] garbage = new byte[numBytes];
    random.nextBytes(garbage);
    out.write(garbage);
  }

  @Override
  protected void toXml(ContentHandler contentHandler) throws SAXException {
    throw new UnsupportedOperationException(
        "Not supported for GarbageMkdirOp");
  }

  @Override
  void fromXml(Stanza st) throws InvalidXmlException {
    throw new UnsupportedOperationException(
        "Not supported for GarbageMkdirOp");
  }
}
static final Log LOG = LogFactory.getLog(TestEditLog.class);
static final int NUM_DATA_NODES = 0;
// This test creates NUM_THREADS threads and each thread does
// 2 * NUM_TRANSACTIONS Transactions concurrently.
static final int NUM_TRANSACTIONS = 100;
static final int NUM_THREADS = 100;
static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class);
/** An edits log with 3 edits from 0.20 - the result of
* a fresh namesystem followed by hadoop fs -touchz /myfile */
static final byte[] HADOOP20_SOME_EDITS =
StringUtils.hexStringToByte((
"ffff ffed 0a00 0000 0000 03fa e100 0000" +
"0005 0007 2f6d 7966 696c 6500 0133 000d" +
"3132 3932 3331 3634 3034 3138 3400 0d31" +
"3239 3233 3136 3430 3431 3834 0009 3133" +
"3432 3137 3732 3800 0000 0004 746f 6464" +
"0a73 7570 6572 6772 6f75 7001 a400 1544" +
"4653 436c 6965 6e74 5f2d 3136 3136 3535" +
"3738 3931 000b 3137 322e 3239 2e35 2e33" +
"3209 0000 0005 0007 2f6d 7966 696c 6500" +
"0133 000d 3132 3932 3331 3634 3034 3138" +
"3400 0d31 3239 3233 3136 3430 3431 3834" +
"0009 3133 3432 3137 3732 3800 0000 0004" +
"746f 6464 0a73 7570 6572 6772 6f75 7001" +
"a4ff 0000 0000 0000 0000 0000 0000 0000"
).replace(" ",""));
static {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
static final byte TRAILER_BYTE = FSEditLogOpCodes.OP_INVALID.getOpCode();
private static final int CHECKPOINT_ON_STARTUP_MIN_TXNS = 100;
/**
 * A runnable that logs a fixed number of open/close file transaction pairs
 * into the namesystem's edit log, syncing after each pair. Several of these
 * are run on concurrent threads to stress the edit log.
 */
static class Transactions implements Runnable {
final FSNamesystem namesystem;
// number of open/close pairs to log (so 2 * numTransactions edits total)
final int numTransactions;
final short replication = 3;
final long blockSize = 64;
// offset used to build distinct "/filenameN" paths per worker
final int startIndex;
Transactions(FSNamesystem ns, int numTx, int startIdx) {
namesystem = ns;
numTransactions = numTx;
startIndex = startIdx;
}
// add a bunch of transactions.
@Override
public void run() {
PermissionStatus p = namesystem.createFsOwnerPermissions(
new FsPermission((short)0777));
FSEditLog editLog = namesystem.getEditLog();
for (int i = 0; i < numTransactions; i++) {
// Build an under-construction inode purely to feed the edit log; no
// inode is actually inserted into the namespace.
INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
inode.toUnderConstruction("", "");
editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
editLog.logCloseFile("/filename" + (startIndex + i), inode);
editLog.logSync();
}
}
}
/**
 * Construct an FSEditLog with default configuration, taking the edits
 * directories from the given NNStorage.
 *
 * @param storage Storage object used by namenode
 */
private static FSEditLog getFSEditLog(NNStorage storage) throws IOException {
  Configuration conf = getConf();
  // Mirror the storage object's edits dirs into the configuration so the
  // new FSEditLog instance picks them up.
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      StringUtils.join(",", storage.getEditsDirectories()));
  return FSEditLog.newInstance(
      conf, storage, FSNamesystem.getNamespaceEditsDirs(conf));
}
/**
 * Test case for an empty edit log from a prior version of Hadoop: a log
 * containing only the layout-version marker must load zero edits.
 */
@Test
public void testPreTxIdEditLogNoEdits() throws Exception {
  FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
  namesys.dir = Mockito.mock(FSDirectory.class);
  // "ffffffed" is just the version number with no ops following it.
  byte[] versionOnlyLog = StringUtils.hexStringToByte("ffffffed");
  long numEdits = testLoad(versionOnlyLog, namesys);
  assertEquals(0, numEdits);
}
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
Configuration conf = getConf();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
// HADOOP20_SOME_EDITS is a canned 0.20-era log: a fresh namesystem
// followed by "hadoop fs -touchz /myfile" — expected to hold 3 edits.
long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
assertEquals(3, numEdits);
// Sanity check the edit: the replayed file must exist with the group
// and replication recorded in the canned log.
HdfsFileStatus fileInfo =
namesystem.getFileInfo("/myfile", false, false, false);
assertEquals("supergroup", fileInfo.getGroup());
assertEquals(3, fileInfo.getReplication());
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/**
 * Replays the given raw edit-log bytes into the namesystem and returns
 * the number of edits loaded.
 */
private long testLoad(byte[] data, FSNamesystem namesys) throws IOException {
  EditLogByteInputStream stream = new EditLogByteInputStream(data);
  FSEditLogLoader loader = new FSEditLogLoader(namesys, 0);
  return loader.loadFSEdits(stream, 1);
}
/**
 * Simple test for writing to and rolling the edit log.
 */
@Test
public void testSimpleEditLog() throws IOException {
// start a cluster
Configuration conf = getConf();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
// A freshly-formatted namesystem should be writing edits_inprogress_1.
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getInProgressEditsFileName(1));
editLog.logSetReplication("fakefile", (short) 1);
editLog.logSync();
// Rolling finalizes the current segment as edits_1-3 and opens a new
// in-progress segment starting at txid 4.
editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getFinalizedEditsFileName(1,3));
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getInProgressEditsFileName(4));
editLog.logSetReplication("fakefile", (short) 2);
editLog.logSync();
editLog.close();
} finally {
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
/**
 * Tests transaction logging in dfs at two buffer sizes: a comfortable
 * 2048-byte buffer, and a 1-byte buffer that forces the edit buffer to
 * automatically sync on each logged edit log entry.
 */
@Test
public void testMultiThreadedEditLog() throws IOException {
  for (int bufferSize : new int[] { 2048, 1 }) {
    testEditLog(bufferSize);
  }
}
/**
 * Asserts that {@code filename} exists under the "current" directory of
 * every storage directory of the given type.
 */
private void assertExistsInStorageDirs(MiniDFSCluster cluster,
                                       NameNodeDirType dirType,
                                       String filename) {
  final NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
  for (StorageDirectory sd : storage.dirIterable(dirType)) {
    final File candidate = new File(sd.getCurrentDir(), filename);
    assertTrue("Expect that " + candidate + " exists", candidate.exists());
  }
}
/**
 * Core multi-threaded edit log test: start a mini-cluster, set the edit
 * log flush buffer to {@code initialSize}, run NUM_THREADS concurrent
 * {@link Transactions} workers plus one extra serial pass, then replay the
 * finalized segment and verify the exact transaction count.
 *
 * @param initialSize initial edit log buffer size (1 forces a sync on
 *     effectively every logged entry)
 * @throws IOException
 */
private void testEditLog(int initialSize) throws IOException {
// start a cluster
Configuration conf = getConf();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
// Debug aid: print the name directories in use.
for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
File dir = new File(it.next().getPath());
System.out.println(dir);
}
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
// set small size of flush buffer
editLog.setOutputBufferCapacity(initialSize);
// Roll log so new output buffer size takes effect
// we should now be writing to edits_inprogress_3
fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Remember the current lastInodeId and will reset it back to test
// loading editlog segments.The transactions in the following allocate new
// inode id to write to editlogs but doesn't create ionde in namespace
long originalLastInodeId = namesystem.dir.getLastInodeId();
// Create threads and make them run transactions concurrently.
Thread threadId[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
// Each worker logs into a disjoint /filenameN range via the start index.
Transactions trans =
new Transactions(namesystem, NUM_TRANSACTIONS, i*NUM_TRANSACTIONS);
threadId[i] = new Thread(trans, "TransactionThread-" + i);
threadId[i].start();
}
// wait for all transactions to get over
for (int i = 0; i < NUM_THREADS; i++) {
try {
threadId[i].join();
} catch (InterruptedException e) {
i--; // retry
}
}
// Reopen some files as for append
Transactions trans =
new Transactions(namesystem, NUM_TRANSACTIONS, NUM_TRANSACTIONS / 2);
trans.run();
// Roll another time to finalize edits_inprogress_3
fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// (NUM_THREADS workers + 1 serial pass) each log 2 edits per transaction.
long expectedTxns = ((NUM_THREADS+1) * 2 * NUM_TRANSACTIONS) + 2; // +2 for start/end txns
// Verify that we can read in all the transactions that we have written.
// If there were any corruptions, it is likely that the reading in
// of these transactions will throw an exception.
//
namesystem.dir.resetLastInodeIdWithoutChecking(originalLastInodeId);
for (Iterator<StorageDirectory> it =
fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3,
3 + expectedTxns - 1);
assertTrue("Expect " + editFile + " exists", editFile.exists());
System.out.println("Verifying file: " + editFile);
long numEdits = loader.loadFSEdits(
new EditLogFileInputStream(editFile), 3);
// All logged files were "closed", so no lease should remain.
int numLeases = namesystem.leaseManager.countLease();
System.out.println("Number of outstanding leases " + numLeases);
assertEquals(0, numLeases);
assertTrue("Verification for " + editFile + " failed. " +
"Expected " + expectedTxns + " transactions. "+
"Found " + numEdits + " transactions.",
numEdits == expectedTxns);
}
} finally {
try {
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
} catch (Throwable t) {
LOG.error("Couldn't shut down cleanly", t);
}
}
}
/**
 * Logs a single setReplication edit on the given executor's thread and
 * waits for the submission to complete (the edit is logged, not synced).
 */
private void doLogEdit(ExecutorService exec, final FSEditLog log,
    final String filename) throws Exception
{
  Callable<Void> logTask = new Callable<Void>() {
    @Override
    public Void call() {
      log.logSetReplication(filename, (short) 1);
      return null;
    }
  };
  exec.submit(logTask).get();
}
/**
 * Invokes {@code log.logSync()} on the given executor's thread and waits
 * for it to finish.
 */
private void doCallLogSync(ExecutorService exec, final FSEditLog log)
    throws Exception
{
  Callable<Void> syncTask = new Callable<Void>() {
    @Override
    public Void call() {
      log.logSync();
      return null;
    }
  };
  exec.submit(syncTask).get();
}
/**
 * Invokes {@code log.logSyncAll()} on the given executor's thread and
 * waits for it to finish.
 */
private void doCallLogSyncAll(ExecutorService exec, final FSEditLog log)
    throws Exception
{
  Callable<Void> syncAllTask = new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      log.logSyncAll();
      return null;
    }
  };
  exec.submit(syncAllTask).get();
}
/**
 * Tests that edits logged from separate threads are batched into a single
 * sync: calling logSync for thread B's edit also makes thread A's earlier
 * edit durable, and A's subsequent logSync is a no-op that increments the
 * "TransactionsBatchedInSync" metric.
 *
 * Skipped for the async edit log, which auto-syncs in the background and
 * therefore has different txid-visibility semantics.
 */
@Test
public void testSyncBatching() throws Exception {
if (useAsyncEditLog) {
// semantics are completely different since edits will be auto-synced
return;
}
// start a cluster
Configuration conf = getConf();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
ExecutorService threadA = Executors.newSingleThreadExecutor();
ExecutorService threadB = Executors.newSingleThreadExecutor();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",
1, editLog.getSyncTxId());
// Log an edit from thread A
doLogEdit(threadA, editLog, "thread-a 1");
// fix: assertion message previously read "should do not affect txid"
assertEquals("logging edit without syncing should not affect txid",
1, editLog.getSyncTxId());
// Log an edit from thread B
doLogEdit(threadB, editLog, "thread-b 1");
assertEquals("logging edit without syncing should not affect txid",
1, editLog.getSyncTxId());
// Now ask to sync edit from B, which should sync both edits.
doCallLogSync(threadB, editLog);
assertEquals("logSync from second thread should bump txid up to 3",
3, editLog.getSyncTxId());
// Now ask to sync edit from A, which was already batched in - thus
// it should increment the batch count metric
doCallLogSync(threadA, editLog);
assertEquals("logSync from first thread shouldn't change txid",
3, editLog.getSyncTxId());
//Should have incremented the batch count exactly once
assertCounter("TransactionsBatchedInSync", 1L,
getMetrics("NameNodeActivity"));
} finally {
threadA.shutdown();
threadB.shutdown();
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
/**
 * Test what happens with the following sequence:
 *
 *  Thread A writes edit
 *  Thread B calls logSyncAll
 *           calls close() on stream
 *  Thread A calls logSync
 *
 * This sequence is legal and can occur if enterSafeMode() is closely
 * followed by saveNamespace.
 */
@Test
public void testBatchedSyncWithClosedLogs() throws Exception {
// start a cluster
Configuration conf = getConf();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
ExecutorService threadA = Executors.newSingleThreadExecutor();
ExecutorService threadB = Executors.newSingleThreadExecutor();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
// Log an edit from thread A
doLogEdit(threadA, editLog, "thread-a 1");
// async log is doing batched syncs in background. logSync just ensures
// the edit is durable, so the txid may increase prior to sync
if (!useAsyncEditLog) {
// fix: assertion message previously read "should do not affect txid"
assertEquals("logging edit without syncing should not affect txid",
1, editLog.getSyncTxId());
}
// logSyncAll in Thread B
doCallLogSyncAll(threadB, editLog);
assertEquals("logSyncAll should sync thread A's transaction",
2, editLog.getSyncTxId());
// Close edit log
editLog.close();
// Ask thread A to finish sync (which should be a no-op)
doCallLogSync(threadA, editLog);
} finally {
threadA.shutdown();
threadB.shutdown();
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
/**
 * Verifies that a flipped bit in an edit segment's trailing checksum is
 * detected: corrupt the last 4 bytes of each finalized edits_1-3 file and
 * expect the subsequent (non-format) restart to fail with a
 * ChecksumException as the cause.
 */
@Test
public void testEditChecksum() throws Exception {
// start a cluster
Configuration conf = getConf();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
fileSys.mkdirs(new Path("/tmp"));
// Snapshot the edits storage dirs before shutting the cluster down.
Iterator<StorageDirectory> iter = fsimage.getStorage().
dirIterator(NameNodeDirType.EDITS);
LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
while (iter.hasNext()) {
sds.add(iter.next());
}
editLog.close();
cluster.shutdown();
// Flip a bit in the final 4 bytes (the checksum) of each finalized log.
for (StorageDirectory sd : sds) {
File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
assertTrue(editFile.exists());
long fileLen = editFile.length();
LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
rwf.seek(fileLen-4); // seek to checksum bytes
int b = rwf.readInt();
rwf.seek(fileLen-4);
rwf.writeInt(b+1);
rwf.close();
}
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
fail("should not be able to start");
} catch (IOException e) {
// expected
assertNotNull("Cause of exception should be ChecksumException", e.getCause());
assertEquals("Cause of exception should be ChecksumException",
ChecksumException.class, e.getCause().getClass());
}
}
/**
 * Test what happens if the NN crashes when it has started but
 * had no transactions written.
 */
@Test
public void testCrashRecoveryNoTransactions() throws Exception {
testCrashRecovery(0);
}
/**
 * Test what happens if the NN crashes when it has started and
 * had a few transactions written. 150 transactions exceeds
 * CHECKPOINT_ON_STARTUP_MIN_TXNS, so a checkpoint is expected on restart.
 */
@Test
public void testCrashRecoveryWithTransactions() throws Exception {
testCrashRecovery(150);
}
/**
 * Do a test to make sure the edit log can recover edits even after
 * a non-clean shutdown. This does a simulated crash by copying over
 * the edits directory while the NN is still running, then shutting it
 * down, and restoring that edits directory.
 *
 * @param numTransactions number of mkdirs to perform before the simulated
 *     crash; values above CHECKPOINT_ON_STARTUP_MIN_TXNS should trigger a
 *     checkpoint on restart
 */
private void testCrashRecovery(int numTransactions) throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = getConf();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
CHECKPOINT_ON_STARTUP_MIN_TXNS);
try {
LOG.info("\n===========================================\n" +
"Starting empty cluster");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(true)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
for (int i = 0; i < numTransactions; i++) {
fs.mkdirs(new Path("/test" + i));
}
// Directory layout looks like:
// test/data/dfs/nameN/current/{fsimage_N,edits_...}
File nameDir = new File(cluster.getNameDirs(0).iterator().next().getPath());
File dfsDir = nameDir.getParentFile();
assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
// Take the "crash snapshot" while the NN is still running, so the
// in-progress edits file is copied as-is.
LOG.info("Copying data directory aside to a hot backup");
File backupDir = new File(dfsDir.getParentFile(), "dfs.backup-while-running");
FileUtils.copyDirectory(dfsDir, backupDir);
LOG.info("Shutting down cluster #1");
cluster.shutdown();
cluster = null;
// Now restore the backup
FileUtil.fullyDeleteContents(dfsDir);
dfsDir.delete();
backupDir.renameTo(dfsDir);
// Directory layout looks like:
// test/data/dfs/nameN/current/{fsimage_N,edits_...}
File currentDir = new File(nameDir, "current");
// We should see the file as in-progress
File editsFile = new File(currentDir,
NNStorage.getInProgressEditsFileName(1));
assertTrue("Edits file " + editsFile + " should exist", editsFile.exists());
File imageFile = FSImageTestUtil.findNewestImageFile(
currentDir.getAbsolutePath());
assertNotNull("No image found in " + nameDir, imageFile);
assertEquals(NNStorage.getImageFileName(0), imageFile.getName());
// Try to start a new cluster
LOG.info("\n===========================================\n" +
"Starting same cluster after simulated crash");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.build();
cluster.waitActive();
// We should still have the files we wrote prior to the simulated crash
fs = cluster.getFileSystem();
for (int i = 0; i < numTransactions; i++) {
assertTrue(fs.exists(new Path("/test" + i)));
}
long expectedTxId;
if (numTransactions > CHECKPOINT_ON_STARTUP_MIN_TXNS) {
// It should have saved a checkpoint on startup since there
// were more unfinalized edits than configured
expectedTxId = numTransactions + 1;
} else {
// otherwise, it shouldn't have made a checkpoint
expectedTxId = 0;
}
imageFile = FSImageTestUtil.findNewestImageFile(
currentDir.getAbsolutePath());
assertNotNull("No image found in " + nameDir, imageFile);
assertEquals(NNStorage.getImageFileName(expectedTxId),
imageFile.getName());
// Started successfully. Shut it down and make sure it can restart.
cluster.shutdown();
cluster = null;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.build();
cluster.waitActive();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
// should succeed - only one corrupt log dir
// (inBothDirs=false, updateTransactionIdFile=true, shouldSucceed=true)
@Test
public void testCrashRecoveryEmptyLogOneDir() throws Exception {
doTestCrashRecoveryEmptyLog(false, true, true);
}
// should fail - seen_txid updated to 3, but no log dir contains txid 3
// (inBothDirs=true, updateTransactionIdFile=true, shouldSucceed=false)
@Test
public void testCrashRecoveryEmptyLogBothDirs() throws Exception {
doTestCrashRecoveryEmptyLog(true, true, false);
}
// should succeed - only one corrupt log dir
// (inBothDirs=false, updateTransactionIdFile=false, shouldSucceed=true)
@Test
public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId()
throws Exception {
doTestCrashRecoveryEmptyLog(false, false, true);
}
// should succeed - both log dirs corrupt, but seen_txid never updated
// (inBothDirs=true, updateTransactionIdFile=false, shouldSucceed=true)
@Test
public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId()
throws Exception {
doTestCrashRecoveryEmptyLog(true, false, true);
}
/**
 * Test that the NN handles the corruption properly
 * after it crashes just after creating an edit log
 * (ie before writing START_LOG_SEGMENT). In the case
 * that all logs have this problem, it should mark them
 * as corrupt instead of trying to finalize them.
 *
 * @param inBothDirs if true, a truncated edits_inprogress_3 is created in
 *     every edits directory. If false, only the first directory gets one
 *     (the loop breaks after the first iteration).
 * @param updateTransactionIdFile if true update the seen_txid file to 3.
 *     If false, it will not be updated. This simulates a crash between
 *     creating the new segment and updating the seen_txid file.
 *     NOTE(review): in the !inBothDirs case the loop breaks *before* this
 *     flag is consulted, so seen_txid is never written there regardless —
 *     confirm whether that is intended before relying on that combination.
 * @param shouldSucceed true if the NN restart is expected to succeed;
 *     false if it should fail with a "Gap in transactions" error.
 */
private void doTestCrashRecoveryEmptyLog(boolean inBothDirs,
boolean updateTransactionIdFile, boolean shouldSucceed)
throws Exception {
// start a cluster
Configuration conf = getConf();
MiniDFSCluster cluster = null;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES).build();
cluster.shutdown();
Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
for (URI uri : editsDirs) {
File dir = new File(uri.getPath());
File currentDir = new File(dir, "current");
// We should start with only the finalized edits_1-2
GenericTestUtils.assertGlobEquals(currentDir, "edits_.*",
NNStorage.getFinalizedEditsFileName(1, 2));
// Make a truncated edits_3_inprogress
File log = new File(currentDir,
NNStorage.getInProgressEditsFileName(3));
EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, log, 1024);
try {
stream.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
if (!inBothDirs) {
break;
}
NNStorage storage = new NNStorage(conf,
Collections.<URI>emptyList(),
Lists.newArrayList(uri));
if (updateTransactionIdFile) {
storage.writeTransactionIdFileToStorage(3);
}
storage.close();
} finally {
stream.close();
}
}
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES).format(false).build();
if (!shouldSucceed) {
// fix: message previously read "startin cluster"
fail("Should not have succeeded in starting cluster");
}
} catch (IOException ioe) {
if (shouldSucceed) {
LOG.info("Should have succeeded in starting cluster, but failed", ioe);
throw ioe;
} else {
GenericTestUtils.assertExceptionContains(
"Gap in transactions. Expected to be able to read up until " +
"at least txid 3 but unable to find any edit logs containing " +
"txid 3", ioe);
}
} finally {
cluster.shutdown();
}
}
/**
 * An {@link EditLogInputStream} backed by an in-memory byte array, used to
 * replay canned edit-log bytes (e.g. HADOOP20_SOME_EDITS) without touching
 * the filesystem. Reports INVALID_TXID for its first/last txids and claims
 * to be in-progress.
 */
private static class EditLogByteInputStream extends EditLogInputStream {
  private final InputStream source;
  private final long numBytes;
  private int logVersion;
  private FSEditLogOp.Reader opReader = null;
  private FSEditLogLoader.PositionTrackingInputStream posTracker = null;

  public EditLogByteInputStream(byte[] data) throws IOException {
    numBytes = data.length;
    source = new ByteArrayInputStream(data);
    // Read the layout version off the front of the stream, then wrap the
    // remainder in a position tracker for the op reader.
    DataInputStream headerIn =
        new DataInputStream(new BufferedInputStream(source));
    logVersion = EditLogFileInputStream.readLogVersion(headerIn, true);
    posTracker = new FSEditLogLoader.PositionTrackingInputStream(headerIn);
    opReader = FSEditLogOp.Reader.create(
        new DataInputStream(posTracker), posTracker, logVersion);
  }

  @Override
  public long getFirstTxId() {
    return HdfsServerConstants.INVALID_TXID;
  }

  @Override
  public long getLastTxId() {
    return HdfsServerConstants.INVALID_TXID;
  }

  @Override
  public long length() throws IOException {
    return numBytes;
  }

  @Override
  public long getPosition() {
    return posTracker.getPos();
  }

  @Override
  protected FSEditLogOp nextOp() throws IOException {
    return opReader.readOp(false);
  }

  @Override
  public int getVersion(boolean verifyVersion) throws IOException {
    return logVersion;
  }

  @Override
  public void close() throws IOException {
    source.close();
  }

  @Override
  public String getName() {
    return "AnonEditLogByteInputStream";
  }

  @Override
  public boolean isInProgress() {
    return true;
  }

  @Override
  public void setMaxOpSize(int maxOpSize) {
    opReader.setMaxOpSize(maxOpSize);
  }

  @Override public boolean isLocalLog() {
    return true;
  }
}
/**
 * Tests that opening the edit log for write fails cleanly when the only
 * available journal directory is unwritable, and that the failure message
 * names the too-few-journals condition.
 */
@Test
public void testFailedOpen() throws Exception {
  File logDir = new File(TEST_DIR, "testFailedOpen");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
  try {
    // Make the lone journal directory read-only so openForWrite must fail.
    FileUtil.setWritable(logDir, false);
    log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    // Fixed typo in the failure message ("Did no throw" -> "Did not throw").
    fail("Did not throw exception on only having a bad dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "too few journals successfully started", ioe);
  } finally {
    // Restore permissions so cleanup / later tests can delete the directory.
    FileUtil.setWritable(logDir, true);
    log.close();
  }
}
/**
 * Regression test for HDFS-1112/HDFS-3020. Even when logSync is never
 * called explicitly, the edit log must sync itself once its internal
 * buffer fills up.
 */
@Test
public void testAutoSync() throws Exception {
  File logDir = new File(TEST_DIR, "testAutoSync");
  logDir.mkdirs();
  FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);

  // 500 raw bytes rendered as hex yields a ~1KB string payload per op.
  String oneKB = StringUtils.byteToHexString(new byte[500]);

  try {
    log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
    log.setMetricsForTests(mockMetrics);

    final int opsPerBatch = 400;
    for (int n = 0; n < opsPerBatch; n++) {
      log.logDelete(oneKB, 1L, false);
    }
    // ~400KB written: still inside the 512KB buffer, so no sync yet.
    Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());

    for (int n = 0; n < opsPerBatch; n++) {
      log.logDelete(oneKB, 1L, false);
    }
    // ~800KB total: the buffer must have auto-synced exactly once.
    Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());
  } finally {
    log.close();
  }
}
/**
 * Tests the getEditLogManifest function using mock storage for a number
 * of different situations: matching directories, disjoint directories,
 * gaps, differing segment lengths, and in-progress vs finalized segments.
 */
@Test
public void testEditLogManifestMocks() throws IOException {
  NNStorage storage;
  FSEditLog log;

  // Simple case - different directories have the same
  // set of logs, with an in-progress one at end
  storage = mockStorageWithEdits(
      "[1,100]|[101,200]|[201,]",
      "[1,100]|[101,200]|[201,]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  // In-progress [201,] is excluded from the manifest.
  assertEquals("[[1,100], [101,200]] CommittedTxId: 200",
      log.getEditLogManifest(1).toString());
  assertEquals("[[101,200]] CommittedTxId: 200",
      log.getEditLogManifest(101).toString());

  // Another simple case, different directories have different
  // sets of files; the union is returned.
  storage = mockStorageWithEdits(
      "[1,100]|[101,200]",
      "[1,100]|[201,300]|[301,400]"); // nothing starting at 101
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200], [201,300], [301,400]]" +
      " CommittedTxId: 400", log.getEditLogManifest(1).toString());

  // Case where one directory has an earlier finalized log, followed
  // by a gap. The returned manifest should start after the gap.
  storage = mockStorageWithEdits(
      "[1,100]|[301,400]", // gap from 101 to 300
      "[301,400]|[401,500]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[301,400], [401,500]] CommittedTxId: 500",
      log.getEditLogManifest(1).toString());

  // Case where different directories have different length logs
  // starting at the same txid - should pick the longer one
  storage = mockStorageWithEdits(
      "[1,100]|[101,150]", // short log at 101
      "[1,50]|[101,200]"); // short log at 1
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200]] CommittedTxId: 200",
      log.getEditLogManifest(1).toString());
  assertEquals("[[101,200]] CommittedTxId: 200",
      log.getEditLogManifest(101).toString());

  // Case where the first storage has an inprogress while
  // the second has finalised that file (i.e. the first failed
  // recently); the finalized copy wins.
  storage = mockStorageWithEdits(
      "[1,100]|[101,]",
      "[1,100]|[101,200]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200]] CommittedTxId: 200",
      log.getEditLogManifest(1).toString());
  assertEquals("[[101,200]] CommittedTxId: 200",
      log.getEditLogManifest(101).toString());
}
/**
 * Create a mock NNStorage object with several directories, each directory
 * holding edit logs according to a specification. Each directory
 * is specified by a pipe-separated string. For example:
 * <code>[1,100]|[101,200]</code> specifies a directory which
 * includes two finalized segments, one from 1-100, and one from 101-200.
 * The syntax <code>[1,]</code> specifies an in-progress log starting at
 * txid 1.
 *
 * @param editsDirSpecs one spec string per mock directory
 * @return a mocked NNStorage whose dirIterable/getEditsDirectories answer
 *         with the constructed directories
 */
private NNStorage mockStorageWithEdits(String... editsDirSpecs) throws IOException {
  // The spec pattern is constant; compile it once instead of re-compiling
  // it for every log spec inside the nested loops.
  final Pattern logSpecPattern = Pattern.compile("\\[(\\d+),(\\d+)?\\]");

  List<StorageDirectory> sds = Lists.newArrayList();
  List<URI> uris = Lists.newArrayList();
  NNStorage storage = Mockito.mock(NNStorage.class);

  for (String dirSpec : editsDirSpecs) {
    List<String> files = Lists.newArrayList();
    String[] logSpecs = dirSpec.split("\\|");
    for (String logSpec : logSpecs) {
      Matcher m = logSpecPattern.matcher(logSpec);
      assertTrue("bad spec: " + logSpec, m.matches());
      if (m.group(2) == null) {
        // "[N,]" means an in-progress segment starting at txid N.
        files.add(NNStorage.getInProgressEditsFileName(
            Long.parseLong(m.group(1))));
      } else {
        files.add(NNStorage.getFinalizedEditsFileName(
            Long.parseLong(m.group(1)),
            Long.parseLong(m.group(2))));
      }
    }
    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
        NameNodeDirType.EDITS, false,
        files.toArray(new String[0]));
    sds.add(sd);
    // Random suffix keeps the mock URIs distinct per directory.
    URI u = URI.create("file:///storage"+ Math.random());
    Mockito.doReturn(sd).when(storage).getStorageDirectory(u);
    uris.add(u);
  }

  Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
  Mockito.doReturn(uris).when(storage).getEditsDirectories();
  return storage;
}
/**
 * Specification for a failure during #setupEdits
 */
static class AbortSpec {
  final int roll;      // 1-based roll number after which the abort fires
  final int logindex;  // index of the journal to abort

  /**
   * Construct the failure specification.
   * @param roll number to fail after. e.g. 1 to fail after the first roll
   * @param logindex index of journal to fail.
   */
  AbortSpec(int roll, int logindex) {
    this.roll = roll;
    this.logindex = logindex;
  }
}

// Segment sizes produced by setupEdits: a completed roll holds 10 txns,
// an aborted (failed) segment holds 2.
final static int TXNS_PER_ROLL = 10;
final static int TXNS_PER_FAIL = 2;
/**
 * Set up directories for tests.
 *
 * Each rolled file is 10 txns long.
 * A failed file is 2 txns long.
 *
 * @param editUris directories to create edit logs in
 * @param numrolls number of times to roll the edit log during setup
 * @param closeOnFinish whether to close the edit log after setup
 * @param abortAtRolls Specifications for when to fail, see AbortSpec
 * @return the formatted NNStorage backing the generated edit logs
 */
public static NNStorage setupEdits(List<URI> editUris, int numrolls,
    boolean closeOnFinish, AbortSpec... abortAtRolls) throws IOException {
  List<AbortSpec> aborts = new ArrayList<AbortSpec>(Arrays.asList(abortAtRolls));
  NNStorage storage = new NNStorage(getConf(),
      Collections.<URI>emptyList(),
      editUris);
  storage.format(new NamespaceInfo());
  FSEditLog editlog = getFSEditLog(storage);
  // open the edit log and add two transactions
  // logGenerationStamp is used, simply because it doesn't
  // require complex arguments.
  editlog.initJournalsForWrite();
  editlog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  // NOTE(review): loop starts at 2 — presumably the segment-start marker
  // (and/or openForWrite itself) accounts for the first txn(s) so each
  // segment totals TXNS_PER_ROLL; confirm against FSEditLog accounting.
  for (int i = 2; i < TXNS_PER_ROLL; i++) {
    editlog.logGenerationStamp((long) 0);
  }
  editlog.logSync();

  // Go into edit log rolling loop.
  // On each roll, the abortAtRolls abort specs are
  // checked to see if an abort is required. If so the
  // the specified journal is aborted. It will be brought
  // back into rotation automatically by rollEditLog
  for (int i = 0; i < numrolls; i++) {
    editlog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

    editlog.logGenerationStamp((long) i);
    editlog.logSync();

    // Abort any journals scheduled to fail after this roll
    // (AbortSpec roll numbers are 1-based, hence i+1).
    while (aborts.size() > 0
        && aborts.get(0).roll == (i+1)) {
      AbortSpec spec = aborts.remove(0);
      editlog.getJournals().get(spec.logindex).abort();
    }

    // NOTE(review): j starts at 3 here (vs 2 above), presumably because the
    // extra logGenerationStamp after the roll already added one txn — confirm.
    for (int j = 3; j < TXNS_PER_ROLL; j++) {
      editlog.logGenerationStamp((long) i);
    }
    editlog.logSync();
  }

  if (closeOnFinish) {
    editlog.close();
  }

  FSImageTestUtil.logStorageContents(LOG, storage);
  return storage;
}

/**
 * Set up directories for tests.
 *
 * Each rolled file is 10 txns long.
 * A failed file is 2 txns long.
 *
 * Convenience overload that always closes the edit log after setup.
 *
 * @param editUris directories to create edit logs in
 * @param numrolls number of times to roll the edit log during setup
 * @param abortAtRolls Specifications for when to fail, see AbortSpec
 */
public static NNStorage setupEdits(List<URI> editUris, int numrolls,
    AbortSpec... abortAtRolls) throws IOException {
  return setupEdits(editUris, numrolls, true, abortAtRolls);
}
/**
 * Loads an edit log whose two journal directories failed on alternating
 * rolls: directory 0 aborts on odd rolls, directory 1 on even rolls.
 * Despite the alternation, every transaction must be readable by failing
 * over between the directories.
 */
@Test
public void testAlternatingJournalFailure() throws IOException {
  File f1 = new File(TEST_DIR + "/alternatingjournaltest0");
  File f2 = new File(TEST_DIR + "/alternatingjournaltest1");
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());

  // Build the alternating abort schedule for rolls 1..10:
  // odd rolls abort journal 0, even rolls abort journal 1.
  AbortSpec[] abortSchedule = new AbortSpec[10];
  for (int roll = 1; roll <= 10; roll++) {
    abortSchedule[roll - 1] = new AbortSpec(roll, (roll + 1) % 2);
  }
  NNStorage storage = setupEdits(editUris, 10, abortSchedule);

  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();

  long expectedTxId = 1;
  long totalRead = 0;
  Iterable<EditLogInputStream> editStreams =
      editlog.selectInputStreams(expectedTxId, TXNS_PER_ROLL * 11);
  for (EditLogInputStream edits : editStreams) {
    FSEditLogLoader.EditLogValidation val =
        FSEditLogLoader.scanEditLog(edits, Long.MAX_VALUE);
    long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
    LOG.info("Loading edits " + edits + " read " + read);
    // Streams must hand over seamlessly: each starts where the last ended.
    assertEquals(expectedTxId, edits.getFirstTxId());
    expectedTxId += read;
    totalRead += read;
  }
  editlog.close();
  storage.close();

  // 11 segments in total: the initial open plus 10 rolls.
  assertEquals(TXNS_PER_ROLL * 11, totalRead);
}
/**
 * Test loading an editlog with gaps. A single editlog directory
 * is set up. One of the edit log files is deleted. This should
 * fail when selecting the input streams as it will not be able
 * to select enough streams to load up to 4*TXNS_PER_ROLL.
 * There should be 4*TXNS_PER_ROLL transactions as we rolled 3
 * times.
 */
@Test
public void testLoadingWithGaps() throws IOException {
  File f1 = new File(TEST_DIR + "/gaptest0");
  List<URI> editUris = ImmutableList.of(f1.toURI());
  NNStorage storage = setupEdits(editUris, 3);

  // The gap is the second finalized segment, i.e. txids [11,20].
  final long startGapTxId = 1*TXNS_PER_ROLL + 1;
  final long endGapTxId = 2*TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,
          endGapTxId))) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());

  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  try {
    editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
    fail("Should have thrown exception");
  } catch (IOException ioe) {
    // The error message pins both the requested end (40) and the first
    // missing txid (11).
    GenericTestUtils.assertExceptionContains(
        "Gap in transactions. Expected to be able to read up until " +
        "at least txid 40 but unable to find any edit logs containing " +
        "txid 11", ioe);
  }
}
/**
 * Test that we can read from a byte stream without crashing.
 * Writes the supplied bytes into a fresh edit log file, then reads ops
 * back until EOF. An IOException while reading is tolerated (garbage may
 * legitimately fail to parse); any other Throwable fails the test.
 */
static void validateNoCrash(byte garbage[]) throws IOException {
  final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
  EditLogFileOutputStream elfos = null;
  EditLogFileInputStream elfis = null;
  try {
    elfos = new EditLogFileOutputStream(getConf(), TEST_LOG_NAME, 0);
    elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    elfos.writeRaw(garbage, 0, garbage.length);
    elfos.setReadyToFlush();
    elfos.flushAndSync(true);
    elfos.close();
    elfos = null; // null out so the finally block doesn't double-close
    elfis = new EditLogFileInputStream(TEST_LOG_NAME);

    // verify that we can read everything without killing the JVM or
    // throwing an exception other than IOException
    try {
      while (true) {
        FSEditLogOp op = elfis.readOp();
        if (op == null)
          break;
      }
    } catch (IOException e) {
      // deliberately swallowed: an IOException on garbage input is the
      // acceptable outcome this test permits
    } catch (Throwable t) {
      fail("Caught non-IOException throwable " +
          StringUtils.stringifyException(t));
    }
  } finally {
    if ((elfos != null) && (elfos.isOpen()))
      elfos.close();
    if (elfis != null)
      elfis.close();
  }
}

// NOTE(review): appears unused in this chunk and the name is misspelled
// ("Sequenecs"); left untouched since it may be referenced elsewhere — confirm.
static byte[][] invalidSequenecs = null;
/**
 * "Fuzz" test for the edit log: feeding random garbage to the reader must
 * never crash the JVM or surface anything other than an IOException.
 */
@Test
public void testFuzzSequences() throws IOException {
  final int maxGarbageLength = 512;
  final int iterations = 5000;
  // A fixed seed makes java.util.Random emit the same byte sequences on
  // every run and every machine, keeping the test deterministic.
  final int seed = 123;
  Random random = new Random(seed);
  for (int i = 0; i < iterations; i++) {
    byte[] garbage = new byte[random.nextInt(maxGarbageLength)];
    random.nextBytes(garbage);
    validateNoCrash(garbage);
  }
}
/**
 * Reads every op from each stream in order, checking that transaction IDs
 * are strictly consecutive starting at {@code startTxId}.
 *
 * @return the total number of ops read across all streams
 * @throws IOException if a txid arrives out of order or a read fails
 */
private static long readAllEdits(Collection<EditLogInputStream> streams,
    long startTxId) throws IOException {
  long expectedTxId = startTxId;
  long opsRead = 0;
  for (EditLogInputStream s : streams) {
    FSEditLogOp op;
    while ((op = s.readOp()) != null) {
      if (op.getTransactionId() != expectedTxId) {
        throw new IOException("out of order transaction ID! expected " +
            expectedTxId + " but got " + op.getTransactionId() + " when " +
            "reading " + s.getName());
      }
      opsRead++;
      expectedTxId = op.getTransactionId() + 1;
    }
  }
  return opsRead;
}
/**
 * Test edit log failover. If a single edit log file is missing from one
 * directory, the same segment from the other directory should be used.
 */
@Test
public void testEditLogFailOverFromMissing() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());

  NNStorage storage = setupEdits(editUris, 3);

  // Delete the second finalized segment ([11,20]) from the first dir only.
  final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2*TXNS_PER_ROLL;

  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith(NNStorage.getFinalizedEditsFileName(
          startErrorTxId, endErrorTxId));
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());

  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection<EditLogInputStream> streams = null;
  try {
    streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // Bug fix: if selectInputStreams threw, 'streams' is still null and the
    // old unconditional streams.toArray() raised an NPE in the finally
    // block, masking the original failure.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}
/**
 * Test edit log failover from a corrupt edit log: flipping the trailing
 * checksum of one directory's segment must cause reads to fail over to
 * the intact copy in the other directory.
 */
@Test
public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());

  NNStorage storage = setupEdits(editUris, 3);

  // Corrupt the second finalized segment ([11,20]) in the first dir only.
  final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2*TXNS_PER_ROLL;

  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith(NNStorage.getFinalizedEditsFileName(
          startErrorTxId, endErrorTxId));
    }
  });
  assertEquals(1, files.length);

  long fileLen = files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);

  // Flip the last checksum int so the segment fails validation.
  RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
  rwf.seek(fileLen - 4); // seek to checksum bytes
  int b = rwf.readInt();
  rwf.seek(fileLen - 4);
  rwf.writeInt(b + 1);
  rwf.close();

  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection<EditLogInputStream> streams = null;
  try {
    streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // Bug fix: guard against 'streams' being null when selectInputStreams
    // threw — the old unconditional toArray() NPE'd and masked the failure.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}
/**
 * Test creating a directory with lots and lots of edit log segments,
 * then timing how long a restart takes to read them all back.
 */
@Test
public void testManyEditLogSegments() throws IOException {
  final int NUM_EDIT_LOG_ROLLS = 1000;
  // start a cluster
  Configuration conf = getConf();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    // NOTE(review): the (i * 3) arithmetic implies each roll contributes
    // 3 txns (the SetReplication op plus, presumably, segment start/end
    // markers) — confirm against FSEditLog segment accounting.
    for (int i = 0; i < NUM_EDIT_LOG_ROLLS; i++) {
      editLog.logSetReplication("fakefile" + i, (short)(i % 3));
      assertExistsInStorageDirs(
          cluster, NameNodeDirType.EDITS,
          NNStorage.getInProgressEditsFileName((i * 3) + 1));
      editLog.logSync();
      editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
      assertExistsInStorageDirs(
          cluster, NameNodeDirType.EDITS,
          NNStorage.getFinalizedEditsFileName((i * 3) + 1, (i * 3) + 3));
    }
    editLog.close();
  } finally {
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }

  // How long does it take to read through all these edit logs?
  long startTime = Time.now();
  try {
    cluster = new MiniDFSCluster.Builder(conf).
        numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  long endTime = Time.now();
  // Fix: compute directly in double; the old "(float)(endTime - startTime)"
  // cast needlessly truncated precision before widening back to double.
  double delta = (endTime - startTime) / 1000.0;
  LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
      NUM_EDIT_LOG_ROLLS, delta));
}
/**
 * Edit log op instances are cached internally using thread-local storage.
 * This test checks that the cached instances are reset in between different
 * transactions processed on the same thread, so that we don't accidentally
 * apply incorrect attributes to an inode.
 *
 * @throws IOException if there is an I/O error
 */
@Test
public void testResetThreadLocalCachedOps() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // Set single handler thread, so all transactions hit same thread-local ops.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();

    // Create /dir1 with a default ACL.
    Path dir1 = new Path("/dir1");
    fileSys.mkdirs(dir1);
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
    fileSys.modifyAclEntries(dir1, aclSpec);

    // /dir1/dir2 is expected to clone the default ACL.
    Path dir2 = new Path("/dir1/dir2");
    fileSys.mkdirs(dir2);

    // /dir1/file1 is expected to clone the default ACL.
    Path file1 = new Path("/dir1/file1");
    fileSys.create(file1).close();

    // /dir3 is not a child of /dir1, so must not clone the default ACL.
    // (If thread-local op state leaked between txns, it would.)
    Path dir3 = new Path("/dir3");
    fileSys.mkdirs(dir3);

    // /file2 is not a child of /dir1, so must not clone the default ACL.
    Path file2 = new Path("/file2");
    fileSys.create(file2).close();

    // Restart and assert the above stated expectations: the restart forces
    // the namenode to replay the edit log.
    IOUtils.cleanup(LOG, fileSys);
    cluster.restartNameNode();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.getAclStatus(dir1).getEntries().isEmpty());
    assertFalse(fileSys.getAclStatus(dir2).getEntries().isEmpty());
    assertFalse(fileSys.getAclStatus(file1).getEntries().isEmpty());
    assertTrue(fileSys.getAclStatus(dir3).getEntries().isEmpty());
    assertTrue(fileSys.getAclStatus(file2).getEntries().isEmpty());
  } finally {
    IOUtils.cleanup(LOG, fileSys);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Log4j appender that buffers LoggingEvents in memory so tests can
 * inspect everything that was logged.
 */
class TestAppender extends AppenderSkeleton {
  private final List<LoggingEvent> events = new ArrayList<>();

  @Override
  public boolean requiresLayout() {
    return false;
  }

  @Override
  protected void append(final LoggingEvent loggingEvent) {
    events.add(loggingEvent);
  }

  @Override
  public void close() {
    // no resources to release
  }

  /** @return a defensive copy of all events appended so far */
  public List<LoggingEvent> getLog() {
    return new ArrayList<>(events);
  }
}
/**
 * Regression test: tailing an in-progress edit log must not attempt to
 * read past the last synced op, even if bytes beyond that position have
 * been modified on disk by a concurrent writer.
 *
 * @throws Exception on cluster setup or I/O failure
 */
@Test
public void testReadActivelyUpdatedLog() throws Exception {
  final TestAppender appender = new TestAppender();
  LogManager.getRootLogger().addAppender(appender);
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // Set single handler thread, so all transactions hit same thread-local ops.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    StorageDirectory sd = fsimage.getStorage().getStorageDir(0);

    final DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
    fileSys.mkdirs(new Path("/test"));
    fileSys.mkdirs(new Path("/test/dir1"));
    fileSys.delete(new Path("/test/dir1"), true);
    fsimage.getEditLog().logSync();
    fileSys.mkdirs(new Path("/test/dir2"));

    final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
    assertTrue(inProgressEdit.exists());
    EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
    FSEditLogOp op;
    long pos = 0;
    // Scan forward to the position just past the last valid (synced) op.
    while (true) {
      op = elis.readOp();
      if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
        pos = elis.getPosition();
      } else {
        break;
      }
    }
    elis.close();
    assertTrue(pos > 0);

    // Scribble on the bytes past the synced region to simulate an active
    // writer; the tailer must not attempt to parse them.
    RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
    rwf.seek(pos);
    assertEquals(rwf.readByte(), (byte) -1);
    rwf.seek(pos + 1);
    rwf.writeByte(2);
    rwf.close();

    events.poll();

    // If the tailer read past the synced region it logs this message;
    // its presence therefore fails the test.
    String pattern = "Caught exception after reading (.*) ops";
    Pattern r = Pattern.compile(pattern);
    final List<LoggingEvent> log = appender.getLog();
    for (LoggingEvent event : log) {
      Matcher m = r.matcher(event.getRenderedMessage());
      if (m.find()) {
        // Fixed typo in failure message ("syned" -> "synced").
        fail("Should not try to read past latest synced edit log op");
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    LogManager.getRootLogger().removeAppender(appender);
  }
}
}
| dennishuo/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java | Java | apache-2.0 | 59,670 |
-- Product available for sale; price carries two decimal places.
create table SALES_PRODUCT (
    -- standard audit / soft-delete / optimistic-lock system columns
    ID varchar(36) not null,
    CREATE_TS timestamp,
    CREATED_BY varchar(50),
    VERSION integer,
    UPDATE_TS timestamp,
    UPDATED_BY varchar(50),
    DELETE_TS timestamp,
    DELETED_BY varchar(50),
    --
    -- domain columns
    NAME varchar(50) not null,
    PRICE decimal(19, 2) not null,
    --
    primary key (ID)
);
| Haulmont/cuba-charts-maps | modules/core/db/update/hsql/15/150429-1-createProduct.sql | SQL | apache-2.0 | 342 |
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <chrono>
#include <iostream>
// Use the C histogram rather than C++ to avoid depending on proto
#include "test/core/util/histogram.h"
#include "test/cpp/qps/interarrival.h"
#include "test/cpp/util/test_config.h"
using grpc::testing::InterarrivalTimer;
using grpc::testing::RandomDistInterface;
static void RunTest(RandomDistInterface&& r, int threads, std::string title) {
InterarrivalTimer timer;
timer.init(r, threads);
grpc_histogram* h(grpc_histogram_create(0.01, 60e9));
for (int i = 0; i < 10000000; i++) {
for (int j = 0; j < threads; j++) {
grpc_histogram_add(h, timer.next(j));
}
}
std::cout << title << " Distribution" << std::endl;
std::cout << "Value, Percentile" << std::endl;
for (double pct = 0.0; pct < 100.0; pct += 1.0) {
std::cout << grpc_histogram_percentile(h, pct) << "," << pct << std::endl;
}
grpc_histogram_destroy(h);
}
using grpc::testing::ExpDist;

// Entry point: prints the sampled percentile table for an Exponential(10)
// interarrival distribution interleaved across 5 simulated threads.
int main(int argc, char** argv) {
  grpc::testing::InitTest(&argc, &argv, true);

  RunTest(ExpDist(10.0), 5, std::string("Exponential(10)"));
  return 0;
}
| kpayson64/grpc | test/cpp/qps/qps_interarrival_test.cc | C++ | apache-2.0 | 1,693 |
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Commands.ResourceManager.Common.ArgumentCompleters;
using Microsoft.Azure.Commands.ResourceManager.Common.Tags;
using Microsoft.Azure.Commands.Sql.Properties;
using Microsoft.Azure.Commands.Sql.Replication.Model;
using Microsoft.Azure.Commands.Sql.Database.Services;
using Microsoft.Rest.Azure;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
namespace Microsoft.Azure.Commands.Sql.Replication.Cmdlet
{
/// <summary>
/// Cmdlet to create a new Azure SQL Database Copy
/// </summary>
[Cmdlet("New", ResourceManager.Common.AzureRMConstants.AzureRMPrefix + "SqlDatabaseCopy", ConfirmImpact = ConfirmImpact.Low, SupportsShouldProcess = true, DefaultParameterSetName = DtuDatabaseParameterSet), OutputType(typeof(AzureSqlDatabaseCopyModel))]
public class NewAzureSqlDatabaseCopy : AzureSqlDatabaseCopyCmdletBase
{
// Parameter-set names: the copy is sized either the DTU way (service
// objective / elastic pool) or the vCore way (compute generation + count).
private const string DtuDatabaseParameterSet = "DtuBasedDatabase";
private const string VcoreDatabaseParameterSet = "VcoreBasedDatabase";

/// <summary>
/// Gets or sets the name of the database to be copied.
/// </summary>
[Parameter(Mandatory = true,
    ValueFromPipelineByPropertyName = true,
    Position = 2,
    HelpMessage = "The name of the Azure SQL Database to be copied.")]
[ValidateNotNullOrEmpty]
public string DatabaseName { get; set; }

/// <summary>
/// Gets or sets the name of the service objective to assign to the Azure SQL Database copy.
/// DTU parameter set only.
/// </summary>
[Parameter(ParameterSetName = DtuDatabaseParameterSet, Mandatory = false,
    HelpMessage = "The name of the service objective to assign to the Azure SQL Database copy.")]
[ValidateNotNullOrEmpty]
public string ServiceObjectiveName { get; set; }

/// <summary>
/// Gets or sets the name of the Elastic Pool to put the database copy in.
/// DTU parameter set only.
/// </summary>
[Parameter(ParameterSetName = DtuDatabaseParameterSet, Mandatory = false,
    HelpMessage = "The name of the Elastic Pool to put the database copy in.")]
[ValidateNotNullOrEmpty]
public string ElasticPoolName { get; set; }

/// <summary>
/// Gets or sets the tags associated with the Azure SQL Database Copy.
/// </summary>
[Parameter(Mandatory = false,
    HelpMessage = "The tags to associate with the Azure SQL Database Copy")]
[Alias("Tag")]
public Hashtable Tags { get; set; }

/// <summary>
/// Gets or sets the name of the resource group of the copy.
/// Defaults to the source resource group when omitted.
/// </summary>
[Parameter(Mandatory = false,
    HelpMessage = "The name of the resource group of the copy.")]
[ValidateNotNullOrEmpty]
public string CopyResourceGroupName { get; set; }

/// <summary>
/// Gets or sets the name of the Azure SQL Server of the copy.
/// Defaults to the source server when omitted.
/// </summary>
[Parameter(Mandatory = false,
    HelpMessage = "The name of the Azure SQL Server of the copy.")]
[ValidateNotNullOrEmpty]
public string CopyServerName { get; set; }

/// <summary>
/// Gets or sets the name of the database copy to create (the target,
/// not the source).
/// </summary>
[Parameter(Mandatory = true,
    HelpMessage = "The name of the Azure SQL Database copy.")]
[ValidateNotNullOrEmpty]
public string CopyDatabaseName { get; set; }

/// <summary>
/// Gets or sets whether or not to run this cmdlet in the background as a job.
/// </summary>
[Parameter(Mandatory = false, HelpMessage = "Run cmdlet in the background")]
public SwitchParameter AsJob { get; set; }

/// <summary>
/// Gets or sets the compute generation of the database copy.
/// vCore parameter set only.
/// </summary>
[Parameter(ParameterSetName = VcoreDatabaseParameterSet, Mandatory = true,
    HelpMessage = "The compute generation to assign to the new copy.")]
[Alias("Family")]
[PSArgumentCompleter("Gen4", "Gen5")]
[ValidateNotNullOrEmpty]
public string ComputeGeneration { get; set; }

/// <summary>
/// Gets or sets the vCore count of the database copy.
/// vCore parameter set only.
/// </summary>
[Parameter(ParameterSetName = VcoreDatabaseParameterSet, Mandatory = true,
    HelpMessage = "The Vcore numbers of the Azure Sql Database copy.")]
[Alias("Capacity")]
[ValidateNotNullOrEmpty]
public int VCore { get; set; }

/// <summary>
/// Gets or sets the license type for the Azure Sql database.
/// </summary>
[Parameter(Mandatory = false,
    HelpMessage = "The license type for the Azure Sql database.")]
[PSArgumentCompleter(
    Management.Sql.Models.DatabaseLicenseType.LicenseIncluded,
    Management.Sql.Models.DatabaseLicenseType.BasePrice)]
public string LicenseType { get; set; }
/// <summary>
/// Executes the cmdlet.
/// NOTE(review): the original comment said "Overriding to add warning
/// message", but this override only delegates to the base implementation —
/// confirm whether the warning was removed intentionally or the override
/// can be dropped.
/// </summary>
public override void ExecuteCmdlet()
{
    base.ExecuteCmdlet();
}
/// <summary>
/// Verifies that the target (copy) database does not already exist.
/// A NotFound response from the service is the desired outcome and yields
/// null; any other service error propagates; an existing database throws.
/// </summary>
/// <returns>null when the copy target is free to be created</returns>
protected override IEnumerable<AzureSqlDatabaseCopyModel> GetEntity()
{
    string targetResourceGroup = string.IsNullOrWhiteSpace(this.CopyResourceGroupName)
        ? this.ResourceGroupName
        : this.CopyResourceGroupName;
    string targetServer = string.IsNullOrWhiteSpace(this.CopyServerName)
        ? this.ServerName
        : this.CopyServerName;

    bool copyExists;
    try
    {
        ModelAdapter.GetDatabase(targetResourceGroup, targetServer, this.CopyDatabaseName);
        copyExists = true;
    }
    catch (CloudException ex)
    {
        if (ex.Response.StatusCode != System.Net.HttpStatusCode.NotFound)
        {
            // Unexpected service failure — surface it.
            throw;
        }
        // No database with this name: exactly what a create-copy wants.
        copyExists = false;
    }

    if (copyExists)
    {
        throw new PSArgumentException(
            string.Format(Resources.DatabaseNameExists, this.CopyDatabaseName, targetServer),
            "CopyDatabaseName");
    }

    return null;
}
/// <summary>
/// Create the model from user input. Resolves target resource group,
/// server, and locations (falling back to the source values when the
/// Copy* parameters are omitted), then fills in sku/edition/capacity
/// according to the active parameter set.
/// </summary>
/// <param name="model">Model retrieved from service</param>
/// <returns>The model that was passed in</returns>
protected override IEnumerable<AzureSqlDatabaseCopyModel> ApplyUserInputToModel(IEnumerable<AzureSqlDatabaseCopyModel> model)
{
    string copyResourceGroup = string.IsNullOrWhiteSpace(CopyResourceGroupName) ? ResourceGroupName : CopyResourceGroupName;
    string copyServer = string.IsNullOrWhiteSpace(CopyServerName) ? ServerName : CopyServerName;
    string location = ModelAdapter.GetServerLocation(ResourceGroupName, ServerName);
    // Avoid a second service round-trip when source and target server match.
    string copyLocation = copyServer.Equals(ServerName) ? location : ModelAdapter.GetServerLocation(copyResourceGroup, copyServer);
    Database.Model.AzureSqlDatabaseModel sourceDb = ModelAdapter.GetDatabase(ResourceGroupName, ServerName, DatabaseName);
    List<Model.AzureSqlDatabaseCopyModel> newEntity = new List<AzureSqlDatabaseCopyModel>();
    AzureSqlDatabaseCopyModel copyModel = new AzureSqlDatabaseCopyModel()
    {
        Location = location,
        ResourceGroupName = ResourceGroupName,
        ServerName = ServerName,
        DatabaseName = DatabaseName,
        CopyResourceGroupName = copyResourceGroup,
        CopyServerName = copyServer,
        CopyDatabaseName = CopyDatabaseName,
        CopyLocation = copyLocation,
        ServiceObjectiveName = ServiceObjectiveName,
        ElasticPoolName = ElasticPoolName,
        Tags = TagsConversionHelper.CreateTagDictionary(Tags, validate: true),
        LicenseType = LicenseType // note: default license type is LicenseIncluded
    };

    if(ParameterSetName == DtuDatabaseParameterSet)
    {
        if (!string.IsNullOrWhiteSpace(ServiceObjectiveName))
        {
            // Explicit service objective wins.
            copyModel.SkuName = ServiceObjectiveName;
        }
        else if(string.IsNullOrWhiteSpace(ElasticPoolName))
        {
            // Neither objective nor pool given: inherit sizing from the
            // source database. (When a pool IS given, sizing comes from the
            // pool, so nothing is set here.)
            copyModel.SkuName = sourceDb.CurrentServiceObjectiveName;
            copyModel.Edition = sourceDb.Edition;
            copyModel.Capacity = sourceDb.Capacity;
            copyModel.Family = sourceDb.Family;
        }
    }
    else
    {
        // vCore parameter set: edition comes from the source, capacity and
        // compute generation from the user.
        copyModel.SkuName = AzureSqlDatabaseAdapter.GetDatabaseSkuName(sourceDb.Edition);
        copyModel.Edition = sourceDb.Edition;
        copyModel.Capacity = VCore;
        copyModel.Family = ComputeGeneration;
    }

    newEntity.Add(copyModel);
    return newEntity;
}
/// <summary>
/// Sends the database-copy request to the service.
/// </summary>
/// <param name="entity">The output of ApplyUserInputToModel; its first
/// element describes the copy to create</param>
/// <returns>A single-element list containing the created copy's model</returns>
protected override IEnumerable<AzureSqlDatabaseCopyModel> PersistChanges(IEnumerable<AzureSqlDatabaseCopyModel> entity)
{
    // Materialize the first element once instead of calling First() three
    // times, which would re-enumerate a lazy sequence on every access.
    AzureSqlDatabaseCopyModel copy = entity.First();
    return new List<AzureSqlDatabaseCopyModel>()
    {
        ModelAdapter.CopyDatabaseWithNewSdk(copy.CopyResourceGroupName, copy.CopyServerName, copy)
    };
}
}
}
| AzureAutomationTeam/azure-powershell | src/ResourceManager/Sql/Commands.Sql/Replication/Cmdlet/NewAzureSqlDatabaseCopy.cs | C# | apache-2.0 | 10,818 |
<!DOCTYPE html>
<html lang="en">
<!-- head头部分开始 -->
<head>
<include file="Public/public_head" title="开源项目-" keywords="{$Think.config.WEB_KEYWORDS}" description="{$Think.config.WEB_DESCRIPTION}" />
</head>
<!-- head头部分结束 -->
<body>
<!-- 顶部导航开始 -->
<include file="Public/public_nav" />
<!-- 顶部导航结束 -->
<div class="b-h-70"></div>
<!-- 主体部分开始 -->
<div id="b-content" class="container">
<div class="row">
<!-- 左侧开源项目开始 -->
<div class="col-xs-12 col-md-12 col-lg-8 b-chat">
<!-- bjyadmin开始 -->
<script src='http://git.oschina.net/shuaibai123/thinkphp-bjyadmin/widget_preview'></script>
<style>
.pro_name a{color: #4183c4;}
.osc_git_title{background-color: #d8e5f1;}
.osc_git_box{background-color: #fafafa;}
.osc_git_box{border-color: #ddd;}
.osc_git_info{color: #666;}
.osc_git_main a{color: #4183c4;}
</style>
<!-- bjyadmin结束 -->
<!-- bjyblog开始 -->
<script src='http://git.oschina.net/shuaibai123/thinkbjy/widget_preview'></script>
<style>
.pro_name a{color: #4183c4;}
.osc_git_title{background-color: #d8e5f1;}
.osc_git_box{background-color: #fafafa;}
.osc_git_box{border-color: #ddd;}
.osc_git_info{color: #666;}
.osc_git_main a{color: #4183c4;}
</style>
<!-- bjyblog结束 -->
<!-- sublime开始 -->
<script src='http://git.oschina.net/shuaibai123/sublime-thinkphp-bjy/widget_preview'></script>
<style>
.pro_name a{color: #4183c4;}
.osc_git_title{background-color: #d8e5f1;}
.osc_git_box{background-color: #fafafa;}
.osc_git_box{border-color: #ddd;}
.osc_git_info{color: #666;}
.osc_git_main a{color: #4183c4;}
</style>
<!-- sublime结束 -->
<!-- 资源开始 -->
<script src='http://git.oschina.net/shuaibai123/resources/widget_preview'></script>
<style>
.pro_name a{color: #4183c4;}
.osc_git_title{background-color: #d8e5f1;}
.osc_git_box{background-color: #fafafa;}
.osc_git_box{border-color: #ddd;}
.osc_git_info{color: #666;}
.osc_git_main a{color: #4183c4;}
</style>
<!-- 资源结束 -->
<!-- github 上的项目 -->
<div class="github-widget" data-repo="baijunyao/thinkphp-bjyadmin"></div>
<div class="github-widget" data-repo="baijunyao/thinkphp-bjyblog"></div>
</div>
<!-- 左侧开源项目结束 -->
<!-- 通用右侧开始 -->
<include file="Public/public_right" />
<!-- 通用右侧结束 -->
</div>
<div class="row">
<!-- 底部文件开始 -->
<include file="Public/public_foot" />
<!-- 通用底部文件结束 -->
</div>
</div>
<!-- 主体部分结束 -->
<!-- 登录框开始 -->
<include file="Public/public_login" />
<!-- 登录框结束 -->
<script src="__PUBLIC__/static/js/jquery.githubRepoWidget.min.js"></script>
<!-- 让osc的链接新窗口打开 -->
<script type="text/javascript">
    // Once the DOM is ready, make every link inside the OSC and GitHub
    // widgets open in a new tab.
    $(function () {
        var $widgetLinks = $('.osc_git_box a, .github-widget a');
        $widgetLinks.attr('target', '_blank');
    });
</script>
</body>
</html>
| shuaibai/thinkphp-bjyblog | Template/default_src/Home/Index/git.html | HTML | apache-2.0 | 3,602 |
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#include <string>
#include "db/table_properties_collector.h"
#include "rocksdb/types.h"
#include "util/coding.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
// Names of table properties that are specific to tables created by
// SstFileWriter (externally built / ingested SST files).
struct ExternalSstFilePropertyNames {
  // Property key for the external SST file format version; the stored
  // value is a Fixed32-encoded uint32 (see SstFileWriterPropertiesCollector).
  static const std::string kVersion;
  // Property key for the global sequence number assigned to the file; the
  // stored value is a Fixed64-encoded uint64.
  static const std::string kGlobalSeqno;
};
// PropertiesCollector used to add properties specific to tables
// generated by SstFileWriter
class SstFileWriterPropertiesCollector : public IntTblPropCollector {
public:
explicit SstFileWriterPropertiesCollector(int32_t version,
SequenceNumber global_seqno)
: version_(version), global_seqno_(global_seqno) {}
virtual Status InternalAdd(const Slice& /*key*/, const Slice& /*value*/,
uint64_t /*file_size*/) override {
// Intentionally left blank. Have no interest in collecting stats for
// individual key/value pairs.
return Status::OK();
}
virtual void BlockAdd(uint64_t /* block_raw_bytes */,
uint64_t /* block_compressed_bytes_fast */,
uint64_t /* block_compressed_bytes_slow */) override {
// Intentionally left blank. No interest in collecting stats for
// blocks.
return;
}
virtual Status Finish(UserCollectedProperties* properties) override {
// File version
std::string version_val;
PutFixed32(&version_val, static_cast<uint32_t>(version_));
properties->insert({ExternalSstFilePropertyNames::kVersion, version_val});
// Global Sequence number
std::string seqno_val;
PutFixed64(&seqno_val, static_cast<uint64_t>(global_seqno_));
properties->insert({ExternalSstFilePropertyNames::kGlobalSeqno, seqno_val});
return Status::OK();
}
virtual const char* Name() const override {
return "SstFileWriterPropertiesCollector";
}
virtual UserCollectedProperties GetReadableProperties() const override {
return {{ExternalSstFilePropertyNames::kVersion, ToString(version_)}};
}
private:
int32_t version_;
SequenceNumber global_seqno_;
};
class SstFileWriterPropertiesCollectorFactory
: public IntTblPropCollectorFactory {
public:
explicit SstFileWriterPropertiesCollectorFactory(int32_t version,
SequenceNumber global_seqno)
: version_(version), global_seqno_(global_seqno) {}
virtual IntTblPropCollector* CreateIntTblPropCollector(
uint32_t /*column_family_id*/, int /* level_at_creation */) override {
return new SstFileWriterPropertiesCollector(version_, global_seqno_);
}
virtual const char* Name() const override {
return "SstFileWriterPropertiesCollector";
}
private:
int32_t version_;
SequenceNumber global_seqno_;
};
} // namespace ROCKSDB_NAMESPACE
| arangodb/arangodb | 3rdParty/rocksdb/6.29/table/sst_file_writer_collectors.h | C | apache-2.0 | 3,237 |
package cvc3;
import java.util.*;
import java.io.*;
/** Wrapper for a C++ object embedded in a Java object.
    See README for details on garbage collection, i.e. the interplay of
    delete, finalize, and EmbeddedManager used to destruct the embedded
    C++ object. */
public abstract class Embedded {
    // Load the JNI C++ library once, when the class is first used.
    static {
        System.loadLibrary("cvc3jni");
    }

    /// Attributes

    // the embedded C++ object; null once deleted (or queued for deletion)
    protected Object d_embedded;
    // embedded object manager; may be null, in which case delete() must be
    // called explicitly before this object is garbage collected
    private final EmbeddedManager d_embeddedManager;

    /// Constructor

    /**
     * Initialize with the embedded C++ object and its EmbeddedManager.
     * If embeddedManager is null then delete() must be called before this
     * Embedded is garbage collected.
     */
    // FIX: the parameter was previously named "Embedded", shadowing the
    // class name and violating Java naming conventions; renamed for clarity
    // (parameter names are not part of the binary interface).
    protected Embedded(Object embedded, EmbeddedManager embeddedManager) {
        assert(embedded != null);
        d_embedded = embedded;
        d_embeddedManager = embeddedManager;
    }

    // access to the embedded C++ object
    public synchronized Object embedded() {
        return d_embedded;
    }

    // access to the EmbeddedManager (might be null if none is used)
    public EmbeddedManager embeddedManager() {
        return d_embeddedManager;
    }

    // check if already destructed
    // (or queued for destruction in the embeddedManager)
    public synchronized boolean isDeleted() {
        return (d_embedded == null);
    }

    // delete the embedded object, or enqueue it for deletion
    public synchronized void delete() throws Cvc3Exception {
        if (isDeleted()) return;

        // no embedded manager, so should be in the main thread:
        // destruct right away
        if (d_embeddedManager == null) {
            EmbeddedManager.jniDelete(d_embedded);
        }
        // could be in a finalizer, so queue in the embeddedManager;
        // unless the embeddedManager is already deleted, in which case its
        // (and this') ValidityChecker has been deleted. Assuming this is an
        // Expr or a Theorem, its embedded object has then already been
        // deleted as well.
        else {
            synchronized(d_embeddedManager) {
                if (!d_embeddedManager.isDeleted()) {
                    d_embeddedManager.register(this);
                }
            }
        }
        d_embedded = null;
    }

    // ensure that delete is called if finalization occurs
    public void finalize() throws Throwable {
        try {
            // no embeddedManager, so delete() should have been called already
            if (d_embeddedManager == null) {
                if (d_embedded != null) {
                    assert(false);
                    throw new Error("Embedded.Finalizer: should never be called");
                }
            }
            else if (!d_embeddedManager.isDeleted()) {
                delete();
            }
        } finally {
            super.finalize();
        }
    }
}
| ehsan/js-symbolic-executor | cvc3/java/src/cvc3/Embedded.java | Java | apache-2.0 | 2,909 |
/*
* Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
package com.sun.java.swing.plaf.windows;
import java.awt.*;
import java.awt.event.*;
import java.awt.image.*;
import java.lang.ref.*;
import java.util.*;
import javax.swing.plaf.basic.*;
import javax.swing.*;
import javax.swing.plaf.ComponentUI;
import static com.sun.java.swing.plaf.windows.TMSchema.*;
import static com.sun.java.swing.plaf.windows.XPStyle.Skin;
/**
* Windows rendition of the component.
* <p>
* <strong>Warning:</strong>
* Serialized objects of this class will not be compatible with
* future Swing releases. The current serialization support is appropriate
* for short term storage or RMI between applications running the same
* version of Swing. A future release of Swing will provide support for
* long term persistence.
*/
public class WindowsScrollBarUI extends BasicScrollBarUI {
private Grid thumbGrid;
private Grid highlightGrid;
private Dimension horizontalThumbSize;
private Dimension verticalThumbSize;
/**
* Creates a UI for a JScrollBar.
*
* @param c the text field
* @return the UI
*/
public static ComponentUI createUI(JComponent c) {
return new WindowsScrollBarUI();
}
protected void installDefaults() {
super.installDefaults();
XPStyle xp = XPStyle.getXP();
if (xp != null) {
scrollbar.setBorder(null);
horizontalThumbSize = getSize(scrollbar, xp, Part.SBP_THUMBBTNHORZ);
verticalThumbSize = getSize(scrollbar, xp, Part.SBP_THUMBBTNVERT);
} else {
horizontalThumbSize = null;
verticalThumbSize = null;
}
}
private static Dimension getSize(Component component, XPStyle xp, Part part) {
Skin skin = xp.getSkin(component, part);
return new Dimension(skin.getWidth(), skin.getHeight());
}
@Override
protected Dimension getMinimumThumbSize() {
    // Without XP styles the cached thumb sizes are unset (see
    // installDefaults); defer to the basic L&F's minimum.
    if (horizontalThumbSize == null || verticalThumbSize == null) {
        return super.getMinimumThumbSize();
    }
    if (scrollbar.getOrientation() == JScrollBar.HORIZONTAL) {
        return horizontalThumbSize;
    }
    return verticalThumbSize;
}
public void uninstallUI(JComponent c) {
super.uninstallUI(c);
thumbGrid = highlightGrid = null;
}
protected void configureScrollBarColors() {
super.configureScrollBarColors();
Color color = UIManager.getColor("ScrollBar.trackForeground");
if (color != null && trackColor != null) {
thumbGrid = Grid.getGrid(color, trackColor);
}
color = UIManager.getColor("ScrollBar.trackHighlightForeground");
if (color != null && trackHighlightColor != null) {
highlightGrid = Grid.getGrid(color, trackHighlightColor);
}
}
protected JButton createDecreaseButton(int orientation) {
return new WindowsArrowButton(orientation,
UIManager.getColor("ScrollBar.thumb"),
UIManager.getColor("ScrollBar.thumbShadow"),
UIManager.getColor("ScrollBar.thumbDarkShadow"),
UIManager.getColor("ScrollBar.thumbHighlight"));
}
protected JButton createIncreaseButton(int orientation) {
return new WindowsArrowButton(orientation,
UIManager.getColor("ScrollBar.thumb"),
UIManager.getColor("ScrollBar.thumbShadow"),
UIManager.getColor("ScrollBar.thumbDarkShadow"),
UIManager.getColor("ScrollBar.thumbHighlight"));
}
/**
* {@inheritDoc}
* @since 1.6
*/
@Override
protected ArrowButtonListener createArrowButtonListener(){
// we need to repaint the entire scrollbar because state change for each
// button causes a state change for the thumb and other button on Vista
if(XPStyle.isVista()) {
return new ArrowButtonListener() {
public void mouseEntered(MouseEvent evt) {
repaint();
super.mouseEntered(evt);
}
public void mouseExited(MouseEvent evt) {
repaint();
super.mouseExited(evt);
}
private void repaint() {
scrollbar.repaint();
}
};
} else {
return super.createArrowButtonListener();
}
}
// Paints the scrollbar track. Three rendering paths:
//   1. XP/Vista visual styles available: paint the themed track skin.
//   2. No XP styles and no custom track grid: defer to the basic L&F.
//   3. Custom track grid configured: paint the dithered grid and any
//      active block-increment highlight.
protected void paintTrack(Graphics g, JComponent c, Rectangle trackBounds){
boolean v = (scrollbar.getOrientation() == JScrollBar.VERTICAL);
XPStyle xp = XPStyle.getXP();
if (xp != null) {
JScrollBar sb = (JScrollBar)c;
State state = State.NORMAL;
// Pending: Implement rollover (hot) and pressed
if (!sb.isEnabled()) {
state = State.DISABLED;
}
Part part = v ? Part.SBP_LOWERTRACKVERT : Part.SBP_LOWERTRACKHORZ;
xp.getSkin(sb, part).paintSkin(g, trackBounds, state);
} else if (thumbGrid == null) {
super.paintTrack(g, c, trackBounds);
}
else {
thumbGrid.paint(g, trackBounds.x, trackBounds.y, trackBounds.width,
trackBounds.height);
if (trackHighlight == DECREASE_HIGHLIGHT) {
paintDecreaseHighlight(g);
}
else if (trackHighlight == INCREASE_HIGHLIGHT) {
paintIncreaseHighlight(g);
}
}
}
protected void paintThumb(Graphics g, JComponent c, Rectangle thumbBounds) {
boolean v = (scrollbar.getOrientation() == JScrollBar.VERTICAL);
XPStyle xp = XPStyle.getXP();
if (xp != null) {
JScrollBar sb = (JScrollBar)c;
State state = State.NORMAL;
if (!sb.isEnabled()) {
state = State.DISABLED;
} else if (isDragging) {
state = State.PRESSED;
} else if (isThumbRollover()) {
state = State.HOT;
} else if (XPStyle.isVista()) {
if ((incrButton != null && incrButton.getModel().isRollover()) ||
(decrButton != null && decrButton.getModel().isRollover())) {
state = State.HOVER;
}
}
// Paint thumb
Part thumbPart = v ? Part.SBP_THUMBBTNVERT : Part.SBP_THUMBBTNHORZ;
xp.getSkin(sb, thumbPart).paintSkin(g, thumbBounds, state);
// Paint gripper
Part gripperPart = v ? Part.SBP_GRIPPERVERT : Part.SBP_GRIPPERHORZ;
Skin skin = xp.getSkin(sb, gripperPart);
Insets gripperInsets = xp.getMargin(c, thumbPart, null, Prop.CONTENTMARGINS);
if (gripperInsets == null ||
(v && (thumbBounds.height - gripperInsets.top -
gripperInsets.bottom >= skin.getHeight())) ||
(!v && (thumbBounds.width - gripperInsets.left -
gripperInsets.right >= skin.getWidth()))) {
skin.paintSkin(g,
thumbBounds.x + (thumbBounds.width - skin.getWidth()) / 2,
thumbBounds.y + (thumbBounds.height - skin.getHeight()) / 2,
skin.getWidth(), skin.getHeight(), state);
}
} else {
super.paintThumb(g, c, thumbBounds);
}
}
// Paints the highlight between the decrease (up/left) arrow button and
// the thumb, shown while a block-decrement is in progress. Falls back to
// the basic L&F when no highlight grid is configured.
protected void paintDecreaseHighlight(Graphics g) {
if (highlightGrid == null) {
super.paintDecreaseHighlight(g);
}
else {
Insets insets = scrollbar.getInsets();
Rectangle thumbR = getThumbBounds();
int x, y, w, h;
// Highlight spans from the decrease button's far edge to the near
// edge of the thumb, across the track's full breadth.
if (scrollbar.getOrientation() == JScrollBar.VERTICAL) {
x = insets.left;
y = decrButton.getY() + decrButton.getHeight();
w = scrollbar.getWidth() - (insets.left + insets.right);
h = thumbR.y - y;
}
else {
// NOTE(review): uses decrButton.getHeight() for a horizontal
// offset; for the square arrow buttons used here width and
// height coincide (see WindowsArrowButton.getPreferredSize),
// but getWidth() would be the clearer choice -- confirm.
x = decrButton.getX() + decrButton.getHeight();
y = insets.top;
w = thumbR.x - x;
h = scrollbar.getHeight() - (insets.top + insets.bottom);
}
highlightGrid.paint(g, x, y, w, h);
}
}
// Paints the highlight between the thumb and the increase (down/right)
// arrow button, shown while a block-increment is in progress.
protected void paintIncreaseHighlight(Graphics g) {
    if (highlightGrid == null) {
        // BUG FIX: this fallback previously called
        // super.paintDecreaseHighlight(g), a copy-paste error that
        // highlighted the wrong end of the track when no highlight grid
        // was configured.
        super.paintIncreaseHighlight(g);
    }
    else {
        Insets insets = scrollbar.getInsets();
        Rectangle thumbR = getThumbBounds();
        int x, y, w, h;

        // Highlight spans from the far edge of the thumb to the increase
        // button, across the track's full breadth.
        if (scrollbar.getOrientation() == JScrollBar.VERTICAL) {
            x = insets.left;
            y = thumbR.y + thumbR.height;
            w = scrollbar.getWidth() - (insets.left + insets.right);
            h = incrButton.getY() - y;
        }
        else {
            x = thumbR.x + thumbR.width;
            y = insets.top;
            w = incrButton.getX() - x;
            h = scrollbar.getHeight() - (insets.top + insets.bottom);
        }
        highlightGrid.paint(g, x, y, w, h);
    }
}
/**
* {@inheritDoc}
* @since 1.6
*/
@Override
protected void setThumbRollover(boolean active) {
boolean old = isThumbRollover();
super.setThumbRollover(active);
// we need to repaint the entire scrollbar because state change for thumb
// causes state change for incr and decr buttons on Vista
if(XPStyle.isVista() && active != old) {
scrollbar.repaint();
}
}
/**
* WindowsArrowButton is used for the buttons to position the
* document up/down. It differs from BasicArrowButton in that the
* preferred size is always a square.
*/
private class WindowsArrowButton extends BasicArrowButton {
public WindowsArrowButton(int direction, Color background, Color shadow,
Color darkShadow, Color highlight) {
super(direction, background, shadow, darkShadow, highlight);
}
public WindowsArrowButton(int direction) {
super(direction);
}
public void paint(Graphics g) {
XPStyle xp = XPStyle.getXP();
if (xp != null) {
ButtonModel model = getModel();
Skin skin = xp.getSkin(this, Part.SBP_ARROWBTN);
State state = null;
boolean jointRollover = XPStyle.isVista() && (isThumbRollover() ||
(this == incrButton && decrButton.getModel().isRollover()) ||
(this == decrButton && incrButton.getModel().isRollover()));
// normal, rollover, pressed, disabled
if (model.isArmed() && model.isPressed()) {
switch (direction) {
case NORTH: state = State.UPPRESSED; break;
case SOUTH: state = State.DOWNPRESSED; break;
case WEST: state = State.LEFTPRESSED; break;
case EAST: state = State.RIGHTPRESSED; break;
}
} else if (!model.isEnabled()) {
switch (direction) {
case NORTH: state = State.UPDISABLED; break;
case SOUTH: state = State.DOWNDISABLED; break;
case WEST: state = State.LEFTDISABLED; break;
case EAST: state = State.RIGHTDISABLED; break;
}
} else if (model.isRollover() || model.isPressed()) {
switch (direction) {
case NORTH: state = State.UPHOT; break;
case SOUTH: state = State.DOWNHOT; break;
case WEST: state = State.LEFTHOT; break;
case EAST: state = State.RIGHTHOT; break;
}
} else if (jointRollover) {
switch (direction) {
case NORTH: state = State.UPHOVER; break;
case SOUTH: state = State.DOWNHOVER; break;
case WEST: state = State.LEFTHOVER; break;
case EAST: state = State.RIGHTHOVER; break;
}
} else {
switch (direction) {
case NORTH: state = State.UPNORMAL; break;
case SOUTH: state = State.DOWNNORMAL; break;
case WEST: state = State.LEFTNORMAL; break;
case EAST: state = State.RIGHTNORMAL; break;
}
}
skin.paintSkin(g, 0, 0, getWidth(), getHeight(), state);
} else {
super.paint(g);
}
}
public Dimension getPreferredSize() {
int size = 16;
if (scrollbar != null) {
switch (scrollbar.getOrientation()) {
case JScrollBar.VERTICAL:
size = scrollbar.getWidth();
break;
case JScrollBar.HORIZONTAL:
size = scrollbar.getHeight();
break;
}
size = Math.max(size, 5);
}
return new Dimension(size, size);
}
}
/**
* This should be pulled out into its own class if more classes need to
* use it.
* <p>
* Grid is used to draw the track for windows scrollbars. Grids
* are cached in a HashMap, with the key being the rgb components
* of the foreground/background colors. Further the Grid is held through
* a WeakRef so that it can be freed when no longer needed. As the
* Grid is rather expensive to draw, it is drawn in a BufferedImage.
*/
private static class Grid {
private static final int BUFFER_SIZE = 64;
private static HashMap<String, WeakReference<Grid>> map;
private BufferedImage image;
static {
map = new HashMap<String, WeakReference<Grid>>();
}
public static Grid getGrid(Color fg, Color bg) {
String key = fg.getRGB() + " " + bg.getRGB();
WeakReference<Grid> ref = map.get(key);
Grid grid = (ref == null) ? null : ref.get();
if (grid == null) {
grid = new Grid(fg, bg);
map.put(key, new WeakReference<Grid>(grid));
}
return grid;
}
public Grid(Color fg, Color bg) {
int cmap[] = { fg.getRGB(), bg.getRGB() };
IndexColorModel icm = new IndexColorModel(8, 2, cmap, 0, false, -1,
DataBuffer.TYPE_BYTE);
image = new BufferedImage(BUFFER_SIZE, BUFFER_SIZE,
BufferedImage.TYPE_BYTE_INDEXED, icm);
Graphics g = image.getGraphics();
try {
g.setClip(0, 0, BUFFER_SIZE, BUFFER_SIZE);
paintGrid(g, fg, bg);
}
finally {
g.dispose();
}
}
/**
* Paints the grid into the specified Graphics at the specified
* location.
*/
public void paint(Graphics g, int x, int y, int w, int h) {
Rectangle clipRect = g.getClipBounds();
int minX = Math.max(x, clipRect.x);
int minY = Math.max(y, clipRect.y);
int maxX = Math.min(clipRect.x + clipRect.width, x + w);
int maxY = Math.min(clipRect.y + clipRect.height, y + h);
if (maxX <= minX || maxY <= minY) {
return;
}
int xOffset = (minX - x) % 2;
for (int xCounter = minX; xCounter < maxX;
xCounter += BUFFER_SIZE) {
int yOffset = (minY - y) % 2;
int width = Math.min(BUFFER_SIZE - xOffset,
maxX - xCounter);
for (int yCounter = minY; yCounter < maxY;
yCounter += BUFFER_SIZE) {
int height = Math.min(BUFFER_SIZE - yOffset,
maxY - yCounter);
g.drawImage(image, xCounter, yCounter,
xCounter + width, yCounter + height,
xOffset, yOffset,
xOffset + width, yOffset + height, null);
if (yOffset != 0) {
yCounter -= yOffset;
yOffset = 0;
}
}
if (xOffset != 0) {
xCounter -= xOffset;
xOffset = 0;
}
}
}
/**
* Actually renders the grid into the Graphics <code>g</code>.
*/
private void paintGrid(Graphics g, Color fg, Color bg) {
Rectangle clipRect = g.getClipBounds();
g.setColor(bg);
g.fillRect(clipRect.x, clipRect.y, clipRect.width,
clipRect.height);
g.setColor(fg);
g.translate(clipRect.x, clipRect.y);
int width = clipRect.width;
int height = clipRect.height;
int xCounter = clipRect.x % 2;
for (int end = width - height; xCounter < end; xCounter += 2) {
g.drawLine(xCounter, 0, xCounter + height, height);
}
for (int end = width; xCounter < end; xCounter += 2) {
g.drawLine(xCounter, 0, width, width - xCounter);
}
int yCounter = ((clipRect.x % 2) == 0) ? 2 : 1;
for (int end = height - width; yCounter < end; yCounter += 2) {
g.drawLine(0, yCounter, width, yCounter + width);
}
for (int end = height; yCounter < end; yCounter += 2) {
g.drawLine(0, yCounter, height - yCounter, height);
}
g.translate(-clipRect.x, -clipRect.y);
}
}
}
| shun634501730/java_source_cn | src_en/com/sun/java/swing/plaf/windows/WindowsScrollBarUI.java | Java | apache-2.0 | 18,587 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.assignment;
import static org.junit.Assert.fail;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Testcase for HBASE-23682.
*/
@Category({ MasterTests.class, MediumTests.class })
public class TestDeadServerMetricRegionChore {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestDeadServerMetricRegionChore.class);
protected HBaseTestingUtil util;
@Before
public void setUp() throws Exception {
util = new HBaseTestingUtil();
// Disable DeadServerMetricRegionChore
util.getConfiguration()
.setInt(AssignmentManager.DEAD_REGION_METRIC_CHORE_INTERVAL_MSEC_CONF_KEY, -1);
}
@After
public void tearDown() throws Exception {
this.util.shutdownMiniCluster();
}
@Test
public void testDeadServerMetricRegionChore() throws Exception {
try {
this.util.startMiniCluster();
} catch (Exception e) {
fail("Start cluster failed");
}
}
}
| apurtell/hbase | hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestDeadServerMetricRegionChore.java | Java | apache-2.0 | 2,127 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.thrift.api.udf;
import com.facebook.drift.annotations.ThriftConstructor;
import com.facebook.drift.annotations.ThriftField;
import com.facebook.drift.annotations.ThriftStruct;
import com.google.common.collect.ImmutableList;
import javax.annotation.Nullable;
import javax.annotation.concurrent.Immutable;
import java.util.List;
import static com.facebook.drift.annotations.ThriftField.Recursiveness.TRUE;
import static com.facebook.drift.annotations.ThriftField.Requiredness.OPTIONAL;
import static java.util.Objects.requireNonNull;
@Immutable
@ThriftStruct
public class UdfExecutionFailureInfo
{
private final String type;
private final String message;
private final UdfExecutionFailureInfo cause;
private final List<UdfExecutionFailureInfo> suppressed;
private final List<String> stack;
@ThriftConstructor
public UdfExecutionFailureInfo(
String type,
String message,
@Nullable UdfExecutionFailureInfo cause,
List<UdfExecutionFailureInfo> suppressed,
List<String> stack)
{
this.type = requireNonNull(type, "type is null");
this.message = requireNonNull(message, "message is null");
this.cause = cause;
this.suppressed = ImmutableList.copyOf(suppressed);
this.stack = ImmutableList.copyOf(stack);
}
@ThriftField(1)
public String getType()
{
return type;
}
@Nullable
@ThriftField(2)
public String getMessage()
{
return message;
}
@Nullable
@ThriftField(value = 3, isRecursive = TRUE, requiredness = OPTIONAL)
public UdfExecutionFailureInfo getCause()
{
return cause;
}
@ThriftField(4)
public List<UdfExecutionFailureInfo> getSuppressed()
{
return suppressed;
}
@ThriftField(5)
public List<String> getStack()
{
return stack;
}
}
| facebook/presto | presto-thrift-api/src/main/java/com/facebook/presto/thrift/api/udf/UdfExecutionFailureInfo.java | Java | apache-2.0 | 2,491 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.mongodb.topology;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.IRichSpout;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import java.util.Map;
import java.util.Random;
import java.util.UUID;
public class WordSpout implements IRichSpout {
boolean isDistributed;
SpoutOutputCollector collector;
public static final String[] words = new String[] { "apple", "orange", "pineapple", "banana", "watermelon" };
public WordSpout() {
this(true);
}
public WordSpout(boolean isDistributed) {
this.isDistributed = isDistributed;
}
public boolean isDistributed() {
return this.isDistributed;
}
@SuppressWarnings("rawtypes")
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
this.collector = collector;
}
public void close() {
}
public void nextTuple() {
final Random rand = new Random();
final String word = words[rand.nextInt(words.length)];
this.collector.emit(new Values(word), UUID.randomUUID());
Thread.yield();
}
public void ack(Object msgId) {
}
public void fail(Object msgId) {
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
declarer.declare(new Fields("word"));
}
@Override
public void activate() {
}
@Override
public void deactivate() {
}
@Override
public Map<String, Object> getComponentConfiguration() {
return null;
}
}
| dke-knu/i2am | rdma-based-storm/examples/storm-mongodb-examples/src/main/java/org/apache/storm/mongodb/topology/WordSpout.java | Java | apache-2.0 | 2,496 |
//
// MessagePack for Java
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package org.msgpack.core;
import java.math.BigInteger;
/**
* This error is thrown when the user tries to read an integer value
* using a smaller types. For example, calling MessageUnpacker.unpackInt() for an integer value
* that is larger than Integer.MAX_VALUE will cause this exception.
*/
public class MessageIntegerOverflowException
        extends MessageTypeException
{
    // Full value that did not fit into the narrower type requested by the caller.
    private final BigInteger bigInteger;

    /**
     * @param bigInteger the overflowing value
     */
    public MessageIntegerOverflowException(BigInteger bigInteger)
    {
        super();
        this.bigInteger = bigInteger;
    }

    /**
     * Convenience constructor for values that still fit in a long.
     */
    public MessageIntegerOverflowException(long value)
    {
        this(BigInteger.valueOf(value));
    }

    public MessageIntegerOverflowException(String message, BigInteger bigInteger)
    {
        super(message);
        this.bigInteger = bigInteger;
    }

    /** Returns the overflowing value in full precision. */
    public BigInteger getBigInteger()
    {
        return bigInteger;
    }

    // NOTE(review): this override reports only the numeric value and discards
    // any message passed to the (String, BigInteger) constructor — presumably
    // intentional, but worth confirming before relying on getMessage() text.
    @Override
    public String getMessage()
    {
        return bigInteger.toString();
    }
}
| jackyglony/msgpack-java | msgpack-core/src/main/java/org/msgpack/core/MessageIntegerOverflowException.java | Java | apache-2.0 | 1,614 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test the load balancer that is created by default.
*/
@Category(MediumTests.class)
public class TestDefaultLoadBalancer extends BalancerTestBase {
    private static final Log LOG = LogFactory.getLog(TestDefaultLoadBalancer.class);

    private static LoadBalancer loadBalancer;

    @BeforeClass
    public static void beforeAllTests() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Slop of zero forces the balancer to aim for a perfectly even spread.
        conf.set("hbase.regions.slop", "0");
        loadBalancer = new DefaultLoadBalancer();
        loadBalancer.setConf(conf);
    }

    // int[testnum][servernumber] -> numregions
    // Each row is one mock cluster: the i-th element is the region count
    // hosted by server i before balancing.
    int[][] clusterStateMocks = new int[][] {
            // 1 node
            new int[] { 0 },
            new int[] { 1 },
            new int[] { 10 },
            // 2 node
            new int[] { 0, 0 },
            new int[] { 2, 0 },
            new int[] { 2, 1 },
            new int[] { 2, 2 },
            new int[] { 2, 3 },
            new int[] { 2, 4 },
            new int[] { 1, 1 },
            new int[] { 0, 1 },
            new int[] { 10, 1 },
            new int[] { 14, 1432 },
            new int[] { 47, 53 },
            // 3 node
            new int[] { 0, 1, 2 },
            new int[] { 1, 2, 3 },
            new int[] { 0, 2, 2 },
            new int[] { 0, 3, 0 },
            new int[] { 0, 4, 0 },
            new int[] { 20, 20, 0 },
            // 4 node
            new int[] { 0, 1, 2, 3 },
            new int[] { 4, 0, 0, 0 },
            new int[] { 5, 0, 0, 0 },
            new int[] { 6, 6, 0, 0 },
            new int[] { 6, 2, 0, 0 },
            new int[] { 6, 1, 0, 0 },
            new int[] { 6, 0, 0, 0 },
            new int[] { 4, 4, 4, 7 },
            new int[] { 4, 4, 4, 8 },
            new int[] { 0, 0, 0, 7 },
            // 5 node
            new int[] { 1, 1, 1, 1, 4 },
            // more nodes
            new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
            new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, new int[] { 6, 6, 5, 6, 6, 6, 6, 6, 6, 1 },
            new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 55 },
            new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 },
            new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 8 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 9 },
            new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 10 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 123 },
            new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 155 },
            new int[] { 0, 0, 144, 1, 1, 1, 1, 1123, 133, 138, 12, 1444 },
            new int[] { 0, 0, 144, 1, 0, 4, 1, 1123, 133, 138, 12, 1444 },
            new int[] { 1538, 1392, 1561, 1557, 1535, 1553, 1385, 1542, 1619 } };

    /**
     * Test the load balancing algorithm.
     *
     * Invariant is that all servers should be hosting either floor(average) or
     * ceiling(average)
     *
     * @throws Exception
     */
    @Test
    public void testBalanceCluster() throws Exception {
        for (int[] mockCluster : clusterStateMocks) {
            // Build a fake cluster map from the per-server region counts.
            Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster);
            List<ServerAndLoad> list = convertToList(servers);
            LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
            List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
            // Apply the proposed plans to the mock state and verify the invariant.
            List<ServerAndLoad> balancedCluster = reconcile(list, plans);
            LOG.info("Mock Balance : " + printMock(balancedCluster));
            assertClusterAsBalanced(balancedCluster);
            // Return pooled mock objects to the base class for reuse by the
            // next iteration (BalancerTestBase recycles regions and servers).
            for (Map.Entry<ServerName, List<HRegionInfo>> entry : servers.entrySet()) {
                returnRegions(entry.getValue());
                returnServer(entry.getKey());
            }
        }
    }
}
| daidong/DominoHBase | hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java | Java | apache-2.0 | 4,805 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
namespace Microsoft.CodeAnalysis.Host
{
internal class NoOpPersistentStorageService : IChecksummedPersistentStorageService
{
    /// <summary>
    /// The single shared instance; the private constructor makes it the only one.
    /// </summary>
    public static readonly IPersistentStorageService Instance = new NoOpPersistentStorageService();

    private NoOpPersistentStorageService()
    {
    }

    /// <summary>Always hands back the do-nothing storage implementation.</summary>
    public IPersistentStorage GetStorage(Solution solution)
    {
        return NoOpPersistentStorage.Instance;
    }

    public IPersistentStorage GetStorage(Solution solution, bool checkBranchId)
    {
        return NoOpPersistentStorage.Instance;
    }

    IChecksummedPersistentStorage IChecksummedPersistentStorageService.GetStorage(Solution solution)
    {
        return NoOpPersistentStorage.Instance;
    }

    IChecksummedPersistentStorage IChecksummedPersistentStorageService.GetStorage(Solution solution, bool checkBranchId)
    {
        return NoOpPersistentStorage.Instance;
    }
}
}
| davkean/roslyn | src/Workspaces/Core/Portable/Workspace/Host/PersistentStorage/NoOpPersistentStorageService.cs | C# | apache-2.0 | 1,090 |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
LOG = logging.getLogger(__name__)
class CheckNetworkEditable(object):
    """Mixin that decides whether a given network row may be modified.

    Shared networks are managed by administrators only, so actions mixing
    this class in are hidden for them.
    """

    def allowed(self, request, datum=None):
        # Editable unless the row represents a shared network.
        return not (datum and datum.shared)
class DeleteNetwork(policy.PolicyTargetMixin, CheckNetworkEditable,
                    tables.DeleteAction):
    """Table action that removes a network together with all of its subnets."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Network",
            u"Delete Networks",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Network",
            u"Deleted Networks",
            count
        )

    policy_rules = (("network", "delete_network"),)

    def delete(self, request, network_id):
        # Fall back to the id if the lookup below fails before we learn the name.
        display_name = network_id
        try:
            # Fetch the network first so its subnets (and display name) are known.
            network = api.neutron.network_get(request, network_id,
                                              expand_subnet=False)
            display_name = network.name
            LOG.debug('Network %(network_id)s has subnets: %(subnets)s',
                      {'network_id': network_id, 'subnets': network.subnets})
            # Remove the subnets before the network itself.
            for subnet_id in network.subnets:
                api.neutron.subnet_delete(request, subnet_id)
                LOG.debug('Deleted subnet %s', subnet_id)
            api.neutron.network_delete(request, network_id)
            LOG.debug('Deleted network %s successfully', network_id)
        except Exception:
            msg = _('Failed to delete network %s')
            LOG.info(msg, network_id)
            redirect = reverse("horizon:project:networks:index")
            exceptions.handle(request, msg % display_name, redirect=redirect)
class CreateNetwork(tables.LinkAction):
    """Link action opening the network creation modal."""
    name = "create"
    verbose_name = _("Create Network")
    url = "horizon:project:networks:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_network"),)

    def allowed(self, request, datum=None):
        usages = quotas.tenant_quota_usages(request)
        # When Settings.OPENSTACK_NEUTRON_NETWORK['enable_quotas'] = False
        # there is no "networks" entry, so the action stays enabled.
        quota_exceeded = usages.get('networks', {}).get('available', 1) <= 0
        if quota_exceeded:
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ["disabled"]
            self.verbose_name = _("Create Network (Quota exceeded)")
        else:
            self.verbose_name = _("Create Network")
            self.classes = [c for c in self.classes if c != "disabled"]
        return True
class EditNetwork(policy.PolicyTargetMixin, CheckNetworkEditable,
                  tables.LinkAction):
    """Row action linking to the network update modal."""
    name = "update"
    verbose_name = _("Edit Network")
    url = "horizon:project:networks:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_network"),)
class CreateSubnet(policy.PolicyTargetMixin, CheckNetworkEditable,
                   tables.LinkAction):
    """Row action opening the "add subnet" modal for a network."""
    name = "subnet"
    verbose_name = _("Add Subnet")
    url = "horizon:project:networks:addsubnet"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_subnet"),)
    # neutron has used both in their policy files, supporting both
    policy_target_attrs = (("network:tenant_id", "tenant_id"),
                           ("network:project_id", "tenant_id"),)

    def allowed(self, request, datum=None):
        usages = quotas.tenant_quota_usages(request)
        # When Settings.OPENSTACK_NEUTRON_NETWORK['enable_quotas'] = False
        # there is no "subnets" entry, so the action stays enabled.
        quota_exceeded = usages.get('subnets', {}).get('available', 1) <= 0
        if quota_exceeded:
            if 'disabled' not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
            self.verbose_name = _('Add Subnet (Quota exceeded)')
        else:
            self.verbose_name = _('Add Subnet')
            self.classes = [c for c in self.classes if c != 'disabled']
        return True
def get_subnets(network):
    """Render a network row's subnet list as an HTML fragment."""
    return template.loader.render_to_string(
        'project/networks/_network_ips.html',
        {"subnets": network.subnets})
# Translated labels for the admin-state column.
DISPLAY_CHOICES = (
    ("up", pgettext_lazy("Admin state of a Network", u"UP")),
    ("down", pgettext_lazy("Admin state of a Network", u"DOWN")),
)
# Translated labels for the operational-status column.
STATUS_DISPLAY_CHOICES = (
    ("active", pgettext_lazy("Current status of a Network", u"Active")),
    ("build", pgettext_lazy("Current status of a Network", u"Build")),
    ("down", pgettext_lazy("Current status of a Network", u"Down")),
    ("error", pgettext_lazy("Current status of a Network", u"Error")),
)
class ProjectNetworksFilterAction(tables.FilterAction):
    """Server-side filter for the project networks table."""
    name = "filter_project_networks"
    filter_type = "server"
    # Each entry: (field, label, API filter flag[, placeholder hint]).
    filter_choices = (('name', _("Name ="), True),
                      ('shared', _("Shared ="), True,
                       _("e.g. Yes / No")),
                      ('router:external', _("External ="), True,
                       _("e.g. Yes / No")),
                      ('status', _("Status ="), True),
                      ('admin_state_up', _("Admin State ="), True,
                       _("e.g. UP / DOWN")))
class NetworksTable(tables.DataTable):
    """Project-scoped listing of Neutron networks with row/table actions."""
    name = tables.WrappingColumn("name_or_id",
                                 verbose_name=_("Name"),
                                 link='horizon:project:networks:detail')
    # Rendered via the get_subnets template helper.
    subnets = tables.Column(get_subnets,
                            verbose_name=_("Subnets Associated"),)
    shared = tables.Column("shared", verbose_name=_("Shared"),
                           filters=(filters.yesno, filters.capfirst))
    external = tables.Column("router:external", verbose_name=_("External"),
                             filters=(filters.yesno, filters.capfirst))
    status = tables.Column("status", verbose_name=_("Status"),
                           display_choices=STATUS_DISPLAY_CHOICES)
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"),
                                display_choices=DISPLAY_CHOICES)

    class Meta(object):
        name = "networks"
        verbose_name = _("Networks")
        table_actions = (CreateNetwork, DeleteNetwork,
                         ProjectNetworksFilterAction)
        row_actions = (EditNetwork, CreateSubnet, DeleteNetwork)
| coreycb/horizon | openstack_dashboard/dashboards/project/networks/tables.py | Python | apache-2.0 | 7,635 |
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* just enough ELF support for finding the interpreter in the program header
* table, this should theoretically work as-is on both big-endian and
* little-endian
*/
/* values of interest */
#define ELF_BITS_32 0x1     /* e_ident[EI_CLASS]: 32-bit objects */
#define ELF_BITS_64 0x2     /* e_ident[EI_CLASS]: 64-bit objects */
#define ELF_ENDIAN_LITL 0x1 /* e_ident[EI_DATA]: little-endian */
#define ELF_ENDIAN_BIG 0x2  /* e_ident[EI_DATA]: big-endian */
#define ELF_PT_INTERP 0x3   /* program header type PT_INTERP (interpreter path) */
/* offsets of interest */
#define ELF_BITS 0x4        /* e_ident[EI_CLASS] byte */
#define ELF_ENDIAN 0x5      /* e_ident[EI_DATA] byte */
#define ELF_VERSION 0x6     /* e_ident[EI_VERSION] byte */
#define ELF32_PHT_OFF 0x1c  /* e_phoff: program header table file offset */
#define ELF32_PHTE_SIZE 0x2a /* e_phentsize */
#define ELF32_PHTE_CNT 0x2c /* e_phnum */
#define ELF32_PHE_OFF 0x4   /* p_offset within a program header entry */
#define ELF32_PHE_SIZE 0x10 /* p_filesz within a program header entry */
#define ELF64_PHT_OFF 0x20  /* e_phoff (64-bit header layout) */
#define ELF64_PHTE_SIZE 0x36 /* e_phentsize */
#define ELF64_PHTE_CNT 0x38 /* e_phnum */
#define ELF64_PHE_OFF 0x8   /* p_offset within a 64-bit program header entry */
#define ELF64_PHE_SIZE 0x20 /* p_filesz within a 64-bit program header entry */
/* multibyte value accessors, choose which based on ELF_BITS and ELF_ENDIAN */
/* shift a byte value into position _bytes bytes from the least-significant end */
#define SHIFT(_val, _bytes) ((unsigned long long)(_val) << ((_bytes) * 8))
/* Read a 32-bit little-endian value, widened to 64 bits. */
static uint64_t le32_lget(const uint8_t *addr)
{
	uint64_t val;

	val = (uint64_t)addr[0];
	val |= (uint64_t)addr[1] << 8;
	val |= (uint64_t)addr[2] << 16;
	val |= (uint64_t)addr[3] << 24;
	return val;
}
/* Read a 32-bit big-endian value, widened to 64 bits. */
static uint64_t be32_lget(const uint8_t *addr)
{
	uint64_t val = 0;
	int i;

	/* most-significant byte first */
	for (i = 0; i < 4; i++)
		val = (val << 8) | addr[i];
	return val;
}
/* Read a 64-bit little-endian value. */
static uint64_t le64_lget(const uint8_t *addr)
{
	uint64_t val = 0;
	int i;

	/* walk from the most-significant byte (addr[7]) downwards */
	for (i = 7; i >= 0; i--)
		val = (val << 8) | addr[i];
	return val;
}
/* Read a 64-bit big-endian value. */
static uint64_t be64_lget(const uint8_t *addr)
{
	uint64_t val = 0;
	int i;

	/* most-significant byte first */
	for (i = 0; i < 8; i++)
		val = (val << 8) | addr[i];
	return val;
}
/* Read a 32-bit little-endian value at native int width. */
static uint32_t le_iget(const uint8_t *addr)
{
	return ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
	       ((uint32_t)addr[1] << 8) | (uint32_t)addr[0];
}
/* Read a 32-bit big-endian value at native int width. */
static uint32_t be_iget(const uint8_t *addr)
{
	return ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
	       ((uint32_t)addr[2] << 8) | (uint32_t)addr[3];
}
/* Read a 16-bit little-endian value: addr[0] is the low byte. */
static uint16_t le_sget(const uint8_t *addr)
{
	return (uint16_t)(((uint16_t)addr[1] << 8) | addr[0]);
}
/*
 * Read a 16-bit big-endian value: addr[0] is the high byte.
 *
 * Bug fix: the previous implementation shifted addr[0] by 0 and addr[1]
 * by 8 (SHIFT(addr[0], 0) + SHIFT(addr[1], 1)), which decodes the bytes
 * little-endian — identical to le_sget() — so 16-bit fields of big-endian
 * ELF files were byte-swapped.
 */
static uint16_t be_sget(const uint8_t *addr)
{
	return (uint16_t)(((uint16_t)addr[0] << 8) | addr[1]);
}
| robszumski/rocket | stage1/rootfs/diagexec/elf.h | C | apache-2.0 | 2,859 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.descriptors;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.api.ValidationException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;
/** Validator for {@link FunctionDescriptor}. */
@Internal
public class FunctionDescriptorValidator implements DescriptorValidator {

    public static final String FROM = "from";
    public static final String FROM_VALUE_CLASS = "class";
    public static final String FROM_VALUE_PYTHON = "python";

    @Override
    public void validate(DescriptorProperties properties) {
        // A function descriptor must declare where the function comes from.
        if (!properties.containsKey(FROM)) {
            throw new ValidationException("Could not find 'from' property for function.");
        }

        // Delegate to the validator matching the declared origin.
        Map<String, Consumer<String>> enumValidation = new HashMap<>();
        enumValidation.put(
                FROM_VALUE_CLASS, value -> new ClassInstanceValidator().validate(properties));
        enumValidation.put(
                FROM_VALUE_PYTHON, value -> new PythonFunctionValidator().validate(properties));
        properties.validateEnum(FROM, false, enumValidation);
    }
}
| rmetzger/flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/descriptors/FunctionDescriptorValidator.java | Java | apache-2.0 | 1,990 |
/*
* Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.security.smartcardio;
import java.security.*;
import javax.smartcardio.*;
/**
* Provider object for PC/SC.
*
* @since 1.6
* @author Andreas Sterbenz
*/
public final class SunPCSC extends Provider {

    private static final long serialVersionUID = 6168388284028876579L;

    public SunPCSC() {
        super("SunPCSC", 1.6d, "Sun PC/SC provider");
        // Register the property inside doPrivileged so installation succeeds
        // even when the calling code lacks the relevant permissions.
        AccessController.doPrivileged(new PrivilegedAction<Void>() {
            public Void run() {
                // Map the "PC/SC" TerminalFactory type to its SPI implementation.
                put("TerminalFactory.PC/SC", "sun.security.smartcardio.SunPCSC$Factory");
                return null;
            }
        });
    }

    public static final class Factory extends TerminalFactorySpi {
        /**
         * @param obj must be null; this factory accepts no init parameters
         * @throws PCSCException if the PC/SC subsystem is unavailable
         */
        public Factory(Object obj) throws PCSCException {
            if (obj != null) {
                throw new IllegalArgumentException
                    ("SunPCSC factory does not use parameters");
            }
            // make sure PCSC is available and that we can obtain a context
            PCSC.checkAvailable();
            PCSCTerminals.initContext();
        }
        /**
         * Returns the available readers.
         * This must be a new object for each call.
         */
        protected CardTerminals engineTerminals() {
            return new PCSCTerminals();
        }
    }
}
| andreagenso/java2scala | test/J2s/java/openjdk-6-src-b27/jdk/src/share/classes/sun/security/smartcardio/SunPCSC.java | Java | apache-2.0 | 2,515 |
# encoding: utf-8
# This file is distributed under New Relic's license terms.
# See https://github.com/newrelic/rpm/blob/master/LICENSE for complete details.
# This class is the central point for dispatching get_agent_commands messages
# to the various components that actually process them.
#
# This could be evented further, but we eventually need direct access to things
# like the ThreadProfiler, so it's simpler to just keep it together here.
require 'new_relic/agent/commands/agent_command'
require 'new_relic/agent/commands/xray_session_collection'
require 'new_relic/agent/threading/backtrace_service'
module NewRelic
  module Agent
    module Commands
      # Routes agent commands received from the collector to the component
      # that can process them (thread profiler, X-Ray session collection).
      class AgentCommandRouter
        attr_reader :handlers

        attr_accessor :thread_profiler_session, :backtrace_service,
                      :xray_session_collection

        def initialize(event_listener=nil)
          # Unknown command names fall through to unrecognized_agent_command
          # via the hash's default proc.
          @handlers = Hash.new { |*| Proc.new { |cmd| self.unrecognized_agent_command(cmd) } }

          @backtrace_service = Threading::BacktraceService.new(event_listener)

          @thread_profiler_session = ThreadProfilerSession.new(@backtrace_service)

          @xray_session_collection = XraySessionCollection.new(@backtrace_service, event_listener)

          @handlers['start_profiler'] = Proc.new { |cmd| thread_profiler_session.handle_start_command(cmd) }
          @handlers['stop_profiler'] = Proc.new { |cmd| thread_profiler_session.handle_stop_command(cmd) }
          @handlers['active_xray_sessions'] = Proc.new { |cmd| xray_session_collection.handle_active_xray_sessions(cmd) }

          if event_listener
            event_listener.subscribe(:before_shutdown, &method(:on_before_shutdown))
          end
        end

        # Convenience accessor for the collector service.
        def new_relic_service
          NewRelic::Agent.instance.service
        end

        # Polls the collector for commands, dispatches them, and reports
        # the per-command results back (only when there are any).
        def check_for_and_handle_agent_commands
          commands = get_agent_commands

          # The absence of an active_xray_sessions command means the
          # collector considers every X-Ray session finished.
          stop_xray_sessions unless active_xray_command?(commands)

          results = invoke_commands(commands)
          new_relic_service.agent_command_results(results) unless results.empty?
        end

        def stop_xray_sessions
          self.xray_session_collection.stop_all_sessions
        end

        def active_xray_command?(commands)
          commands.any? {|command| command.name == 'active_xray_sessions'}
        end

        # Flushes an in-flight thread profile before the agent shuts down.
        def on_before_shutdown(*args)
          if self.thread_profiler_session.running?
            self.thread_profiler_session.stop(true)
          end
        end

        # Collects all completed thread profiles for transmission.
        def harvest!
          profiles = []
          profiles += harvest_from_xray_session_collection
          profiles += harvest_from_thread_profiler_session
          log_profiles(profiles)
          profiles
        end

        # We don't currently support merging thread profiles that failed to send
        # back into the AgentCommandRouter, so we just no-op this method.
        # Same with reset! - we don't support asynchronous cancellation of a
        # running thread profile or X-Ray session currently.
        def merge!(*args); end
        def reset!; end

        def harvest_from_xray_session_collection
          self.xray_session_collection.harvest_thread_profiles
        end

        def harvest_from_thread_profiler_session
          if self.thread_profiler_session.ready_to_harvest?
            self.thread_profiler_session.stop(true)
            [self.thread_profiler_session.harvest]
          else
            []
          end
        end

        def log_profiles(profiles)
          if profiles.empty?
            ::NewRelic::Agent.logger.debug "No thread profiles with data found to send."
          else
            profile_descriptions = profiles.map { |p| p.to_log_description }
            ::NewRelic::Agent.logger.debug "Sending thread profiles [#{profile_descriptions.join(", ")}]"
          end
        end

        # Fetches raw commands from the collector and wraps them.
        def get_agent_commands
          commands = new_relic_service.get_agent_commands
          NewRelic::Agent.logger.debug "Received get_agent_commands = #{commands.inspect}"
          commands.map {|collector_command| AgentCommand.new(collector_command)}
        end

        # Returns a hash of command id (string) => result payload.
        def invoke_commands(agent_commands)
          results = {}

          agent_commands.each do |agent_command|
            results[agent_command.id.to_s] = invoke_command(agent_command)
          end

          results
        end

        class AgentCommandError < StandardError
        end

        def invoke_command(agent_command)
          begin
            call_handler_for(agent_command)
            return success
          rescue AgentCommandError => e
            NewRelic::Agent.logger.debug(e)
            error(e)
          end
        end

        SUCCESS_RESULT = {}.freeze
        ERROR_KEY = "error"

        def success
          SUCCESS_RESULT
        end

        def error(err)
          { ERROR_KEY => err.message }
        end

        def call_handler_for(agent_command)
          handler = select_handler(agent_command)
          handler.call(agent_command)
        end

        def select_handler(agent_command)
          @handlers[agent_command.name]
        end

        def unrecognized_agent_command(agent_command)
          NewRelic::Agent.logger.debug("Unrecognized agent command #{agent_command.inspect}")
        end
      end
    end
  end
end
| dmitrinesterenko/profiling_newrelic | vendor/gems/ruby/2.2.0/gems/newrelic_rpm-3.14.2.312/lib/new_relic/agent/commands/agent_command_router.rb | Ruby | apache-2.0 | 5,318 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testDenseProperties(self):
  # Constructor arguments must be exposed unchanged as layer attributes.
  dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
  self.assertEqual(dense.units, 2)
  self.assertEqual(dense.activation, nn_ops.relu)
  self.assertEqual(dense.kernel_regularizer, None)
  self.assertEqual(dense.bias_regularizer, None)
  self.assertEqual(dense.activity_regularizer, None)
  self.assertEqual(dense.use_bias, True)

  # Test auto-naming: unnamed layers get sequential 'dense_N' names.
  dense = core_layers.Dense(2, activation=nn_ops.relu)
  dense.apply(random_ops.random_uniform((5, 2)))
  self.assertEqual(dense.name, 'dense_1')
  dense = core_layers.Dense(2, activation=nn_ops.relu)
  dense.apply(random_ops.random_uniform((5, 2)))
  self.assertEqual(dense.name, 'dense_2')
def testVariableInput(self):
  # A tf.Variable (not just a Tensor) must be accepted as layer input.
  with self.test_session():
    v = variable_scope.get_variable(
        'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
    x = core_layers.Dense(1)(v)
    variables.global_variables_initializer().run()
    self.assertAllEqual(x.eval(), [[0.0]])
@test_util.run_in_graph_and_eager_modes()
def testCall(self):
  # Calling the layer builds kernel + bias and produces the expected shape.
  dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
  inputs = random_ops.random_uniform((5, 4), seed=1)
  outputs = dense(inputs)
  self.assertListEqual([5, 2], outputs.get_shape().as_list())
  self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
  self.assertListEqual(dense.trainable_variables,
                       [dense.kernel, dense.bias])
  self.assertListEqual(dense.non_trainable_variables, [])
  if context.in_graph_mode():
    # In graph mode both variables land in the trainable collection.
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
  self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
  self.assertEqual(dense.bias.name, 'my_dense/bias:0')
@test_util.run_in_graph_and_eager_modes()
def testCallTensorDot(self):
  # Rank-3 input exercises the tensordot path; only the last dim changes.
  dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
  inputs = random_ops.random_uniform((5, 4, 3), seed=1)
  outputs = dense(inputs)
  self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes()
def testNoBias(self):
  # With use_bias=False only the kernel variable is created.
  dense = core_layers.Dense(2, use_bias=False, name='my_dense')
  inputs = random_ops.random_uniform((5, 2), seed=1)
  _ = dense(inputs)
  self.assertListEqual(dense.variables, [dense.kernel])
  self.assertListEqual(dense.trainable_variables, [dense.kernel])
  self.assertListEqual(dense.non_trainable_variables, [])
  if context.in_graph_mode():
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
  self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
  self.assertEqual(dense.bias, None)
@test_util.run_in_graph_and_eager_modes()
def testNonTrainable(self):
  # trainable=False moves both variables to the non-trainable set.
  dense = core_layers.Dense(2, trainable=False, name='my_dense')
  inputs = random_ops.random_uniform((5, 2), seed=1)
  _ = dense(inputs)
  self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
  self.assertListEqual(dense.non_trainable_variables,
                       [dense.kernel, dense.bias])
  self.assertListEqual(dense.trainable_variables, [])
  if context.in_graph_mode():
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
@test_util.run_in_graph_and_eager_modes()
def testOutputShape(self):
  # Only the last dimension is replaced by `units`, for any input rank.
  dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
  inputs = random_ops.random_uniform((5, 3), seed=1)
  outputs = dense.apply(inputs)
  self.assertEqual(outputs.get_shape().as_list(), [5, 7])

  inputs = random_ops.random_uniform((5, 2, 3), seed=1)
  outputs = dense(inputs)
  self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])

  inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
  outputs = dense.apply(inputs)
  self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
def testCallOnPlaceHolder(self):
  # Placeholders with an unknown last dimension must be rejected,
  # since the kernel shape cannot be inferred from them.
  inputs = array_ops.placeholder(dtype=dtypes.float32)
  dense = core_layers.Dense(4, name='my_dense')
  with self.assertRaises(ValueError):
    dense(inputs)

  inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
  dense = core_layers.Dense(4, name='my_dense')
  with self.assertRaises(ValueError):
    dense(inputs)

  inputs = array_ops.placeholder(
      dtype=dtypes.float32, shape=[None, None, None])
  dense = core_layers.Dense(4, name='my_dense')
  with self.assertRaises(ValueError):
    dense(inputs)

  # A known last dimension is sufficient, even with unknown batch dims.
  inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
  dense = core_layers.Dense(4, name='my_dense')
  dense(inputs)

  inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
  dense = core_layers.Dense(4, name='my_dense')
  dense(inputs)
@test_util.run_in_graph_and_eager_modes()
def testActivation(self):
  # The final graph op reflects the activation (Relu vs. plain BiasAdd).
  dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
  inputs = random_ops.random_uniform((5, 3), seed=1)
  outputs = dense(inputs)
  if context.in_graph_mode():
    self.assertEqual(outputs.op.name, 'dense1/Relu')

  dense = core_layers.Dense(2, name='dense2')
  inputs = random_ops.random_uniform((5, 3), seed=1)
  outputs = dense(inputs)
  if context.in_graph_mode():
    self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
def testActivityRegularizer(self):
  # The activity regularizer adds exactly one regularization loss.
  regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
  dense = core_layers.Dense(
      2, name='my_dense', activity_regularizer=regularizer)
  inputs = random_ops.random_uniform((5, 3), seed=1)
  _ = dense(inputs)
  loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
  self.assertEqual(len(loss_keys), 1)
  self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizer(self):
  # The kernel regularizer adds exactly one regularization loss.
  regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
  dense = core_layers.Dense(
      2, name='my_dense', kernel_regularizer=regularizer)
  inputs = random_ops.random_uniform((5, 3), seed=1)
  _ = dense(inputs)
  loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
  self.assertEqual(len(loss_keys), 1)
  self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizerWithReuse(self):
  # Reusing the same variables must not duplicate the regularization loss.
  regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
  inputs = random_ops.random_uniform((5, 3), seed=1)
  _ = core_layers.dense(
      inputs, 2, name='my_dense', kernel_regularizer=regularizer)
  self.assertEqual(
      len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
  _ = core_layers.dense(
      inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
  self.assertEqual(
      len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testFunctionalDense(self):
with self.test_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = _get_variable_dict_from_varstore().values()
core_layers.dense(inputs, 2)
vars2 = _get_variable_dict_from_varstore().values()
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
  # TODO(alive): get this to work in eager mode.
  def testFunctionalDenseTwiceReuse(self):
    # Passing reuse=True with the same layer name must share the existing
    # variables rather than create a second kernel/bias pair.
    with self.test_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2, name='my_dense')
      vars1 = variables.trainable_variables()
      core_layers.dense(inputs, 2, name='my_dense', reuse=True)
      vars2 = variables.trainable_variables()
      self.assertEqual(vars1, vars2)
  # TODO(alive): get this to work in eager mode.
  def testFunctionalDenseTwiceReuseFromScope(self):
    # Reuse requested via the enclosing variable scope (rather than the
    # layer's own `reuse` argument) must also share the variables.
    with self.test_session():
      with variable_scope.variable_scope('scope'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name='my_dense')
        vars1 = variables.trainable_variables()
      with variable_scope.variable_scope('scope', reuse=True):
        core_layers.dense(inputs, 2, name='my_dense')
        vars2 = variables.trainable_variables()
        self.assertEqual(vars1, vars2)
  def testFunctionalDenseInitializerFromScope(self):
    # An initializer set on the enclosing variable scope applies to the
    # kernel, while the bias keeps its own zeros default.
    with variable_scope.variable_scope(
        'scope', initializer=init_ops.ones_initializer()), self.test_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2)
      variables.global_variables_initializer().run()
      weights = _get_variable_dict_from_varstore()
      self.assertEqual(len(weights), 2)
      # Check that the matrix weights got initialized to ones (from scope).
      self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
                          np.ones((3, 2)))
      # Check that the bias still got initialized to zeros.
      self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
                          np.zeros((2)))
  def testEagerExecution(self):
    # EagerVariableStore lets the functional dense API create — and then
    # reuse — variables while executing eagerly.
    with context.eager_mode():
      container = variable_scope.EagerVariableStore()
      x = constant_op.constant([[2.0]])
      with container.as_default():
        y = core_layers.dense(
            x, 1, name='my_dense',
            kernel_initializer=init_ops.ones_initializer())
      # Ones kernel applied to [[2.0]] yields [[2.0]].
      self.assertAllEqual(y, [[2.0]])
      self.assertEqual(len(container.variables()), 2)
      # Recreate the layer to test reuse.
      with container.as_default():
        core_layers.dense(
            x, 1, name='my_dense',
            kernel_initializer=init_ops.ones_initializer())
      # Still two variables: the second call reused the first call's pair.
      self.assertEqual(len(container.variables()), 2)
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
  def testFunctionalDenseInScope(self):
    with self.test_session():
      # An explicit layer name nests under the enclosing scope name.
      with variable_scope.variable_scope('test'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name='my_dense')
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test/my_dense/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
      # Passing a scope object as the name places variables directly in it.
      with variable_scope.variable_scope('test1') as scope:
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name=scope)
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test1/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
      # With no name at all, the layer falls back to the default 'dense'.
      with variable_scope.variable_scope('test2'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2)
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test2/dense/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
  @test_util.run_in_graph_and_eager_modes()
  def testComputeOutputShape(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    ts = tensor_shape.TensorShape
    # pylint: disable=protected-access
    # Shapes of unknown rank or rank < 2 are rejected.
    with self.assertRaises(ValueError):
      dense._compute_output_shape(ts(None))
    with self.assertRaises(ValueError):
      dense._compute_output_shape(ts([]))
    with self.assertRaises(ValueError):
      dense._compute_output_shape(ts([1]))
    # For valid ranks, only the last dimension is replaced by the unit count.
    self.assertEqual(
        [None, 2],
        dense._compute_output_shape((None, 3)).as_list())
    self.assertEqual(
        [None, 2],
        dense._compute_output_shape(ts([None, 3])).as_list())
    self.assertEqual(
        [None, 4, 2],
        dense._compute_output_shape(ts([None, 4, 3])).as_list())
    # pylint: enable=protected-access
@test_util.run_in_graph_and_eager_modes()
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
dense = core_layers.Dense(2,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3), seed=1)
dense(inputs)
self.assertEqual(dense.kernel_constraint, k_constraint)
self.assertEqual(dense.bias_constraint, b_constraint)
def _get_variable_dict_from_varstore():
  """Returns the default variable store's vars as a name-sorted OrderedDict."""
  store_vars = variable_scope._get_default_variable_store()._vars  # pylint: disable=protected-access
  return collections.OrderedDict(sorted(store_vars.items()))
class DropoutTest(test.TestCase):
  # Tests for the Dropout layer and its functional wrapper.

  @test_util.run_in_graph_and_eager_modes()
  def testDropoutProperties(self):
    # Constructor arguments are exposed as attributes; the name is applied
    # once the layer is built by the first call.
    dp = core_layers.Dropout(0.5, name='dropout')
    self.assertEqual(dp.rate, 0.5)
    self.assertEqual(dp.noise_shape, None)
    dp.apply(array_ops.ones(()))
    self.assertEqual(dp.name, 'dropout')

  @test_util.run_in_graph_and_eager_modes()
  def testBooleanLearningPhase(self):
    # training=True drops units (some outputs hit 0.0); training=False is
    # the identity.
    dp = core_layers.Dropout(0.5)
    inputs = array_ops.ones((5, 3))
    dropped = dp.apply(inputs, training=True)
    if context.in_graph_mode():
      self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    dropped = dp.apply(inputs, training=False)
    np_output = self.evaluate(dropped)
    self.assertAllClose(np.ones((5, 3)), np_output)

  def testDynamicLearningPhase(self):
    # The training flag may also be a bool placeholder fed at run time.
    with self.test_session() as sess:
      dp = core_layers.Dropout(0.5, seed=1)
      inputs = array_ops.ones((5, 5))
      training = array_ops.placeholder(dtype='bool')
      dropped = dp.apply(inputs, training=training)
      self.evaluate(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={training: True})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={training: False})
      self.assertAllClose(np.ones((5, 5)), np_output)

  @test_util.run_in_graph_and_eager_modes()
  def testDynamicNoiseShape(self):
    # A 1 in noise_shape broadcasts the dropout mask along that axis, so
    # the slices along axis 1 must be identical.
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [None, 1, None]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testCustomNoiseShape(self):
    # A fully-specified noise shape with a broadcast middle axis behaves
    # the same as the dynamic variant above.
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [5, 1, 2]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testFunctionalDropout(self):
    # The functional wrapper mirrors the layer: drops when training=True,
    # identity when training=False.
    with self.test_session():
      inputs = array_ops.ones((5, 5))
      dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
      variables.global_variables_initializer().run()
      np_output = self.evaluate(dropped)
      self.assertAlmostEqual(0., np_output.min())
      dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
      np_output = self.evaluate(dropped)
      self.assertAllClose(np.ones((5, 5)), np_output)

  def testDynamicRate(self):
    # The rate itself may be a placeholder; a fed rate of 0.0 must be the
    # identity.
    with self.test_session() as sess:
      rate = array_ops.placeholder(dtype='float32', name='rate')
      dp = core_layers.Dropout(rate, name='dropout')
      inputs = array_ops.ones((5, 5))
      dropped = dp.apply(inputs, training=True)
      sess.run(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={rate: 0.5})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={rate: 0.0})
      self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(test.TestCase):
  # Tests for the Flatten layer and its functional wrapper.

  def testCreateFlatten(self):
    # Flatten collapses all dimensions after the batch dimension.
    with self.test_session() as sess:
      x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
      self.assertEqual(list(np_output.shape), [3, 6])
      self.assertEqual(y.get_shape().as_list(), [None, 6])

      x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
      self.assertEqual(list(np_output.shape), [1, 12])
      self.assertEqual(y.get_shape().as_list(), [1, 12])

  def testComputeShape(self):
    # The static output shape keeps the batch dimension and multiplies the
    # rest; any unknown non-batch dimension makes the result unknown.
    shape = core_layers.Flatten()._compute_output_shape((1, 2, 3, 2))
    self.assertEqual(shape.as_list(), [1, 12])

    shape = core_layers.Flatten()._compute_output_shape((None, 3, 2))
    self.assertEqual(shape.as_list(), [None, 6])

    shape = core_layers.Flatten()._compute_output_shape((None, 3, None))
    self.assertEqual(shape.as_list(), [None, None])

  def testFunctionalFlatten(self):
    x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
    y = core_layers.flatten(x, name='flatten')
    self.assertEqual(y.get_shape().as_list(), [None, 6])

  def testFlattenValueError(self):
    # Rank-1 input has no non-batch dimensions to flatten, so it is invalid.
    x = array_ops.placeholder(shape=(None,), dtype='float32')
    with self.assertRaises(ValueError):
      core_layers.Flatten()(x)

  def testFlattenUnknownAxes(self):
    # Unknown non-batch dimensions yield an unknown flattened dimension in
    # the static shape, while the runtime result is still fully flattened.
    with self.test_session() as sess:
      x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])

      x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])
# Run the full test suite when this file is executed as a script.
if __name__ == '__main__':
  test.main()
| eadgarchen/tensorflow | tensorflow/python/layers/core_test.py | Python | apache-2.0 | 20,438 |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __STOUT_OS_WINDOWS_MKDIR_HPP__
#define __STOUT_OS_WINDOWS_MKDIR_HPP__
#include <string>
#include <vector>
#include <stout/error.hpp>
#include <stout/nothing.hpp>
#include <stout/strings.hpp>
#include <stout/try.hpp>
#include <stout/windows.hpp>
#include <stout/os/exists.hpp>
#include <stout/os/constants.hpp>
#include <stout/internal/windows/longpath.hpp>
namespace os {
// Creates `directory`, recursively creating any missing parent components
// when `recursive` is true. Returns an error if a `CreateDirectoryW` call
// fails; an already-existing directory is treated as success.
inline Try<Nothing> mkdir(const std::string& directory, bool recursive = true)
{
  if (!recursive) {
    // NOTE: We check for existence because parts of certain directories
    // like `C:\` will return an error if passed to `CreateDirectory`,
    // even though the drive may already exist.
    if (os::exists(directory)) {
      return Nothing();
    }

    // Convert to the wide-character long-path form before calling the
    // Win32 API (presumably to support paths beyond MAX_PATH — confirm).
    std::wstring longpath = ::internal::windows::longpath(directory);
    if (::CreateDirectoryW(longpath.data(), nullptr) == 0) {
      return WindowsError("Failed to create directory: " + directory);
    }
  } else {
    // Remove the long path prefix, if it already exists, otherwise the
    // tokenizer includes the long path prefix (`\\?\`) as the first part
    // of the path.
    std::vector<std::string> tokens = strings::tokenize(
        strings::remove(directory, os::LONGPATH_PREFIX, strings::Mode::PREFIX),
        stringify(os::PATH_SEPARATOR));

    // Build the path back up one component at a time, creating each level
    // with a non-recursive call (which is a no-op for existing components).
    std::string path;

    foreach (const std::string& token, tokens) {
      path += token + os::PATH_SEPARATOR;

      Try<Nothing> result = mkdir(path, false);
      if (result.isError()) {
        return result;
      }
    }
  }

  return Nothing();
}
} // namespace os {
#endif // __STOUT_OS_WINDOWS_MKDIR_HPP__
| shakamunyi/mesos | 3rdparty/stout/include/stout/os/windows/mkdir.hpp | C++ | apache-2.0 | 2,179 |
/*
* Copyright 2017 - 2018 Anton Tananaev (anton@traccar.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.traccar.protocol;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.handler.codec.string.StringEncoder;
import org.traccar.BaseProtocol;
import org.traccar.PipelineBuilder;
import org.traccar.TrackerServer;
public class TmgProtocol extends BaseProtocol {

    public TmgProtocol() {
        // NOTE(review): `false` presumably selects a stream (TCP) rather than
        // datagram server — confirm against the TrackerServer constructor.
        addServer(new TrackerServer(false, getName()) {
            @Override
            protected void addProtocolHandlers(PipelineBuilder pipeline) {
                // Registration order matters: frames are isolated first, then
                // frame bytes are converted to strings, and finally the
                // protocol decoder parses the resulting text messages.
                pipeline.addLast(new TmgFrameDecoder());
                pipeline.addLast(new StringEncoder());
                pipeline.addLast(new StringDecoder());
                pipeline.addLast(new TmgProtocolDecoder(TmgProtocol.this));
            }
        });
    }

}
| tananaev/traccar | src/main/java/org/traccar/protocol/TmgProtocol.java | Java | apache-2.0 | 1,371 |
/*
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Author: sligocki@google.com (Shawn Ligocki)
#ifndef NET_INSTAWEB_HTTP_PUBLIC_CACHE_URL_ASYNC_FETCHER_H_
#define NET_INSTAWEB_HTTP_PUBLIC_CACHE_URL_ASYNC_FETCHER_H_
#include "net/instaweb/http/public/url_async_fetcher.h"
#include "pagespeed/kernel/base/basictypes.h"
#include "pagespeed/kernel/base/string.h"
namespace net_instaweb {
class AsyncFetch;
class Hasher;
class Histogram;
class HTTPCache;
class MessageHandler;
class NamedLockManager;
class Variable;
// Composes an asynchronous URL fetcher with an http cache, to
// generate an asynchronous caching URL fetcher.
//
// This fetcher will asynchronously check the cache. If the url
// is found in cache and is still valid, the fetch's callback will be
// called right away. This includes any cached failures or that URL
// is uncacheable, unless set_ignore_recent_fetch_failed(true) is called.
// Otherwise (if fetcher != NULL) an async fetch will be performed in the
// fetcher, the result of which will be written into the cache. In case the
// fetch fails and there is a stale response in the cache, we serve the stale
// response.
//
// If fetcher == NULL, this will only perform a cache lookup and then call
// the callback immediately.
//
// In case of cache hit and resource is about to expire (80% of TTL or 5 mins
// which ever is minimum), it will trigger background fetch to freshen the value
// in cache. Background fetch only be triggered only if async_op_hooks_ != NULL,
// otherwise, fetcher object accessed by BackgroundFreshenFetch may be deleted
// by the time origin fetch finishes.
//
// TODO(sligocki): In order to use this for fetching resources for rewriting
// we'd need to integrate resource locking in this class. Do we want that?
class CacheUrlAsyncFetcher : public UrlAsyncFetcher {
 public:
  // Interface for managing async operations in CacheUrlAsyncFetcher. It helps
  // to protect the lifetime of the injected objects.
  class AsyncOpHooks {
   public:
    AsyncOpHooks() {}
    virtual ~AsyncOpHooks();

    // Called when CacheUrlAsyncFetcher is about to start async operation.
    virtual void StartAsyncOp() = 0;
    // Called when async operation is ended.
    virtual void FinishAsyncOp() = 0;
  };

  // None of these are owned by CacheUrlAsyncFetcher.
  CacheUrlAsyncFetcher(const Hasher* lock_hasher,
                       NamedLockManager* lock_manager,
                       HTTPCache* cache,
                       const GoogleString& fragment,
                       AsyncOpHooks* async_op_hooks,
                       UrlAsyncFetcher* fetcher)
      : lock_hasher_(lock_hasher),
        lock_manager_(lock_manager),
        http_cache_(cache),
        fragment_(fragment),
        fetcher_(fetcher),
        async_op_hooks_(async_op_hooks),
        backend_first_byte_latency_(NULL),
        fallback_responses_served_(NULL),
        fallback_responses_served_while_revalidate_(NULL),
        num_conditional_refreshes_(NULL),
        num_proactively_freshen_user_facing_request_(NULL),
        respect_vary_(false),
        ignore_recent_fetch_failed_(false),
        serve_stale_if_fetch_error_(false),
        default_cache_html_(false),
        proactively_freshen_user_facing_request_(false),
        own_fetcher_(false),
        serve_stale_while_revalidate_threshold_sec_(0) {
  }
  virtual ~CacheUrlAsyncFetcher();

  // Delegates the HTTPS capability query to the wrapped fetcher.
  virtual bool SupportsHttps() const { return fetcher_->SupportsHttps(); }

  virtual void Fetch(const GoogleString& url,
                     MessageHandler* message_handler,
                     AsyncFetch* base_fetch);

  // HTTP status code used to indicate that we failed the Fetch because
  // result was not found in cache. (Only happens if fetcher_ == NULL).
  static const int kNotInCacheStatus;

  HTTPCache* http_cache() const { return http_cache_; }
  UrlAsyncFetcher* fetcher() const { return fetcher_; }

  // Statistics sinks. All are optional (may be NULL) and injected after
  // construction via these setters.
  void set_backend_first_byte_latency_histogram(Histogram* x) {
    backend_first_byte_latency_ = x;
  }

  Histogram* backend_first_byte_latency_histogram() const {
    return backend_first_byte_latency_;
  }

  void set_fallback_responses_served(Variable* x) {
    fallback_responses_served_ = x;
  }

  Variable* fallback_responses_served() const {
    return fallback_responses_served_;
  }

  void set_fallback_responses_served_while_revalidate(Variable* x) {
    fallback_responses_served_while_revalidate_ = x;
  }

  Variable* fallback_responses_served_while_revalidate() const {
    return fallback_responses_served_while_revalidate_;
  }

  void set_num_conditional_refreshes(Variable* x) {
    num_conditional_refreshes_ = x;
  }

  Variable* num_conditional_refreshes() const {
    return num_conditional_refreshes_;
  }

  void set_num_proactively_freshen_user_facing_request(Variable* x) {
    num_proactively_freshen_user_facing_request_ = x;
  }

  Variable* num_proactively_freshen_user_facing_request() const {
    return num_proactively_freshen_user_facing_request_;
  }

  // Behavior flags; see the class comment for how they interact with
  // cache hits, failures and stale responses.
  void set_respect_vary(bool x) { respect_vary_ = x; }
  bool respect_vary() const { return respect_vary_; }

  void set_ignore_recent_fetch_failed(bool x) {
    ignore_recent_fetch_failed_ = x;
  }

  bool ignore_recent_fetch_failed() const {
    return ignore_recent_fetch_failed_;
  }

  void set_serve_stale_if_fetch_error(bool x) {
    serve_stale_if_fetch_error_ = x;
  }

  bool serve_stale_if_fetch_error() const {
    return serve_stale_if_fetch_error_;
  }

  void set_serve_stale_while_revalidate_threshold_sec(int64 x) {
    serve_stale_while_revalidate_threshold_sec_ = x;
  }

  int64 serve_stale_while_revalidate_threshold_sec() const {
    return serve_stale_while_revalidate_threshold_sec_;
  }

  void set_default_cache_html(bool x) { default_cache_html_ = x; }
  bool default_cache_html() const { return default_cache_html_; }

  void set_proactively_freshen_user_facing_request(bool x) {
    proactively_freshen_user_facing_request_ = x;
  }

  bool proactively_freshen_user_facing_request() const {
    return proactively_freshen_user_facing_request_;
  }

  void set_own_fetcher(bool x) { own_fetcher_ = x; }

 private:
  // Not owned by CacheUrlAsyncFetcher.
  const Hasher* lock_hasher_;
  NamedLockManager* lock_manager_;
  HTTPCache* http_cache_;
  GoogleString fragment_;
  UrlAsyncFetcher* fetcher_;  // may be NULL.
  AsyncOpHooks* async_op_hooks_;
  Histogram* backend_first_byte_latency_;  // may be NULL.
  Variable* fallback_responses_served_;  // may be NULL.
  Variable* fallback_responses_served_while_revalidate_;  // may be NULL.
  Variable* num_conditional_refreshes_;  // may be NULL.
  Variable* num_proactively_freshen_user_facing_request_;  // may be NULL.
  bool respect_vary_;
  bool ignore_recent_fetch_failed_;
  bool serve_stale_if_fetch_error_;
  bool default_cache_html_;
  bool proactively_freshen_user_facing_request_;
  bool own_fetcher_;  // set true to transfer ownership of fetcher to this.
  int64 serve_stale_while_revalidate_threshold_sec_;

  DISALLOW_COPY_AND_ASSIGN(CacheUrlAsyncFetcher);
};
} // namespace net_instaweb
#endif // NET_INSTAWEB_HTTP_PUBLIC_CACHE_URL_ASYNC_FETCHER_H_
| jalonsoa/mod_pagespeed | net/instaweb/http/public/cache_url_async_fetcher.h | C | apache-2.0 | 7,697 |
# coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| google/init2winit | init2winit/optimizer_lib/__init__.py | Python | apache-2.0 | 603 |
/* Copyright 2005-2006 Tim Fennell
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.stripes.util;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.regex.Pattern;
/**
* Provides simple utility methods for dealing with HTML.
*
* @author Tim Fennell
*/
/**
 * Provides simple utility methods for dealing with HTML.
 *
 * @author Tim Fennell
 */
public class HtmlUtil {
    /** Delimiter inserted between values when combining them into one String. */
    private static final String FIELD_DELIMITER_STRING = "||";
    /** Regex-escaped form of the delimiter, used to split combined values. */
    private static final Pattern FIELD_DELIMITER_PATTERN = Pattern.compile("\\|\\|");

    /**
     * Replaces special HTML characters from the set {@literal [<, >, ", &]} with their HTML
     * escape codes. Note that because the escape codes are multi-character the returned
     * String could be longer than the one passed in. Single quotes are deliberately NOT
     * escaped (see the comment in the method body).
     *
     * @param fragment a String fragment that might have HTML special characters in it
     * @return the fragment with special characters escaped, or null if fragment was null
     */
    public static String encode(String fragment) {
        // If the input is null, then the output is null
        if (fragment == null) return null;

        StringBuilder builder = new StringBuilder(fragment.length() + 10); // a little wiggle room

        // This loop used to also look for and replace single ticks with &#39; but it
        // turns out that it's not strictly necessary since Stripes uses double-quotes
        // around all form fields, and stupid IE6 will render &#39; verbatim instead
        // of as a single quote.
        for (char ch : fragment.toCharArray()) {
            switch (ch) {
                case '<' : builder.append("&lt;"); break;
                case '>' : builder.append("&gt;"); break;
                case '"' : builder.append("&quot;"); break;
                case '&' : builder.append("&amp;"); break;
                default: builder.append(ch);
            }
        }

        return builder.toString();
    }

    /**
     * One of a pair of methods (the other is splitValues) that is used to combine several
     * un-encoded values into a single delimited, encoded value for placement into a
     * hidden field. Each value (including the last) is followed by the delimiter, and
     * the whole string is then HTML-encoded.
     *
     * @param values One or more values which are to be combined; may be null or empty
     * @return a single HTML-encoded String that contains all the values in such a way that
     *         they can be converted back into a Collection of Strings with splitValues(),
     *         or the empty String when values is null or empty.
     */
    public static String combineValues(Collection<String> values) {
        if (values == null || values.isEmpty()) {
            return "";
        }

        StringBuilder builder = new StringBuilder(values.size() * 30);
        for (String value : values) {
            builder.append(value).append(FIELD_DELIMITER_STRING);
        }

        return encode(builder.toString());
    }

    /**
     * Takes in a String produced by combineValues and returns a Collection of values that
     * contains the same values as originally supplied to combineValues. Note that the order
     * or items in the collection (and indeed the type of Collection used) are not guaranteed
     * to be the same.
     *
     * @param value a String value produced by combineValues; may be null or empty
     * @return a Collection of zero or more Strings (the trailing delimiter produced by
     *         combineValues yields no empty element, since Pattern.split drops trailing
     *         empty strings)
     */
    public static Collection<String> splitValues(String value) {
        if (value == null || value.isEmpty()) {
            return Collections.emptyList();
        }

        return Arrays.asList(FIELD_DELIMITER_PATTERN.split(value));
    }
}
| scarcher2/stripes | stripes/src/net/sourceforge/stripes/util/HtmlUtil.java | Java | apache-2.0 | 4,132 |
/*
* Copyright 2017 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.agent.common;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
public class AgentCLITest {

    // Captures everything the CLI writes to its error stream.
    private ByteArrayOutputStream errorStream;
    private AgentCLI agentCLI;
    private AgentCLI.SystemExitter exitter;

    @Before
    public void setUp() throws Exception {
        errorStream = new ByteArrayOutputStream();
        // Stub out System.exit with an exception so tests can observe the
        // exit status instead of terminating the JVM.
        exitter = new AgentCLI.SystemExitter() {
            @Override
            public void exit(int status) {
                throw new ExitException(status);
            }
        };
        agentCLI = new AgentCLI(new PrintStream(errorStream), exitter);
    }

    @Test
    public void shouldDieIfNoArguments() {
        // -serverUrl is mandatory; parsing nothing must exit with status 1.
        try {
            agentCLI.parse();
            Assert.fail("Was expecting an exception!");
        } catch (ExitException e) {
            assertThat(e.getStatus(), is(1));
            assertThat(errorStream.toString(), containsString("The following option is required: [-serverUrl]"));
            assertThat(errorStream.toString(), containsString("Usage: java -jar agent-bootstrapper.jar"));
        }
    }

    @Test
    public void serverURLMustBeAValidURL() throws Exception {
        // A non-URL value for -serverUrl is rejected with usage output.
        try {
            agentCLI.parse("-serverUrl", "foobar");
            Assert.fail("Was expecting an exception!");
        } catch (ExitException e) {
            assertThat(e.getStatus(), is(1));
            assertThat(errorStream.toString(), containsString("-serverUrl is not a valid url"));
            assertThat(errorStream.toString(), containsString("Usage: java -jar agent-bootstrapper.jar"));
        }
    }

    @Test
    public void serverURLMustBeSSL() throws Exception {
        // Plain http:// URLs are rejected; only https:// is accepted.
        try {
            agentCLI.parse("-serverUrl", "http://go.example.com:8154/go");
            Assert.fail("Was expecting an exception!");
        } catch (ExitException e) {
            assertThat(e.getStatus(), is(1));
            assertThat(errorStream.toString(), containsString("serverUrl must be an HTTPS url and must begin with https://"));
            assertThat(errorStream.toString(), containsString("Usage: java -jar agent-bootstrapper.jar"));
        }
    }

    @Test
    public void shouldPassIfCorrectArgumentsAreProvided() throws Exception {
        // Happy path: valid https URL and an explicit SSL verification mode.
        AgentBootstrapperArgs agentBootstrapperArgs = agentCLI.parse("-serverUrl", "https://go.example.com:8154/go", "-sslVerificationMode", "NONE");
        assertThat(agentBootstrapperArgs.getServerUrl().toString(), is("https://go.example.com:8154/go"));
        assertThat(agentBootstrapperArgs.getSslMode(), is(AgentBootstrapperArgs.SslMode.NONE));
    }

    @Test
    public void shouldRaisExceptionWhenInvalidSslModeIsPassed() throws Exception {
        // Unknown -sslVerificationMode values list the allowed options.
        try {
            agentCLI.parse("-serverUrl", "https://go.example.com:8154/go", "-sslVerificationMode", "FOOBAR");
            Assert.fail("Was expecting an exception!");
        } catch (ExitException e) {
            assertThat(e.getStatus(), is(1));
            assertThat(errorStream.toString(), containsString("Invalid value for -sslVerificationMode parameter. Allowed values:[FULL, NONE, NO_VERIFY_HOST]"));
            assertThat(errorStream.toString(), containsString("Usage: java -jar agent-bootstrapper.jar"));
        }
    }

    @Test
    public void shouldDefaultsTheSslModeToNONEWhenNotSpecified() throws Exception {
        AgentBootstrapperArgs agentBootstrapperArgs = agentCLI.parse("-serverUrl", "https://go.example.com/go");
        assertThat(agentBootstrapperArgs.getSslMode(), is(AgentBootstrapperArgs.SslMode.NONE));
    }

    @Test
    public void printsHelpAndExitsWith0() throws Exception {
        // -help is not an error: it prints usage and exits with status 0.
        try {
            agentCLI.parse("-help");
            Assert.fail("Was expecting an exception!");
        } catch (ExitException e) {
            assertThat(e.getStatus(), is(0));
        }
    }

    // Thrown by the stubbed SystemExitter in place of a real System.exit,
    // carrying the would-be process exit status.
    class ExitException extends RuntimeException {
        private final int status;

        public ExitException(int status) {
            this.status = status;
        }

        public int getStatus() {
            return status;
        }
    }
}
| varshavaradarajan/gocd | agent-common/src/test/java/com/thoughtworks/go/agent/common/AgentCLITest.java | Java | apache-2.0 | 4,889 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.