code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231
values | license stringclasses 13
values | size int64 1 2.01M |
|---|---|---|---|---|---|
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
    # Declare each WiMAX example program together with the ns-3 modules
    # it depends on; the source file name always matches the program name.
    examples = [
        ('wimax-ipv4', ['wimax', 'internet', 'mobility', 'csma']),
        ('wimax-multicast', ['wimax', 'internet', 'csma']),
        ('wimax-simple', ['wimax', 'internet', 'mobility']),
    ]
    for name, modules in examples:
        program = bld.create_ns3_program(name, modules)
        program.source = name + '.cc'
| zy901002-gpsr | src/wimax/examples/wscript | Python | gpl2 | 478 |
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2007,2008, 2009 INRIA, UDcast
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Mohamed Amine Ismail <amine.ismail@sophia.inria.fr>
* <amine.ismail@udcast.com>
*/
// Default network topology includes:
// - A base station (BS)
// - Some number of SSs specified by the variable nbSS (defaults to 10)
// - A multicast router (ASNGW)
// - A multicast streamer
// Two Lans are setup: The first one between the multicast streamer and the
// ASNGW, the second one between the ASNGW (router) and the base station
// +-----+ +-----+ +-----+ +-----+ +-----+
// | SS0 | | SS1 | | SS2 | | SS3 | | SS4 |
// +-----+ +-----+ +-----+ +-----+ +-----+
// 10.1.0.1 10.1.0.2 10.1.0.3 10.1.0.4 10.1.0.5
// -------- -------- ------- ------- --------
// ((*)) ((*)) ((*)) ((*)) ((*))
//
// LAN2 (11.1.1.0)
// ===============
// 10.1.0.11 | |
// +------------+ ASNGW multicast Streamer
// ((*))==|Base Station| | (12.1.1.0) |
// +------------+ ==================
// LAN1
//
// ((*)) ((*)) ((*)) ((*)) ((*))
// ------- -------- -------- ------- --------
// 10.1.0.6 10.1.0.7 10.1.0.8 10.1.0.9 10.1.0.10
// +-----+ +-----+ +-----+ +-----+ +-----+
// | SS5 | | SS6 | | SS7 | | SS8 | | SS9 |
// +-----+ +-----+ +-----+ +-----+ +-----+
#include "ns3/core-module.h"
#include "ns3/network-module.h"
#include "ns3/applications-module.h"
#include "ns3/mobility-module.h"
#include "ns3/config-store-module.h"
#include "ns3/wimax-module.h"
#include "ns3/csma-module.h"
#include <iostream>
#include "ns3/global-route-manager.h"
#include "ns3/mobility-module.h"
#include "ns3/internet-module.h"
#include "ns3/vector.h"
// Log component for this example's NS_LOG_* output.
NS_LOG_COMPONENT_DEFINE ("WimaxMulticastSimulation");
// Capacity of the fixed-size per-SS arrays declared in main();
// the command-line option nbSS must not exceed this value.
#define MAXSS 1000
#define MAXDIST 10 // km
using namespace ns3;
/**
 * Simulate nbSS subscriber stations receiving a multicast UDP stream.
 *
 * Topology (see the header comment): the SSs attach to one base station
 * over a simple-OFDM WiMAX channel; the BS connects over CSMA LAN2
 * (11.1.1.0/24) to an ASN gateway, which connects over CSMA LAN1
 * (12.1.1.0/24) to the multicast streamer. Static multicast routes
 * forward the stream Streamer -> ASNGW -> BS -> SSs, and a downlink UGS
 * service flow classifies the multicast traffic on the air interface.
 *
 * \return 0 on success, 1 on invalid command-line arguments.
 */
int main (int argc, char *argv[])
{
  bool verbose = false;

  NodeContainer ssNodes;
  Ptr<SubscriberStationNetDevice> ss[MAXSS];
  NetDeviceContainer ssDevs;
  Ipv4InterfaceContainer SSinterfaces;

  NodeContainer bsNodes;
  Ptr<BaseStationNetDevice> bs;
  NetDeviceContainer bsDevs, bsDevsOne;
  Ipv4InterfaceContainer BSinterfaces;

  UdpServerHelper udpServer[MAXSS];
  ApplicationContainer serverApps[MAXSS];
  UdpTraceClientHelper udpClient;
  ApplicationContainer clientApps;

  Ptr<SimpleOfdmWimaxChannel> channel;
  NodeContainer Streamer_Node;
  NodeContainer ASNGW_Node;

  Ptr<ConstantPositionMobilityModel> BSPosition;
  Ptr<RandomWaypointMobilityModel> SSPosition[MAXSS];
  Ptr<RandomRectanglePositionAllocator> SSPosAllocator[MAXSS];

  // default values
  int nbSS = 10, duration = 7, schedType = 0;
  WimaxHelper::SchedulerType scheduler = WimaxHelper::SCHED_TYPE_SIMPLE;

  CommandLine cmd;
  cmd.AddValue ("nbSS", "number of subscriber station to create", nbSS);
  cmd.AddValue ("scheduler", "type of scheduler to use with the netdevices", schedType);
  cmd.AddValue ("duration", "duration of the simulation in seconds", duration);
  cmd.AddValue ("verbose", "turn on all WimaxNetDevice log components", verbose);
  cmd.Parse (argc, argv);

  // BUG FIX: nbSS is user-controlled but indexes the fixed-size arrays
  // above (ss, udpServer, serverApps, SSPosition, SSPosAllocator).
  // Reject out-of-range values instead of overflowing the stack arrays.
  if (nbSS < 1 || nbSS > MAXSS)
    {
      std::cerr << "nbSS must be between 1 and " << MAXSS << std::endl;
      return 1;
    }

  LogComponentEnable ("UdpTraceClient", LOG_LEVEL_INFO);
  LogComponentEnable ("UdpServer", LOG_LEVEL_INFO);

  // Map the numeric --scheduler option onto the helper's enum;
  // any unknown value falls back to the simple scheduler.
  switch (schedType)
    {
    case 0:
      scheduler = WimaxHelper::SCHED_TYPE_SIMPLE;
      break;
    case 1:
      scheduler = WimaxHelper::SCHED_TYPE_MBQOS;
      break;
    case 2:
      scheduler = WimaxHelper::SCHED_TYPE_RTPS;
      break;
    default:
      scheduler = WimaxHelper::SCHED_TYPE_SIMPLE;
    }

  ssNodes.Create (nbSS);
  bsNodes.Create (1);
  Streamer_Node.Create (1);
  ASNGW_Node.Create (1);

  WimaxHelper wimax;
  channel = CreateObject<SimpleOfdmWimaxChannel> ();
  channel->SetPropagationModel (SimpleOfdmWimaxChannel::COST231_PROPAGATION);

  // All SS devices and the single BS device share the same OFDM channel.
  ssDevs = wimax.Install (ssNodes,
                          WimaxHelper::DEVICE_TYPE_SUBSCRIBER_STATION,
                          WimaxHelper::SIMPLE_PHY_TYPE_OFDM,
                          channel,
                          scheduler);
  Ptr<WimaxNetDevice> dev = wimax.Install (bsNodes.Get (0),
                                           WimaxHelper::DEVICE_TYPE_BASE_STATION,
                                           WimaxHelper::SIMPLE_PHY_TYPE_OFDM,
                                           channel,
                                           scheduler);

  // The BS stays fixed at (1000, 0, 0).
  BSPosition = CreateObject<ConstantPositionMobilityModel> ();
  BSPosition->SetPosition (Vector (1000, 0, 0));
  bsNodes.Get (0)->AggregateObject (BSPosition);
  bsDevs.Add (dev);

  if (verbose)
    {
      wimax.EnableLogComponents ();  // Turn on all wimax logging
    }

  // Give each SS a random-waypoint mobility model; groups of 40 SSs share
  // a 2 km x 2 km square, and every SS uses 16-QAM 1/2 modulation.
  for (int i = 0; i < nbSS; i++)
    {
      SSPosition[i] = CreateObject<RandomWaypointMobilityModel> ();
      SSPosAllocator[i] = CreateObject<RandomRectanglePositionAllocator> ();
      SSPosAllocator[i]->SetX (UniformVariable ((i / 40) * 2000, (i / 40 + 1) * 2000));
      SSPosAllocator[i]->SetY (UniformVariable ((i / 40) * 2000, (i / 40 + 1) * 2000));
      SSPosition[i]->SetAttribute ("PositionAllocator", PointerValue (SSPosAllocator[i]));
      SSPosition[i]->SetAttribute ("Speed", RandomVariableValue (UniformVariable (10.3, 40.7)));
      SSPosition[i]->SetAttribute ("Pause", RandomVariableValue (ConstantVariable (0.01)));
      ss[i] = ssDevs.Get (i)->GetObject<SubscriberStationNetDevice> ();
      ss[i]->SetModulationType (WimaxPhy::MODULATION_TYPE_QAM16_12);
      ssNodes.Get (i)->AggregateObject (SSPosition[i]);
    }
  bs = bsDevs.Get (0)->GetObject<BaseStationNetDevice> ();

  CsmaHelper csmaASN_BS;
  CsmaHelper csmaStreamer_ASN;

  // First LAN: BS <-> ASN gateway (10 Mb/s, 2 ms delay, MTU 1500).
  NodeContainer LAN_ASN_BS;
  LAN_ASN_BS.Add (bsNodes.Get (0));
  LAN_ASN_BS.Add (ASNGW_Node.Get (0));
  csmaASN_BS.SetChannelAttribute ("DataRate", DataRateValue (DataRate (10000000)));
  csmaASN_BS.SetChannelAttribute ("Delay", TimeValue (MilliSeconds (2)));
  csmaASN_BS.SetDeviceAttribute ("Mtu", UintegerValue (1500));
  NetDeviceContainer LAN_ASN_BS_Devs = csmaASN_BS.Install (LAN_ASN_BS);
  NetDeviceContainer BS_CSMADevs;
  BS_CSMADevs.Add (LAN_ASN_BS_Devs.Get (0));
  NetDeviceContainer ASN_Devs1;
  ASN_Devs1.Add (LAN_ASN_BS_Devs.Get (1));

  // Second LAN: ASN gateway <-> multicast streamer (same CSMA settings).
  NodeContainer LAN_ASN_STREAMER;
  LAN_ASN_STREAMER.Add (ASNGW_Node.Get (0));
  LAN_ASN_STREAMER.Add (Streamer_Node.Get (0));
  csmaStreamer_ASN.SetChannelAttribute ("DataRate", DataRateValue (DataRate (10000000)));
  csmaStreamer_ASN.SetChannelAttribute ("Delay", TimeValue (MilliSeconds (2)));
  csmaStreamer_ASN.SetDeviceAttribute ("Mtu", UintegerValue (1500));
  NetDeviceContainer LAN_ASN_STREAMER_Devs = csmaStreamer_ASN.Install (LAN_ASN_STREAMER);
  NetDeviceContainer STREAMER_Devs;
  NetDeviceContainer ASN_Devs2;
  ASN_Devs2.Add (LAN_ASN_STREAMER_Devs.Get (0));
  STREAMER_Devs.Add (LAN_ASN_STREAMER_Devs.Get (1));

  MobilityHelper mobility;
  InternetStackHelper stack;
  mobility.Install (bsNodes);
  stack.Install (bsNodes);
  mobility.Install (ssNodes);
  stack.Install (ssNodes);
  stack.Install (Streamer_Node);
  stack.Install (ASNGW_Node);

  // Addressing: 10.1.0.0/24 for the WiMAX segment, 11.1.1.0/24 for
  // BS<->ASNGW, 12.1.1.0/24 for ASNGW<->streamer.
  Ipv4AddressHelper address;
  address.SetBase ("10.1.0.0", "255.255.255.0");
  bsDevsOne.Add (bs);
  BSinterfaces = address.Assign (bsDevsOne);
  SSinterfaces = address.Assign (ssDevs);
  address.SetBase ("11.1.1.0", "255.255.255.0");
  Ipv4InterfaceContainer BSCSMAInterfaces = address.Assign (BS_CSMADevs);
  Ipv4InterfaceContainer ASNCSMAInterfaces1 = address.Assign (ASN_Devs1);
  address.SetBase ("12.1.1.0", "255.255.255.0");
  Ipv4InterfaceContainer ASNCSMAInterfaces2 = address.Assign (ASN_Devs2);
  Ipv4InterfaceContainer StreamerCSMAInterfaces = address.Assign (STREAMER_Devs);

  Ipv4Address multicastSource ("12.1.1.2");
  Ipv4Address multicastGroup ("224.30.10.81");
  Ipv4StaticRoutingHelper multicast;

  // 1) Configure a (static) multicast route on ASNGW (multicastRouter)
  Ptr<Node> multicastRouter = ASNGW_Node.Get (0); // The node in question
  Ptr<NetDevice> inputIf = ASN_Devs2.Get (0); // The input NetDevice
  multicast.AddMulticastRoute (multicastRouter, multicastSource, multicastGroup, inputIf, ASN_Devs1);

  // 2) Set up a default multicast route on the sender n0
  Ptr<Node> sender = Streamer_Node.Get (0);
  Ptr<NetDevice> senderIf = STREAMER_Devs.Get (0);
  multicast.SetDefaultMulticastRoute (sender, senderIf);

  // 3) Forward the stream from the BS's CSMA interface onto the WiMAX device.
  multicastRouter = bsNodes.Get (0); // The node in question
  inputIf = BS_CSMADevs.Get (0); // The input NetDevice
  multicast.AddMulticastRoute (multicastRouter, multicastSource, multicastGroup, inputIf, bsDevsOne);

  // One UDP server per SS listens for the multicast stream; the trace
  // client on the streamer starts at t=6 s and runs until `duration`.
  uint16_t multicast_port = 100;
  for (int i = 0; i < nbSS; i++)
    {
      udpServer[i] = UdpServerHelper (multicast_port);
      serverApps[i] = udpServer[i].Install (ssNodes.Get (i));
      serverApps[i].Start (Seconds (6));
      serverApps[i].Stop (Seconds (duration));
    }
  udpClient = UdpTraceClientHelper (multicastGroup, multicast_port, "");
  clientApps = udpClient.Install (Streamer_Node.Get (0));
  clientApps.Start (Seconds (6));
  clientApps.Stop (Seconds (duration));

  // Classify every UDP (protocol 17) packet destined to the multicast
  // group on any source port in [0, 65000] into a downlink UGS flow.
  IpcsClassifierRecord MulticastClassifier (Ipv4Address ("0.0.0.0"),
                                            Ipv4Mask ("0.0.0.0"),
                                            multicastGroup,
                                            Ipv4Mask ("255.255.255.255"),
                                            0,
                                            65000,
                                            multicast_port,
                                            multicast_port,
                                            17,
                                            1);
  ServiceFlow MulticastServiceFlow = wimax.CreateServiceFlow (ServiceFlow::SF_DIRECTION_DOWN,
                                                              ServiceFlow::SF_TYPE_UGS,
                                                              MulticastClassifier);
  bs->GetServiceFlowManager ()->AddMulticastServiceFlow (MulticastServiceFlow, WimaxPhy::MODULATION_TYPE_QPSK_12);

  Simulator::Stop (Seconds (duration + 0.1));
  NS_LOG_INFO ("Starting simulation.....");
  Simulator::Run ();

  // Drop the smart-pointer references before Simulator::Destroy so the
  // underlying objects can be reclaimed.
  for (int i = 0; i < nbSS; i++)
    {
      ss[i] = 0;
      SSPosition[i] = 0;
      SSPosAllocator[i] = 0;
    }
  bs = 0;
  Simulator::Destroy ();
  NS_LOG_INFO ("Done.");
  return 0;
}
| zy901002-gpsr | src/wimax/examples/wimax-multicast.cc | C++ | gpl2 | 11,385 |
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2007,2008, 2009 INRIA, UDCAST
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Jahanzeb Farooq <jahanzeb.farooq@sophia.inria.fr>
* Amine Ismail <amine.ismail@sophia.inria.fr>
*/
#ifndef WIMAX_HELPER_H
#define WIMAX_HELPER_H
#include <string>
#include "ns3/object-factory.h"
#include "ns3/node-container.h"
#include "ns3/net-device-container.h"
#include "ns3/bs-net-device.h"
#include "ns3/ss-net-device.h"
#include "ns3/deprecated.h"
#include "ns3/service-flow.h"
#include "ns3/propagation-loss-model.h"
#include "ns3/simple-ofdm-wimax-channel.h"
#include "ns3/bs-uplink-scheduler.h"
#include "ns3/bs-uplink-scheduler-mbqos.h"
#include "ns3/bs-uplink-scheduler-simple.h"
#include "ns3/bs-uplink-scheduler-rtps.h"
#include "ns3/bs-scheduler.h"
#include "ns3/bs-scheduler-simple.h"
#include "ns3/bs-scheduler-rtps.h"
#include "ns3/trace-helper.h"
namespace ns3 {
class WimaxChannel;
class WimaxPhy;
class UplinkScheduler;
/**
* \brief helps to manage and create WimaxNetDevice objects
*
* This class can help to create a large set of similar
* WimaxNetDevice objects and to configure their attributes
* during creation.
*/
class WimaxHelper : public PcapHelperForDevice, public AsciiTraceHelperForDevice
{
public:
  /**
   * Net Device Type
   * Distinguish a subscriber station(SS) device from base station(BS) device
   */
  enum NetDeviceType
  {
    DEVICE_TYPE_SUBSCRIBER_STATION, /**< Subscriber station(SS) device */
    DEVICE_TYPE_BASE_STATION
    /**< Base station(BS) device */
  };
  /**
   * WiMAX Physical layer
   * WiMAX Physical layers with different levels of detail
   */
  enum PhyType
  {
    SIMPLE_PHY_TYPE_OFDM /**< Simple OFDM PHY (the only type currently supported) */
  };
  /**
   * Scheduler Type
   * Different implementations of uplink/downlink scheduler
   */
  enum SchedulerType
  {
    SCHED_TYPE_SIMPLE, /**< A simple priority-based FCFS scheduler */
    SCHED_TYPE_RTPS, /**< A simple scheduler - rtPS based scheduler */
    SCHED_TYPE_MBQOS
    /**< A migration-based uplink scheduler */
  };
  /**
   * \brief Create a Wimax helper in an empty state.
   */
  WimaxHelper (void);
  ~WimaxHelper (void);
  /**
   * \brief Enable ascii trace output on the indicated net device for a given connection
   * \param oss The output stream object to use when logging ascii traces.
   * \param nodeid the id of the node for which you want to enable tracing.
   * \param deviceid the id of the net device for which you want to enable tracing.
   * \param netdevice the type of net device for which you want to enable tracing (SubscriberStationNetDevice,
   * BaseStationNetDevice or WimaxNetDevice)
   * \param connection the connection for which you want to enable tracing (InitialRangingConnection,
   * BroadcastConnection, BasicConnection, PrimaryConnection).
   */
  static void EnableAsciiForConnection (Ptr<OutputStreamWrapper> oss,
                                        uint32_t nodeid,
                                        uint32_t deviceid,
                                        char *netdevice,
                                        char *connection);
  /**
   * \param phyType WiMAX Physical layer type
   * \return WiMAX Phy object
   *
   * Creates a physical layer object to be used in simulation.
   */
  Ptr<WimaxPhy> CreatePhy (PhyType phyType);
  /**
   * \param schedulerType Scheduling mechanism
   * \return Uplink scheduler
   *
   * Creates a uplink scheduler to be used by base station
   * according to selected scheduling mechanism.
   */
  Ptr<UplinkScheduler> CreateUplinkScheduler (SchedulerType schedulerType);
  /**
   * \param schedulerType Scheduling mechanism
   * \return Downlink scheduler
   *
   * Creates a downlink scheduler to be used by base station
   * according to selected scheduling mechanism.
   */
  Ptr<BSScheduler> CreateBSScheduler (SchedulerType schedulerType);
  /**
   * \param c a set of nodes
   * \param type device type to create
   * \param phyType a phy to use
   * \param schedulerType the type of the scheduling algorithm to install
   * \return a container holding the created devices
   *
   * For each of the input nodes, a new WiMAX net device (either
   * ns3::SubscriberStationNetDevice or ns3::BaseStationNetDevice
   * depending on the type parameter) is attached to the shared input channel.
   */
  NetDeviceContainer Install (NodeContainer c, NetDeviceType type, PhyType phyType, SchedulerType schedulerType);
  /**
   * \param c A set of nodes.
   * \param deviceType Device type to create.
   * \param phyType PHY type to create.
   * \param channel A channel to use.
   * \param schedulerType The scheduling mechanism.
   * \return a container holding the created devices
   *
   * For each of the input nodes, a new WiMAX net device (either
   * ns3::SubscriberStationNetDevice or ns3::BaseStationNetDevice
   * depending on the type parameter) is attached to the shared input channel.
   */
  NetDeviceContainer Install (NodeContainer c,
                              NetDeviceType deviceType,
                              PhyType phyType,
                              Ptr<WimaxChannel> channel,
                              SchedulerType schedulerType);
  /**
   * \param c A set of nodes.
   * \param deviceType Device type to create.
   * \param phyType PHY type to create.
   * \param schedulerType The scheduling mechanism.
   * \param frameDuration the frame duration in seconds
   * \return a container holding the created devices
   *
   * For each of the input nodes, a new WiMAX net device (either
   * ns3::SubscriberStationNetDevice or ns3::BaseStationNetDevice
   * depending on the type parameter) is attached to the shared input channel.
   */
  NetDeviceContainer Install (NodeContainer c,
                              NetDeviceType deviceType,
                              PhyType phyType,
                              SchedulerType schedulerType,
                              double frameDuration);
  /**
   * \brief Set the propagation and loss model of the channel. By default the channel
   * uses a COST231 propagation and loss model.
   * \param propagationModel The propagation and loss model to set
   */
  void SetPropagationLossModel (SimpleOfdmWimaxChannel::PropModel propagationModel);
  /**
   * \param phyType WiMAX Physical layer type
   * \return WiMAX Phy object
   *
   * Creates a physical layer without a channel
   */
  Ptr<WimaxPhy> CreatePhyWithoutChannel (PhyType phyType);
  /**
   * \param phyType WiMAX Physical layer type
   * \param SNRTraceFilePath of the repository containing the SNR traces files
   * \param activateLoss set to 1 to activate losses 0 otherwise
   * \return WiMAX Phy object
   *
   * Creates a physical layer without creating a channel
   */
  Ptr<WimaxPhy> CreatePhyWithoutChannel (PhyType phyType, char * SNRTraceFilePath, bool activateLoss);
  /**
   * \param phyType WiMAX Physical layer type
   * \param SNRTraceFilePath the path to the repository containing the SNR traces files
   * \param activateLoss set to 1 if you want to activate losses 0 otherwise
   * \return WiMAX Phy object
   *
   * Creates a physical layer
   */
  Ptr<WimaxPhy> CreatePhy (PhyType phyType, char * SNRTraceFilePath, bool activateLoss);
  /**
   * \param node Node to be installed.
   * \param deviceType Device type to create.
   * \param phyType PHY type to create.
   * \param channel A channel to use.
   * \param schedulerType The scheduling mechanism to install on the device.
   * \return the created device
   *
   * A new WiMAX net device (either ns3::SubscriberStationNetDevice or
   * ns3::BaseStationNetDevice depending on the type parameter) is
   * attached to the shared input channel.
   */
  Ptr<WimaxNetDevice> Install (Ptr<Node> node,
                               NetDeviceType deviceType,
                               PhyType phyType,
                               Ptr<WimaxChannel> channel,
                               SchedulerType schedulerType);
  /**
   * \brief Creates a transport service flow.
   * \param direction the direction of the service flow: UP or DOWN.
   * \param schedulinType The service scheduling type to be used: UGS, RTPS, NRTPS, BE
   * \param classifier The classifier to be used for this service flow
   * \return the created service flow, preconfigured with default QoS parameters
   */
  ServiceFlow CreateServiceFlow (ServiceFlow::Direction direction,
                                 ServiceFlow::SchedulingType schedulinType,
                                 IpcsClassifierRecord classifier);
  /**
   * Helper to enable all WimaxNetDevice log components with one statement
   */
  static void EnableLogComponents (void);
private:
  /**
   * \brief Ascii trace sink for device receive events.
   * \param stream the output stream to write the trace record to
   * \param path the Config path of the traced source
   * \param packet the received packet
   * \param source the MAC address of the sender
   */
  static void AsciiRxEvent (Ptr<OutputStreamWrapper> stream, std::string path, Ptr<const Packet> packet, const Mac48Address &source);
  /**
   * \brief Ascii trace sink for device transmit events.
   * \param stream the output stream to write the trace record to
   * \param path the Config path of the traced source
   * \param packet the transmitted packet
   * \param dest the MAC address of the receiver
   */
  static void AsciiTxEvent (Ptr<OutputStreamWrapper> stream, std::string path, Ptr<const Packet> packet, const Mac48Address &dest);
  /**
   * \brief Enable pcap output on the indicated net device.
   * \internal
   *
   * NetDevice-specific implementation mechanism for hooking the trace and
   * writing to the trace file.
   *
   * \param prefix Filename prefix to use for pcap files.
   * \param nd Net device for which you want to enable tracing.
   * \param explicitFilename Treat the prefix as an explicit filename if true
   * \param promiscuous If true capture all possible packets available at the device.
   */
  virtual void EnablePcapInternal (std::string prefix, Ptr<NetDevice> nd, bool explicitFilename, bool promiscuous);
  /**
   * \brief Enable ascii trace output on the indicated net device.
   * \internal
   *
   * NetDevice-specific implementation mechanism for hooking the trace and
   * writing to the trace file.
   *
   * \param stream The output stream object to use when logging ascii traces.
   * \param prefix Filename prefix to use for ascii trace files.
   * \param nd Net device for which you want to enable tracing.
   * \param explicitFilename Treat the prefix as an explicit filename if true
   */
  virtual void EnableAsciiInternal (Ptr<OutputStreamWrapper> stream,
                                    std::string prefix,
                                    Ptr<NetDevice> nd,
                                    bool explicitFilename);
  Ptr<WimaxChannel> m_channel; ///< shared channel, created lazily by CreatePhy/SetPropagationLossModel
};
} // namespace ns3
#endif /* WIMAX_HELPER_H */
| zy901002-gpsr | src/wimax/helper/wimax-helper.h | C++ | gpl2 | 10,816 |
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2007,2008 INRIA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Jahanzeb Farooq <jahanzeb.farooq@sophia.inria.fr>
* Mohamed Amine Ismail <amine.ismail@sophia.inria.fr>
*/
#include "wimax-helper.h"
#include "ns3/simulator.h"
#include "ns3/packet.h"
#include "ns3/log.h"
#include <string>
#include "ns3/config.h"
#include "ns3/wimax-net-device.h"
#include "ns3/bs-net-device.h"
#include "ns3/ss-net-device.h"
#include "ns3/wimax-channel.h"
#include "ns3/simple-ofdm-wimax-channel.h"
#include "ns3/wimax-phy.h"
#include "ns3/simple-ofdm-wimax-phy.h"
#include "ns3/pointer.h"
#include "ns3/wimax-mac-to-mac-header.h"
NS_LOG_COMPONENT_DEFINE ("WimaxHelper");
namespace ns3 {
// Construct the helper with no channel; the channel is created lazily
// by CreatePhy or SetPropagationLossModel on first use.
WimaxHelper::WimaxHelper (void)
  : m_channel (0)
{
}
// Nothing to release explicitly: m_channel is a Ptr and is reclaimed
// by reference counting.
WimaxHelper::~WimaxHelper (void)
{
}
// Hook the default ascii enqueue/dequeue/drop sinks onto the TxQueue of
// the given connection of the given device.
void WimaxHelper::EnableAsciiForConnection (Ptr<OutputStreamWrapper> os,
                                            uint32_t nodeid,
                                            uint32_t deviceid,
                                            char *netdevice,
                                            char *connection)
{
  // The three trace sources hang off the same TxQueue subtree, so build
  // the common Config path prefix once.
  std::ostringstream pathBuilder;
  pathBuilder << "/NodeList/" << nodeid << "/DeviceList/" << deviceid << "/$ns3::" << netdevice << "/" << connection
              << "/TxQueue/";
  const std::string prefix = pathBuilder.str ();
  Config::Connect (prefix + "Enqueue",
                   MakeBoundCallback (&AsciiTraceHelper::DefaultEnqueueSinkWithContext, os));
  Config::Connect (prefix + "Dequeue",
                   MakeBoundCallback (&AsciiTraceHelper::DefaultDequeueSinkWithContext, os));
  Config::Connect (prefix + "Drop",
                   MakeBoundCallback (&AsciiTraceHelper::DefaultDropSinkWithContext, os));
}
// Create a PHY of the requested type, lazily creating the shared
// channel (COST231 propagation) if it does not exist yet.
Ptr<WimaxPhy> WimaxHelper::CreatePhy (PhyType phyType)
{
  // Only the simple OFDM PHY is supported; anything else is fatal.
  if (phyType != SIMPLE_PHY_TYPE_OFDM)
    {
      NS_FATAL_ERROR ("Invalid physical type");
    }
  Ptr<WimaxPhy> ofdmPhy = CreateObject<SimpleOfdmWimaxPhy> ();
  if (m_channel == 0)
    {
      m_channel = CreateObject<SimpleOfdmWimaxChannel> (SimpleOfdmWimaxChannel::COST231_PROPAGATION);
    }
  return ofdmPhy;
}
// Configure the propagation model on the shared channel, creating the
// channel first if no PHY has been created yet.
void WimaxHelper::SetPropagationLossModel (SimpleOfdmWimaxChannel::PropModel propagationModel)
{
  if (m_channel == 0)
    {
      m_channel = CreateObject<SimpleOfdmWimaxChannel> ();
    }
  Ptr<SimpleOfdmWimaxChannel> ofdmChannel = m_channel->GetObject<SimpleOfdmWimaxChannel> ();
  ofdmChannel->SetPropagationModel (propagationModel);
}
// Create a PHY configured with an SNR-to-BLER trace repository and an
// optional loss model; also lazily creates the shared channel.
Ptr<WimaxPhy> WimaxHelper::CreatePhy (PhyType phyType, char * SNRTraceFilePath, bool activateLoss)
{
  if (phyType != SIMPLE_PHY_TYPE_OFDM)
    {
      NS_FATAL_ERROR ("Invalid physical type");
    }
  Ptr<SimpleOfdmWimaxPhy> ofdmPhy = CreateObject<SimpleOfdmWimaxPhy> ();
  ofdmPhy->SetSNRToBlockErrorRateTracesPath (SNRTraceFilePath);
  ofdmPhy->ActivateLoss (activateLoss);
  if (m_channel == 0)
    {
      m_channel = CreateObject<SimpleOfdmWimaxChannel> (SimpleOfdmWimaxChannel::COST231_PROPAGATION);
    }
  return ofdmPhy;
}
// Create a PHY of the requested type without touching the shared
// channel; the caller is responsible for attaching one.
Ptr<WimaxPhy> WimaxHelper::CreatePhyWithoutChannel (PhyType phyType)
{
  Ptr<WimaxPhy> phy;
  if (phyType == SIMPLE_PHY_TYPE_OFDM)
    {
      phy = CreateObject<SimpleOfdmWimaxPhy> ();
    }
  else
    {
      NS_FATAL_ERROR ("Invalid physical type");
    }
  return phy;
}
// Create a channel-less PHY configured with an SNR-to-BLER trace
// repository and an optional loss model.
Ptr<WimaxPhy> WimaxHelper::CreatePhyWithoutChannel (PhyType phyType, char * SNRTraceFilePath, bool activateLoss)
{
  if (phyType != SIMPLE_PHY_TYPE_OFDM)
    {
      NS_FATAL_ERROR ("Invalid physical type");
    }
  Ptr<SimpleOfdmWimaxPhy> ofdmPhy = CreateObject<SimpleOfdmWimaxPhy> ();
  ofdmPhy->SetSNRToBlockErrorRateTracesPath (SNRTraceFilePath);
  ofdmPhy->ActivateLoss (activateLoss);
  return ofdmPhy;
}
// Instantiate the uplink scheduler implementation matching the
// requested scheduling mechanism.
Ptr<UplinkScheduler> WimaxHelper::CreateUplinkScheduler (SchedulerType schedulerType)
{
  Ptr<UplinkScheduler> uplinkScheduler;
  if (schedulerType == SCHED_TYPE_SIMPLE)
    {
      uplinkScheduler = CreateObject<UplinkSchedulerSimple> ();
    }
  else if (schedulerType == SCHED_TYPE_RTPS)
    {
      uplinkScheduler = CreateObject<UplinkSchedulerRtps> ();
    }
  else if (schedulerType == SCHED_TYPE_MBQOS)
    {
      // The MBQoS scheduler takes its polling interval (0.25 s) at construction.
      uplinkScheduler = CreateObject<UplinkSchedulerMBQoS> (Seconds (0.25));
    }
  else
    {
      NS_FATAL_ERROR ("Invalid scheduling type");
    }
  return uplinkScheduler;
}
// Instantiate the downlink (BS-side) scheduler matching the requested
// scheduling mechanism.
Ptr<BSScheduler> WimaxHelper::CreateBSScheduler (SchedulerType schedulerType)
{
  Ptr<BSScheduler> bsScheduler;
  switch (schedulerType)
    {
    case SCHED_TYPE_SIMPLE:
      bsScheduler = CreateObject<BSSchedulerSimple> ();
      break;
    case SCHED_TYPE_RTPS:
      bsScheduler = CreateObject<BSSchedulerRtps> ();
      break;
    case SCHED_TYPE_MBQOS:
      // NOTE(review): no dedicated MBQoS downlink scheduler exists; the
      // simple scheduler is used for the downlink in this case.
      bsScheduler = CreateObject<BSSchedulerSimple> ();
      break;
    default:
      NS_FATAL_ERROR ("Invalid scheduling type");
      break;
    }
  return bsScheduler;
}
// Install a WiMAX device (SS or BS, per deviceType) on every node in c,
// with an explicit PHY frame duration, attached to the helper's shared
// channel.
NetDeviceContainer WimaxHelper::Install (NodeContainer c,
                                         NetDeviceType deviceType,
                                         PhyType phyType,
                                         SchedulerType schedulerType,
                                         double frameDuration)
{
  NetDeviceContainer devices;
  for (NodeContainer::Iterator i = c.Begin (); i != c.End (); i++)
    {
      Ptr<Node> node = *i;
      // CreatePhy also lazily creates the shared m_channel if needed.
      Ptr<WimaxPhy> phy = CreatePhy (phyType);

      // Set SuperFrame Duration
      phy->SetFrameDuration (Seconds (frameDuration));

      Ptr<WimaxNetDevice> device;
      Ptr<UplinkScheduler> uplinkScheduler = CreateUplinkScheduler (schedulerType);
      Ptr<BSScheduler> bsScheduler = CreateBSScheduler (schedulerType);

      if (deviceType == DEVICE_TYPE_BASE_STATION)
        {
          // attach phy
          Ptr<BaseStationNetDevice> deviceBS;
          deviceBS = CreateObject<BaseStationNetDevice> (node, phy, uplinkScheduler, bsScheduler);
          device = deviceBS;
          // Both schedulers need a back-reference to the BS device.
          uplinkScheduler->SetBs (deviceBS);
          bsScheduler->SetBs (deviceBS);
        }
      else
        {
          device = CreateObject<SubscriberStationNetDevice> (node, phy);
        }
      device->SetAddress (Mac48Address::Allocate ());
      phy->SetDevice (device);
      device->Start ();
      device->Attach (m_channel); // attach channel
      node->AddDevice (device);
      devices.Add (device);
    }
  return devices;
}
// Install a WiMAX device (SS or BS, per deviceType) on every node in c,
// attached to the helper's shared channel with the PHY's default frame
// duration.
NetDeviceContainer WimaxHelper::Install (NodeContainer c,
                                         NetDeviceType deviceType,
                                         PhyType phyType,
                                         SchedulerType schedulerType)
{
  NetDeviceContainer devices;
  for (NodeContainer::Iterator i = c.Begin (); i != c.End (); i++)
    {
      Ptr<Node> node = *i;
      // CreatePhy also lazily creates the shared m_channel if needed.
      Ptr<WimaxPhy> phy = CreatePhy (phyType);
      Ptr<WimaxNetDevice> device;
      Ptr<UplinkScheduler> uplinkScheduler = CreateUplinkScheduler (schedulerType);
      Ptr<BSScheduler> bsScheduler = CreateBSScheduler (schedulerType);

      if (deviceType == DEVICE_TYPE_BASE_STATION)
        {
          // attach phy
          Ptr<BaseStationNetDevice> deviceBS;
          deviceBS = CreateObject<BaseStationNetDevice> (node, phy, uplinkScheduler, bsScheduler);
          device = deviceBS;
          // Both schedulers need a back-reference to the BS device.
          uplinkScheduler->SetBs (deviceBS);
          bsScheduler->SetBs (deviceBS);
        }
      else
        {
          device = CreateObject<SubscriberStationNetDevice> (node, phy);
        }
      device->SetAddress (Mac48Address::Allocate ());
      phy->SetDevice (device);
      device->Start ();
      device->Attach (m_channel); // attach channel
      node->AddDevice (device);
      devices.Add (device);
    }
  return devices;
}
// Install a WiMAX device (SS or BS, per deviceType) on every node in c,
// attached to a caller-supplied channel instead of the helper's shared
// one.
NetDeviceContainer WimaxHelper::Install (NodeContainer c,
                                         NetDeviceType deviceType,
                                         PhyType phyType,
                                         Ptr<WimaxChannel> channel,
                                         SchedulerType schedulerType)
{
  NetDeviceContainer devices;
  for (NodeContainer::Iterator i = c.Begin (); i != c.End (); i++)
    {
      Ptr<Node> node = *i;
      // "dummy" + 0 (false): no SNR trace repository, loss model disabled.
      Ptr<WimaxPhy> phy = CreatePhyWithoutChannel (phyType, (char*) "dummy", 0);
      Ptr<WimaxNetDevice> device;
      Ptr<UplinkScheduler> uplinkScheduler = CreateUplinkScheduler (schedulerType);
      Ptr<BSScheduler> bsScheduler = CreateBSScheduler (schedulerType);

      if (deviceType == DEVICE_TYPE_BASE_STATION)
        {
          Ptr<BaseStationNetDevice> deviceBS;
          deviceBS = CreateObject<BaseStationNetDevice> (node, phy, uplinkScheduler, bsScheduler);
          device = deviceBS;
          // Both schedulers need a back-reference to the BS device.
          uplinkScheduler->SetBs (deviceBS);
          bsScheduler->SetBs (deviceBS);
        }
      else
        {
          device = CreateObject<SubscriberStationNetDevice> (node, phy);
        }
      device->SetAddress (Mac48Address::Allocate ());
      phy->SetDevice (device);
      device->Start ();
      device->Attach (channel);
      node->AddDevice (device);
      devices.Add (device);
    }
  return devices;
}
// Install a single WiMAX device (SS or BS, per deviceType) on one node,
// attached to a caller-supplied channel, and return it.
Ptr<WimaxNetDevice> WimaxHelper::Install (Ptr<Node> node,
                                          NetDeviceType deviceType,
                                          PhyType phyType,
                                          Ptr<WimaxChannel> channel,
                                          SchedulerType schedulerType)
{
  // "dummy" + 0 (false): no SNR trace repository, loss model disabled.
  Ptr<WimaxPhy> phy = CreatePhyWithoutChannel (phyType, (char*) "dummy", 0);
  Ptr<UplinkScheduler> uplink = CreateUplinkScheduler (schedulerType);
  Ptr<BSScheduler> downlink = CreateBSScheduler (schedulerType);
  Ptr<WimaxNetDevice> device;
  if (deviceType == DEVICE_TYPE_BASE_STATION)
    {
      Ptr<BaseStationNetDevice> bsDevice =
        CreateObject<BaseStationNetDevice> (node, phy, uplink, downlink);
      // Both schedulers need a back-reference to the BS device.
      uplink->SetBs (bsDevice);
      downlink->SetBs (bsDevice);
      device = bsDevice;
    }
  else
    {
      device = CreateObject<SubscriberStationNetDevice> (node, phy);
    }
  device->SetAddress (Mac48Address::Allocate ());
  phy->SetDevice (device);
  device->Start ();
  device->Attach (channel);
  node->AddDevice (device);
  return device;
}
// Enable every WiMAX-related log component at LOG_LEVEL_ALL.
// BUG FIX: the original listed eight components twice (BandwidthManager,
// BaseStationNetDevice, BSSchedulerRtps, BSSchedulerSimple, BSScheduler,
// SubscriberStationNetDevice, SSScheduler, WimaxMacQueue); enabling a
// component a second time is a no-op, so the duplicates are removed.
void
WimaxHelper::EnableLogComponents (void)
{
  LogComponentEnable ("BandwidthManager", LOG_LEVEL_ALL);
  LogComponentEnable ("BSLinkManager", LOG_LEVEL_ALL);
  LogComponentEnable ("BaseStationNetDevice", LOG_LEVEL_ALL);
  LogComponentEnable ("BSSchedulerRtps", LOG_LEVEL_ALL);
  LogComponentEnable ("BSSchedulerSimple", LOG_LEVEL_ALL);
  LogComponentEnable ("BSScheduler", LOG_LEVEL_ALL);
  LogComponentEnable ("BsServiceFlowManager", LOG_LEVEL_ALL);
  LogComponentEnable ("UplinkSchedulerMBQoS", LOG_LEVEL_ALL);
  LogComponentEnable ("UplinkSchedulerRtps", LOG_LEVEL_ALL);
  LogComponentEnable ("UplinkSchedulerSimple", LOG_LEVEL_ALL);
  LogComponentEnable ("UplinkScheduler", LOG_LEVEL_ALL);
  LogComponentEnable ("BurstProfileManager", LOG_LEVEL_ALL);
  LogComponentEnable ("ConnectionManager", LOG_LEVEL_ALL);
  LogComponentEnable ("IpcsClassifierRecord", LOG_LEVEL_ALL);
  LogComponentEnable ("IpcsClassifier", LOG_LEVEL_ALL);
  LogComponentEnable ("MACMESSAGES", LOG_LEVEL_ALL);
  LogComponentEnable ("PacketBurst", LOG_LEVEL_ALL);
  LogComponentEnable ("ServiceFlowManager", LOG_LEVEL_ALL);
  LogComponentEnable ("simpleOfdmWimaxChannel", LOG_LEVEL_ALL);
  LogComponentEnable ("SimpleOfdmWimaxPhy", LOG_LEVEL_ALL);
  LogComponentEnable ("SNRToBlockErrorRateManager", LOG_LEVEL_ALL);
  LogComponentEnable ("SSLinkManager", LOG_LEVEL_ALL);
  LogComponentEnable ("SSManager", LOG_LEVEL_ALL);
  LogComponentEnable ("SubscriberStationNetDevice", LOG_LEVEL_ALL);
  LogComponentEnable ("SSScheduler", LOG_LEVEL_ALL);
  LogComponentEnable ("SsServiceFlowManager", LOG_LEVEL_ALL);
  LogComponentEnable ("WimaxChannel", LOG_LEVEL_ALL);
  LogComponentEnable ("WimaxMacQueue", LOG_LEVEL_ALL);
  LogComponentEnable ("WimaxNetDevice", LOG_LEVEL_ALL);
  LogComponentEnable ("WimaxPhy", LOG_LEVEL_ALL);
  LogComponentEnable ("Tlv", LOG_LEVEL_ALL);
}
// Ascii trace sink for receive events: writes one record of the form
// "r <time> from: <mac> <config-path>".
void WimaxHelper::AsciiRxEvent (Ptr<OutputStreamWrapper> stream,
                                std::string path,
                                Ptr<const Packet> packet,
                                const Mac48Address &source)
{
  std::ostream &out = *stream->GetStream ();
  out << "r " << Simulator::Now ().GetSeconds () << " from: " << source << " " << path << std::endl;
}
/**
 * \brief Ascii trace sink for packet transmission events.
 * \param stream the output stream the trace line is written to
 * \param path the config path of the trace source that fired
 * \param packet the transmitted packet (unused; only metadata is logged)
 * \param dest the destination MAC address
 */
void WimaxHelper::AsciiTxEvent (Ptr<OutputStreamWrapper> stream, std::string path, Ptr<const Packet> packet, const Mac48Address &dest)
{
  std::ostream &out = *stream->GetStream ();
  out << "t " << Simulator::Now ().GetSeconds ()
      << " to: " << dest << " "
      << path << std::endl;
}
/**
 * \brief Create a service flow with a fixed set of default QoS parameters
 *        and the given classifier attached.
 * \param direction direction of the flow (uplink or downlink)
 * \param schedulingType the WiMAX scheduling service to use (UGS, rtPS, ...)
 * \param classifier IPCS classifier record mapping packets onto this flow
 * \return the configured ServiceFlow (by value)
 */
ServiceFlow WimaxHelper::CreateServiceFlow (ServiceFlow::Direction direction,
                                            ServiceFlow::SchedulingType schedulingType,
                                            IpcsClassifierRecord classifier)
{
  CsParameters csParam (CsParameters::ADD, classifier);
  ServiceFlow serviceFlow = ServiceFlow (direction);
  serviceFlow.SetConvergenceSublayerParam (csParam);
  serviceFlow.SetCsSpecification (ServiceFlow::IPV4);
  serviceFlow.SetServiceSchedulingType (schedulingType);
  serviceFlow.SetMinReservedTrafficRate (1000000);
  serviceFlow.SetMinTolerableTrafficRate (1000000);
  serviceFlow.SetMaximumLatency (100);
  serviceFlow.SetMaxTrafficBurst (2000);
  serviceFlow.SetTrafficPriority (1);
  serviceFlow.SetUnsolicitedGrantInterval (1);
  // NOTE(review): the original code called SetMaxSustainedTrafficRate twice
  // (100, then 70); only the last call was effective, so the dead first call
  // was removed and the effective value (70) kept.  The QoS numbers here are
  // arbitrary defaults inherited from the original helper -- presumably
  // callers are expected to tune the returned flow; verify against users.
  serviceFlow.SetMaxSustainedTrafficRate (70);
  serviceFlow.SetToleratedJitter (10);
  serviceFlow.SetSduSize (49);
  serviceFlow.SetRequestTransmissionPolicy (0);
  return serviceFlow;
}
/**
 * \brief Enable ascii tracing on a single device (internal dispatch point).
 * \param stream an existing output stream to write to, or 0 to create one
 * \param prefix filename prefix (or full filename if explicitFilename)
 * \param nd the device to trace; ignored unless it is a WimaxNetDevice
 * \param explicitFilename true if prefix is the exact output filename
 */
void
WimaxHelper::EnableAsciiInternal (Ptr<OutputStreamWrapper> stream,
                                  std::string prefix,
                                  Ptr<NetDevice> nd,
                                  bool explicitFilename)
{
  //
  // All of the ascii enable functions vector through here including the ones
  // that are wandering through all of devices on perhaps all of the nodes in
  // the system.  We can only deal with devices of type WimaxNetDevice.
  //
  Ptr<WimaxNetDevice> device = nd->GetObject<WimaxNetDevice> ();
  if (device == 0)
    {
      NS_LOG_INFO ("WimaxHelper::EnableAsciiInternal(): Device " << device << " not of type ns3::WimaxNetDevice");
      return;
    }

  //
  // Our default trace sinks are going to use packet printing, so we have to
  // make sure that is turned on.
  //
  Packet::EnablePrinting ();

  //
  // If we are not provided an OutputStreamWrapper, we are expected to create
  // one using the usual trace filename conventions and do a Hook*WithoutContext
  // since there will be one file per context and therefore the context would
  // be redundant.
  //
  if (stream == 0)
    {
      //
      // Set up an output stream object to deal with private ofstream copy
      // constructor and lifetime issues.  Let the helper decide the actual
      // name of the file given the prefix.
      //
      AsciiTraceHelper asciiTraceHelper;
      std::string filename;
      if (explicitFilename)
        {
          filename = prefix;
        }
      else
        {
          filename = asciiTraceHelper.GetFilenameFromDevice (prefix, device);
        }
      Ptr<OutputStreamWrapper> theStream = asciiTraceHelper.CreateFileStream (filename);

      uint32_t nodeid = nd->GetNode ()->GetId ();
      uint32_t deviceid = nd->GetIfIndex ();
      std::ostringstream oss;
      //
      // The Rx trace source provides our "r" event; Tx provides the "t" event.
      //
      oss << "/NodeList/" << nodeid << "/DeviceList/" << deviceid << "/$ns3::WimaxNetDevice/Rx";
      Config::Connect (oss.str (), MakeBoundCallback (&WimaxHelper::AsciiRxEvent, theStream));
      oss.str ("");
      oss << "/NodeList/" << nodeid << "/DeviceList/" << deviceid << "/$ns3::WimaxNetDevice/Tx";
      Config::Connect (oss.str (), MakeBoundCallback (&WimaxHelper::AsciiTxEvent, theStream));
      //
      // The "+", '-', and 'd' events are driven by trace sources actually in the
      // transmit queue of each WiMAX connection.
      //
      EnableAsciiForConnection (theStream, nodeid, deviceid, (char*) "WimaxNetDevice", (char*) "InitialRangingConnection");
      EnableAsciiForConnection (theStream, nodeid, deviceid, (char*) "WimaxNetDevice", (char*) "BroadcastConnection");
      EnableAsciiForConnection (theStream, nodeid, deviceid, (char*) "SubscriberStationNetDevice", (char*) "BasicConnection");
      EnableAsciiForConnection (theStream, nodeid, deviceid, (char*) "SubscriberStationNetDevice", (char*) "PrimaryConnection");
      return;
    }

  //
  // If we are provided an OutputStreamWrapper, we are expected to use it, and
  // to provide a context.  We are free to come up with our own context if we
  // want, and use the AsciiTraceHelper Hook*WithContext functions, but for
  // compatibility and simplicity, we just use Config::Connect and let it deal
  // with the context.
  //
  // Note that we are going to use the default trace sinks provided by the
  // ascii trace helper.  There is actually no AsciiTraceHelper in sight here,
  // but the default trace sinks are actually publicly available static
  // functions that are always there waiting for just such a case.
  //
  uint32_t nodeid = nd->GetNode ()->GetId ();
  uint32_t deviceid = nd->GetIfIndex ();
  std::ostringstream oss;

  oss << "/NodeList/" << nodeid << "/DeviceList/" << deviceid << "/$ns3::WimaxNetDevice/Rx";
  Config::Connect (oss.str (), MakeBoundCallback (&WimaxHelper::AsciiRxEvent, stream));
  oss.str ("");
  oss << "/NodeList/" << nodeid << "/DeviceList/" << deviceid << "/$ns3::WimaxNetDevice/Tx";
  Config::Connect (oss.str (), MakeBoundCallback (&WimaxHelper::AsciiTxEvent, stream));

  EnableAsciiForConnection (stream, nodeid, deviceid, (char*) "WimaxNetDevice", (char*) "InitialRangingConnection");
  EnableAsciiForConnection (stream, nodeid, deviceid, (char*) "WimaxNetDevice", (char*) "BroadcastConnection");
  EnableAsciiForConnection (stream, nodeid, deviceid, (char*) "SubscriberStationNetDevice", (char*) "BasicConnection");
  EnableAsciiForConnection (stream, nodeid, deviceid, (char*) "SubscriberStationNetDevice", (char*) "PrimaryConnection");
}
/**
 * \brief Pcap trace sink shared by Tx and Rx events: writes every packet of
 *        the burst, prefixed by a WimaxMacToMacHeader, to the pcap file.
 * \param file the pcap file wrapper to write into
 * \param burst the packet burst that was transmitted or received
 */
static void PcapSniffTxRxEvent (Ptr<PcapFileWrapper> file,
                                Ptr<const PacketBurst> burst)
{
  std::list<Ptr<Packet> > burstPackets = burst->GetPackets ();
  std::list<Ptr<Packet> >::const_iterator it;
  for (it = burstPackets.begin (); it != burstPackets.end (); ++it)
    {
      // Work on a copy so the original packet is left untouched.
      Ptr<Packet> copy = (*it)->Copy ();
      WimaxMacToMacHeader m2mHeader (copy->GetSize ());
      copy->AddHeader (m2mHeader);
      file->Write (Simulator::Now (), copy);
    }
}
/**
 * \brief Enable pcap tracing on a single device (internal dispatch point).
 * \param prefix filename prefix (or full filename if explicitFilename)
 * \param nd the device to trace; ignored unless it is a WimaxNetDevice
 * \param explicitFilename true if prefix is the exact output filename
 * \param promiscuous unused here; WiMAX phy traces capture all bursts
 */
void
WimaxHelper::EnablePcapInternal (std::string prefix, Ptr<NetDevice> nd, bool explicitFilename, bool promiscuous)
{
  //
  // All of the Pcap enable functions vector through here including the ones
  // that are wandering through all of devices on perhaps all of the nodes in
  // the system.  We can only deal with devices of type WimaxNetDevice.
  //
  Ptr<WimaxNetDevice> device = nd->GetObject<WimaxNetDevice> ();
  if (device == 0)
    {
      // Log the Ptr itself (prints 0 here), consistent with
      // EnableAsciiInternal; the original logged &device, i.e. the
      // meaningless stack address of the local smart pointer.
      NS_LOG_INFO ("WimaxHelper::EnablePcapInternal(): Device " << device << " not of type ns3::WimaxNetDevice");
      return;
    }

  Ptr<WimaxPhy> phy = device->GetPhy ();
  PcapHelper pcapHelper;
  std::string filename;
  if (explicitFilename)
    {
      filename = prefix;
    }
  else
    {
      filename = pcapHelper.GetFilenameFromDevice (prefix, device);
    }

  // Bursts are written with a MAC-to-MAC pseudo header; the file itself is
  // tagged DLT_EN10MB so standard tools can open it.
  Ptr<PcapFileWrapper> file = pcapHelper.CreateFile (filename, std::ios::out, PcapHelper::DLT_EN10MB);

  phy->TraceConnectWithoutContext ("Tx", MakeBoundCallback (&PcapSniffTxRxEvent, file));
  phy->TraceConnectWithoutContext ("Rx", MakeBoundCallback (&PcapSniffTxRxEvent, file));
}
} // namespace ns3
| zy901002-gpsr | src/wimax/helper/wimax-helper.cc | C++ | gpl2 | 21,446 |
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
    # Build function for the ns-3 'wimax' waf module.  It declares three
    # artifacts: the module library, its test library, and the public headers.

    # The module library; the second argument lists the ns-3 modules that
    # wimax links against.
    obj = bld.create_ns3_module('wimax', ['network', 'point-to-point', 'internet', 'applications', 'propagation', 'mobility'])
    obj.source = [
        'model/cid.cc',
        'model/cid-factory.cc',
        'model/wimax-net-device.cc',
        'model/bs-net-device.cc',
        'model/ss-net-device.cc',
        'model/wimax-mac-header.cc',
        'model/wimax-phy.cc',
        'model/wimax-channel.cc',
        'model/ofdm-downlink-frame-prefix.cc',
        'model/wimax-connection.cc',
        'model/ss-record.cc',
        'model/mac-messages.cc',
        'model/dl-mac-messages.cc',
        'model/ul-mac-messages.cc',
        'model/simple-ofdm-wimax-phy.cc',
        'model/simple-ofdm-wimax-channel.cc',
        'model/send-params.cc',
        'model/ss-manager.cc',
        'model/connection-manager.cc',
        'model/bs-uplink-scheduler.cc',
        'model/bs-uplink-scheduler-simple.cc',
        'model/bs-uplink-scheduler-mbqos.cc',
        'model/bs-uplink-scheduler-rtps.cc',
        'model/bs-scheduler.cc',
        'model/bs-scheduler-simple.cc',
        'model/bs-scheduler-rtps.cc',
        'model/wimax-mac-queue.cc',
        'model/burst-profile-manager.cc',
        'model/ss-scheduler.cc',
        'model/service-flow.cc',
        'model/service-flow-manager.cc',
        'model/service-flow-record.cc',
        'model/ss-link-manager.cc',
        'model/bs-link-manager.cc',
        'model/bandwidth-manager.cc',
        'model/crc8.cc',
        'model/ul-job.cc' ,
        'model/snr-to-block-error-rate-record.cc',
        'model/snr-to-block-error-rate-manager.cc',
        'model/simple-ofdm-send-param.cc',
        'model/ss-service-flow-manager.cc',
        'model/bs-service-flow-manager.cc',
        'model/ipcs-classifier.cc',
        'model/ipcs-classifier-record.cc',
        'model/wimax-tlv.cc',
        'model/cs-parameters.cc',
        'model/wimax-mac-to-mac-header.cc',
        'helper/wimax-helper.cc',
        ]

    # The unit-test library for the module.
    obj_test = bld.create_ns3_module_test_library('wimax')
    obj_test.source = [
        'test/wimax-tlv-test.cc',
        'test/mac-messages-test.cc',
        'test/wimax-service-flow-test.cc',
        'test/ss-mac-test.cc',
        'test/phy-test.cc',
        'test/qos-test.cc',
        'test/wimax-fragmentation-test.cc',
        ]

    # Public headers installed under the 'wimax' module include directory.
    headers = bld.new_task_gen(features=['ns3header'])
    headers.module = 'wimax'
    headers.source = [
        'model/wimax-channel.h',
        'model/wimax-net-device.h',
        'model/bs-net-device.h',
        'model/ss-net-device.h',
        'model/cid.h',
        'model/cid-factory.h',
        'model/ofdm-downlink-frame-prefix.h',
        'model/wimax-connection.h',
        'model/ss-record.h',
        'model/mac-messages.h',
        'model/dl-mac-messages.h',
        'model/ul-mac-messages.h',
        'model/wimax-phy.h',
        'model/simple-ofdm-wimax-phy.h',
        'model/simple-ofdm-wimax-channel.h',
        'model/send-params.h',
        'model/service-flow.h',
        'model/ss-manager.h',
        'model/connection-manager.h',
        'model/wimax-mac-header.h',
        'model/wimax-mac-queue.h',
        'model/crc8.h',
        'model/service-flow-manager.h',
        'model/bs-uplink-scheduler.h',
        'model/bs-uplink-scheduler-simple.h',
        'model/bs-uplink-scheduler-mbqos.h',
        'model/bs-uplink-scheduler-rtps.h',
        'model/ul-job.h',
        'model/bs-scheduler.h',
        'model/bs-scheduler-simple.h',
        'model/bs-scheduler-rtps.h',
        'model/service-flow-record.h',
        'model/snr-to-block-error-rate-record.h',
        'model/snr-to-block-error-rate-manager.h',
        'model/simple-ofdm-send-param.h',
        'model/ss-service-flow-manager.h',
        'model/bs-service-flow-manager.h',
        'model/cs-parameters.h',
        'model/ipcs-classifier-record.h',
        'model/wimax-tlv.h',
        'model/ipcs-classifier.h',
        'model/bvec.h',
        'model/wimax-mac-to-mac-header.h',
        'helper/wimax-helper.h',
        ]

    # Examples are only built when waf was configured with them enabled.
    if bld.env['ENABLE_EXAMPLES']:
        bld.add_subdirs('examples')

    # Generate the python bindings for this module.
    bld.ns3_python_bindings()
| zy901002-gpsr | src/wimax/wscript | Python | gpl2 | 4,658 |
import os
import os.path
import sys
import subprocess
import shlex
# WAF modules
import Options
import Utils
import Logs
import TaskGen
import Build
import re
from waflib.Errors import WafError
# these are set from the main wscript file
# APPNAME/VERSION: project name and release string, injected by wscript.
APPNAME=None
VERSION=None
# bld: the active waf Build context, injected by wscript before any helper
# in this module is called.
bld=None
def get_command_template(env, arguments=()):
    """Return the --command-template string (default '%s'), with each extra
    argument appended separated by single spaces.  `env` is unused but kept
    for interface compatibility."""
    parts = [Options.options.command_template or '%s']
    parts.extend(arguments)
    return " ".join(parts)
if hasattr(os.path, "relpath"):
    relpath = os.path.relpath # since Python 2.6
else:
    def relpath(path, start=os.path.curdir):
        """Return a relative version of a path (fallback for Python < 2.6)."""
        if not path:
            raise ValueError("no path specified")
        start_parts = os.path.abspath(start).split(os.path.sep)
        path_parts = os.path.abspath(path).split(os.path.sep)
        # Length of the path prefix shared by start and path.
        shared = len(os.path.commonprefix([start_parts, path_parts]))
        # Climb out of the non-shared part of start, then descend into path.
        pieces = [os.path.pardir] * (len(start_parts) - shared)
        pieces.extend(path_parts[shared:])
        if not pieces:
            return os.path.curdir
        return os.path.join(*pieces)
from waflib import Context
def find_program(program_name, env):
    """Return the waf task generator for the ns-3 program named
    `program_name` (either the bare target name or the path-qualified name
    relative to the launch directory).  Raises ValueError if no such program
    exists under the launch directory."""
    launch_dir = os.path.abspath(Context.launch_dir)
    available = []
    for gen in bld.all_task_gen:
        if not getattr(gen, 'is_ns3_program', False):
            continue
        # Only consider programs in the subtree waf was launched from.
        if not (gen.path.abspath().startswith(launch_dir)
                or gen.path.abspath(env).startswith(launch_dir)):
            continue
        candidates = [gen.target,
                      os.path.join(relpath(gen.path.abspath(), launch_dir), gen.target)]
        available.extend(candidates)
        if program_name in candidates:
            return gen
    raise ValueError("program '%s' not found; available programs are: %r"
                     % (program_name, available))
def get_proc_env(os_env=None):
    """Return a copy of os.environ (optionally overlaid with `os_env`)
    extended so that locally built ns-3 shared libraries, python bindings
    and executables are found: the platform's dynamic-library path variable,
    PYTHONPATH and PATH are all prepended with the build-tree locations."""
    env = bld.env
    # Pick the platform's dynamic library search path variable.
    # Fix: Python 2 reported 'linux2' but Python 3 reports 'linux'; match by
    # prefix so both work (same approach already used for freebsd below).
    if sys.platform.startswith('linux'):
        pathvar = 'LD_LIBRARY_PATH'
    elif sys.platform == 'darwin':
        pathvar = 'DYLD_LIBRARY_PATH'
    elif sys.platform == 'win32':
        pathvar = 'PATH'
    elif sys.platform == 'cygwin':
        pathvar = 'PATH'
    elif sys.platform.startswith('freebsd'):
        pathvar = 'LD_LIBRARY_PATH'
    else:
        Logs.warn(("Don't know how to configure "
                   "dynamic library path for the platform %r;"
                   " assuming it's LD_LIBRARY_PATH.") % (sys.platform,))
        pathvar = 'LD_LIBRARY_PATH'
    proc_env = dict(os.environ)
    if os_env is not None:
        proc_env.update(os_env)
    # Prepend the ns-3 module library directories to the dynamic loader path.
    if pathvar is not None:
        if pathvar in proc_env:
            proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']) + [proc_env[pathvar]])
        else:
            proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']))
    # Make the generated python bindings and the visualizer importable.
    pymoddir = bld.path.find_dir('bindings/python').get_bld().abspath()
    pyvizdir = bld.path.find_dir('src/visualizer').abspath()
    if 'PYTHONPATH' in proc_env:
        proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir] + [proc_env['PYTHONPATH']])
    else:
        proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir])
    # Make locally built executables reachable through PATH.
    if 'PATH' in proc_env:
        proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']) + [proc_env['PATH']])
    else:
        proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']))
    return proc_env
def run_argv(argv, env, os_env=None, cwd=None, force_no_valgrind=False):
    # Run the command `argv` with the ns-3 process environment, optionally
    # under valgrind (--valgrind option).  Returns the exit code (always 0,
    # since any failure raises WafError); a valgrind leak report is turned
    # into exit code 1.
    proc_env = get_proc_env(os_env)
    if Options.options.valgrind and not force_no_valgrind:
        if Options.options.command_template:
            raise WafError("Options --command-template and --valgrind are conflicting")
        if not env['VALGRIND']:
            raise WafError("valgrind is not installed")
        # Wrap the command in valgrind and scan stderr for a leak summary,
        # since valgrind itself exits 0 when only leaks (not errors) occur.
        argv = [env['VALGRIND'], "--leak-check=full", "--show-reachable=yes", "--error-exitcode=1"] + argv
        proc = subprocess.Popen(argv, env=proc_env, cwd=cwd, stderr=subprocess.PIPE)
        error = False
        for line in proc.stderr:
            sys.stderr.write(line)
            if "== LEAK SUMMARY" in line:
                error = True
        retval = proc.wait()
        if retval == 0 and error:
            retval = 1
    else:
        # On Windows, Popen raises WindowsError for unlaunchable commands;
        # probe for the name so the same code runs on POSIX (NOTE: this file
        # is Python 2 syntax -- see the 'except WindowsError, ex' below).
        try:
            WindowsError
        except NameError:
            retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
        else:
            try:
                retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
            except WindowsError, ex:
                raise WafError("Command %s raised exception %s" % (argv, ex))
    if retval:
        # Translate a negative exit status into the signal name, if any,
        # to produce a friendlier diagnostic.
        signame = None
        if retval < 0: # signal?
            import signal
            for name, val in vars(signal).iteritems():
                if len(name) > 3 and name[:3] == 'SIG' and name[3] != '_':
                    if val == -retval:
                        signame = name
                        break
        if signame:
            raise WafError("Command %s terminated with signal %s."
                           " Run it under a debugger to get more information "
                           "(./waf --run <program> --command-template=\"gdb --args %%s <args>\")." % (argv, signame))
        else:
            raise WafError("Command %s exited with code %i" % (argv, retval))
    return retval
def get_run_program(program_string, command_template=None):
    """
    Return the program name and argv of the process that would be executed by
    run_program(program_string, command_template).
    """
    #print "get_run_program_argv(program_string=%r, command_template=%r)" % (program_string, command_template)
    env = bld.env
    if command_template in (None, '%s'):
        # No template: program_string is a full shell command line; the
        # first word is the ns-3 program name, the rest are its arguments.
        argv = shlex.split(program_string)
        #print "%r ==shlex.split==> %r" % (program_string, argv)
        program_name = argv[0]
        try:
            program_obj = find_program(program_name, env)
        except ValueError, ex:
            raise WafError(str(ex))
        program_node = program_obj.path.find_or_declare(program_obj.target)
        #try:
        #    program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
        #except AttributeError:
        #    raise Utils.WafError("%s does not appear to be a program" % (program_name,))
        execvec = [program_node.abspath()] + argv[1:]
    else:
        # With a template: program_string is just the program name and the
        # template supplies the command line, with %s replaced by the full
        # path of the built binary (e.g. "gdb --args %s <args>").
        program_name = program_string
        try:
            program_obj = find_program(program_name, env)
        except ValueError, ex:
            raise WafError(str(ex))
        program_node = program_obj.path.find_or_declare(program_obj.target)
        #try:
        #    program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
        #except AttributeError:
        #    raise Utils.WafError("%s does not appear to be a program" % (program_name,))
        # Escape backslashes so Windows paths survive shlex splitting.
        tmpl = command_template % (program_node.abspath(),)
        execvec = shlex.split(tmpl.replace('\\', '\\\\'))
        #print "%r ==shlex.split==> %r" % (command_template % (program_node.abspath(env),), execvec)
    return program_name, execvec
def run_program(program_string, env, command_template=None, cwd=None, visualize=False):
    """
    Run an ns-3 program.  If command_template is not None, program_string is
    the bare program name and the command line is command_template with %s
    replaced by the program's full path; otherwise program_string is a shell
    command whose first word is the program name.  Returns the exit code.
    """
    _, execvec = get_run_program(program_string, command_template)
    if cwd is None:
        # Prefer the user's --cwd option, falling back to the launch dir.
        cwd = Options.options.cwd_launch or Options.cwd_launch
    if visualize:
        execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
    return run_argv(execvec, env, cwd=cwd)
def run_python_program(program_string, env, visualize=False):
    """Run a python script under the ns-3 environment.  program_string is a
    shell command line whose first word is the script path; the `env`
    parameter is ignored (bld.env is used instead, as in the original)."""
    env = bld.env
    argv = shlex.split(program_string)
    # Prefer the user's --cwd option, falling back to the launch dir.
    cwd = Options.options.cwd_launch or Options.cwd_launch
    if visualize:
        argv.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
    return run_argv([env['PYTHON'][0]] + argv, env, cwd=cwd)
def monkey_patch_Runner_start():
    """http://code.google.com/p/waf/issues/detail?id=1039

    Replaces waflib.Runner.Parallel.start with a patched copy of waf's own
    scheduler loop (see the linked issue).  The body below is intentionally
    kept close to upstream waf; do not restructure it casually.
    """
    from waflib import Task
    def start(self):
        """
        Give tasks to :py:class:`waflib.Runner.TaskConsumer` instances until the build finishes or the ``stop`` flag is set.
        If only one job is used, then execute the tasks one by one, without consumers.
        """
        self.total = self.bld.total()
        while not self.stop:
            self.refill_task_list()
            # consider the next task
            tsk = self.get_next_task()
            if not tsk:
                if self.count:
                    # tasks may add new ones after they are run
                    continue
                else:
                    # no tasks to run, no tasks running, time to exit
                    break
            if tsk.hasrun:
                # if the task is marked as "run", just skip it
                self.processed += 1
                continue
            if self.stop: # stop immediately after a failure was detected
                break
            try:
                st = tsk.runnable_status()
            except Exception:
                # a failing runnable_status() is treated like a task error:
                # either skip it (-k/-kk) or record it and stop the build
                self.processed += 1
                if not self.stop and self.bld.keep:
                    tsk.hasrun = Task.SKIPPED
                    if self.bld.keep == 1:
                        # if -k stop at the first exception, if -kk try to go as far as possible
                        self.stop = True
                    continue
                tsk.err_msg = Utils.ex_stack()
                tsk.hasrun = Task.EXCEPTION
                self.error_handler(tsk)
                continue
            if st == Task.ASK_LATER:
                self.postpone(tsk)
                # TODO optimize this
                # if self.outstanding:
                #     for x in tsk.run_after:
                #         if x in self.outstanding:
                #             self.outstanding.remove(x)
                #             self.outstanding.insert(0, x)
            elif st == Task.SKIP_ME:
                self.processed += 1
                tsk.hasrun = Task.SKIPPED
                self.add_more_tasks(tsk)
            else:
                # run me: put the task in ready queue
                tsk.position = (self.processed, self.total)
                self.count += 1
                tsk.master = self
                self.processed += 1
                if self.numjobs == 1:
                    tsk.process()
                else:
                    self.add_task(tsk)
        # self.count represents the tasks that have been made available to the consumer threads
        # collect all the tasks after an error else the message may be incomplete
        while self.error and self.count:
            self.get_out()
        #print loop
        assert (self.count == 0 or self.stop)
        # free the task pool, if any
        self.free_task_pool()
    from waflib.Runner import Parallel
    Parallel.start = start
| zy901002-gpsr | wutils.py | Python | gpl2 | 11,469 |
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

# Additional variables for figures, not sphinx default:
DIA = dia
EPSTOPDF = epstopdf
FIGURES = source/figures
# List of .eps figures to convert (currently empty for the tutorial).
# Fix: the original line ended in a backslash, which spliced the IMAGES_PNG
# assignment into the value of IMAGES_EPS -- leaving IMAGES_PNG undefined and
# making IMAGES_EPS reference itself (a make recursion error on expansion).
IMAGES_EPS =
IMAGES_PNG = ${IMAGES_EPS:.eps=.png}
IMAGES_PDF = ${IMAGES_EPS:.eps=.pdf}
IMAGES = $(IMAGES_EPS) $(IMAGES_PNG) $(IMAGES_PDF)

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

# Figure conversion rules (one-line semicolon recipes).
%.eps : %.dia; $(DIA) -t eps $< -e $@
%.png : %.dia; $(DIA) -t png $< -e $@
%.pdf : %.eps; $(EPSTOPDF) $< -o=$@
# Print the list of supported documentation targets.
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

# Build per-page HTML fragments plus a navigation.xml index from the pickle
# output (uses pickle-to-xml.py).  NOTE(review): pushd/popd are bash builtins,
# so this recipe presumably assumes SHELL is bash -- verify before relying on it.
frag: pickle
	@if test ! -d $(BUILDDIR)/frag; then mkdir $(BUILDDIR)/frag; fi
	pushd $(BUILDDIR)/frag && ../../pickle-to-xml.py ../pickle/index.fpickle > navigation.xml && popd
	cp -r $(BUILDDIR)/pickle/_images $(BUILDDIR)/frag

# Each sphinx target below depends on $(IMAGES) so the figures are generated
# before the documentation build runs.
html: $(IMAGES)
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml: $(IMAGES)
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml: $(IMAGES)
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle: $(IMAGES)
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json: $(IMAGES)
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp: $(IMAGES)
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp: $(IMAGES)
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ns-3.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ns-3.qhc"

devhelp: $(IMAGES)
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/ns-3"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ns-3"
	@echo "# devhelp"

epub: $(IMAGES)
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex: $(IMAGES)
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf: $(IMAGES)
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text: $(IMAGES)
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man: $(IMAGES)
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes: $(IMAGES)
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."
# Fix: the prerequisite was misspelled "$(IMAGEs)" (an empty, undefined
# variable), so linkcheck did not build the figures first.
linkcheck: $(IMAGES)
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest: $(IMAGES)
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
#!/usr/bin/python
# output xml format:
# <pages>
# <page url="xx"><prev url="yyy">zzz</prev><next url="hhh">lll</next><fragment>file.frag</fragment></page>
# ...
# </pages>
import pickle
import os
import codecs
def dump_pickles(out, dirname, filename, path):
    """Emit one <page> XML element to `out` for the Sphinx pickle file
    `filename` (found in `dirname`), write its HTML body to
    <current_page_name>.frag in the current directory, then recurse into the
    'next' page so the whole chain is emitted in reading order.

    `path` is the URL of this page, used to resolve the relative prev/next
    links.  Raises the usual OSError/KeyError on missing files or fields.
    """
    # Fix: pickle data is binary -- 'rb' is required under Python 3 (and
    # harmless under Python 2, where the original 'r' happened to work).
    # Context managers guarantee the files are closed even on error.
    with open(os.path.join(dirname, filename), 'rb') as f:
        data = pickle.load(f)
    with codecs.open(data['current_page_name'] + '.frag', mode='w', encoding='utf-8') as fragment_file:
        fragment_file.write(data['body'])
    out.write(' <page url="%s">\n' % path)
    out.write(' <fragment>%s.frag</fragment>\n' % data['current_page_name'])
    if data['prev'] is not None:
        out.write(' <prev url="%s">%s</prev>\n' %
                  (os.path.normpath(os.path.join(path, data['prev']['link'])),
                   data['prev']['title']))
    if data['next'] is not None:
        out.write(' <next url="%s">%s</next>\n' %
                  (os.path.normpath(os.path.join(path, data['next']['link'])),
                   data['next']['title']))
    out.write(' </page>\n')
    # Walk the linked list of pages via the 'next' pointer.
    if data['next'] is not None:
        next_path = os.path.normpath(os.path.join(path, data['next']['link']))
        next_filename = os.path.basename(next_path) + '.fpickle'
        dump_pickles(out, dirname, next_filename, next_path)
import sys
# Entry point: sys.argv[1] is the path of the root .fpickle file (typically
# .../index.fpickle); the <pages> XML index is written to stdout while the
# page fragments are written to the current directory.
sys.stdout.write('<pages>\n')
dump_pickles(sys.stdout, os.path.dirname(sys.argv[1]), os.path.basename(sys.argv[1]), '/')
sys.stdout.write('</pages>')
| zy901002-gpsr | doc/tutorial/pickle-to-xml.py | Python | gpl2 | 1,500 |
#!/usr/bin/env bash
# Usage: rescale-pdf.sh WIDTH INPUT.pdf OUTPUT.pdf
# Re-embeds INPUT.pdf at the given LaTeX width (e.g. "5in") via pdfpages
# and writes the result to OUTPUT.pdf.
TMPFILE=$(mktemp)
# Clean up the scratch LaTeX inputs/outputs on exit (the original leaked them).
trap 'rm -f "${TMPFILE}" "${TMPFILE}.tex" "${TMPFILE}.aux" "${TMPFILE}.log" "${TMPFILE}.pdf"' EXIT
echo "\documentclass{book}
\usepackage{pdfpages}
\begin{document}
\includepdf[width=${1},fitpaper]{${2}}
\end{document}" >"${TMPFILE}.tex"
# NOTE(review): pdflatex writes its output to /tmp, so this assumes mktemp
# created TMPFILE under /tmp (i.e. TMPDIR is unset/default) -- verify.
pdflatex -output-directory /tmp "${TMPFILE}.tex" >/dev/null 2>/dev/null
cp "${TMPFILE}.pdf" "${3}"
EPSTOPDF = epstopdf
DIA = dia
CONVERT = convert
SRC = ../../src
# Temporary source directory, for build
SOURCETEMP = source-temp
FIGURES = $(SOURCETEMP)/figures

# list all model library .rst files that need to be copied to $SOURCETEMP
# NOTE: every entry except the last must end in a backslash continuation.
# Fix: the original list's final entry also ended in a backslash, splicing
# the following comment line into the variable's value -- harmless today,
# but it silently absorbs whatever line comes next if the comment is removed.
SOURCES = \
	source/conf.py \
	source/_static \
	source/index.rst \
	source/replace.txt \
	source/organization.rst \
	source/internet-models.rst \
	source/network.rst \
	source/emulation-overview.rst \
	$(SRC)/aodv/doc/aodv.rst \
	$(SRC)/applications/doc/applications.rst \
	$(SRC)/bridge/doc/bridge.rst \
	$(SRC)/click/doc/click.rst \
	$(SRC)/csma/doc/csma.rst \
	$(SRC)/dsdv/doc/dsdv.rst \
	$(SRC)/mpi/doc/distributed.rst \
	$(SRC)/energy/doc/energy.rst \
	$(SRC)/emu/doc/emu.rst \
	$(SRC)/tap-bridge/doc/tap.rst \
	$(SRC)/mesh/doc/mesh.rst \
	$(SRC)/lte/doc/lte.rst \
	$(SRC)/propagation/doc/propagation.rst \
	$(SRC)/network/doc/network-overview.rst \
	$(SRC)/network/doc/packets.rst \
	$(SRC)/network/doc/sockets-api.rst \
	$(SRC)/network/doc/simple.rst \
	$(SRC)/internet/doc/internet-stack.rst \
	$(SRC)/internet/doc/ipv4.rst \
	$(SRC)/internet/doc/ipv6.rst \
	$(SRC)/internet/doc/routing-overview.rst \
	$(SRC)/internet/doc/tcp.rst \
	$(SRC)/olsr/doc/olsr.rst \
	$(SRC)/openflow/doc/openflow-switch.rst \
	$(SRC)/point-to-point/doc/point-to-point.rst \
	$(SRC)/wifi/doc/wifi.rst \
	$(SRC)/wimax/doc/wimax.rst \
	$(SRC)/uan/doc/uan.rst \
	$(SRC)/stats/doc/statistics.rst \
	$(SRC)/netanim/doc/animation.rst \
	$(SRC)/flow-monitor/doc/flow-monitor.rst
# list all model library figure files that need to be copied to
# $SOURCETEMP/figures.  For each figure to be included in all
# documentation formats (html, latex...) the following formats are supported:
# 1) a single .dia file (preferred option, because it can be edited)
# 2) a single .eps file
# 3) both a .pdf and .png file
# Fix: removed the trailing backslash from the final entry, which used to
# splice the next comment line into this variable's value.
SOURCEFIGS = \
	figures/testbed.dia \
	figures/emulated-channel.dia \
	$(SRC)/network/doc/packet.dia \
	$(SRC)/network/doc/node.dia \
	$(SRC)/network/doc/buffer.dia \
	$(SRC)/network/doc/sockets-overview.dia \
	$(SRC)/internet/doc/internet-node-send.dia \
	$(SRC)/internet/doc/internet-node-recv.dia \
	$(SRC)/internet/doc/routing.dia \
	$(SRC)/internet/doc/routing-specialization.dia \
	$(SRC)/wifi/doc/WifiArchitecture.dia \
	$(SRC)/wifi/doc/snir.dia \
	$(SRC)/wimax/doc/WimaxArchitecture.dia \
	$(SRC)/lte/doc/lte-transmission.png \
	$(SRC)/lte/doc/lte-transmission.pdf \
	$(SRC)/uan/doc/auvmobility-classes.dia \
	$(SRC)/stats/doc/Stat-framework-arch.png \
	$(SRC)/stats/doc/Wifi-default.png \
	$(SRC)/netanim/doc/animation-dumbbell.png \
	$(SRC)/netanim/doc/animation-dumbbell.pdf
# generated (all dia and eps figures)
IMAGES_EPS = \
$(FIGURES)/testbed.eps \
$(FIGURES)/emulated-channel.eps \
$(FIGURES)/packet.eps \
$(FIGURES)/node.eps \
$(FIGURES)/buffer.eps \
$(FIGURES)/sockets-overview.eps \
$(FIGURES)/internet-node-send.eps \
$(FIGURES)/internet-node-recv.eps \
$(FIGURES)/routing.eps \
$(FIGURES)/routing-specialization.eps \
$(FIGURES)/WifiArchitecture.eps \
$(FIGURES)/snir.eps \
$(FIGURES)/WimaxArchitecture.eps \
$(FIGURES)/auvmobility-classes.eps \
# rescale pdf figures as necessary
$(FIGURES)/testbed.pdf_width = 5in
$(FIGURES)/emulated-channel.pdf_width = 6in
$(FIGURES)/node.pdf_width = 5in
$(FIGURES)/packet.pdf_width = 4in
$(FIGURES)/buffer.pdf_width = 15cm
$(FIGURES)/sockets-overview.pdf_width = 10cm
$(FIGURES)/internet-node-send.pdf_width = 5in
$(FIGURES)/internet-node-recv.pdf_width = 5in
$(FIGURES)/routing.pdf_width = 6in
$(FIGURES)/routing-specialization.pdf_width = 5in
$(FIGURES)/snir.pdf_width = 3in
$(FIGURES)/lte-transmission.pdf_width = 3in
$(FIGURES)/auvmobility-classes.pdf_width = 10cm
IMAGES_PNG = ${IMAGES_EPS:.eps=.png}
IMAGES_PDF = ${IMAGES_EPS:.eps=.pdf}
IMAGES = $(IMAGES_EPS) $(IMAGES_PNG) $(IMAGES_PDF)
%.eps : %.dia; $(DIA) -t eps $< -e $@
%.png : %.dia; $(DIA) -t png $< -e $@
%.png : %.eps; $(CONVERT) $< $@
%.pdf : %.eps; $(EPSTOPDF) $< -o=$@; if test x$($@_width) != x; then TMPFILE=`mktemp`; ./rescale-pdf.sh $($@_width) $@ $${TMPFILE} && mv $${TMPFILE} $@; fi
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
# Note: sphinx reads from $(SOURCETEMP), populated by the copy-sources target.
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCETEMP)

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

# Print the list of supported documentation targets.
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
# Rebuild $(SOURCETEMP) from scratch with the .rst sources and figures; the
# sphinx targets below build from this staging directory.
copy-sources:  $(SOURCES)
	@rm -rf $(SOURCETEMP)
	@mkdir -p $(SOURCETEMP)
	@mkdir -p $(FIGURES)
	@cp -r $(SOURCES) $(SOURCETEMP)
	@cp -r $(SOURCEFIGS) $(FIGURES)

clean:
	-rm -rf $(BUILDDIR)/*
	-rm -rf $(SOURCETEMP)

# Build per-page HTML fragments plus a navigation.xml index from the pickle
# output.  NOTE(review): pushd/popd are bash builtins, so this recipe
# presumably assumes SHELL is bash -- verify before relying on it.
frag: pickle
	@if test ! -d $(BUILDDIR)/frag; then mkdir $(BUILDDIR)/frag; fi
	pushd $(BUILDDIR)/frag && ../../pickle-to-xml.py ../pickle/index.fpickle > navigation.xml && popd
	cp -r $(BUILDDIR)/pickle/_images $(BUILDDIR)/frag
# Every sphinx target depends on copy-sources (staging) and $(IMAGES)
# (figure generation) so the inputs are ready before sphinx-build runs.
html: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ns-3.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ns-3.qhc"

devhelp: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/ns-3"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ns-3"
	@echo "# devhelp"

epub: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex: copy-sources $(IMAGES)
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf: copy-sources $(IMAGES)
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
make -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text: copy-sources $(IMAGES)
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man: copy-sources $(IMAGES)
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
changes: copy-sources $(IMAGES)
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
# Check all external links for integrity.
# Fixed: the prerequisite was misspelled "$(IMAGEs)"; GNU make expands
# an undefined variable to the empty string, so the figures were
# silently never generated before the link check ran.
linkcheck: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."
# Run the doctests embedded in the documentation (if enabled in the
# Sphinx configuration).
doctest: copy-sources $(IMAGES)
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
| zy901002-gpsr | doc/models/Makefile | Makefile | gpl2 | 9,483 |
#!/usr/bin/env bash
#
# rescale-pdf.sh WIDTH INPUT OUTPUT
#
# Rescale the PDF file INPUT to the page width WIDTH (a LaTeX length,
# e.g. "5in") and write the result to OUTPUT.  Used by the Makefile's
# %.pdf figure rule for figures that define a "<target>_width" variable.
#
# Fixes over the previous version:
#  - intermediate files (.tex/.pdf/.aux/.log and the mktemp stub) are
#    removed on exit instead of being leaked
#  - pdflatex writes next to $TMPFILE instead of a hard-coded /tmp, so
#    the script also works when TMPDIR points somewhere else
#  - variable expansions are quoted; the script aborts on first error
set -e

TMPFILE=$(mktemp)
trap 'rm -f "${TMPFILE}" "${TMPFILE}".*' EXIT

# Wrapper document that imports the input PDF at the requested width.
# The heredoc is unquoted on purpose: ${1}/${2} must expand, while the
# LaTeX backslash commands pass through unchanged.
cat > "${TMPFILE}.tex" <<EOF
\documentclass{book}
\usepackage{pdfpages}
\begin{document}
\includepdf[width=${1},fitpaper]{${2}}
\end{document}
EOF

pdflatex -output-directory "$(dirname "${TMPFILE}")" "${TMPFILE}.tex" >/dev/null 2>&1
cp "${TMPFILE}.pdf" "${3}"
| zy901002-gpsr | doc/manual/rescale-pdf.sh | Shell | gpl2 | 270 |
# Tools used to generate the documentation figures.
EPSTOPDF = epstopdf
DIA = dia
CONVERT = convert
FIGURES = figures
VPATH = $(FIGURES)

# Source EPS figures.
# NOTE(review): the trailing backslash on the last list entry continues
# the assignment into the following comment line.  GNU make joins the
# lines and then strips the comment, so the value is correct today, but
# any figure appended after that comment would silently be dropped --
# consider removing the stray backslash.
IMAGES_EPS = \
	$(FIGURES)/software-organization.eps \
# rescale pdf figures as necessary
$(FIGURES)/software-organization.pdf_width = 5in

IMAGES_PNG = \
	$(FIGURES)/plot-2d.png \
	$(FIGURES)/plot-2d-with-error-bars.png \
	$(FIGURES)/plot-3d.png \
	${IMAGES_EPS:.eps=.png}
IMAGES_PDF = ${IMAGES_EPS:.eps=.pdf}
IMAGES = $(IMAGES_EPS) $(IMAGES_PNG) $(IMAGES_PDF)

# Implicit rules: render figures from dia sources and convert between
# formats.  The PDF rule optionally rescales its output via
# rescale-pdf.sh when a "<target>_width" variable is defined above.
%.eps : %.dia; $(DIA) -t eps $< -e $@
%.png : %.dia; $(DIA) -t png $< -e $@
%.png : %.eps; $(CONVERT) $< $@
%.pdf : %.eps; $(EPSTOPDF) $< -o=$@; if test x$($@_width) != x; then TMPFILE=`mktemp`; ./rescale-pdf.sh $($@_width) $@ $${TMPFILE} && mv $${TMPFILE} $@; fi

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

# Default goal: list the available documentation targets.
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo " html       to make standalone HTML files"
	@echo " dirhtml    to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle     to make pickle files"
	@echo " json       to make JSON files"
	@echo " htmlhelp   to make HTML files and a HTML help project"
	@echo " qthelp     to make HTML files and a qthelp project"
	@echo " devhelp    to make HTML files and a Devhelp project"
	@echo " epub       to make an epub"
	@echo " latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf   to make LaTeX files and run them through pdflatex"
	@echo " text       to make text files"
	@echo " man        to make manual pages"
	@echo " changes    to make an overview of all changed/added/deprecated items"
	@echo " linkcheck  to check all external links for integrity"
	@echo " doctest    to run all doctests embedded in the documentation (if enabled)"

# Remove all build output and generated figures ("-" ignores failures).
clean:
	-rm -rf $(BUILDDIR)
	-rm -rf $(IMAGES)

# Build HTML fragments for embedding: convert the pickled index into a
# navigation XML file and copy the generated images next to it.
frag: pickle
	@if test ! -d $(BUILDDIR)/frag; then mkdir $(BUILDDIR)/frag; fi
	pushd $(BUILDDIR)/frag && ../../pickle-to-xml.py ../pickle/index.fpickle  > navigation.xml && popd
	cp -r $(BUILDDIR)/pickle/_images $(BUILDDIR)/frag

# Sphinx builder targets.  Each one generates the figures first, then
# invokes sphinx-build with the corresponding builder.
html: $(IMAGES)
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml: $(IMAGES)
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml: $(IMAGES)
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle: $(IMAGES)
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json: $(IMAGES)
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp: $(IMAGES)
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp: $(IMAGES)
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ns-3.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ns-3.qhc"

devhelp: $(IMAGES)
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/ns-3"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ns-3"
	@echo "# devhelp"

epub: $(IMAGES)
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex: $(IMAGES)
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf: $(IMAGES)
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text: $(IMAGES)
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man: $(IMAGES)
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes: $(IMAGES)
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."
# Check all external links for integrity.
# Fixed: the prerequisite was misspelled "$(IMAGEs)"; GNU make expands
# an undefined variable to the empty string, so the figures were
# silently never generated before the link check ran.
linkcheck: $(IMAGES)
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."
# Run the doctests embedded in the documentation (if enabled in the
# Sphinx configuration).
doctest: $(IMAGES)
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
| zy901002-gpsr | doc/manual/Makefile | Makefile | gpl2 | 5,674 |
/**
* \mainpage ns-3 Documentation
*
* \section intro-sec Introduction
* <a href="http://www.nsnam.org/">ns-3</a> documentation is maintained using
* <a href="http://www.doxygen.org">Doxygen</a>.
* Doxygen is typically used for
* API documentation, and organizes such documentation across different
* modules. This project uses Doxygen for building the definitive
* maintained API documentation. Additional ns-3 project documentation
* can be found at the
* <a href="http://www.nsnam.org/documentation/latest">project web site</a>.
*
* \section install-sec Building the Documentation
*
* ns-3 requires Doxygen version 1.5.4 or greater to fully build all items,
* although earlier versions of Doxygen will mostly work.
*
* Type "./waf --doxygen" or "./waf --doxygen-no-build" to build the
* documentation. The doc/ directory contains
* configuration for Doxygen (doxygen.conf) and main.h. The Doxygen
* build process puts html files into the doc/html/ directory, and latex
 * files into the doc/latex/ directory.
*
* \section module-sec Module overview
*
* The ns-3 library is split across many modules organized under the
* <b><a href="modules.html">Modules</a></b> tab.
* - aodv
* - applications
* - bridge
* - click
* - config-store
* - core
* - csma
* - csma-layout
* - dsdv
* - emu
* - energy
* - flow-monitor
* - internet
* - lte
* - mesh
* - mobility
* - mpi
* - netanim
* - network
* - nix-vector-routing
* - ns3tcp
* - ns3wifi
* - olsr
* - openflow
* - point-to-point
* - point-to-point-layout
* - propagation
* - spectrum
* - stats
* - tap-bridge
* - template
* - test
* - tools
* - topology-read
* - uan
* - virtual-net-device
* - visualizer
* - wifi
* - wimax
*
*
*/
/**
* \namespace ns3
* \brief Every class exported by the ns3 library is enclosed in the
* ns3 namespace.
*/
| zy901002-gpsr | doc/main.h | C | gpl2 | 2,032 |
#!/usr/bin/perl -w
#
# Copyright (c) International Business Machines Corp., 2002,2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# lcov
#
# This is a wrapper script which provides a single interface for accessing
# LCOV coverage data.
#
#
# History:
# 2002-08-29 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
# IBM Lab Boeblingen
# 2002-09-05 / Peter Oberparleiter: implemented --kernel-directory +
# multiple directories
# 2002-10-16 / Peter Oberparleiter: implemented --add-tracefile option
# 2002-10-17 / Peter Oberparleiter: implemented --extract option
# 2002-11-04 / Peter Oberparleiter: implemented --list option
# 2003-03-07 / Paul Larson: Changed to make it work with the latest gcov
# kernel patch. This will break it with older gcov-kernel
# patches unless you change the value of $gcovmod in this script
# 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
# when trying to combine .info files containing data without
# a test name
# 2003-04-10 / Peter Oberparleiter: extended Paul's change so that LCOV
# works both with the new and the old gcov-kernel patch
# 2003-04-10 / Peter Oberparleiter: added $gcov_dir constant in anticipation
# of a possible move of the gcov kernel directory to another
# file system in a future version of the gcov-kernel patch
# 2003-04-15 / Paul Larson: make info write to STDERR, not STDOUT
# 2003-04-15 / Paul Larson: added --remove option
# 2003-04-30 / Peter Oberparleiter: renamed --reset to --zerocounters
# to remove naming ambiguity with --remove
# 2003-04-30 / Peter Oberparleiter: adjusted help text to include --remove
# 2003-06-27 / Peter Oberparleiter: implemented --diff
# 2003-07-03 / Peter Oberparleiter: added line checksum support, added
# --no-checksum
# 2003-12-11 / Laurent Deniel: added --follow option
# 2004-03-29 / Peter Oberparleiter: modified --diff option to better cope with
# ambiguous patch file entries, modified --capture option to use
# modprobe before insmod (needed for 2.6)
# 2004-03-30 / Peter Oberparleiter: added --path option
# 2004-08-09 / Peter Oberparleiter: added configuration file support
# 2008-08-13 / Peter Oberparleiter: added function coverage support
#
use strict;
use File::Basename;
use File::Path;
use File::Find;
use File::Temp qw /tempdir/;
use File::Spec::Functions qw /abs2rel canonpath catdir catfile catpath
file_name_is_absolute rootdir splitdir splitpath/;
use Getopt::Long;
use Cwd qw /abs_path getcwd/;
# Global constants
our $lcov_version = 'LCOV version 1.9';
our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
our $tool_name = basename($0);
# Directory containing gcov kernel files
our $gcov_dir;
# Where to create temporary directories
our $tmp_dir;
# Internal constants
our $GKV_PROC = 0; # gcov-kernel data in /proc via external patch
our $GKV_SYS = 1; # gcov-kernel data in /sys via vanilla 2.6.31+
our @GKV_NAME = ( "external", "upstream" );
our $pkg_gkv_file = ".gcov_kernel_version";
our $pkg_build_file = ".build_directory";
our $BR_BLOCK = 0;
our $BR_BRANCH = 1;
our $BR_TAKEN = 2;
our $BR_VEC_ENTRIES = 3;
our $BR_VEC_WIDTH = 32;
# Branch data combination types
our $BR_SUB = 0;
our $BR_ADD = 1;
# Prototypes
sub print_usage(*);
sub check_options();
sub userspace_reset();
sub userspace_capture();
sub kernel_reset();
sub kernel_capture();
sub kernel_capture_initial();
sub package_capture();
sub add_traces();
sub read_info_file($);
sub get_info_entry($);
sub set_info_entry($$$$$$$$$;$$$$$$);
sub add_counts($$);
sub merge_checksums($$$);
sub combine_info_entries($$$);
sub combine_info_files($$);
sub write_info_file(*$);
sub extract();
sub remove();
sub list();
sub get_common_filename($$);
sub read_diff($);
sub diff();
sub system_no_output($@);
sub read_config($);
sub apply_config($);
sub info(@);
sub create_temp_dir();
sub transform_pattern($);
sub warn_handler($);
sub die_handler($);
sub abort_handler($);
sub temp_cleanup();
sub setup_gkv();
sub get_overall_line($$$$);
sub print_overall_rate($$$$$$$$$);
sub lcov_geninfo(@);
sub create_package($$$;$);
sub get_func_found_and_hit($);
sub br_ivec_get($$);
# Global variables & initialization
our @directory; # Specifies where to get coverage data from
our @kernel_directory; # If set, captures only from specified kernel subdirs
our @add_tracefile; # If set, reads in and combines all files in list
our $list; # If set, list contents of tracefile
our $extract; # If set, extracts parts of tracefile
our $remove; # If set, removes parts of tracefile
our $diff; # If set, modifies tracefile according to diff
our $reset; # If set, reset all coverage data to zero
our $capture; # If set, capture data
our $output_filename; # Name for file to write coverage data to
our $test_name = ""; # Test case name
our $quiet = ""; # If set, suppress information messages
our $help; # Help option flag
our $version; # Version option flag
our $convert_filenames; # If set, convert filenames when applying diff
our $strip; # If set, strip leading directories when applying diff
our $temp_dir_name; # Name of temporary directory
our $cwd = `pwd`; # Current working directory
our $to_file; # If set, indicates that output is written to a file
our $follow; # If set, indicates that find shall follow links
our $diff_path = ""; # Path removed from tracefile when applying diff
our $base_directory; # Base directory (cwd of gcc during compilation)
our $checksum; # If set, calculate a checksum for each line
our $no_checksum; # If set, don't calculate a checksum for each line
our $compat_libtool; # If set, indicates that libtool mode is to be enabled
our $no_compat_libtool; # If set, indicates that libtool mode is to be disabled
our $gcov_tool;
our $ignore_errors;
our $initial;
our $no_recursion = 0;
our $to_package;
our $from_package;
our $maxdepth;
our $no_markers;
our $config; # Configuration file contents
chomp($cwd);
our $tool_dir = dirname($0); # Directory where genhtml tool is installed
our @temp_dirs;
our $gcov_gkv; # gcov kernel support version found on machine
our $opt_derive_func_data;
our $opt_debug;
our $opt_list_full_path;
our $opt_no_list_full_path;
our $opt_list_width = 80;
our $opt_list_truncate_max = 20;
our $ln_overall_found;
our $ln_overall_hit;
our $fn_overall_found;
our $fn_overall_hit;
our $br_overall_found;
our $br_overall_hit;
#
# Code entry point
#
# Install signal/exception handlers: the warn/die handlers prefix
# messages with the tool name, the abort handlers make sure temporary
# directories are cleaned up when the user interrupts the run.
$SIG{__WARN__} = \&warn_handler;
$SIG{__DIE__} = \&die_handler;
$SIG{'INT'} = \&abort_handler;
$SIG{'QUIT'} = \&abort_handler;

# Prettify version string
$lcov_version =~ s/\$\s*Revision\s*:?\s*(\S+)\s*\$/$1/;

# Add current working directory if $tool_dir is not already an absolute path
if (! ($tool_dir =~ /^\/(.*)$/))
{
	$tool_dir = "$cwd/$tool_dir";
}

# Read configuration file if available: the per-user ~/.lcovrc takes
# precedence over the system-wide /etc/lcovrc.
if (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
{
	$config = read_config($ENV{"HOME"}."/.lcovrc");
}
elsif (-r "/etc/lcovrc")
{
	$config = read_config("/etc/lcovrc");
}

if ($config)
{
	# Copy configuration file values to variables
	apply_config({
		"lcov_gcov_dir"		=> \$gcov_dir,
		"lcov_tmp_dir"		=> \$tmp_dir,
		"lcov_list_full_path"	=> \$opt_list_full_path,
		"lcov_list_width"	=> \$opt_list_width,
		"lcov_list_truncate_max"=> \$opt_list_truncate_max,
	});
}

# Parse command line options
if (!GetOptions("directory|d|di=s" => \@directory,
		"add-tracefile|a=s" => \@add_tracefile,
		"list|l=s" => \$list,
		"kernel-directory|k=s" => \@kernel_directory,
		"extract|e=s" => \$extract,
		"remove|r=s" => \$remove,
		"diff=s" => \$diff,
		"convert-filenames" => \$convert_filenames,
		"strip=i" => \$strip,
		"capture|c" => \$capture,
		"output-file|o=s" => \$output_filename,
		"test-name|t=s" => \$test_name,
		"zerocounters|z" => \$reset,
		"quiet|q" => \$quiet,
		"help|h|?" => \$help,
		"version|v" => \$version,
		"follow|f" => \$follow,
		"path=s" => \$diff_path,
		"base-directory|b=s" => \$base_directory,
		"checksum" => \$checksum,
		"no-checksum" => \$no_checksum,
		"compat-libtool" => \$compat_libtool,
		"no-compat-libtool" => \$no_compat_libtool,
		"gcov-tool=s" => \$gcov_tool,
		"ignore-errors=s" => \$ignore_errors,
		"initial|i" => \$initial,
		"no-recursion" => \$no_recursion,
		"to-package=s" => \$to_package,
		"from-package=s" => \$from_package,
		"no-markers" => \$no_markers,
		"derive-func-data" => \$opt_derive_func_data,
		"debug" => \$opt_debug,
		"list-full-path" => \$opt_list_full_path,
		"no-list-full-path" => \$opt_no_list_full_path,
		))
{
	print(STDERR "Use $tool_name --help to get usage information\n");
	exit(1);
}
else
{
	# Merge options: each --no-<x> switch overrides its positive
	# counterpart and is reset afterwards, so later code only needs to
	# inspect the merged variable.
	if (defined($no_checksum))
	{
		$checksum = ($no_checksum ? 0 : 1);
		$no_checksum = undef;
	}

	if (defined($no_compat_libtool))
	{
		$compat_libtool = ($no_compat_libtool ? 0 : 1);
		$no_compat_libtool = undef;
	}

	if (defined($opt_no_list_full_path))
	{
		$opt_list_full_path = ($opt_no_list_full_path ? 0 : 1);
		$opt_no_list_full_path = undef;
	}
}

# Check for help option
if ($help)
{
	print_usage(*STDOUT);
	exit(0);
}

# Check for version option
if ($version)
{
	print("$tool_name: $lcov_version\n");
	exit(0);
}

# Check list width option
if ($opt_list_width <= 40) {
	die("ERROR: lcov_list_width parameter out of range (needs to be ".
	    "larger than 40)\n");
}

# Normalize --path text
$diff_path =~ s/\/$//;

# Translate the boolean --follow flag into the matching "find" argument
if ($follow)
{
	$follow = "-follow";
}
else
{
	$follow = "";
}

# Likewise for --no-recursion
if ($no_recursion)
{
	$maxdepth = "-maxdepth 1";
}
else
{
	$maxdepth = "";
}

# Check for valid options
check_options();

# Only --extract, --remove and --diff allow unnamed parameters
if (@ARGV && !($extract || $remove || $diff))
{
	die("Extra parameter found: '".join(" ", @ARGV)."'\n".
	    "Use $tool_name --help to get usage information\n");
}

# Check for output filename
$to_file = ($output_filename && ($output_filename ne "-"));

if ($capture)
{
	if (!$to_file)
	{
		# Option that tells geninfo to write to stdout
		$output_filename = "-";
	}
}

# Determine kernel directory for gcov data
if (!$from_package && !@directory && ($capture || $reset)) {
	($gcov_gkv, $gcov_dir) = setup_gkv();
}

# Check for requested functionality: exactly one mode is set at this
# point (enforced by check_options above).
if ($reset)
{
	# Differentiate between user space and kernel reset
	if (@directory)
	{
		userspace_reset();
	}
	else
	{
		kernel_reset();
	}
}
elsif ($capture)
{
	# Capture source can be user space, kernel or package
	if ($from_package) {
		package_capture();
	} elsif (@directory) {
		userspace_capture();
	} else {
		if ($initial) {
			if (defined($to_package)) {
				die("ERROR: --initial cannot be used together ".
				    "with --to-package\n");
			}
			kernel_capture_initial();
		} else {
			kernel_capture();
		}
	}
}
elsif (@add_tracefile)
{
	($ln_overall_found, $ln_overall_hit,
	 $fn_overall_found, $fn_overall_hit,
	 $br_overall_found, $br_overall_hit) = add_traces();
}
elsif ($remove)
{
	($ln_overall_found, $ln_overall_hit,
	 $fn_overall_found, $fn_overall_hit,
	 $br_overall_found, $br_overall_hit) = remove();
}
elsif ($extract)
{
	($ln_overall_found, $ln_overall_hit,
	 $fn_overall_found, $fn_overall_hit,
	 $br_overall_found, $br_overall_hit) = extract();
}
elsif ($list)
{
	list();
}
elsif ($diff)
{
	if (scalar(@ARGV) != 1)
	{
		die("ERROR: option --diff requires one additional argument!\n".
		    "Use $tool_name --help to get usage information\n");
	}
	($ln_overall_found, $ln_overall_hit,
	 $fn_overall_found, $fn_overall_hit,
	 $br_overall_found, $br_overall_hit) = diff();
}

# Remove any temporary directories created during this run
temp_cleanup();

# Print overall coverage rates when the selected operation produced them
if (defined($ln_overall_found)) {
	print_overall_rate(1, $ln_overall_found, $ln_overall_hit,
			   1, $fn_overall_found, $fn_overall_hit,
			   1, $br_overall_found, $br_overall_hit);
} else {
	info("Done.\n") if (!$list && !$capture);
}
exit(0);
#
# print_usage(handle)
#
# Print usage information.
#
sub print_usage(*)
{
	local *HANDLE = $_[0];

	# The usage text is a single here-document printed verbatim; keep
	# it in sync with the options accepted by GetOptions() above.
	print(HANDLE <<END_OF_USAGE);
Usage: $tool_name [OPTIONS]

Use lcov to collect coverage data from either the currently running Linux
kernel or from a user space application. Specify the --directory option to
get coverage data for a user space program.

Misc:
  -h, --help                      Print this help, then exit
  -v, --version                   Print version number, then exit
  -q, --quiet                     Do not print progress messages

Operation:
  -z, --zerocounters              Reset all execution counts to zero
  -c, --capture                   Capture coverage data
  -a, --add-tracefile FILE        Add contents of tracefiles
  -e, --extract FILE PATTERN      Extract files matching PATTERN from FILE
  -r, --remove FILE PATTERN       Remove files matching PATTERN from FILE
  -l, --list FILE                 List contents of tracefile FILE
      --diff FILE DIFF            Transform tracefile FILE according to DIFF

Options:
  -i, --initial                   Capture initial zero coverage data
  -t, --test-name NAME            Specify test name to be stored with data
  -o, --output-file FILENAME      Write data to FILENAME instead of stdout
  -d, --directory DIR             Use .da files in DIR instead of kernel
  -f, --follow                    Follow links when searching .da files
  -k, --kernel-directory KDIR     Capture kernel coverage data only from KDIR
  -b, --base-directory DIR        Use DIR as base directory for relative paths
      --convert-filenames         Convert filenames when applying diff
      --strip DEPTH               Strip initial DEPTH directory levels in diff
      --path PATH                 Strip PATH from tracefile when applying diff
      --(no-)checksum             Enable (disable) line checksumming
      --(no-)compat-libtool       Enable (disable) libtool compatibility mode
      --gcov-tool TOOL            Specify gcov tool location
      --ignore-errors ERRORS      Continue after ERRORS (gcov, source, graph)
      --no-recursion              Exclude subdirectories from processing
      --to-package FILENAME       Store unprocessed coverage data in FILENAME
      --from-package FILENAME     Capture from unprocessed data in FILENAME
      --no-markers                Ignore exclusion markers in source code
      --derive-func-data          Generate function data from line data
      --list-full-path            Print full path during a list operation

For more information see: $lcov_url
END_OF_USAGE
	;
}
#
# check_options()
#
# Check for valid combination of command line options. Die on error.
#
#
# check_options()
#
# Verify that exactly one of the mutually exclusive operation modes was
# requested on the command line.  Dies with a usage hint otherwise.
#
sub check_options()
{
	# Count how many of the mode options were specified.  The
	# tracefile list counts as "set" when it is non-empty.
	my $mode_count = grep({ $_ } ($reset, $capture,
				      scalar(@add_tracefile), $extract,
				      $remove, $list, $diff));

	if ($mode_count == 0)
	{
		die("Need one of the options -z, -c, -a, -e, -r, -l or ".
		    "--diff\n".
		    "Use $tool_name --help to get usage information\n");
	}
	if ($mode_count > 1)
	{
		die("ERROR: only one of -z, -c, -a, -e, -r, -l or ".
		    "--diff allowed!\n".
		    "Use $tool_name --help to get usage information\n");
	}
}
#
# userspace_reset()
#
# Reset coverage data found in DIRECTORY by deleting all contained .da files.
#
# Die on error.
#
sub userspace_reset()
{
	my $current_dir;
	my @file_list;

	# Walk every directory given via -d and delete both old-style .da
	# and new-style .gcda data files.  $maxdepth/$follow carry the
	# "find" arguments derived from --no-recursion/--follow.
	foreach $current_dir (@directory)
	{
		info("Deleting all .da files in $current_dir".
		     ($no_recursion?"\n":" and subdirectories\n"));
		@file_list = `find "$current_dir" $maxdepth $follow -name \\*\\.da -o -name \\*\\.gcda -type f 2>/dev/null`;
		chomp(@file_list);
		foreach (@file_list)
		{
			unlink($_) or die("ERROR: cannot remove file $_!\n");
		}
	}
}
#
# userspace_capture()
#
# Capture coverage data found in DIRECTORY and write it to a package (if
# TO_PACKAGE specified) or to OUTPUT_FILENAME or STDOUT.
#
# Die on error.
#
sub userspace_capture()
{
	my $dir;
	my $build;

	# Without --to-package, simply delegate to geninfo for all -d dirs.
	if (!defined($to_package)) {
		lcov_geninfo(@directory);
		return;
	}
	# Packaging stores a single data directory plus its build
	# directory, hence only one -d argument is allowed here.
	if (scalar(@directory) != 1) {
		die("ERROR: -d may be specified only once with --to-package\n");
	}
	$dir = $directory[0];
	# The build directory defaults to the data directory unless the
	# user overrode it with -b.
	if (defined($base_directory)) {
		$build = $base_directory;
	} else {
		$build = $dir;
	}
	create_package($to_package, $dir, $build);
}
#
# kernel_reset()
#
# Reset kernel coverage.
#
# Die on error.
#
sub kernel_reset()
{
	local *HANDLE;
	my $reset_file;

	info("Resetting kernel execution counters\n");
	# The reset control file differs between gcov-kernel variants:
	# "vmlinux" (upstream /sys interface) vs. "reset" (external patch).
	if (-e "$gcov_dir/vmlinux") {
		$reset_file = "$gcov_dir/vmlinux";
	} elsif (-e "$gcov_dir/reset") {
		$reset_file = "$gcov_dir/reset";
	} else {
		die("ERROR: no reset control found in $gcov_dir\n");
	}
	# Writing "0" to the control file triggers the counter reset.
	open(HANDLE, ">$reset_file") or
		die("ERROR: cannot write to $reset_file!\n");
	print(HANDLE "0");
	close(HANDLE);
}
#
# lcov_copy_single(from, to)
#
# Copy single regular file FROM to TO without checking its size. This is
# required to work with special files generated by the kernel
# seq_file-interface.
#
#
#
# lcov_copy_single(from, to)
#
# Copy the regular file FROM to TO by slurping its whole contents,
# ignoring the size reported by stat().  Required for the special files
# generated by the kernel's seq_file interface, which report size 0.
#
sub lcov_copy_single($$)
{
	my ($from, $to) = @_;
	my $content;
	local $/;
	local *HANDLE;

	# $/ is undefined above, so this single read slurps the file.
	open(HANDLE, "<$from") or die("ERROR: cannot read $from: $!\n");
	$content = <HANDLE>;
	close(HANDLE);
	# Fixed: the error message below used to name $from instead of the
	# file actually being opened for writing ($to).
	open(HANDLE, ">$to") or die("ERROR: cannot write $to: $!\n");
	if (defined($content)) {
		print(HANDLE $content);
	}
	close(HANDLE);
}
#
# lcov_find(dir, function, data[, extension, ...)])
#
# Search DIR for files and directories whose name matches PATTERN and run
# FUNCTION for each match. If not pattern is specified, match all names.
#
# FUNCTION has the following prototype:
# function(dir, relative_name, data)
#
# Where:
# dir: the base directory for this search
# relative_name: the name relative to the base directory of this entry
# data: the DATA variable passed to lcov_find
#
#
# lcov_find(dir, function, data[, pattern, ...])
#
# Traverse DIR and invoke FUNCTION for every entry whose name (relative
# to DIR) matches one of the regular expressions in PATTERN.  Without a
# pattern, every entry matches.  Traversal stops as soon as FUNCTION
# returns a defined value; that value is returned to the caller.
#
sub lcov_find($$$;@)
{
	my ($dir, $fn, $data, @pattern) = @_;
	my $result;

	# No pattern given: match every name
	@pattern = (".*") if (!@pattern);

	my $wanted = sub {
		# A previous callback already produced a result - skip the
		# remaining entries.
		return if (defined($result));
		my $relname = abs2rel($File::Find::name, $dir);
		foreach my $regexp (@pattern) {
			if ($relname =~ /$regexp/) {
				# First matching pattern wins; the callback
				# runs at most once per entry.
				$result = &$fn($dir, $relname, $data);
				last;
			}
		}
	};
	find({ wanted => $wanted, no_chdir => 1 }, $dir);

	return $result;
}
#
# lcov_copy_fn(from, rel, to)
#
# Copy directories, files and links from/rel to to/rel.
#
sub lcov_copy_fn($$$)
{
	my ($from, $rel, $to) = @_;
	my $absfrom = canonpath(catfile($from, $rel));
	my $absto = canonpath(catfile($to, $rel));

	# NOTE: the bare -d/-l file tests below operate on $_, which is
	# set by File::Find (lcov_find runs find() with no_chdir, so $_
	# holds the full path of the current entry).
	if (-d) {
		# Recreate directory with owner-only access
		if (! -d $absto) {
			mkpath($absto) or
				die("ERROR: cannot create directory $absto\n");
			chmod(0700, $absto);
		}
	} elsif (-l) {
		# Copy symbolic link
		my $link = readlink($absfrom);
		if (!defined($link)) {
			die("ERROR: cannot read link $absfrom: $!\n");
		}
		symlink($link, $absto) or
			die("ERROR: cannot create link $absto: $!\n");
	} else {
		# Regular file: copy contents without trusting the size
		# reported by stat (see lcov_copy_single)
		lcov_copy_single($absfrom, $absto);
		chmod(0600, $absto);
	}
	# Returning undef keeps lcov_find traversing further entries
	return undef;
}
#
# lcov_copy(from, to, subdirs)
#
# Copy all specified SUBDIRS and files from directory FROM to directory TO. For
# regular files, copy file contents without checking its size. This is required
# to work with seq_file-generated files.
#
#
# lcov_copy(from, to, subdirs)
#
# Recursively copy the given SUBDIRS (and their files/links) from
# directory FROM to directory TO; file contents are copied without
# relying on the reported size (seq_file compatibility).
#
sub lcov_copy($$;@)
{
	my ($from, $to, @subdirs) = @_;

	# Anchor each subdirectory name at the start of the relative path
	my @pattern = map({ "^$_" } @subdirs);
	lcov_find($from, \&lcov_copy_fn, $to, @pattern);
}
#
# lcov_geninfo(directory)
#
# Call geninfo for the specified directory and with the parameters specified
# at the command line.
#
sub lcov_geninfo(@)
{
	my (@dir) = @_;
	my @param;

	# Capture data
	info("Capturing coverage data from ".join(" ", @dir)."\n");
	# Build the geninfo command line, forwarding every relevant option
	# that was passed to lcov itself.
	@param = ("$tool_dir/geninfo", @dir);
	if ($output_filename)
	{
		@param = (@param, "--output-filename", $output_filename);
	}
	if ($test_name)
	{
		@param = (@param, "--test-name", $test_name);
	}
	if ($follow)
	{
		# $follow was normalized to "-follow"/"" during option
		# parsing; any non-empty value means the flag was given.
		@param = (@param, "--follow");
	}
	if ($quiet)
	{
		@param = (@param, "--quiet");
	}
	if (defined($checksum))
	{
		# Tri-state: undef = geninfo default, 1/0 = forced on/off
		if ($checksum)
		{
			@param = (@param, "--checksum");
		}
		else
		{
			@param = (@param, "--no-checksum");
		}
	}
	if ($base_directory)
	{
		@param = (@param, "--base-directory", $base_directory);
	}
	if ($no_compat_libtool)
	{
		@param = (@param, "--no-compat-libtool");
	}
	elsif ($compat_libtool)
	{
		@param = (@param, "--compat-libtool");
	}
	if ($gcov_tool)
	{
		@param = (@param, "--gcov-tool", $gcov_tool);
	}
	if ($ignore_errors)
	{
		@param = (@param, "--ignore-errors", $ignore_errors);
	}
	if ($initial)
	{
		@param = (@param, "--initial");
	}
	if ($no_markers)
	{
		@param = (@param, "--no-markers");
	}
	if ($opt_derive_func_data)
	{
		@param = (@param, "--derive-func-data");
	}
	if ($opt_debug)
	{
		@param = (@param, "--debug");
	}
	# Propagate geninfo's exit code if it failed: system() returns the
	# raw wait status, so shift to obtain the child's exit code.
	system(@param) and exit($? >> 8);
}
#
# read_file(filename)
#
# Return the contents of the file defined by filename.
#
sub read_file($)
{
	my ($filename) = @_;
	my $content;
	# Clear the *input* record separator so that the read below
	# slurps the entire file instead of only the first line.
	# (The previous code localized $\, the output record separator,
	# which has no effect on reading.)
	local $/;
	local *HANDLE;
	open(HANDLE, "<$filename") || return undef;
	$content = <HANDLE>;
	close(HANDLE);
	return $content;
}
#
# get_package(package_file)
#
# Unpack unprocessed coverage data files from package_file to a temporary
# directory and return directory name, build directory and gcov kernel version
# as found in package.
#
sub get_package($)
{
	my ($file) = @_;
	my $dir = create_temp_dir();
	my $gkv;		# gcov kernel version found in package, if any
	my $build;		# build directory found in package, if any
	my $cwd = getcwd();
	# Initialize to zero so that a package containing no data files
	# reports "0" instead of printing an undefined value
	my $count = 0;
	local *HANDLE;
	info("Reading package $file:\n");
	info(" data directory .......: $dir\n");
	$file = abs_path($file);
	chdir($dir);
	# Unpack and count coverage data files in a single tar pass
	open(HANDLE, "tar xvfz $file 2>/dev/null|")
		or die("ERROR: could not process package $file\n");
	while (<HANDLE>) {
		if (/\.da$/ || /\.gcda$/) {
			$count++;
		}
	}
	close(HANDLE);
	# Optional metadata stored by create_package(): build directory
	# and gcov kernel version
	$build = read_file("$dir/$pkg_build_file");
	if (defined($build)) {
		info(" build directory ......: $build\n");
	}
	$gkv = read_file("$dir/$pkg_gkv_file");
	if (defined($gkv)) {
		$gkv = int($gkv);
		if ($gkv != $GKV_PROC && $gkv != $GKV_SYS) {
			die("ERROR: unsupported gcov kernel version found ".
			    "($gkv)\n");
		}
		info(" content type .........: kernel data\n");
		info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]);
	} else {
		info(" content type .........: application data\n");
	}
	info(" data files ...........: $count\n");
	chdir($cwd);
	return ($dir, $build, $gkv);
}
#
# write_file(filename, $content)
#
# Create a file named filename and write the specified content to it.
#
sub write_file($$)
{
	my ($path, $data) = @_;
	local *HANDLE;
	# Return a false result if the file cannot be created
	open(HANDLE, ">$path") or return 0;
	print(HANDLE $data);
	# A failing close may indicate a delayed write error
	close(HANDLE) or return 0;
	return 1;
}
# count_package_data(filename)
#
# Count the number of coverage data files in the specified package file.
#
sub count_package_data($)
{
	my ($filename) = @_;
	my $num = 0;
	local *HANDLE;
	# List package contents via tar; fail softly if unreadable
	open(HANDLE, "tar tfz $filename|") or return undef;
	while (my $entry = <HANDLE>) {
		# Count both old-style (.da) and new-style (.gcda) files
		$num++ if ($entry =~ /\.(da|gcda)$/);
	}
	close(HANDLE);
	return $num;
}
#
# create_package(package_file, source_directory, build_directory[,
# kernel_gcov_version])
#
# Store unprocessed coverage data files from source_directory to package_file.
#
sub create_package($$$;$)
{
	my ($file, $dir, $build, $gkv) = @_;
	my $cwd = getcwd();
	# Print information about the package
	info("Creating package $file:\n");
	info(" data directory .......: $dir\n");
	# Handle build directory
	if (defined($build)) {
		info(" build directory ......: $build\n");
		# Store the build directory inside the package so that
		# a later "lcov --from-package" can restore it
		write_file("$dir/$pkg_build_file", $build)
			or die("ERROR: could not write to ".
				"$dir/$pkg_build_file\n");
	}
	# Handle gcov kernel version data
	if (defined($gkv)) {
		info(" content type .........: kernel data\n");
		info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]);
		# Presence of this file marks the package as kernel data
		write_file("$dir/$pkg_gkv_file", $gkv)
			or die("ERROR: could not write to ".
				"$dir/$pkg_gkv_file\n");
	} else {
		info(" content type .........: application data\n");
	}
	# Create package
	$file = abs_path($file);
	chdir($dir);
	system("tar cfz $file .")
		and die("ERROR: could not create package $file\n");
	# Remove temporary files
	unlink("$dir/$pkg_build_file");
	unlink("$dir/$pkg_gkv_file");
	# Show number of data files
	if (!$quiet) {
		my $count = count_package_data($file);
		if (defined($count)) {
			info(" data files ...........: $count\n");
		}
	}
	chdir($cwd);
}
# lcov_find() callback: report FILENAME under FROM/REL only when it
# is a symbolic link; return undef otherwise to continue searching.
sub find_link_fn($$$)
{
	my ($from, $rel, $filename) = @_;
	my $path = catfile($from, $rel, $filename);
	return (-l $path) ? $path : undef;
}
#
# get_base(dir)
#
# Return (BASE, OBJ), where
# - BASE: is the path to the kernel base directory relative to dir
# - OBJ: is the absolute path to the kernel build directory
#
sub get_base($)
{
	my ($dir) = @_;
	# base.gcno is used as a known landmark within the gcov
	# directory tree; it is expected to be a symlink into the
	# kernel build directory
	my $marker = "kernel/gcov/base.gcno";
	my $markerfile;
	my $sys;
	my $obj;
	my $link;
	$markerfile = lcov_find($dir, \&find_link_fn, $marker);
	if (!defined($markerfile)) {
		# Landmark not found - cannot auto-detect directories
		return (undef, undef);
	}
	# sys base is parent of parent of markerfile.
	$sys = abs2rel(dirname(dirname(dirname($markerfile))), $dir);
	# obj base is parent of parent of markerfile link target.
	$link = readlink($markerfile);
	if (!defined($link)) {
		die("ERROR: could not read $markerfile\n");
	}
	$obj = dirname(dirname(dirname($link)));
	return ($sys, $obj);
}
#
# apply_base_dir(data_dir, base_dir, build_dir, @directories)
#
# Make entries in @directories relative to data_dir.
#
sub apply_base_dir($$$@)
{
	my ($data, $base, $build, @dirs) = @_;
	my $dir;
	my @result;
	# Try each candidate base in turn; the first combination that
	# exists on disk relative to the data directory wins
	foreach $dir (@dirs) {
		# Is directory path relative to data directory?
		if (-d catdir($data, $dir)) {
			push(@result, $dir);
			next;
		}
		# Relative to the auto-detected base-directory?
		if (defined($base)) {
			if (-d catdir($data, $base, $dir)) {
				push(@result, catdir($base, $dir));
				next;
			}
		}
		# Relative to the specified base-directory?
		# NOTE: $base is reused as a scratch variable from here on
		if (defined($base_directory)) {
			if (file_name_is_absolute($base_directory)) {
				$base = abs2rel($base_directory, rootdir());
			} else {
				$base = $base_directory;
			}
			if (-d catdir($data, $base, $dir)) {
				push(@result, catdir($base, $dir));
				next;
			}
		}
		# Relative to the build directory?
		if (defined($build)) {
			if (file_name_is_absolute($build)) {
				$base = abs2rel($build, rootdir());
			} else {
				$base = $build;
			}
			if (-d catdir($data, $base, $dir)) {
				push(@result, catdir($base, $dir));
				next;
			}
		}
		die("ERROR: subdirectory $dir not found\n".
		    "Please use -b to specify the correct directory\n");
	}
	return @result;
}
#
# copy_gcov_dir(dir, [@subdirectories])
#
# Create a temporary directory and copy all or, if specified, only some
# subdirectories from dir to that directory. Return the name of the temporary
# directory.
#
sub copy_gcov_dir($;@)
{
	my ($src, @subdirs) = @_;
	# Stage the data in a fresh temporary directory so that the
	# original gcov tree is never modified
	my $dest = create_temp_dir();
	info("Copying data to temporary directory $dest\n");
	lcov_copy($src, $dest, @subdirs);
	return $dest;
}
#
# kernel_capture_initial
#
# Capture initial kernel coverage data, i.e. create a coverage data file from
# static graph files which contains zero coverage data for all instrumented
# lines.
#
sub kernel_capture_initial()
{
	my $build;
	my $source;
	my @params;
	if (defined($base_directory)) {
		# User-specified build directory takes precedence
		$build = $base_directory;
		$source = "specified";
	} else {
		(undef, $build) = get_base($gcov_dir);
		if (!defined($build)) {
			die("ERROR: could not auto-detect build directory.\n".
			    "Please use -b to specify the build directory\n");
		}
		$source = "auto-detected";
	}
	info("Using $build as kernel build directory ($source)\n");
	# Build directory needs to be passed to geninfo
	$base_directory = $build;
	if (@kernel_directory) {
		# Restrict capture to the subdirectories requested via -k
		foreach my $dir (@kernel_directory) {
			push(@params, "$build/$dir");
		}
	} else {
		push(@params, $build);
	}
	lcov_geninfo(@params);
}
#
# kernel_capture_from_dir(directory, gcov_kernel_version, build)
#
# Perform the actual kernel coverage capturing from the specified directory
# assuming that the data was copied from the specified gcov kernel version.
#
sub kernel_capture_from_dir($$$)
{
	my ($dir, $gkv, $build) = @_;
	# Create package or coverage file
	if (defined($to_package)) {
		# --to-package: archive the raw data files instead of
		# processing them now
		create_package($to_package, $dir, $build, $gkv);
	} else {
		# Build directory needs to be passed to geninfo
		$base_directory = $build;
		lcov_geninfo($dir);
	}
}
#
# adjust_kernel_dir(dir, build)
#
# Adjust directories specified with -k so that they point to the directory
# relative to DIR. Return the build directory if specified or the auto-
# detected build-directory.
#
sub adjust_kernel_dir($$)
{
	my ($dir, $build) = @_;
	my ($sys_base, $build_auto) = get_base($dir);
	# A caller-specified build directory takes precedence over the
	# auto-detected one
	if (!defined($build)) {
		$build = $build_auto;
	}
	if (!defined($build)) {
		die("ERROR: could not auto-detect build directory.\n".
		    "Please use -b to specify the build directory\n");
	}
	# Make @kernel_directory relative to sysfs base
	if (@kernel_directory) {
		@kernel_directory = apply_base_dir($dir, $sys_base, $build,
						   @kernel_directory);
	}
	return $build;
}
# Capture kernel coverage data from the gcov directory, staging it in a
# temporary directory first.
sub kernel_capture()
{
	my $data_dir;
	my $build = $base_directory;
	# sysfs-based kernels need the -k directories rebased against
	# the sysfs layout before copying
	if ($gcov_gkv == $GKV_SYS) {
		$build = adjust_kernel_dir($gcov_dir, $build);
	}
	$data_dir = copy_gcov_dir($gcov_dir, @kernel_directory);
	kernel_capture_from_dir($data_dir, $gcov_gkv, $build);
}
#
# package_capture()
#
# Capture coverage data from a package of unprocessed coverage data files
# as generated by lcov --to-package.
#
sub package_capture()
{
	my $dir;
	my $build;
	my $gkv;
	# Unpack the package into a temporary directory and read its
	# optional metadata (build directory, gcov kernel version)
	($dir, $build, $gkv) = get_package($from_package);
	# Check for build directory
	if (defined($base_directory)) {
		if (defined($build)) {
			info("Using build directory specified by -b.\n");
		}
		$build = $base_directory;
	}
	# Do the actual capture
	if (defined($gkv)) {
		# Kernel data: rebase -k directories for sysfs layout
		if ($gkv == $GKV_SYS) {
			$build = adjust_kernel_dir($dir, $build);
		}
		if (@kernel_directory) {
			$dir = copy_gcov_dir($dir, @kernel_directory);
		}
		kernel_capture_from_dir($dir, $gkv, $build);
	} else {
		# Build directory needs to be passed to geninfo
		$base_directory = $build;
		lcov_geninfo($dir);
	}
}
#
# info(printf_parameter)
#
# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
# is not set.
#
sub info(@)
{
	# Suppress all progress output in quiet mode
	return if ($quiet);
	if ($to_file) {
		# Trace data goes to a file; STDOUT is free for messages
		printf(@_);
	} else {
		# Don't interfere with the .info output on STDOUT
		printf(STDERR @_);
	}
}
#
# create_temp_dir()
#
# Create a temporary directory and return its path.
#
# Die on error.
#
sub create_temp_dir()
{
	# Honor a user-specified temp directory when available
	my %options = (CLEANUP => 1);
	$options{"DIR"} = $tmp_dir if (defined($tmp_dir));
	my $dir = tempdir(%options);
	if (!defined($dir)) {
		die("ERROR: cannot create temporary directory\n");
	}
	# Remember the directory for explicit cleanup on exit
	push(@temp_dirs, $dir);
	return $dir;
}
#
# br_taken_to_num(taken)
#
# Convert a branch taken value .info format to number format.
#
sub br_taken_to_num($)
{
	my ($taken) = @_;
	# '-' (branch not executed) encodes as 0; real counts are
	# shifted up by one so they remain distinguishable
	return ($taken eq '-') ? 0 : $taken + 1;
}
#
# br_num_to_taken(taken)
#
# Convert a branch taken value in number format to .info format.
#
sub br_num_to_taken($)
{
	my ($taken) = @_;
	# 0 decodes back to '-' (branch not executed); everything else
	# is shifted down by one to undo br_taken_to_num()
	return ($taken == 0) ? '-' : $taken - 1;
}
#
# br_taken_add(taken1, taken2)
#
# Return the result of taken1 + taken2 for 'branch taken' values.
#
sub br_taken_add($$)
{
	my ($v1, $v2) = @_;
	# An undefined operand is neutral: return the other one
	return $v1 if (!defined($v2));
	return $v2 if (!defined($v1));
	# '-' (never executed) contributes nothing to the sum
	return $v1 if ($v2 eq '-');
	return $v2 if ($v1 eq '-');
	return $v1 + $v2;
}
#
# br_taken_sub(taken1, taken2)
#
# Return the result of taken1 - taken2 for 'branch taken' values. Return 0
# if the result would become negative.
#
sub br_taken_sub($$)
{
	my ($v1, $v2) = @_;
	# Nothing to subtract: result is the minuend unchanged
	return $v1 if (!defined($v2));
	# Cannot subtract from an undefined value
	return undef if (!defined($v1));
	# '-' on either side leaves the minuend untouched
	return $v1 if ($v1 eq '-');
	return $v1 if ($v2 eq '-');
	# Clamp at zero so taken counts never go negative
	return ($v2 > $v1) ? 0 : $v1 - $v2;
}
#
#
# br_ivec_len(vector)
#
# Return the number of entries in the branch coverage vector.
#
sub br_ivec_len($)
{
	my ($vec) = @_;
	return 0 if (!defined($vec));
	# Total bits divided by bits-per-field and fields-per-entry
	return length($vec) * 8 / ($BR_VEC_WIDTH * $BR_VEC_ENTRIES);
}
#
# br_ivec_push(vector, block, branch, taken)
#
# Add an entry to the branch coverage vector. If an entry with the same
# branch ID already exists, add the corresponding taken values.
#
sub br_ivec_push($$$$)
{
	my ($vec, $block, $branch, $taken) = @_;
	my $offset;
	my $num = br_ivec_len($vec);
	my $i;
	$vec = "" if (!defined($vec));
	# Check if branch already exists in vector
	for ($i = 0; $i < $num; $i++) {
		my ($v_block, $v_branch, $v_taken) = br_ivec_get($vec, $i);
		next if ($v_block != $block || $v_branch != $branch);
		# Add taken counts
		$taken = br_taken_add($taken, $v_taken);
		last;
	}
	# If no match was found above, $i equals $num and a new entry
	# is appended; otherwise the matching entry at $i is rewritten
	$offset = $i * $BR_VEC_ENTRIES;
	$taken = br_taken_to_num($taken);
	# Add to vector
	vec($vec, $offset + $BR_BLOCK, $BR_VEC_WIDTH) = $block;
	vec($vec, $offset + $BR_BRANCH, $BR_VEC_WIDTH) = $branch;
	vec($vec, $offset + $BR_TAKEN, $BR_VEC_WIDTH) = $taken;
	return $vec;
}
#
# br_ivec_get(vector, number)
#
# Return an entry from the branch coverage vector.
#
sub br_ivec_get($$)
{
	my ($vec, $num) = @_;
	my $block;
	my $branch;
	my $taken;
	# Entries are fixed-width bit fields laid out by br_ivec_push()
	my $offset = $num * $BR_VEC_ENTRIES;
	# Retrieve data from vector
	$block = vec($vec, $offset + $BR_BLOCK, $BR_VEC_WIDTH);
	$branch = vec($vec, $offset + $BR_BRANCH, $BR_VEC_WIDTH);
	$taken = vec($vec, $offset + $BR_TAKEN, $BR_VEC_WIDTH);
	# Decode taken value from an integer
	$taken = br_num_to_taken($taken);
	return ($block, $branch, $taken);
}
#
# get_br_found_and_hit(brcount)
#
# Return (br_found, br_hit) for brcount
#
sub get_br_found_and_hit($)
{
	my ($brcount) = @_;
	my $br_found = 0;
	my $br_hit = 0;
	# Walk every branch entry of every line and tally totals
	foreach my $line (keys(%{$brcount})) {
		my $brdata = $brcount->{$line};
		my $num = br_ivec_len($brdata);
		for (my $i = 0; $i < $num; $i++) {
			my $taken = (br_ivec_get($brdata, $i))[2];
			$br_found++;
			$br_hit++ if ($taken ne "-" && $taken > 0);
		}
	}
	return ($br_found, $br_hit);
}
#
# read_info_file(info_filename)
#
# Read in the contents of the .info file specified by INFO_FILENAME. Data will
# be returned as a reference to a hash containing the following mappings:
#
# %result: for each filename found in file -> \%data
#
# %data: "test" -> \%testdata
# "sum" -> \%sumcount
# "func" -> \%funcdata
# "found" -> $lines_found (number of instrumented lines found in file)
# "hit" -> $lines_hit (number of executed lines in file)
# "check" -> \%checkdata
# "testfnc" -> \%testfncdata
# "sumfnc" -> \%sumfnccount
# "testbr" -> \%testbrdata
# "sumbr" -> \%sumbrcount
#
# %testdata : name of test affecting this file -> \%testcount
# %testfncdata: name of test affecting this file -> \%testfnccount
# %testbrdata: name of test affecting this file -> \%testbrcount
#
# %testcount : line number -> execution count for a single test
# %testfnccount: function name -> execution count for a single test
# %testbrcount : line number -> branch coverage data for a single test
# %sumcount : line number -> execution count for all tests
# %sumfnccount : function name -> execution count for all tests
# %sumbrcount : line number -> branch coverage data for all tests
# %funcdata : function name -> line number
# %checkdata : line number -> checksum of source code line
# $brdata : vector of items: block, branch, taken
#
# Note that .info file sections referring to the same file and test name
# will automatically be combined by adding all execution counts.
#
# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
# is compressed using GZIP. If available, GUNZIP will be used to decompress
# this file.
#
# Die on error.
#
sub read_info_file($)
{
	my $tracefile = $_[0];		# Name of tracefile
	my %result;			# Resulting hash: file -> data
	my $data;			# Data handle for current entry
	my $testdata;			# " "
	my $testcount;			# " "
	my $sumcount;			# " "
	my $funcdata;			# " "
	my $checkdata;			# " "
	my $testfncdata;		# Per-test function call counts
	my $testfnccount;		# " "
	my $sumfnccount;		# Function call counts over all tests
	my $testbrdata;			# Per-test branch coverage data
	my $testbrcount;		# " "
	my $sumbrcount;			# Branch coverage over all tests
	my $line;			# Current line read from .info file
	my $testname;			# Current test name
	my $filename;			# Current filename
	my $hitcount;			# Count for lines hit
	my $count;			# Execution count of current line
	my $negative;			# If set, warn about negative counts
	my $changed_testname;		# If set, warn about changed testname
	my $line_checksum;		# Checksum of current line
	local *INFO_HANDLE;		# Filehandle for .info file
	info("Reading tracefile $tracefile\n");
	# Check if file exists and is readable
	stat($_[0]);
	if (!(-r _))
	{
		die("ERROR: cannot read file $_[0]!\n");
	}
	# Check if this is really a plain file
	if (!(-f _))
	{
		die("ERROR: not a plain file: $_[0]!\n");
	}
	# Check for .gz extension
	if ($_[0] =~ /\.gz$/)
	{
		# Check for availability of GZIP tool
		system_no_output(1, "gunzip" ,"-h")
			and die("ERROR: gunzip command not available!\n");
		# Check integrity of compressed file
		system_no_output(1, "gunzip", "-t", $_[0])
			and die("ERROR: integrity check failed for ".
				"compressed file $_[0]!\n");
		# Open compressed file
		open(INFO_HANDLE, "gunzip -c $_[0]|")
			or die("ERROR: cannot start gunzip to decompress ".
			       "file $_[0]!\n");
	}
	else
	{
		# Open decompressed file
		open(INFO_HANDLE, $_[0])
			or die("ERROR: cannot read file $_[0]!\n");
	}
	# Records seen before the first TN: line belong to the default
	# (empty) test name
	$testname = "";
	while (<INFO_HANDLE>)
	{
		chomp($_);
		$line = $_;
		# Switch statement
		foreach ($line)
		{
			/^TN:([^,]*)(,diff)?/ && do
			{
				# Test name information found
				$testname = defined($1) ? $1 : "";
				# Non-word characters are replaced; remember
				# to warn the user once at the end
				if ($testname =~ s/\W/_/g)
				{
					$changed_testname = 1;
				}
				$testname .= $2 if (defined($2));
				last;
			};
			/^[SK]F:(.*)/ && do
			{
				# Filename information found
				# Retrieve data for new entry
				$filename = $1;
				$data = $result{$filename};
				($testdata, $sumcount, $funcdata, $checkdata,
				 $testfncdata, $sumfnccount, $testbrdata,
				 $sumbrcount) =
					get_info_entry($data);
				if (defined($testname))
				{
					$testcount = $testdata->{$testname};
					$testfnccount = $testfncdata->{$testname};
					$testbrcount = $testbrdata->{$testname};
				}
				else
				{
					$testcount = {};
					$testfnccount = {};
					$testbrcount = {};
				}
				last;
			};
			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
			{
				# Fix negative counts
				$count = $2 < 0 ? 0 : $2;
				if ($2 < 0)
				{
					$negative = 1;
				}
				# Execution count found, add to structure
				# Add summary counts
				$sumcount->{$1} += $count;
				# Add test-specific counts
				if (defined($testname))
				{
					$testcount->{$1} += $count;
				}
				# Store line checksum if available
				if (defined($3))
				{
					$line_checksum = substr($3, 1);
					# Does it match a previous definition
					if (defined($checkdata->{$1}) &&
					    ($checkdata->{$1} ne
					     $line_checksum))
					{
						die("ERROR: checksum mismatch ".
						    "at $filename:$1\n");
					}
					$checkdata->{$1} = $line_checksum;
				}
				last;
			};
			/^FN:(\d+),([^,]+)/ && do
			{
				# Function data found, add to structure
				$funcdata->{$2} = $1;
				# Also initialize function call data
				if (!defined($sumfnccount->{$2})) {
					$sumfnccount->{$2} = 0;
				}
				if (defined($testname))
				{
					if (!defined($testfnccount->{$2})) {
						$testfnccount->{$2} = 0;
					}
				}
				last;
			};
			/^FNDA:(\d+),([^,]+)/ && do
			{
				# Function call count found, add to structure
				# Add summary counts
				$sumfnccount->{$2} += $1;
				# Add test-specific counts
				if (defined($testname))
				{
					$testfnccount->{$2} += $1;
				}
				last;
			};
			/^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do {
				# Branch coverage data found
				my ($line, $block, $branch, $taken) =
				   ($1, $2, $3, $4);
				$sumbrcount->{$line} =
					br_ivec_push($sumbrcount->{$line},
						     $block, $branch, $taken);
				# Add test-specific counts
				if (defined($testname)) {
					$testbrcount->{$line} =
						br_ivec_push(
							$testbrcount->{$line},
							$block, $branch,
							$taken);
				}
				last;
			};
			/^end_of_record/ && do
			{
				# Found end of section marker
				if ($filename)
				{
					# Store current section data
					if (defined($testname))
					{
						$testdata->{$testname} =
							$testcount;
						$testfncdata->{$testname} =
							$testfnccount;
						$testbrdata->{$testname} =
							$testbrcount;
					}
					set_info_entry($data, $testdata,
						       $sumcount, $funcdata,
						       $checkdata, $testfncdata,
						       $sumfnccount,
						       $testbrdata,
						       $sumbrcount);
					$result{$filename} = $data;
					last;
				}
			};
			# default
			last;
		}
	}
	close(INFO_HANDLE);
	# Calculate hit and found values for lines and functions of each file
	foreach $filename (keys(%result))
	{
		$data = $result{$filename};
		($testdata, $sumcount, undef, undef, $testfncdata,
		 $sumfnccount, $testbrdata, $sumbrcount) =
			get_info_entry($data);
		# Filter out empty files
		if (scalar(keys(%{$sumcount})) == 0)
		{
			delete($result{$filename});
			next;
		}
		# Filter out empty test cases
		foreach $testname (keys(%{$testdata}))
		{
			if (!defined($testdata->{$testname}) ||
			    scalar(keys(%{$testdata->{$testname}})) == 0)
			{
				delete($testdata->{$testname});
				delete($testfncdata->{$testname});
			}
		}
		$data->{"found"} = scalar(keys(%{$sumcount}));
		$hitcount = 0;
		foreach (keys(%{$sumcount}))
		{
			if ($sumcount->{$_} > 0) { $hitcount++; }
		}
		$data->{"hit"} = $hitcount;
		# Get found/hit values for function call data
		$data->{"f_found"} = scalar(keys(%{$sumfnccount}));
		$hitcount = 0;
		foreach (keys(%{$sumfnccount})) {
			if ($sumfnccount->{$_} > 0) {
				$hitcount++;
			}
		}
		$data->{"f_hit"} = $hitcount;
		# Get found/hit values for branch data
		{
			my ($br_found, $br_hit) = get_br_found_and_hit($sumbrcount);
			$data->{"b_found"} = $br_found;
			$data->{"b_hit"} = $br_hit;
		}
	}
	if (scalar(keys(%result)) == 0)
	{
		die("ERROR: no valid records found in tracefile $tracefile\n");
	}
	if ($negative)
	{
		warn("WARNING: negative counts found in tracefile ".
		     "$tracefile\n");
	}
	if ($changed_testname)
	{
		warn("WARNING: invalid characters removed from testname in ".
		     "tracefile $tracefile\n");
	}
	return(\%result);
}
#
# get_info_entry(hash_ref)
#
# Retrieve data from an entry of the structure generated by read_info_file().
# Return a list of references to hashes:
# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
# ref, testfncdata hash ref, sumfnccount hash ref, testbrdata hash ref,
# sumbrcount hash ref, lines found, lines hit, functions found,
# functions hit, branches found, branches hit)
#
sub get_info_entry($)
{
	my ($entry) = @_;
	# Unpack the entry hash in the documented order; missing keys
	# simply yield undef
	return ($entry->{"test"}, $entry->{"sum"}, $entry->{"func"},
		$entry->{"check"}, $entry->{"testfnc"}, $entry->{"sumfnc"},
		$entry->{"testbr"}, $entry->{"sumbr"}, $entry->{"found"},
		$entry->{"hit"}, $entry->{"f_found"}, $entry->{"f_hit"},
		$entry->{"b_found"}, $entry->{"b_hit"});
}
#
# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
# checkdata_ref, testfncdata_ref, sumfcncount_ref,
# testbrdata_ref, sumbrcount_ref[,lines_found,
# lines_hit, f_found, f_hit, $b_found, $b_hit])
#
# Update the hash referenced by HASH_REF with the provided data references.
#
sub set_info_entry($$$$$$$$$;$$$$$$)
{
	my ($data_ref, @values) = @_;
	# The first eight data references are always stored
	foreach my $key ("test", "sum", "func", "check", "testfnc",
			 "sumfnc", "testbr", "sumbr") {
		$data_ref->{$key} = shift(@values);
	}
	# Summary statistics are optional and only stored when provided
	foreach my $key ("found", "hit", "f_found", "f_hit", "b_found",
			 "b_hit") {
		my $value = shift(@values);
		$data_ref->{$key} = $value if (defined($value));
	}
}
#
# add_counts(data1_ref, data2_ref)
#
# DATA1_REF and DATA2_REF are references to hashes containing a mapping
#
# line number -> execution count
#
# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
# is a reference to a hash containing the combined mapping in which
# execution counts are added.
#
sub add_counts($$)
{
	my ($counts1, $counts2) = @_;
	my %merged;
	my $found = 0;
	my $hit = 0;
	# Take all lines from the first hash, adding counts for lines
	# that appear in both
	foreach my $line (keys(%{$counts1})) {
		my $sum = $counts1->{$line};
		$sum += $counts2->{$line} if (defined($counts2->{$line}));
		$merged{$line} = $sum;
	}
	# Copy lines that only appear in the second hash
	foreach my $line (keys(%{$counts2})) {
		$merged{$line} = $counts2->{$line}
			if (!defined($merged{$line}));
	}
	# Derive found/hit statistics from the merged result
	foreach my $line (keys(%merged)) {
		$found++;
		$hit++ if ($merged{$line} > 0);
	}
	return (\%merged, $found, $hit);
}
#
# merge_checksums(ref1, ref2, filename)
#
# REF1 and REF2 are references to hashes containing a mapping
#
# line number -> checksum
#
# Merge checksum lists defined in REF1 and REF2 and return reference to
# resulting hash. Die if a checksum for a line is defined in both hashes
# but does not match.
#
sub merge_checksums($$$)
{
	my ($ref1, $ref2, $filename) = @_;
	my %merged;
	# Conflicting checksums for the same line are fatal - the two
	# tracefiles were generated from different source versions
	foreach my $line (keys(%{$ref1})) {
		if (defined($ref2->{$line}) &&
		    $ref1->{$line} ne $ref2->{$line}) {
			die("ERROR: checksum mismatch at $filename:$line\n");
		}
		$merged{$line} = $ref1->{$line};
	}
	foreach my $line (keys(%{$ref2})) {
		$merged{$line} = $ref2->{$line};
	}
	return \%merged;
}
#
# merge_func_data(funcdata1, funcdata2, filename)
#
sub merge_func_data($$$)
{
	my ($funcdata1, $funcdata2, $filename) = @_;
	my %merged;
	%merged = %{$funcdata1} if (defined($funcdata1));
	foreach my $func (keys(%{$funcdata2})) {
		my $new_line = $funcdata2->{$func};
		# Same function recorded at different lines: keep the
		# first definition and warn
		if (defined($merged{$func}) && $merged{$func} != $new_line) {
			warn("WARNING: function data mismatch at ".
			     "$filename:$new_line\n");
			next;
		}
		$merged{$func} = $new_line;
	}
	return \%merged;
}
#
# add_fnccount(fnccount1, fnccount2)
#
# Add function call count data. Return list (fnccount_added, f_found, f_hit)
#
sub add_fnccount($$)
{
	my ($fnccount1, $fnccount2) = @_;
	my %sum;
	my $f_hit = 0;
	# Start from the first hash (if any) and accumulate the
	# second one's call counts on top
	%sum = %{$fnccount1} if (defined($fnccount1));
	foreach my $function (keys(%{$fnccount2})) {
		$sum{$function} += $fnccount2->{$function};
	}
	foreach my $function (keys(%sum)) {
		$f_hit++ if ($sum{$function} > 0);
	}
	return (\%sum, scalar(keys(%sum)), $f_hit);
}
#
# add_testfncdata(testfncdata1, testfncdata2)
#
# Add function call count data for several tests. Return reference to
# added_testfncdata.
#
sub add_testfncdata($$)
{
	my ($testfncdata1, $testfncdata2) = @_;
	my %merged;
	foreach my $testname (keys(%{$testfncdata1})) {
		if (defined($testfncdata2->{$testname})) {
			# Function call count data for this testname
			# exists in both data sets: merge
			my ($fnccount) = add_fnccount(
				$testfncdata1->{$testname},
				$testfncdata2->{$testname});
			$merged{$testname} = $fnccount;
		} else {
			# Unique to data set 1: copy
			$merged{$testname} = $testfncdata1->{$testname};
		}
	}
	# Copy count data for testnames unique to data set 2
	foreach my $testname (keys(%{$testfncdata2})) {
		$merged{$testname} = $testfncdata2->{$testname}
			if (!defined($merged{$testname}));
	}
	return \%merged;
}
#
# brcount_to_db(brcount)
#
# Convert brcount data to the following format:
#
# db: line number -> block hash
# block hash: block number -> branch hash
# branch hash: branch number -> taken value
#
sub brcount_to_db($)
{
	my ($brcount) = @_;
	my $db;
	# Unpack each line's branch vector into the nested hash form:
	# line -> block -> branch -> taken
	foreach my $line (keys(%{$brcount})) {
		my $brdata = $brcount->{$line};
		my $num = br_ivec_len($brdata);
		for (my $i = 0; $i < $num; $i++) {
			my ($block, $branch, $taken) =
				br_ivec_get($brdata, $i);
			$db->{$line}->{$block}->{$branch} = $taken;
		}
	}
	return $db;
}
#
# db_to_brcount(db)
#
# Convert branch coverage data back to brcount format.
#
sub db_to_brcount($)
{
	my ($db) = @_;
	my $brcount = {};
	my $br_found = 0;
	my $br_hit = 0;
	# Rebuild each line's branch vector in numerical order of
	# lines, blocks and branches, counting totals along the way
	foreach my $line (sort({$a <=> $b} keys(%{$db}))) {
		my $ldata = $db->{$line};
		my $brdata;
		foreach my $block (sort({$a <=> $b} keys(%{$ldata}))) {
			my $bdata = $ldata->{$block};
			foreach my $branch (sort({$a <=> $b}
						 keys(%{$bdata}))) {
				my $taken = $bdata->{$branch};
				$br_found++;
				$br_hit++ if ($taken ne "-" && $taken > 0);
				$brdata = br_ivec_push($brdata, $block,
						       $branch, $taken);
			}
		}
		$brcount->{$line} = $brdata;
	}
	return ($brcount, $br_found, $br_hit);
}
# combine_brcount(brcount1, brcount2, type)
#
# If add is BR_ADD, add branch coverage data and return list (brcount_added,
# br_found, br_hit). If add is BR_SUB, subtract the taken values of brcount2
# from brcount1 and return (brcount_sub, br_found, br_hit).
#
sub combine_brcount($$$)
{
	my ($brcount1, $brcount2, $type) = @_;
	my $line;
	my $block;
	my $branch;
	my $taken;
	my $db;
	my $br_found = 0;
	my $br_hit = 0;
	my $result;
	# Convert branches from first count to database
	$db = brcount_to_db($brcount1);
	# Combine values from database and second count
	foreach $line (keys(%{$brcount2})) {
		my $brdata = $brcount2->{$line};
		my $num = br_ivec_len($brdata);
		my $i;
		for ($i = 0; $i < $num; $i++) {
			($block, $branch, $taken) = br_ivec_get($brdata, $i);
			my $new_taken = $db->{$line}->{$block}->{$branch};
			if ($type == $BR_ADD) {
				$new_taken = br_taken_add($new_taken, $taken);
			} elsif ($type == $BR_SUB) {
				$new_taken = br_taken_sub($new_taken, $taken);
			}
			# br_taken_sub() may return undef (nothing to
			# subtract from); keep the old value in that case
			$db->{$line}->{$block}->{$branch} = $new_taken
				if (defined($new_taken));
		}
	}
	# Convert database back to brcount format
	($result, $br_found, $br_hit) = db_to_brcount($db);
	return ($result, $br_found, $br_hit);
}
#
# add_testbrdata(testbrdata1, testbrdata2)
#
# Add branch coverage data for several tests. Return reference to
# added_testbrdata.
#
sub add_testbrdata($$)
{
	my ($testbrdata1, $testbrdata2) = @_;
	my %merged;
	foreach my $testname (keys(%{$testbrdata1})) {
		if (defined($testbrdata2->{$testname})) {
			# Branch coverage data for this testname exists
			# in both data sets: add
			my ($brcount) = combine_brcount(
				$testbrdata1->{$testname},
				$testbrdata2->{$testname}, $BR_ADD);
			$merged{$testname} = $brcount;
		} else {
			# Unique to data set 1: copy
			$merged{$testname} = $testbrdata1->{$testname};
		}
	}
	# Copy branch data for testnames unique to data set 2
	foreach my $testname (keys(%{$testbrdata2})) {
		$merged{$testname} = $testbrdata2->{$testname}
			if (!defined($merged{$testname}));
	}
	return \%merged;
}
#
# combine_info_entries(entry_ref1, entry_ref2, filename)
#
# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
# Return reference to resulting hash.
#
sub combine_info_entries($$$)
{
	my $entry1 = $_[0];	# Reference to hash containing first entry
	my $testdata1;
	my $sumcount1;
	my $funcdata1;
	my $checkdata1;
	my $testfncdata1;
	my $sumfnccount1;
	my $testbrdata1;
	my $sumbrcount1;
	my $entry2 = $_[1];	# Reference to hash containing second entry
	my $testdata2;
	my $sumcount2;
	my $funcdata2;
	my $checkdata2;
	my $testfncdata2;
	my $sumfnccount2;
	my $testbrdata2;
	my $sumbrcount2;
	my %result;		# Hash containing combined entry
	my %result_testdata;
	my $result_sumcount = {};
	my $result_funcdata;
	my $result_testfncdata;
	my $result_sumfnccount;
	my $result_testbrdata;
	my $result_sumbrcount;
	my $lines_found;
	my $lines_hit;
	my $f_found;
	my $f_hit;
	my $br_found;
	my $br_hit;
	my $testname;
	my $filename = $_[2];
	# Retrieve data
	($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
	 $sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1);
	($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
	 $sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2);
	# Merge checksums
	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
	# Combine funcdata
	$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);
	# Combine function call count data
	$result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
	($result_sumfnccount, $f_found, $f_hit) =
		add_fnccount($sumfnccount1, $sumfnccount2);
	# Combine branch coverage data
	$result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2);
	($result_sumbrcount, $br_found, $br_hit) =
		combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD);
	# Combine testdata
	foreach $testname (keys(%{$testdata1}))
	{
		if (defined($testdata2->{$testname}))
		{
			# testname is present in both entries, requires
			# combination
			($result_testdata{$testname}) =
				add_counts($testdata1->{$testname},
					   $testdata2->{$testname});
		}
		else
		{
			# testname only present in entry1, add to result
			$result_testdata{$testname} = $testdata1->{$testname};
		}
		# update sum count hash
		($result_sumcount, $lines_found, $lines_hit) =
			add_counts($result_sumcount,
				   $result_testdata{$testname});
	}
	foreach $testname (keys(%{$testdata2}))
	{
		# Skip testnames already covered by previous iteration
		if (defined($testdata1->{$testname})) { next; }
		# testname only present in entry2, add to result hash
		$result_testdata{$testname} = $testdata2->{$testname};
		# update sum count hash
		($result_sumcount, $lines_found, $lines_hit) =
			add_counts($result_sumcount,
				   $result_testdata{$testname});
	}
	# Calculate resulting sumcount
	# Note: lines_found/lines_hit carry the totals of the last
	# add_counts() call above, which includes all tests
	# Store result
	set_info_entry(\%result, \%result_testdata, $result_sumcount,
		       $result_funcdata, $checkdata1, $result_testfncdata,
		       $result_sumfnccount, $result_testbrdata,
		       $result_sumbrcount, $lines_found, $lines_hit,
		       $f_found, $f_hit, $br_found, $br_hit);
	return(\%result);
}
#
# combine_info_files(info_ref1, info_ref2)
#
# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
# reference to resulting hash.
#
sub combine_info_files($$)
{
	my %merged = %{$_[0]};
	my %other = %{$_[1]};
	foreach my $filename (keys(%other)) {
		if ($merged{$filename}) {
			# Both data sets contain an entry for this
			# file: combine them
			$merged{$filename} = combine_info_entries(
				$merged{$filename}, $other{$filename},
				$filename);
		} else {
			# Entry only exists in the second set: copy it
			$merged{$filename} = $other{$filename};
		}
	}
	return(\%merged);
}
#
# add_traces()
#
sub add_traces()
{
	my $total_trace;
	my @result;
	local *INFO_HANDLE;
	info("Combining tracefiles.\n");
	# Fold all tracefiles into a single combined data set
	foreach my $tracefile (@add_tracefile) {
		my $current_trace = read_info_file($tracefile);
		$total_trace = $total_trace
			? combine_info_files($total_trace, $current_trace)
			: $current_trace;
	}
	# Write combined data
	if ($to_file) {
		info("Writing data to $output_filename\n");
		open(INFO_HANDLE, ">$output_filename")
			or die("ERROR: cannot write to $output_filename!\n");
		@result = write_info_file(*INFO_HANDLE, $total_trace);
		close(*INFO_HANDLE);
	} else {
		# No output file specified: write to standard output
		@result = write_info_file(*STDOUT, $total_trace);
	}
	return @result;
}
#
# write_info_file(filehandle, data)
#
sub write_info_file(*$)
{
	# Write the coverage data in $_[1] to the filehandle $_[0] using the
	# lcov tracefile format: one section per (testname, source file)
	# pair containing TN:/SF: headers, function records (FN:/FNDA:/
	# FNF:/FNH:), branch records (BRDA:/BRF:/BRH:) and line records
	# (DA:/LF:/LH:), each section terminated by "end_of_record".
	# Returns the overall totals as the list
	# (lines found, lines hit, functions found, functions hit,
	#  branches found, branches hit).
	local *INFO_HANDLE = $_[0];
	my %data = %{$_[1]};
	my $source_file;
	my $entry;
	my $testdata;
	my $sumcount;
	my $funcdata;
	my $checkdata;
	my $testfncdata;
	my $sumfnccount;
	my $testbrdata;
	my $sumbrcount;
	my $testname;
	my $line;
	my $func;
	my $testcount;
	my $testfnccount;
	my $testbrcount;
	my $found;
	my $hit;
	my $f_found;
	my $f_hit;
	my $br_found;
	my $br_hit;
	my $ln_total_found = 0;
	my $ln_total_hit = 0;
	my $fn_total_found = 0;
	my $fn_total_hit = 0;
	my $br_total_found = 0;
	my $br_total_hit = 0;
	foreach $source_file (sort(keys(%data)))
	{
		$entry = $data{$source_file};
		($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
		 $sumfnccount, $testbrdata, $sumbrcount, $found, $hit,
		 $f_found, $f_hit, $br_found, $br_hit) =
			get_info_entry($entry);
		# Add to totals
		$ln_total_found += $found;
		$ln_total_hit += $hit;
		$fn_total_found += $f_found;
		$fn_total_hit += $f_hit;
		$br_total_found += $br_found;
		$br_total_hit += $br_hit;
		# One output section per testname that contributed data for
		# this source file
		foreach $testname (sort(keys(%{$testdata})))
		{
			$testcount = $testdata->{$testname};
			$testfnccount = $testfncdata->{$testname};
			$testbrcount = $testbrdata->{$testname};
			$found = 0;
			$hit = 0;
			print(INFO_HANDLE "TN:$testname\n");
			print(INFO_HANDLE "SF:$source_file\n");
			# Write function related data, sorted by the line
			# number at which each function starts
			foreach $func (
				sort({$funcdata->{$a} <=> $funcdata->{$b}}
				     keys(%{$funcdata})))
			{
				print(INFO_HANDLE "FN:".$funcdata->{$func}.
				      ",$func\n");
			}
			foreach $func (keys(%{$testfnccount})) {
				print(INFO_HANDLE "FNDA:".
				      $testfnccount->{$func}.
				      ",$func\n");
			}
			($f_found, $f_hit) =
				get_func_found_and_hit($testfnccount);
			print(INFO_HANDLE "FNF:$f_found\n");
			print(INFO_HANDLE "FNH:$f_hit\n");
			# Write branch related data
			$br_found = 0;
			$br_hit = 0;
			foreach $line (sort({$a <=> $b}
					    keys(%{$testbrcount}))) {
				my $brdata = $testbrcount->{$line};
				my $num = br_ivec_len($brdata);
				my $i;
				for ($i = 0; $i < $num; $i++) {
					my ($block, $branch, $taken) =
						br_ivec_get($brdata, $i);
					# '-' means the branch's basic block
					# was never executed
					print(INFO_HANDLE "BRDA:$line,$block,".
					      "$branch,$taken\n");
					$br_found++;
					$br_hit++ if ($taken ne '-' &&
						      $taken > 0);
				}
			}
			if ($br_found > 0) {
				print(INFO_HANDLE "BRF:$br_found\n");
				print(INFO_HANDLE "BRH:$br_hit\n");
			}
			# Write line related data; the per-line checksum is
			# only appended when checksumming is enabled
			foreach $line (sort({$a <=> $b} keys(%{$testcount})))
			{
				print(INFO_HANDLE "DA:$line,".
				      $testcount->{$line}.
				      (defined($checkdata->{$line}) &&
				       $checksum ?
				       ",".$checkdata->{$line} : "")."\n");
				$found++;
				if ($testcount->{$line} > 0)
				{
					$hit++;
				}
			}
			print(INFO_HANDLE "LF:$found\n");
			print(INFO_HANDLE "LH:$hit\n");
			print(INFO_HANDLE "end_of_record\n");
		}
	}
	return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit,
		$br_total_found, $br_total_hit);
}
#
# transform_pattern(pattern)
#
# Transform shell wildcard expression to equivalent PERL regular expression.
# Return transformed pattern.
#
sub transform_pattern($)
{
	# Convert the shell wildcard expression in $_[0] into an equivalent
	# Perl regular expression and return it.  All regex metacharacters
	# are escaped first, then the shell wildcards are mapped:
	#   ?  =>  (.)
	#   *  =>  (.*)
	my $regex = $_[0];

	# Escape every character that is special in a Perl regular
	# expression in a single pass; the backslash is part of the class
	# itself, so previously inserted escapes are never escaped twice.
	$regex =~ s/([\\\/\^\$\(\)\[\]\{\}\.\,\|\+\!])/\\$1/g;

	# Map shell wildcards onto capturing regex groups
	$regex =~ s/\*/\(\.\*\)/g;
	$regex =~ s/\?/\(\.\)/g;

	return $regex;
}
#
# extract()
#
sub extract()
{
	# Keep only those files in the data set read from $extract whose
	# names match at least one of the shell wildcard patterns given in
	# @ARGV; write the filtered result to $output_filename (or STDOUT)
	# and return the coverage totals reported by write_info_file().
	my $data = read_info_file($extract);
	my $extracted = 0;
	my @totals;
	local *INFO_HANDLE;

	# Shell wildcards need to be converted to Perl regular expressions
	my @regexes = map({ transform_pattern($_); } @ARGV);

	# Discard every file which does not match any pattern
	foreach my $filename (sort(keys(%{$data}))) {
		my $matched = 0;

		foreach my $regex (@regexes) {
			$matched ||= ($filename =~ (/^$regex$/));
		}
		if ($matched) {
			info("Extracting $filename\n");
			$extracted++;
		} else {
			delete($data->{$filename});
		}
	}

	# Write extracted data
	if ($to_file) {
		info("Extracted $extracted files\n");
		info("Writing data to $output_filename\n");
		open(INFO_HANDLE, ">$output_filename")
			or die("ERROR: cannot write to $output_filename!\n");
		@totals = write_info_file(*INFO_HANDLE, $data);
		close(*INFO_HANDLE);
	} else {
		@totals = write_info_file(*STDOUT, $data);
	}
	return @totals;
}
#
# remove()
#
#
# remove()
#
# Remove all files from the data set read from $remove whose names match one
# of the shell wildcard patterns specified in @ARGV.  Write the resulting
# data to $output_filename (or STDOUT) and return the coverage totals
# reported by write_info_file().
#
sub remove()
{
	my $data = read_info_file($remove);
	my $filename;
	my $match_found;
	my $pattern;
	my @pattern_list;
	my $removed = 0;
	my @result;
	local *INFO_HANDLE;
	# Need perlreg expressions instead of shell pattern
	@pattern_list = map({ transform_pattern($_); } @ARGV);
	# Filter out files that match the pattern
	foreach $filename (sort(keys(%{$data})))
	{
		$match_found = 0;
		foreach $pattern (@pattern_list)
		{
			# Anchor the pattern at both ends so that it must
			# match the complete filename, consistent with the
			# matching performed by extract().  Previously the
			# start was left unanchored, so a pattern could
			# unintentionally match a mere filename suffix.
			$match_found ||= ($filename =~ (/^$pattern$/));
		}
		if ($match_found)
		{
			delete($data->{$filename});
			info("Removing $filename\n"),
			$removed++;
		}
	}
	# Write data
	if ($to_file)
	{
		info("Deleted $removed files\n");
		info("Writing data to $output_filename\n");
		open(INFO_HANDLE, ">$output_filename")
			or die("ERROR: cannot write to $output_filename!\n");
		@result = write_info_file(*INFO_HANDLE, $data);
		close(*INFO_HANDLE);
	}
	else
	{
		@result = write_info_file(*STDOUT, $data);
	}
	return @result;
}
# get_prefix(max_width, max_percentage_too_long, path_list)
#
# Return a path prefix that satisfies the following requirements:
# - is shared by more paths in path_list than any other prefix
# - the percentage of paths which would exceed the given max_width length
# after applying the prefix does not exceed max_percentage_too_long
#
# If multiple prefixes satisfy all requirements, the longest prefix is
# returned. Return an empty string if no prefix could be found.
sub get_prefix($$@)
{
	my ($max_width, $max_long, @path_list) = @_;
	my $path;
	# Index constants for the per-prefix statistics array
	my $ENTRY_NUM = 0;	# Number of paths sharing this prefix
	my $ENTRY_LONG = 1;	# Number of those that would still be too long
	my %prefix;
	# Build prefix hash: for every path, record statistics for each of
	# its leading directory prefixes
	foreach $path (@path_list) {
		my ($v, $d, $f) = splitpath($path);
		my @dirs = splitdir($d);
		my $p_len = length($path);
		my $i;
		# Remove trailing '/'
		pop(@dirs) if ($dirs[scalar(@dirs) - 1] eq '');
		for ($i = 0; $i < scalar(@dirs); $i++) {
			my $subpath = catpath($v, catdir(@dirs[0..$i]), '');
			my $entry = $prefix{$subpath};
			$entry = [ 0, 0 ] if (!defined($entry));
			$entry->[$ENTRY_NUM]++;
			# Count paths that would still exceed max_width even
			# after this prefix was stripped (the -1 accounts for
			# the '/' separator that is stripped as well)
			if (($p_len - length($subpath) - 1) > $max_width) {
				$entry->[$ENTRY_LONG]++;
			}
			$prefix{$subpath} = $entry;
		}
	}
	# Find suitable prefix (sort descending by two keys: 1. number of
	# entries covered by a prefix, 2. length of prefix)
	foreach $path (sort {($prefix{$a}->[$ENTRY_NUM] ==
			      $prefix{$b}->[$ENTRY_NUM]) ?
				length($b) <=> length($a) :
				$prefix{$b}->[$ENTRY_NUM] <=>
				$prefix{$a}->[$ENTRY_NUM]}
			keys(%prefix)) {
		my ($num, $long) = @{$prefix{$path}};
		# Check for additional requirement: number of filenames
		# that would be too long may not exceed a certain percentage
		if ($long <= $num * $max_long / 100) {
			return $path;
		}
	}
	# No prefix satisfied all requirements
	return "";
}
#
# shorten_filename(filename, width)
#
# Truncate filename if it is longer than width characters.
#
sub shorten_filename($$)
{
	# Shorten $filename to at most $width characters by replacing its
	# middle part with '...'; names that already fit are returned
	# unchanged.
	my ($filename, $width) = @_;
	my $len = length($filename);

	return $filename if ($len <= $width);

	# Split the remaining budget (width minus the three dots) between
	# the head and the tail of the name
	my $tail = int(($width - 3) / 2);
	my $head = $width - 3 - $tail;

	return substr($filename, 0, $head).'...'.
	       substr($filename, $len - $tail);
}
#
# shorten_number(number, width)
#
# Format NUMBER right-aligned in at most WIDTH characters.  If the plain
# representation does not fit, the value is scaled down by factors of 1000
# and given a 'k' or 'M' suffix; '#' is returned if even that fails.
#
sub shorten_number($$)
{
	my ($number, $width) = @_;
	my $result = sprintf("%*d", $width, $number);
	return $result if (length($result) <= $width);
	# Try the value in thousands ('k' suffix).  Note: the previous code
	# re-checked the unscaled result before reformatting here - a check
	# that could never succeed; that dead statement has been removed.
	$number = $number / 1000;
	$result = sprintf("%*dk", $width - 1, $number);
	return $result if (length($result) <= $width);
	# Try the value in millions ('M' suffix)
	$number = $number / 1000;
	$result = sprintf("%*dM", $width - 1, $number);
	return $result if (length($result) <= $width);
	return '#';
}
sub shorten_rate($$)
{
	# Format the percentage in $rate so that it fits into $width
	# characters.  Precision is reduced step by step (one decimal
	# place, then none); '#' is returned if neither form fits.
	my ($rate, $width) = @_;

	foreach my $formatted (sprintf("%*.1f%%", $width - 3, $rate),
			       sprintf("%*d%%", $width - 1, $rate)) {
		return $formatted if (length($formatted) <= $width);
	}
	return "#";
}
#
# list()
#
sub list()
{
	# Print a per-file summary table for the tracefile named by $list:
	# one row per source file with line, function and branch coverage
	# rates and counts, followed by a totals row.  Column widths are
	# chosen so the table fits into $opt_list_width characters; a
	# common path prefix is factored out of the filenames unless
	# $opt_list_full_path is set.
	my $data = read_info_file($list);
	my $filename;
	my $found;
	my $hit;
	my $entry;
	my $fn_found;
	my $fn_hit;
	my $br_found;
	my $br_hit;
	my $total_found = 0;
	my $total_hit = 0;
	my $fn_total_found = 0;
	my $fn_total_hit = 0;
	my $br_total_found = 0;
	my $br_total_hit = 0;
	my $prefix;
	my $strlen = length("Filename");
	my $format;
	my $heading1;
	my $heading2;
	my @footer;
	my $barlen;
	my $rate;
	my $fnrate;
	my $brrate;
	my $lastpath;
	# Index constants for the field-width arrays below
	my $F_LN_NUM = 0;
	my $F_LN_RATE = 1;
	my $F_FN_NUM = 2;
	my $F_FN_RATE = 3;
	my $F_BR_NUM = 4;
	my $F_BR_RATE = 5;
	my @fwidth_narrow = (5, 5, 3, 5, 4, 5);
	my @fwidth_wide = (6, 5, 5, 5, 6, 5);
	my @fwidth = @fwidth_wide;
	my $w;
	my $max_width = $opt_list_width;
	my $max_long = $opt_list_truncate_max;
	my $fwidth_narrow_length;
	my $fwidth_wide_length;
	my $got_prefix = 0;
	my $root_prefix = 0;
	# Calculate total width of narrow fields
	$fwidth_narrow_length = 0;
	foreach $w (@fwidth_narrow) {
		$fwidth_narrow_length += $w + 1;
	}
	# Calculate total width of wide fields
	$fwidth_wide_length = 0;
	foreach $w (@fwidth_wide) {
		$fwidth_wide_length += $w + 1;
	}
	# Get common file path prefix
	$prefix = get_prefix($max_width - $fwidth_narrow_length, $max_long,
			     keys(%{$data}));
	$root_prefix = 1 if ($prefix eq rootdir());
	$got_prefix = 1 if (length($prefix) > 0);
	$prefix =~ s/\/$//;
	# Get longest filename length
	foreach $filename (keys(%{$data})) {
		if (!$opt_list_full_path) {
			# Shorten the name the same way it will be printed:
			# strip the common prefix, or fall back to the bare
			# filename when the prefix does not apply
			if (!$got_prefix || !$root_prefix &&
			    !($filename =~ s/^\Q$prefix\/\E//)) {
				my ($v, $d, $f) = splitpath($filename);
				$filename = $f;
			}
		}
		# Determine maximum length of entries
		if (length($filename) > $strlen) {
			$strlen = length($filename)
		}
	}
	if (!$opt_list_full_path) {
		my $blanks;
		$w = $fwidth_wide_length;
		# Check if all columns fit into max_width characters
		if ($strlen + $fwidth_wide_length > $max_width) {
			# Use narrow fields
			@fwidth = @fwidth_narrow;
			$w = $fwidth_narrow_length;
			if (($strlen + $fwidth_narrow_length) > $max_width) {
				# Truncate filenames at max width
				$strlen = $max_width - $fwidth_narrow_length;
			}
		}
		# Add some blanks between filename and fields if possible
		$blanks = int($strlen * 0.5);
		$blanks = 4 if ($blanks < 4);
		$blanks = 8 if ($blanks > 8);
		if (($strlen + $w + $blanks) < $max_width) {
			$strlen += $blanks;
		} else {
			$strlen = $max_width - $w;
		}
	}
	# Assemble the printf format string and the two heading lines,
	# column by column.
	# Filename
	$w = $strlen;
	$format = "%-${w}s|";
	$heading1 = sprintf("%*s|", $w, "");
	$heading2 = sprintf("%-*s|", $w, "Filename");
	$barlen = $w + 1;
	# Line coverage rate
	$w = $fwidth[$F_LN_RATE];
	$format .= "%${w}s ";
	$heading1 .= sprintf("%-*s |", $w + $fwidth[$F_LN_NUM],
			     "Lines");
	$heading2 .= sprintf("%-*s ", $w, "Rate");
	$barlen += $w + 1;
	# Number of lines
	$w = $fwidth[$F_LN_NUM];
	$format .= "%${w}s|";
	$heading2 .= sprintf("%*s|", $w, "Num");
	$barlen += $w + 1;
	# Function coverage rate
	$w = $fwidth[$F_FN_RATE];
	$format .= "%${w}s ";
	$heading1 .= sprintf("%-*s|", $w + $fwidth[$F_FN_NUM] + 1,
			     "Functions");
	$heading2 .= sprintf("%-*s ", $w, "Rate");
	$barlen += $w + 1;
	# Number of functions
	$w = $fwidth[$F_FN_NUM];
	$format .= "%${w}s|";
	$heading2 .= sprintf("%*s|", $w, "Num");
	$barlen += $w + 1;
	# Branch coverage rate
	$w = $fwidth[$F_BR_RATE];
	$format .= "%${w}s ";
	$heading1 .= sprintf("%-*s", $w + $fwidth[$F_BR_NUM] + 1,
			     "Branches");
	$heading2 .= sprintf("%-*s ", $w, "Rate");
	$barlen += $w + 1;
	# Number of branches
	$w = $fwidth[$F_BR_NUM];
	$format .= "%${w}s";
	$heading2 .= sprintf("%*s", $w, "Num");
	$barlen += $w;
	# Line end
	$format .= "\n";
	$heading1 .= "\n";
	$heading2 .= "\n";
	# Print heading
	print($heading1);
	print($heading2);
	print(("="x$barlen)."\n");
	# Print per file information
	foreach $filename (sort(keys(%{$data})))
	{
		my @file_data;
		my $print_filename = $filename;
		$entry = $data->{$filename};
		if (!$opt_list_full_path) {
			my $p;
			$print_filename = $filename;
			if (!$got_prefix || !$root_prefix &&
			    !($print_filename =~ s/^\Q$prefix\/\E//)) {
				my ($v, $d, $f) = splitpath($filename);
				$p = catpath($v, $d, "");
				$p =~ s/\/$//;
				$print_filename = $f;
			} else {
				$p = $prefix;
			}
			# Print a "[directory/]" group header whenever the
			# directory changes
			if (!defined($lastpath) || $lastpath ne $p) {
				print("\n") if (defined($lastpath));
				$lastpath = $p;
				print("[$lastpath/]\n") if (!$root_prefix);
			}
			$print_filename = shorten_filename($print_filename,
							   $strlen);
		}
		(undef, undef, undef, undef, undef, undef, undef, undef,
		 $found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) =
			get_info_entry($entry);
		# Assume zero count if there is no function data for this file
		if (!defined($fn_found) || !defined($fn_hit)) {
			$fn_found = 0;
			$fn_hit = 0;
		}
		# Assume zero count if there is no branch data for this file
		if (!defined($br_found) || !defined($br_hit)) {
			$br_found = 0;
			$br_hit = 0;
		}
		# Add line coverage totals
		$total_found += $found;
		$total_hit += $hit;
		# Add function coverage totals
		$fn_total_found += $fn_found;
		$fn_total_hit += $fn_hit;
		# Add branch coverage totals
		$br_total_found += $br_found;
		$br_total_hit += $br_hit;
		# Determine line coverage rate for this file
		if ($found == 0) {
			$rate = "-";
		} else {
			$rate = shorten_rate(100 * $hit / $found,
					     $fwidth[$F_LN_RATE]);
		}
		# Determine function coverage rate for this file
		if (!defined($fn_found) || $fn_found == 0) {
			$fnrate = "-";
		} else {
			$fnrate = shorten_rate(100 * $fn_hit / $fn_found,
					       $fwidth[$F_FN_RATE]);
		}
		# Determine branch coverage rate for this file
		if (!defined($br_found) || $br_found == 0) {
			$brrate = "-";
		} else {
			$brrate = shorten_rate(100 * $br_hit / $br_found,
					       $fwidth[$F_BR_RATE]);
		}
		# Assemble line parameters
		push(@file_data, $print_filename);
		push(@file_data, $rate);
		push(@file_data, shorten_number($found, $fwidth[$F_LN_NUM]));
		push(@file_data, $fnrate);
		push(@file_data, shorten_number($fn_found, $fwidth[$F_FN_NUM]));
		push(@file_data, $brrate);
		push(@file_data, shorten_number($br_found, $fwidth[$F_BR_NUM]));
		# Print assembled line
		printf($format, @file_data);
	}
	# Determine total line coverage rate
	if ($total_found == 0) {
		$rate = "-";
	} else {
		$rate = shorten_rate(100 * $total_hit / $total_found,
				     $fwidth[$F_LN_RATE]);
	}
	# Determine total function coverage rate
	if ($fn_total_found == 0) {
		$fnrate = "-";
	} else {
		$fnrate = shorten_rate(100 * $fn_total_hit / $fn_total_found,
				       $fwidth[$F_FN_RATE]);
	}
	# Determine total branch coverage rate
	if ($br_total_found == 0) {
		$brrate = "-";
	} else {
		$brrate = shorten_rate(100 * $br_total_hit / $br_total_found,
				       $fwidth[$F_BR_RATE]);
	}
	# Print separator
	print(("="x$barlen)."\n");
	# Assemble line parameters
	push(@footer, sprintf("%*s", $strlen, "Total:"));
	push(@footer, $rate);
	push(@footer, shorten_number($total_found, $fwidth[$F_LN_NUM]));
	push(@footer, $fnrate);
	push(@footer, shorten_number($fn_total_found, $fwidth[$F_FN_NUM]));
	push(@footer, $brrate);
	push(@footer, shorten_number($br_total_found, $fwidth[$F_BR_NUM]));
	# Print assembled line
	printf($format, @footer);
}
#
# get_common_filename(filename1, filename2)
#
# Check for filename components which are common to FILENAME1 and FILENAME2.
# Upon success, return
#
# (common, path1, path2)
#
# or 'undef' in case there are no such parts.
#
sub get_common_filename($$)
{
	# Compare the two filenames starting from their trailing path
	# components.  On success return the list (common, path1, path2)
	# where "common" is the shared trailing part and path1/path2 are
	# the differing leading parts; return undef if the names have no
	# trailing components in common.
	my @parts1 = split("/", $_[0]);
	my @parts2 = split("/", $_[1]);
	my @shared;

	# Strip identical components off the end of both lists, collecting
	# them (in order) in @shared
	while (@parts1 && @parts2 &&
	       ($parts1[$#parts1] eq $parts2[$#parts2])) {
		unshift(@shared, pop(@parts1));
		pop(@parts2);
	}

	# No common trailing components at all?
	return undef if (scalar(@shared) == 0);

	return (join("/", @shared), join("/", @parts1), join("/", @parts2));
}
#
# strip_directories($path, $depth)
#
# Remove DEPTH leading directory levels from PATH.
#
sub strip_directories($$)
{
	# Remove $depth leading directory components from $path and return
	# the result; the path is returned unmodified when no valid depth
	# is given.
	my ($path, $depth) = @_;

	return $path if (!defined($depth) || ($depth < 1));

	# Drop one leading "component/" per iteration
	foreach (1 .. $depth) {
		$path =~ s/^[^\/]*\/+(.*)$/$1/;
	}
	return $path;
}
#
# read_diff(filename)
#
# Read diff output from FILENAME to memory. The diff file has to follow the
# format generated by 'diff -u'. Returns a list of hash references:
#
# (mapping, path mapping)
#
# mapping: filename -> reference to line hash
# line hash: line number in new file -> corresponding line number in old file
#
# path mapping: filename -> old filename
#
# Die in case of error.
#
sub read_diff($)
{
	my $diff_file = $_[0];	# Name of diff file
	my %diff;		# Resulting mapping filename -> line hash
	my %paths;		# Resulting mapping old path -> new path
	my $mapping;		# Reference to current line hash
	my $line;		# Contents of current line
	my $num_old;		# Current line number in old file
	my $num_new;		# Current line number in new file
	my $file_old;		# Name of old file in diff section
	my $file_new;		# Name of new file in diff section
	my $filename;		# Name of common filename of diff section
	my $in_block = 0;	# Non-zero while we are inside a diff block
	local *HANDLE;		# File handle for reading the diff file
	info("Reading diff $diff_file\n");
	# Check if file exists and is readable
	stat($diff_file);
	if (!(-r _))
	{
		die("ERROR: cannot read file $diff_file!\n");
	}
	# Check if this is really a plain file
	if (!(-f _))
	{
		die("ERROR: not a plain file: $diff_file!\n");
	}
	# Check for .gz extension
	if ($diff_file =~ /\.gz$/)
	{
		# Check for availability of GZIP tool
		system_no_output(1, "gunzip", "-h")
			and die("ERROR: gunzip command not available!\n");
		# Check integrity of compressed file
		system_no_output(1, "gunzip", "-t", $diff_file)
			and die("ERROR: integrity check failed for ".
				"compressed file $diff_file!\n");
		# Open compressed file
		open(HANDLE, "gunzip -c $diff_file|")
			or die("ERROR: cannot start gunzip to decompress ".
			       "file $_[0]!\n");
	}
	else
	{
		# Open decompressed file
		open(HANDLE, $diff_file)
			or die("ERROR: cannot read file $_[0]!\n");
	}
	# Parse diff file line by line.  The foreach-with-regex-do idiom
	# below acts like a switch statement: the first pattern that
	# matches handles the line and "last" skips the remaining cases.
	while (<HANDLE>)
	{
		chomp($_);
		$line = $_;
		foreach ($line)
		{
			# Filename of old file:
			# --- <filename> <date>
			/^--- (\S+)/ && do
			{
				$file_old = strip_directories($1, $strip);
				last;
			};
			# Filename of new file:
			# +++ <filename> <date>
			/^\+\+\+ (\S+)/ && do
			{
				# Add last file to resulting hash
				if ($filename)
				{
					my %new_hash;
					$diff{$filename} = $mapping;
					$mapping = \%new_hash;
				}
				$file_new = strip_directories($1, $strip);
				$filename = $file_old;
				$paths{$filename} = $file_new;
				$num_old = 1;
				$num_new = 1;
				last;
			};
			# Start of diff block:
			# @@ -old_start,old_num, +new_start,new_num @@
			/^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@$/ && do
			{
			$in_block = 1;
			# Lines before this hunk are unchanged; map them 1:1
			while ($num_old < $1)
			{
				$mapping->{$num_new} = $num_old;
				$num_old++;
				$num_new++;
			}
			last;
			};
			# Unchanged line
			# <line starts with blank>
			/^ / && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$mapping->{$num_new} = $num_old;
				$num_old++;
				$num_new++;
				last;
			};
			# Line as seen in old file
			# <line starts with '-'>
			/^-/ && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$num_old++;
				last;
			};
			# Line as seen in new file
			# <line starts with '+'>
			/^\+/ && do
			{
				if ($in_block == 0)
				{
					last;
				}
				$num_new++;
				last;
			};
			# Empty line
			/^$/ && do
			{
				if ($in_block == 0)
				{
					last;
				}
				# Treated as an unchanged (context) line
				$mapping->{$num_new} = $num_old;
				$num_old++;
				$num_new++;
				last;
			};
		}
	}
	close(HANDLE);
	# Add final diff file section to resulting hash
	if ($filename)
	{
		$diff{$filename} = $mapping;
	}
	if (!%diff)
	{
		die("ERROR: no valid diff data found in $diff_file!\n".
		    "Make sure to use 'diff -u' when generating the diff ".
		    "file.\n");
	}
	return (\%diff, \%paths);
}
#
# apply_diff($count_data, $line_hash)
#
# Transform count data using a mapping of lines:
#
# $count_data: reference to hash: line number -> data
# $line_hash: reference to hash: line number new -> line number old
#
# Return a reference to transformed count data.
#
sub apply_diff($$)
{
	# Remap the line numbers of the count data in $_[0] according to
	# the new-line -> old-line mapping in $_[1] and return a reference
	# to the remapped hash.  Data for lines located after the last line
	# covered by the mapping is shifted by the final offset between new
	# and old numbering.
	my ($count_data, $line_hash) = @_;
	my %mapped;
	my $max_new = 0;	# Highest new line number in the mapping
	my $max_old = 0;	# Old line number corresponding to $max_new

	# Copy data for every line covered by the diff mapping
	foreach my $new_line (sort({$a <=> $b} keys(%{$line_hash}))) {
		my $old_line = $line_hash->{$new_line};

		$max_new = $new_line;
		$max_old = $old_line;
		if (defined($count_data->{$old_line})) {
			$mapped{$new_line} = $count_data->{$old_line};
		}
	}

	# Shift all data located after the last mapped line
	foreach my $old_line (sort({$a <=> $b} keys(%{$count_data}))) {
		next if ($old_line <= $max_old);
		$mapped{$old_line + ($max_new - $max_old)} =
			$count_data->{$old_line};
	}
	return \%mapped;
}
#
# apply_diff_to_brcount(brcount, linedata)
#
# Adjust line numbers of branch coverage data according to linedata.
#
sub apply_diff_to_brcount($$)
{
	# Adjust line numbers of branch coverage data according to the
	# line mapping in $linedata.  The branch data is round-tripped
	# through the db representation so that apply_diff() can remap
	# its line numbers, then converted back to brcount format.
	my ($brcount, $linedata) = @_;
	my ($adjusted) = db_to_brcount(
		apply_diff(brcount_to_db($brcount), $linedata));

	return $adjusted;
}
#
# get_hash_max(hash_ref)
#
# Return the highest integer key from hash.
#
#
# get_hash_max(hash_ref)
#
# Return the highest integer key found in the hash, or undef if the hash
# is empty.
#
sub get_hash_max($)
{
	my ($hash) = @_;
	my $max;

	foreach (keys(%{$hash})) {
		# Bug fix: compare the KEY against the current maximum.
		# The previous code compared the hash VALUE ($hash->{$_})
		# to $max while still assigning the key, so the result
		# depended on hash iteration order and did not return the
		# highest key as documented.
		if (!defined($max) || $_ > $max) {
			$max = $_;
		}
	}
	return $max;
}
sub get_hash_reverse($)
{
	# Return a reference to a new hash in which the keys and values of
	# the given hash are swapped.  If several keys share the same
	# value, only one of them (iteration-order dependent) survives.
	my ($hash) = @_;
	my %inverted = reverse(%{$hash});

	return \%inverted;
}
#
# apply_diff_to_funcdata(funcdata, line_hash)
#
sub apply_diff_to_funcdata($$)
{
	# Remap function start lines in $funcdata (function name -> line
	# number) according to the new-line -> old-line mapping in
	# $linedata and return a reference to the remapped hash.  Functions
	# whose start line is neither covered by the mapping nor located
	# past its end are dropped.
	my ($funcdata, $linedata) = @_;
	my $max_new = get_hash_max($linedata);
	my $max_old = $linedata->{$max_new};
	# Inverted mapping: old line number -> new line number
	my $old_to_new = get_hash_reverse($linedata);
	my %remapped;

	while (my ($name, $old_line) = each(%{$funcdata})) {
		if (defined($old_to_new->{$old_line})) {
			# Start line is covered by the diff mapping
			$remapped{$name} = $old_to_new->{$old_line};
		} elsif ($old_line > $max_old) {
			# Past the mapped region - shift by the final offset
			$remapped{$name} = $old_line + $max_new - $max_old;
		}
	}
	return \%remapped;
}
#
# get_line_hash($filename, $diff_data, $path_data)
#
# Find line hash in DIFF_DATA which matches FILENAME. On success, return list
# line hash. or undef in case of no match. Die if more than one line hashes in
# DIFF_DATA match.
#
sub get_line_hash($$$)
{
	my $filename = $_[0];
	my $diff_data = $_[1];
	my $path_data = $_[2];
	my $conversion;
	my $old_path;
	my $new_path;
	my $diff_name;	# Best-matching filename from the diff data
	my $common;
	my $old_depth;
	my $new_depth;
	# Remove trailing slash from diff path
	# NOTE(review): $diff_path is a file-global (set from the --diff
	# option processing outside this chunk) - confirm before relying
	# on it being initialized here.
	$diff_path =~ s/\/$//;
	foreach (keys(%{$diff_data}))
	{
		my $sep = "";
		# Insert a separator unless the diff name is absolute
		$sep = '/' if (!/^\//);
		# Try to match diff filename with filename
		if ($filename =~ /^\Q$diff_path$sep$_\E$/)
		{
			if ($diff_name)
			{
				# Two files match, choose the more specific one
				# (the one with more path components)
				$old_depth = ($diff_name =~ tr/\///);
				$new_depth = (tr/\///);
				if ($old_depth == $new_depth)
				{
					die("ERROR: diff file contains ".
					    "ambiguous entries for ".
					    "$filename\n");
				}
				elsif ($new_depth > $old_depth)
				{
					$diff_name = $_;
				}
			}
			else
			{
				$diff_name = $_;
			}
		};
	}
	if ($diff_name)
	{
		# Get converted path
		if ($filename =~ /^(.*)$diff_name$/)
		{
			($common, $old_path, $new_path) =
				get_common_filename($filename,
					$1.$path_data->{$diff_name});
		}
		return ($diff_data->{$diff_name}, $old_path, $new_path);
	}
	else
	{
		return undef;
	}
}
#
# convert_paths(trace_data, path_conversion_data)
#
# Rename all paths in TRACE_DATA which show up in PATH_CONVERSION_DATA.
#
sub convert_paths($$)
{
	my $trace_data = $_[0];
	my $path_conversion_data = $_[1];
	my $filename;
	my $new_path;
	if (scalar(keys(%{$path_conversion_data})) == 0)
	{
		info("No path conversion data available.\n");
		return;
	}
	# Expand path conversion list: also map each pair of parent
	# directories of an old/new path pair, up to the point where the
	# two paths become identical
	foreach $filename (keys(%{$path_conversion_data}))
	{
		$new_path = $path_conversion_data->{$filename};
		while (($filename =~ s/^(.*)\/[^\/]+$/$1/) &&
		       ($new_path =~ s/^(.*)\/[^\/]+$/$1/) &&
		       ($filename ne $new_path))
		{
			$path_conversion_data->{$filename} = $new_path;
		}
	}
	# Adjust paths
	FILENAME: foreach $filename (keys(%{$trace_data}))
	{
		# Find a path in our conversion table that matches, starting
		# with the longest path
		foreach (sort({length($b) <=> length($a)}
			      keys(%{$path_conversion_data})))
		{
			# Is this path a prefix of our filename?
			if (!($filename =~ /^$_(.*)$/))
			{
				next;
			}
			$new_path = $path_conversion_data->{$_}.$1;
			# Make sure not to overwrite an existing entry under
			# that path name
			if ($trace_data->{$new_path})
			{
				# Need to combine entries
				$trace_data->{$new_path} =
					combine_info_entries(
						$trace_data->{$filename},
						$trace_data->{$new_path},
						$filename);
			}
			else
			{
				# Simply rename entry
				$trace_data->{$new_path} =
					$trace_data->{$filename};
			}
			delete($trace_data->{$filename});
			# Continue with the next tracefile entry
			next FILENAME;
		}
		info("No conversion available for filename $filename\n");
	}
}
#
# sub adjust_fncdata(funcdata, testfncdata, sumfnccount)
#
# Remove function call count data from testfncdata and sumfnccount which
# is no longer present in funcdata.
#
#
# adjust_fncdata(funcdata, testfncdata, sumfnccount)
#
# Remove function call count data from TESTFNCDATA and SUMFNCCOUNT for
# functions which are no longer present in FUNCDATA.
#
sub adjust_fncdata($$$)
{
	my ($funcdata, $testfncdata, $sumfnccount) = @_;
	my $testname;
	my $func;
	# Remove count data in testfncdata for functions which are no longer
	# in funcdata.  Bug fix: iterate over keys() - the previous code
	# iterated over the flattened hash (keys AND values interleaved),
	# so $testname/$func also took on hash values, leading to
	# dereferencing undef lookups and potentially deleting entries
	# whose name happened to equal a call count.
	foreach $testname (keys(%{$testfncdata})) {
		my $fnccount = $testfncdata->{$testname};
		foreach $func (keys(%{$fnccount})) {
			if (!defined($funcdata->{$func})) {
				delete($fnccount->{$func});
			}
		}
	}
	# Remove count data in sumfnccount for functions which are no longer
	# in funcdata
	foreach $func (keys(%{$sumfnccount})) {
		if (!defined($funcdata->{$func})) {
			delete($sumfnccount->{$func});
		}
	}
}
#
# get_func_found_and_hit(sumfnccount)
#
# Return (f_found, f_hit) for sumfnccount
#
sub get_func_found_and_hit($)
{
	# Return the pair (functions found, functions hit) for the given
	# call count hash (function name -> number of calls).  A function
	# counts as hit when it was called at least once.
	my ($sumfnccount) = @_;
	my $hit = 0;

	foreach my $count (values(%{$sumfnccount})) {
		$hit++ if ($count > 0);
	}
	return (scalar(keys(%{$sumfnccount})), $hit);
}
#
# diff()
#
sub diff()
{
	# Transform the tracefile read from $diff so that its line numbers
	# match the new state of the source tree described by the unified
	# diff file given in $ARGV[0]: line, branch, function and checksum
	# data are remapped, testnames get a ",diff" suffix, deleted files
	# are dropped and (optionally) filenames are converted to their
	# new paths.  The result is written to $output_filename or STDOUT.
	my $trace_data = read_info_file($diff);
	my $diff_data;
	my $path_data;
	my $old_path;
	my $new_path;
	my %path_conversion_data;
	my $filename;
	my $line_hash;
	my $new_name;
	my $entry;
	my $testdata;
	my $testname;
	my $sumcount;
	my $funcdata;
	my $checkdata;
	my $testfncdata;
	my $sumfnccount;
	my $testbrdata;
	my $sumbrcount;
	my $found;
	my $hit;
	my $f_found;
	my $f_hit;
	my $br_found;
	my $br_hit;
	my $converted = 0;
	my $unchanged = 0;
	my @result;
	local *INFO_HANDLE;
	($diff_data, $path_data) = read_diff($ARGV[0]);
	foreach $filename (sort(keys(%{$trace_data})))
	{
		# Find a diff section corresponding to this file
		($line_hash, $old_path, $new_path) =
			get_line_hash($filename, $diff_data, $path_data);
		if (!$line_hash)
		{
			# There's no diff section for this file
			$unchanged++;
			next;
		}
		$converted++;
		if ($old_path && $new_path && ($old_path ne $new_path))
		{
			$path_conversion_data{$old_path} = $new_path;
		}
		# Check for deleted files
		if (scalar(keys(%{$line_hash})) == 0)
		{
			info("Removing $filename\n");
			delete($trace_data->{$filename});
			next;
		}
		info("Converting $filename\n");
		$entry = $trace_data->{$filename};
		($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
		 $sumfnccount, $testbrdata, $sumbrcount) =
			get_info_entry($entry);
		# Convert test data
		foreach $testname (keys(%{$testdata}))
		{
			# Adjust line numbers of line coverage data
			$testdata->{$testname} =
				apply_diff($testdata->{$testname}, $line_hash);
			# Adjust line numbers of branch coverage data
			$testbrdata->{$testname} =
				apply_diff_to_brcount($testbrdata->{$testname},
						      $line_hash);
			# Remove empty sets of test data
			if (scalar(keys(%{$testdata->{$testname}})) == 0)
			{
				delete($testdata->{$testname});
				delete($testfncdata->{$testname});
				delete($testbrdata->{$testname});
			}
		}
		# Rename test data to indicate conversion
		foreach $testname (keys(%{$testdata}))
		{
			# Skip testnames which already contain an extension
			if ($testname =~ /,[^,]+$/)
			{
				next;
			}
			# Check for name conflict
			if (defined($testdata->{$testname.",diff"}))
			{
				# Add counts
				($testdata->{$testname}) = add_counts(
					$testdata->{$testname},
					$testdata->{$testname.",diff"});
				delete($testdata->{$testname.",diff"});
				# Add function call counts
				($testfncdata->{$testname}) = add_fnccount(
					$testfncdata->{$testname},
					$testfncdata->{$testname.",diff"});
				delete($testfncdata->{$testname.",diff"});
				# Add branch counts
				($testbrdata->{$testname}) = combine_brcount(
					$testbrdata->{$testname},
					$testbrdata->{$testname.",diff"},
					$BR_ADD);
				delete($testbrdata->{$testname.",diff"});
			}
			# Move test data to new testname
			$testdata->{$testname.",diff"} = $testdata->{$testname};
			delete($testdata->{$testname});
			# Move function call count data to new testname
			$testfncdata->{$testname.",diff"} =
				$testfncdata->{$testname};
			delete($testfncdata->{$testname});
			# Move branch count data to new testname
			$testbrdata->{$testname.",diff"} =
				$testbrdata->{$testname};
			delete($testbrdata->{$testname});
		}
		# Convert summary of test data
		$sumcount = apply_diff($sumcount, $line_hash);
		# Convert function data
		$funcdata = apply_diff_to_funcdata($funcdata, $line_hash);
		# Convert branch coverage data
		$sumbrcount = apply_diff_to_brcount($sumbrcount, $line_hash);
		# Update found/hit numbers
		# Convert checksum data
		$checkdata = apply_diff($checkdata, $line_hash);
		# Convert function call count data
		adjust_fncdata($funcdata, $testfncdata, $sumfnccount);
		($f_found, $f_hit) = get_func_found_and_hit($sumfnccount);
		($br_found, $br_hit) = get_br_found_and_hit($sumbrcount);
		# Update found/hit numbers
		$found = 0;
		$hit = 0;
		foreach (keys(%{$sumcount}))
		{
			$found++;
			if ($sumcount->{$_} > 0)
			{
				$hit++;
			}
		}
		if ($found > 0)
		{
			# Store converted entry
			set_info_entry($entry, $testdata, $sumcount, $funcdata,
				       $checkdata, $testfncdata, $sumfnccount,
				       $testbrdata, $sumbrcount, $found, $hit,
				       $f_found, $f_hit, $br_found, $br_hit);
		}
		else
		{
			# Remove empty data set
			delete($trace_data->{$filename});
		}
	}
	# Convert filenames as well if requested
	if ($convert_filenames)
	{
		convert_paths($trace_data, \%path_conversion_data);
	}
	info("$converted entr".($converted != 1 ? "ies" : "y")." converted, ".
	     "$unchanged entr".($unchanged != 1 ? "ies" : "y")." left ".
	     "unchanged.\n");
	# Write data
	if ($to_file)
	{
		info("Writing data to $output_filename\n");
		open(INFO_HANDLE, ">$output_filename")
			or die("ERROR: cannot write to $output_filename!\n");
		@result = write_info_file(*INFO_HANDLE, $trace_data);
		close(*INFO_HANDLE);
	}
	else
	{
		@result = write_info_file(*STDOUT, $trace_data);
	}
	return @result;
}
#
# system_no_output(mode, parameters)
#
# Call an external program using PARAMETERS while suppressing depending on
# the value of MODE:
#
# MODE & 1: suppress STDOUT
# MODE & 2: suppress STDERR
#
# Return 0 on success, non-zero otherwise.
#
sub system_no_output($@)
{
	my $mode = shift;
	my $result;
	local *OLD_STDERR;
	local *OLD_STDOUT;
	# Save old stdout and stderr handles so they can be restored after
	# the child has run
	($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
	($mode & 2) && open(OLD_STDERR, ">>&STDERR");
	# Redirect to /dev/null to suppress the requested streams
	($mode & 1) && open(STDOUT, ">/dev/null");
	($mode & 2) && open(STDERR, ">/dev/null");
	system(@_);
	# $? holds the raw wait status of the child (0 means success)
	$result = $?;
	# Close redirected handles
	($mode & 1) && close(STDOUT);
	($mode & 2) && close(STDERR);
	# Restore old handles
	($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
	($mode & 2) && open(STDERR, ">>&OLD_STDERR");
	return $result;
}
#
# read_config(filename)
#
# Read configuration file FILENAME and return a reference to a hash containing
# all valid key=value pairs found.
#
sub read_config($)
{
	# Parse the configuration file named by $_[0] and return a
	# reference to a hash of all "key = value" settings found in it,
	# or undef (with a warning) if the file cannot be opened.
	# Comments introduced by '#' and surrounding whitespace are
	# stripped; malformed lines produce a warning and are skipped.
	my $filename = $_[0];
	my %settings;
	local *HANDLE;

	if (!open(HANDLE, "<$filename")) {
		warn("WARNING: cannot read configuration file $filename\n");
		return undef;
	}
	while (my $line = <HANDLE>) {
		chomp($line);
		# Strip comments and leading/trailing whitespace
		$line =~ s/#.*//;
		$line =~ s/^\s+//;
		$line =~ s/\s+$//;
		next unless length($line);

		my ($key, $value) = split(/\s*=\s*/, $line, 2);
		if (defined($key) && defined($value)) {
			$settings{$key} = $value;
		} else {
			warn("WARNING: malformed statement in line $. ".
			     "of configuration file $filename\n");
		}
	}
	close(HANDLE);
	return \%settings;
}
#
# apply_config(REF)
#
# REF is a reference to a hash containing the following mapping:
#
# key_string => var_ref
#
# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
# variable. If the global configuration hash CONFIG contains a value for
# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
#
sub apply_config($)
{
	# For each keyword in the given mapping (keyword -> variable
	# reference), assign the value from the global configuration hash
	# $config to the referenced variable if that keyword is set there.
	my $ref = $_[0];

	foreach my $keyword (keys(%{$ref})) {
		next if (!defined($config->{$keyword}));
		${$ref->{$keyword}} = $config->{$keyword};
	}
}
sub warn_handler($)
{
	# Warning hook: remove temporary directories before emitting the
	# message prefixed with the tool name.
	my $message = shift;

	temp_cleanup();
	warn("$tool_name: $message");
}
sub die_handler($)
{
	# Fatal-error hook: remove temporary directories before dying with
	# the message prefixed with the tool name.
	my $message = shift;

	temp_cleanup();
	die("$tool_name: $message");
}
sub abort_handler($)
{
	# Signal hook: remove temporary directories, then terminate with a
	# non-zero exit code.
	temp_cleanup();
	exit(1);
}
sub temp_cleanup()
{
	# Recursively delete all temporary directories registered in the
	# global @temp_dirs list and reset that list.
	return if (!@temp_dirs);

	info("Removing temporary directories.\n");
	rmtree($_) foreach (@temp_dirs);
	@temp_dirs = ();
}
# Try to make the debugfs-based gcov kernel interface available by
# mounting debugfs at /sys/kernel/debug. All output is suppressed;
# failure is tolerated since a capability check follows in setup_gkv().
sub setup_gkv_sys()
{
	system_no_output(3, "mount", "-t", "debugfs", "nodev",
			 "/sys/kernel/debug");
}
# Try to make the proc-based gcov kernel interface available by loading
# the gcov_proc kernel module; if that fails, fall back to the
# alternative module name gcov_prof. All output is suppressed.
sub setup_gkv_proc()
{
	if (system_no_output(3, "modprobe", "gcov_proc")) {
		system_no_output(3, "modprobe", "gcov_prof");
	}
}
# Return 1 if DIR looks like the debugfs-based gcov kernel interface
# (identified by the presence of its global "reset" file), 0 otherwise.
sub check_gkv_sys($)
{
	my ($dir) = @_;

	return (-e "$dir/reset") ? 1 : 0;
}
# Return 1 if DIR looks like the proc-based gcov kernel interface
# (identified by the presence of its "vmlinux" entry), 0 otherwise.
sub check_gkv_proc($)
{
	my ($dir) = @_;

	return (-e "$dir/vmlinux") ? 1 : 0;
}
# Locate (and if necessary set up) the gcov kernel support interface.
#
# Returns a list ($interface_type, $directory) where $interface_type is
# $GKV_SYS or $GKV_PROC. Dies if no gcov kernel data can be found.
#
# The detection is driven by a list of actions:
#   "cs" = check /sys interface,  "ss" = set up /sys interface,
#   "cp" = check /proc interface, "sp" = set up /proc interface.
# The order of actions depends on whether the user specified a
# directory via $gcov_dir and on whether that path looks proc-based.
sub setup_gkv()
{
	my $dir;
	my $sys_dir = "/sys/kernel/debug/gcov";
	my $proc_dir = "/proc/gcov";
	my @todo;

	if (!defined($gcov_dir)) {
		info("Auto-detecting gcov kernel support.\n");
		@todo = ( "cs", "cp", "ss", "cs", "sp", "cp" );
	} elsif ($gcov_dir =~ /proc/) {
		info("Checking gcov kernel support at $gcov_dir ".
		     "(user-specified).\n");
		# Path mentions proc - try the proc interface first
		@todo = ( "cp", "sp", "cp", "cs", "ss", "cs");
	} else {
		info("Checking gcov kernel support at $gcov_dir ".
		     "(user-specified).\n");
		@todo = ( "cs", "ss", "cs", "cp", "sp", "cp", );
	}
	foreach (@todo) {
		if ($_ eq "cs") {
			# Check /sys
			$dir = defined($gcov_dir) ? $gcov_dir : $sys_dir;
			if (check_gkv_sys($dir)) {
				info("Found ".$GKV_NAME[$GKV_SYS]." gcov ".
				     "kernel support at $dir\n");
				return ($GKV_SYS, $dir);
			}
		} elsif ($_ eq "cp") {
			# Check /proc
			$dir = defined($gcov_dir) ? $gcov_dir : $proc_dir;
			if (check_gkv_proc($dir)) {
				info("Found ".$GKV_NAME[$GKV_PROC]." gcov ".
				     "kernel support at $dir\n");
				return ($GKV_PROC, $dir);
			}
		} elsif ($_ eq "ss") {
			# Setup /sys
			setup_gkv_sys();
		} elsif ($_ eq "sp") {
			# Setup /proc
			setup_gkv_proc();
		}
	}
	# Neither interface could be found or set up
	if (defined($gcov_dir)) {
		die("ERROR: could not find gcov kernel data at $gcov_dir\n");
	} else {
		die("ERROR: no gcov kernel data found\n");
	}
}
#
# get_overall_line(found, hit, name_singular, name_plural)
#
# Return a string containing overall information for the specified
# found/hit data.
#
sub get_overall_line($$$$)
{
	my ($found, $hit, $name_sn, $name_pl) = @_;

	# Without any found entities there is no rate to report
	if (!defined($found) || $found == 0) {
		return "no data found";
	}
	# Pick singular or plural noun depending on the total count
	my $name = ($found == 1) ? $name_sn : $name_pl;

	return sprintf("%.1f%% (%d of %d %s)", $hit * 100 / $found, $hit,
		       $found, $name);
}
#
# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do
# br_found, br_hit)
#
# Print overall coverage rates for the specified coverage types.
#
sub print_overall_rate($$$$$$$$$)
{
	my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit,
	    $br_do, $br_found, $br_hit) = @_;

	info("Overall coverage rate:\n");
	# Each category is reported only if its *_do flag is set
	if ($ln_do) {
		info("  lines......: %s\n",
		     get_overall_line($ln_found, $ln_hit, "line", "lines"));
	}
	if ($fn_do) {
		info("  functions..: %s\n",
		     get_overall_line($fn_found, $fn_hit, "function",
				      "functions"));
	}
	if ($br_do) {
		info("  branches...: %s\n",
		     get_overall_line($br_found, $br_hit, "branch",
				      "branches"));
	}
}
| zy901002-gpsr | utils/lcov/lcov | Perl | gpl2 | 95,299 |
#!/usr/bin/perl -w
#
# Copyright (c) International Business Machines Corp., 2002,2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# geninfo
#
# This script generates .info files from data files as created by code
# instrumented with gcc's built-in profiling mechanism. Call it with
# --help and refer to the geninfo man page to get information on usage
# and available options.
#
#
# Authors:
# 2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
# IBM Lab Boeblingen
# based on code by Manoj Iyer <manjo@mail.utexas.edu> and
# Megan Bock <mbock@us.ibm.com>
# IBM Austin
# 2002-09-05 / Peter Oberparleiter: implemented option that allows file list
# 2003-04-16 / Peter Oberparleiter: modified read_gcov so that it can also
# parse the new gcov format which is to be introduced in gcc 3.3
# 2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
# 2003-07-03 / Peter Oberparleiter: added line checksum support, added
# --no-checksum
# 2003-09-18 / Nigel Hinds: capture branch coverage data from GCOV
# 2003-12-11 / Laurent Deniel: added --follow option
# workaround gcov (<= 3.2.x) bug with empty .da files
# 2004-01-03 / Laurent Deniel: Ignore empty .bb files
# 2004-02-16 / Andreas Krebbel: Added support for .gcno/.gcda files and
# gcov versioning
# 2004-08-09 / Peter Oberparleiter: added configuration file support
# 2008-07-14 / Tom Zoerner: added --function-coverage command line option
# 2008-08-13 / Peter Oberparleiter: modified function coverage
# implementation (now enabled per default)
#
use strict;
use File::Basename;
use File::Spec::Functions qw /abs2rel catdir file_name_is_absolute splitdir
splitpath/;
use Getopt::Long;
use Digest::MD5 qw(md5_base64);
# Constants
our $lcov_version = 'LCOV version 1.9';
our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
our $gcov_tool = "gcov";	# default gcov executable; may be overridden
our $tool_name = basename($0);
# Encoded gcov version thresholds (0xMMmmpp) used to select file formats
our $GCOV_VERSION_3_4_0 = 0x30400;
our $GCOV_VERSION_3_3_0 = 0x30300;
# Record tags used when parsing .gcno graph files
our $GCNO_FUNCTION_TAG = 0x01000000;
our $GCNO_LINES_TAG = 0x01450000;
# File magic numbers ("gcno" / bbg) identifying graph file formats
our $GCNO_FILE_MAGIC = 0x67636e6f;
our $BBG_FILE_MAGIC = 0x67626267;
# Compatibility mode identifier for SuSE "hammer" gcc builds
our $COMPAT_HAMMER = "hammer";
# Indices into the @ignore error-class array (--ignore-errors)
our $ERROR_GCOV = 0;
our $ERROR_SOURCE = 1;
our $ERROR_GRAPH = 2;
# Source code markers delimiting regions excluded from coverage
our $EXCL_START = "LCOV_EXCL_START";
our $EXCL_STOP = "LCOV_EXCL_STOP";
our $EXCL_LINE = "LCOV_EXCL_LINE";
# Field layout of branch data vectors (used by the br_gvec_* helpers)
our $BR_LINE = 0;
our $BR_BLOCK = 1;
our $BR_BRANCH = 2;
our $BR_TAKEN = 3;
our $BR_VEC_ENTRIES = 4;
our $BR_VEC_WIDTH = 32;
# Block number used for branches without a block id - presumably a
# sentinel for gcov output lacking block information; confirm in br_* code
our $UNNAMED_BLOCK = 9999;
# Prototypes
# Main processing and filename handling
sub print_usage(*);
sub gen_info($);
sub process_dafile($$);
sub match_filename($@);
sub solve_ambiguous_match($$$);
sub split_filename($);
sub solve_relative_path($$);
# gcov output parsing
sub read_gcov_header($);
sub read_gcov_file($);
sub info(@);
sub get_gcov_version();
sub system_no_output($@);
# Configuration handling
sub read_config($);
sub apply_config($);
# Exclusion marker handling
sub get_exclusion_data($);
sub apply_exclusion_data($$);
sub process_graphfile($$);
sub filter_fn_name($);
# Signal/error handlers
sub warn_handler($);
sub die_handler($);
# Graph (.bb/.bbg/.gcno) file parsing
sub graph_error($$);
sub graph_expect($);
sub graph_read(*$;$);
sub graph_skip(*$;$);
sub sort_uniq(@);
sub sort_uniq_lex(@);
sub graph_cleanup($);
sub graph_find_base($);
sub graph_from_bb($$$);
sub graph_add_order($$$);
sub read_bb_word(*;$);
sub read_bb_value(*;$);
sub read_bb_string(*$);
sub read_bb($$);
sub read_bbg_word(*;$);
sub read_bbg_value(*;$);
sub read_bbg_string(*);
sub read_bbg_lines_record(*$$$$$$);
sub read_bbg($$);
sub read_gcno_word(*;$);
sub read_gcno_value(*$;$);
sub read_gcno_string(*$);
sub read_gcno_lines_record(*$$$$$$$);
sub read_gcno_function_record(*$$$$);
sub read_gcno($$);
# Capability detection and reporting
sub get_gcov_capabilities();
sub get_overall_line($$$$);
sub print_overall_rate($$$$$$$$$);
sub br_gvec_len($);
sub br_gvec_get($$);
sub debug($);
sub int_handler();
# Global variables
our $gcov_version;		# Numeric gcov version (see get_gcov_version)
our $graph_file_extension;	# ".bb", ".bbg" or ".gcno" depending on version
our $data_file_extension;	# ".da" or ".gcda" depending on version
our @data_directory;		# Directories/files to process (from @ARGV)
our $test_name = "";		# --test-name value, sanitized below
our $quiet;			# --quiet flag
our $help;			# --help flag
our $output_filename;		# --output-filename value ("-" means stdout)
our $base_directory;		# --base-directory value (made absolute below)
our $version;			# --version flag
our $follow;			# --follow flag, converted to a find(1) option
our $checksum;			# Line checksumming on/off
our $no_checksum;		# Deprecated negative form of $checksum
our $compat_libtool;		# libtool .libs-directory compatibility mode
our $no_compat_libtool;		# Deprecated negative form of $compat_libtool
our $adjust_testname;		# Append "uname -a" output to the test name
our $config;		# Configuration file contents
our $compatibility;	# Compatibility version flag - used to indicate
			# non-standard GCOV data format versions
our @ignore_errors;	# List of errors to ignore (parameter)
our @ignore;		# List of errors to ignore (array)
our $initial;			# --initial: capture zero coverage from graphs
our $no_recursion = 0;		# --no-recursion flag
our $maxdepth;			# find(1) -maxdepth option derived from above
our $no_markers = 0;		# --no-markers: ignore exclusion markers
our $opt_derive_func_data = 0;	# --derive-func-data flag
our $debug = 0;			# --debug flag
our $gcov_caps;			# Hash of supported gcov options
our @gcov_options;		# gcov command line options actually used
our $cwd = `pwd`;		# Working directory at startup
chomp($cwd);
#
# Code entry point
#

# Register handler routine to be called when interrupted; the __WARN__
# and __DIE__ handlers prefix messages with the tool name
$SIG{"INT"} = \&int_handler;
$SIG{__WARN__} = \&warn_handler;
$SIG{__DIE__} = \&die_handler;

# Prettify version string
$lcov_version =~ s/\$\s*Revision\s*:?\s*(\S+)\s*\$/$1/;

# Set LANG so that gcov output will be in a unified format
$ENV{"LANG"} = "C";

# Read configuration file if available; the per-user file takes
# precedence over the system-wide /etc/lcovrc
if (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
{
	$config = read_config($ENV{"HOME"}."/.lcovrc");
}
elsif (-r "/etc/lcovrc")
{
	$config = read_config("/etc/lcovrc");
}

if ($config)
{
	# Copy configuration file values to variables
	apply_config({
		"geninfo_gcov_tool"		=> \$gcov_tool,
		"geninfo_adjust_testname"	=> \$adjust_testname,
		"geninfo_checksum"		=> \$checksum,
		"geninfo_no_checksum"		=> \$no_checksum, # deprecated
		"geninfo_compat_libtool"	=> \$compat_libtool});

	# Merge options: fold the deprecated no_checksum form into checksum
	if (defined($no_checksum))
	{
		$checksum = ($no_checksum ? 0 : 1);
		$no_checksum = undef;
	}
}
# Parse command line options; command line values override the
# configuration file values applied above
if (!GetOptions("test-name|t=s" => \$test_name,
		"output-filename|o=s" => \$output_filename,
		"checksum" => \$checksum,
		"no-checksum" => \$no_checksum,
		"base-directory|b=s" => \$base_directory,
		"version|v" =>\$version,
		"quiet|q" => \$quiet,
		"help|h|?" => \$help,
		"follow|f" => \$follow,
		"compat-libtool" => \$compat_libtool,
		"no-compat-libtool" => \$no_compat_libtool,
		"gcov-tool=s" => \$gcov_tool,
		"ignore-errors=s" => \@ignore_errors,
		"initial|i" => \$initial,
		"no-recursion" => \$no_recursion,
		"no-markers" => \$no_markers,
		"derive-func-data" => \$opt_derive_func_data,
		"debug" => \$debug,
		))
{
	# Option parsing failed - point the user at the help text
	print(STDERR "Use $tool_name --help to get usage information\n");
	exit(1);
}
else
{
	# Merge options: the deprecated negative forms take precedence
	# over their positive counterparts when specified
	if (defined($no_checksum))
	{
		$checksum = ($no_checksum ? 0 : 1);
		$no_checksum = undef;
	}

	if (defined($no_compat_libtool))
	{
		$compat_libtool = ($no_compat_libtool ? 0 : 1);
		$no_compat_libtool = undef;
	}
}
# Remaining arguments are the directories/files to process
@data_directory = @ARGV;

# Check for help option
if ($help)
{
	print_usage(*STDOUT);
	exit(0);
}

# Check for version option
if ($version)
{
	print("$tool_name: $lcov_version\n");
	exit(0);
}

# Make sure test names only contain valid characters
if ($test_name =~ s/\W/_/g)
{
	warn("WARNING: invalid characters removed from testname!\n");
}

# Adjust test name to include uname output if requested
if ($adjust_testname)
{
	$test_name .= "__".`uname -a`;
	$test_name =~ s/\W/_/g;
}

# Make sure base_directory contains an absolute path specification
if ($base_directory)
{
	$base_directory = solve_relative_path($cwd, $base_directory);
}

# Check for follow option; $follow becomes a find(1) argument
if ($follow)
{
	$follow = "-follow"
}
else
{
	$follow = "";
}

# Determine checksum mode
if (defined($checksum))
{
	# Normalize to boolean
	$checksum = ($checksum ? 1 : 0);
}
else
{
	# Default is off
	$checksum = 0;
}

# Determine libtool compatibility mode
if (defined($compat_libtool))
{
	$compat_libtool = ($compat_libtool? 1 : 0);
}
else
{
	# Default is on
	$compat_libtool = 1;
}

# Determine max depth for recursion; $maxdepth becomes a find(1) argument
if ($no_recursion)
{
	$maxdepth = "-maxdepth 1";
}
else
{
	$maxdepth = "";
}

# Check for directory name
if (!@data_directory)
{
	die("No directory specified\n".
	    "Use $tool_name --help to get usage information\n");
}
else
{
	# Every specified entry must at least be readable
	foreach (@data_directory)
	{
		stat($_);
		if (!-r _)
		{
			die("ERROR: cannot read $_!\n");
		}
	}
}
# Translate the --ignore-errors arguments into flags in @ignore
if (@ignore_errors)
{
	my @expanded;
	my $error;

	# Expand comma-separated entries
	foreach (@ignore_errors) {
		if (/,/)
		{
			push(@expanded, split(",", $_));
		}
		else
		{
			push(@expanded, $_);
		}
	}
	foreach (@expanded)
	{
		/^gcov$/ && do { $ignore[$ERROR_GCOV] = 1; next; } ;
		/^source$/ && do { $ignore[$ERROR_SOURCE] = 1; next; };
		/^graph$/ && do { $ignore[$ERROR_GRAPH] = 1; next; };
		die("ERROR: unknown argument for --ignore-errors: $_\n");
	}
}

# Make sure the gcov tool can be executed at all
if (system_no_output(3, $gcov_tool, "--help") == -1)
{
	die("ERROR: need tool $gcov_tool!\n");
}

$gcov_version = get_gcov_version();

# Select data and graph file extensions according to the gcov version;
# gcc < 3.4 used .da/.bb (or .bbg for the "hammer" compatibility mode),
# later versions use .gcda/.gcno
if ($gcov_version < $GCOV_VERSION_3_4_0)
{
	if (defined($compatibility) && $compatibility eq $COMPAT_HAMMER)
	{
		$data_file_extension = ".da";
		$graph_file_extension = ".bbg";
	}
	else
	{
		$data_file_extension = ".da";
		$graph_file_extension = ".bb";
	}
}
else
{
	$data_file_extension = ".gcda";
	$graph_file_extension = ".gcno";
}
# Determine gcov options: only pass options the installed gcov supports
$gcov_caps = get_gcov_capabilities();
push(@gcov_options, "-b") if ($gcov_caps->{'branch-probabilities'});
push(@gcov_options, "-c") if ($gcov_caps->{'branch-counts'});
# Below branch option is disabled due to a reported compiler bug
# present in gcc-4.5 version and earlier. If enabled, gcov will hang on
# such compilers. This workaround just means that lcov won't be able
# to separate multiple branch blocks in a line.
# push(@gcov_options, "-a") if ($gcov_caps->{'all-blocks'});
push(@gcov_options, "-p") if ($gcov_caps->{'preserve-paths'});

# Check output filename
if (defined($output_filename) && ($output_filename ne "-"))
{
	# Initially create output filename, data is appended
	# for each data file processed
	local *DUMMY_HANDLE;
	open(DUMMY_HANDLE, ">$output_filename")
		or die("ERROR: cannot create $output_filename!\n");
	close(DUMMY_HANDLE);

	# Make $output_filename an absolute path because we're going
	# to change directories while processing files
	if (!($output_filename =~ /^\/(.*)$/))
	{
		$output_filename = $cwd."/".$output_filename;
	}
}

# Process each specified directory/file
foreach my $entry (@data_directory) {
	gen_info($entry);
}

if ($initial) {
	warn("Note: --initial does not generate branch coverage ".
	     "data\n");
}

info("Finished .info-file creation\n");

exit(0);
#
# print_usage(handle)
#
# Print usage information.
#
# Print usage information to the supplied file handle (typically STDOUT
# for --help). The handle is passed as a glob, e.g. print_usage(*STDOUT).
sub print_usage(*)
{
	local *HANDLE = $_[0];

	print(HANDLE <<END_OF_USAGE);
Usage: $tool_name [OPTIONS] DIRECTORY

Traverse DIRECTORY and create a .info file for each data file found. Note
that you may specify more than one directory, all of which are then processed
sequentially.

  -h, --help                        Print this help, then exit
  -v, --version                     Print version number, then exit
  -q, --quiet                       Do not print progress messages
  -i, --initial                     Capture initial zero coverage data
  -t, --test-name NAME              Use test case name NAME for resulting data
  -o, --output-filename OUTFILE     Write data only to OUTFILE
  -f, --follow                      Follow links when searching .da/.gcda files
  -b, --base-directory DIR          Use DIR as base directory for relative paths
      --(no-)checksum               Enable (disable) line checksumming
      --(no-)compat-libtool         Enable (disable) libtool compatibility mode
      --gcov-tool TOOL              Specify gcov tool location
      --ignore-errors ERROR         Continue after ERROR (gcov, source, graph)
      --no-recursion                Exclude subdirectories from processing
      --function-coverage           Capture function call counts
      --no-markers                  Ignore exclusion markers in source code
      --derive-func-data            Generate function data from line data

For more information see: $lcov_url
END_OF_USAGE
	;
}
#
# get_common_prefix(min_dir, filenames)
#
# Return the longest path prefix shared by all filenames. MIN_DIR specifies
# the minimum number of directories that a filename may have after removing
# the prefix.
#
sub get_common_prefix($@)
{
	# MIN_DIR: minimum number of directory components each filename
	# must keep after the prefix is removed.
	# FILES: list of filenames to compare.
	# Returns the longest shared directory prefix.
	my ($min_dir, @files) = @_;
	my $file;
	my @prefix;
	my $i;

	foreach $file (@files) {
		my ($v, $d, $f) = splitpath($file);
		my @comp = splitdir($d);

		if (!@prefix) {
			# First filename initializes the prefix candidate
			@prefix = @comp;
			next;
		}
		for ($i = 0; $i < scalar(@comp) && $i < scalar(@prefix); $i++) {
			if ($comp[$i] ne $prefix[$i] ||
			    ((scalar(@comp) - ($i + 1)) <= $min_dir)) {
				# Truncate prefix at the first mismatch (or
				# where fewer than MIN_DIR components would
				# remain). splice replaces the deprecated
				# delete-on-array-slice construct which also
				# indexed one past the final element.
				splice(@prefix, $i);
				last;
			}
		}
	}

	return catdir(@prefix);
}
#
# gen_info(directory)
#
# Traverse DIRECTORY and create a .info file for each data file found.
# The .info file contains TEST_NAME in the following format:
#
# TN:<test name>
#
# For each source file name referenced in the data file, there is a section
# containing source code and coverage data:
#
# SF:<absolute path to the source file>
# FN:<line number of function start>,<function name> for each function
# DA:<line number>,<execution count> for each instrumented line
# LH:<number of lines with an execution count> greater than 0
# LF:<number of instrumented lines>
#
# Sections are separated by:
#
# end_of_record
#
# In addition to the main source code file there are sections for each
# #included file containing executable code. Note that the absolute path
# of a source file is generated by interpreting the contents of the respective
# graph file. Relative filenames are prefixed with the directory in which the
# graph file is found. Note also that symbolic links to the graph file will be
# resolved so that the actual file path is used instead of the path to a link.
# This approach is necessary for the mechanism to work with the /proc/gcov
# files.
#
# Die on error.
#
sub gen_info($)
{
	my $directory = $_[0];
	my @file_list;
	my $file;
	my $prefix;
	my $type;
	my $ext;

	# In --initial mode zero coverage is captured from graph files;
	# otherwise the actual data files are processed
	if ($initial) {
		$type = "graph";
		$ext = $graph_file_extension;
	} else {
		$type = "data";
		$ext = $data_file_extension;
	}

	if (-d $directory)
	{
		info("Scanning $directory for $ext files ...\n");

		# $maxdepth and $follow hold pre-built find(1) options
		@file_list = `find "$directory" $maxdepth $follow -name \\*$ext -type f 2>/dev/null`;
		chomp(@file_list);
		@file_list or
			die("ERROR: no $ext files found in $directory!\n");
		# Common prefix is used only for prettier progress output
		$prefix = get_common_prefix(1, @file_list);
		info("Found %d %s files in %s\n", $#file_list+1, $type,
		     $directory);
	}
	else
	{
		# A single file was specified instead of a directory
		@file_list = ($directory);
		$prefix = "";
	}

	# Process all files in list
	foreach $file (@file_list) {
		# Process file
		if ($initial) {
			process_graphfile($file, $prefix);
		} else {
			process_dafile($file, $prefix);
		}
	}
}
#
# derive_data(contentdata, funcdata, bbdata)
#
# Derive function coverage data from line coverage data: for each function
# in BBDATA, the lowest non-zero execution count among its lines becomes
# the function call count. CONTENTDATA is a flat list of (flag, count,
# source) entries per source line, FUNCDATA is a flat list of (count, name)
# pairs and BBDATA maps function names to lists of source line numbers.
#
# Return the merged list of (count, name) pairs sorted by function name.
#
sub derive_data($$$)
{
	my ($contentdata, $funcdata, $bbdata) = @_;
	my @gcov_content = @{$contentdata};
	my @gcov_functions = @{$funcdata};
	my %fn_count;
	my $fn;
	my $count;

	# Without line number data there is nothing to derive from
	if (!defined($bbdata)) {
		return @gcov_functions;
	}
	# First add existing function data
	while (@gcov_functions) {
		$count = shift(@gcov_functions);
		$fn = shift(@gcov_functions);
		$fn_count{$fn} = $count;
	}
	# Convert line coverage data to function data
	foreach $fn (keys(%{$bbdata})) {
		my $line_data = $bbdata->{$fn};
		my $line;

		# Skip unnamed function entries
		if ($fn eq "") {
			next;
		}
		# Find the lowest non-zero line count for this function
		$count = 0;
		foreach $line (@$line_data) {
			# @gcov_content holds 3 entries per source line;
			# index 1 of each triple is the execution count
			my $lcount = $gcov_content[($line - 1) * 3 + 1];

			if (($lcount > 0) &&
			    (($count == 0) || ($lcount < $count))) {
				$count = $lcount;
			}
		}
		$fn_count{$fn} = $count;
	}
	# Note: the original "check if we got data for all functions" loop
	# iterated over a hash that was never populated and could therefore
	# never emit its warning; it has been removed as dead code.

	# Convert hash to list in @gcov_functions format
	foreach $fn (sort(keys(%fn_count))) {
		push(@gcov_functions, $fn_count{$fn}, $fn);
	}

	return @gcov_functions;
}
#
# get_filenames(directory, pattern)
#
# Return a list of filenames found in directory which match the specified
# pattern.
#
# Die on error.
#
sub get_filenames($$)
{
	my ($dirname, $pattern) = @_;
	my @result;
	local *DIR;

	opendir(DIR, $dirname) or
		die("ERROR: cannot read directory $dirname\n");
	# Collect every directory entry whose name matches PATTERN
	while (my $entry = readdir(DIR)) {
		if ($entry =~ /$pattern/) {
			push(@result, $entry);
		}
	}
	closedir(DIR);

	return @result;
}
#
# process_dafile(da_filename, dir)
#
# Create a .info file for a single data file.
#
# Die on error.
#
# Create .info output for a single data file FILE. DIR is the common
# prefix used only to shorten the progress message. Relies on many of
# the globals set up in the main section ($gcov_version, $checksum,
# $compat_libtool, @gcov_options, $output_filename, ...). Changes the
# current working directory while running gcov and restores it at the
# end. Dies on error (unless the error class is ignored).
sub process_dafile($$)
{
	my ($file, $dir) = @_;
	my $da_filename;	# Name of data file to process
	my $da_dir;		# Directory of data file
	my $source_dir;		# Directory of source file
	my $da_basename;	# data filename without ".da/.gcda" extension
	my $bb_filename;	# Name of respective graph file
	my $bb_basename;	# Basename of the original graph file
	my $graph;		# Contents of graph file
	my $instr;		# Contents of graph file part 2
	my $gcov_error;		# Error code of gcov tool
	my $object_dir;		# Directory containing all object files
	my $source_filename;	# Name of a source code file
	my $gcov_file;		# Name of a .gcov file
	my @gcov_content;	# Content of a .gcov file
	my $gcov_branches;	# Branch content of a .gcov file
	my @gcov_functions;	# Function calls of a .gcov file
	my @gcov_list;		# List of generated .gcov files
	my $line_number;	# Line number count
	my $lines_hit;		# Number of instrumented lines hit
	my $lines_found;	# Number of instrumented lines found
	my $funcs_hit;		# Number of instrumented functions hit
	my $funcs_found;	# Number of instrumented functions found
	my $br_hit;
	my $br_found;
	my $source;		# gcov source header information
	my $object;		# gcov object header information
	my @matches;		# List of absolute paths matching filename
	my @unprocessed;	# List of unprocessed source code files
	my $base_dir;		# Base directory for current file
	my @tmp_links;		# Temporary links to be cleaned up
	my @result;
	my $index;
	my $da_renamed;		# If data file is to be renamed
	local *INFO_HANDLE;

	info("Processing %s\n", abs2rel($file, $dir));
	# Get path to data file in absolute and normalized form (begins with /,
	# contains no more ../ or ./)
	$da_filename = solve_relative_path($cwd, $file);

	# Get directory and basename of data file
	($da_dir, $da_basename) = split_filename($da_filename);

	# avoid files from .libs dirs
	if ($compat_libtool && $da_dir =~ m/(.*)\/\.libs$/) {
		$source_dir = $1;
	} else {
		$source_dir = $da_dir;
	}

	# Empty data files trigger the gcov <= 3.2.x rename workaround below
	if (-z $da_filename)
	{
		$da_renamed = 1;
	}
	else
	{
		$da_renamed = 0;
	}

	# Construct base_dir for current file
	if ($base_directory)
	{
		$base_dir = $base_directory;
	}
	else
	{
		$base_dir = $source_dir;
	}

	# Check for writable $base_dir (gcov will try to write files there)
	stat($base_dir);
	if (!-w _)
	{
		die("ERROR: cannot write to directory $base_dir!\n");
	}

	# Construct name of graph file
	$bb_basename = $da_basename.$graph_file_extension;
	$bb_filename = "$da_dir/$bb_basename";

	# Find out the real location of graph file in case we're just looking at
	# a link
	while (readlink($bb_filename))
	{
		my $last_dir = dirname($bb_filename);

		$bb_filename = readlink($bb_filename);
		$bb_filename = solve_relative_path($last_dir, $bb_filename);
	}

	# Ignore empty graph file (e.g. source file with no statement)
	if (-z $bb_filename)
	{
		warn("WARNING: empty $bb_filename (skipped)\n");
		return;
	}

	# Read contents of graph file into hash. We need it later to find out
	# the absolute path to each .gcov file created as well as for
	# information about functions and their source code positions.
	if ($gcov_version < $GCOV_VERSION_3_4_0)
	{
		if (defined($compatibility) && $compatibility eq $COMPAT_HAMMER)
		{
			($instr, $graph) = read_bbg($bb_filename, $base_dir);
		}
		else
		{
			($instr, $graph) = read_bb($bb_filename, $base_dir);
		}
	}
	else
	{
		($instr, $graph) = read_gcno($bb_filename, $base_dir);
	}

	# Set $object_dir to real location of object files. This may differ
	# from $da_dir if the graph file is just a link to the "real" object
	# file location.
	$object_dir = dirname($bb_filename);

	# Is the data file in a different directory? (this happens e.g. with
	# the gcov-kernel patch)
	if ($object_dir ne $da_dir)
	{
		# Need to create link to data file in $object_dir
		system("ln", "-s", $da_filename,
		       "$object_dir/$da_basename$data_file_extension")
			and die ("ERROR: cannot create link $object_dir/".
				 "$da_basename$data_file_extension!\n");
		push(@tmp_links,
		     "$object_dir/$da_basename$data_file_extension");
		# Need to create link to graph file if basename of link
		# and file are different (CONFIG_MODVERSION compat)
		if ((basename($bb_filename) ne $bb_basename) &&
		    (! -e "$object_dir/$bb_basename")) {
			symlink($bb_filename, "$object_dir/$bb_basename") or
				warn("WARNING: cannot create link ".
				     "$object_dir/$bb_basename\n");
			push(@tmp_links, "$object_dir/$bb_basename");
		}
	}

	# Change to directory containing data files and apply GCOV
	chdir($base_dir);

	if ($da_renamed)
	{
		# Need to rename empty data file to workaround
		# gcov <= 3.2.x bug (Abort)
		system_no_output(3, "mv", "$da_filename", "$da_filename.ori")
			and die ("ERROR: cannot rename $da_filename\n");
	}

	# Execute gcov command and suppress standard output
	$gcov_error = system_no_output(1, $gcov_tool, $da_filename,
				       "-o", $object_dir, @gcov_options);

	if ($da_renamed)
	{
		# Undo the rename workaround from above
		system_no_output(3, "mv", "$da_filename.ori", "$da_filename")
			and die ("ERROR: cannot rename $da_filename.ori");
	}

	# Clean up temporary links
	foreach (@tmp_links) {
		unlink($_);
	}

	if ($gcov_error)
	{
		if ($ignore[$ERROR_GCOV])
		{
			warn("WARNING: GCOV failed for $da_filename!\n");
			return;
		}
		die("ERROR: GCOV failed for $da_filename!\n");
	}

	# Collect data from resulting .gcov files and create .info file
	@gcov_list = get_filenames('.', '\.gcov$');

	# Check for files
	if (!@gcov_list)
	{
		warn("WARNING: gcov did not create any files for ".
		     "$da_filename!\n");
	}

	# Check whether we're writing to a single file
	if ($output_filename)
	{
		if ($output_filename eq "-")
		{
			*INFO_HANDLE = *STDOUT;
		}
		else
		{
			# Append to output file
			open(INFO_HANDLE, ">>$output_filename")
				or die("ERROR: cannot write to ".
				       "$output_filename!\n");
		}
	}
	else
	{
		# Open .info file for output
		open(INFO_HANDLE, ">$da_filename.info")
			or die("ERROR: cannot create $da_filename.info!\n");
	}

	# Write test name
	printf(INFO_HANDLE "TN:%s\n", $test_name);

	# Traverse the list of generated .gcov files and combine them into a
	# single .info file
	@unprocessed = keys(%{$instr});
	foreach $gcov_file (sort(@gcov_list))
	{
		my $i;
		my $num;

		($source, $object) = read_gcov_header($gcov_file);

		if (defined($source))
		{
			$source = solve_relative_path($base_dir, $source);
		}

		# gcov will happily create output even if there's no source code
		# available - this interferes with checksum creation so we need
		# to pull the emergency brake here.
		if (defined($source) && ! -r $source && $checksum)
		{
			if ($ignore[$ERROR_SOURCE])
			{
				warn("WARNING: could not read source file ".
				     "$source\n");
				next;
			}
			die("ERROR: could not read source file $source\n");
		}

		@matches = match_filename(defined($source) ? $source :
					  $gcov_file, keys(%{$instr}));

		# Skip files that are not mentioned in the graph file
		if (!@matches)
		{
			warn("WARNING: cannot find an entry for ".$gcov_file.
			     " in $graph_file_extension file, skipping ".
			     "file!\n");
			unlink($gcov_file);
			next;
		}

		# Read in contents of gcov file
		@result = read_gcov_file($gcov_file);
		if (!defined($result[0])) {
			warn("WARNING: skipping unreadable file ".
			     $gcov_file."\n");
			unlink($gcov_file);
			next;
		}
		@gcov_content = @{$result[0]};
		$gcov_branches = $result[1];
		@gcov_functions = @{$result[2]};

		# Skip empty files
		if (!@gcov_content)
		{
			warn("WARNING: skipping empty file ".$gcov_file."\n");
			unlink($gcov_file);
			next;
		}

		if (scalar(@matches) == 1)
		{
			# Just one match
			$source_filename = $matches[0];
		}
		else
		{
			# Try to solve the ambiguity
			$source_filename = solve_ambiguous_match($gcov_file,
						\@matches, \@gcov_content);
		}

		# Remove processed file from list
		for ($index = scalar(@unprocessed) - 1; $index >= 0; $index--)
		{
			if ($unprocessed[$index] eq $source_filename)
			{
				splice(@unprocessed, $index, 1);
				last;
			}
		}

		# Write absolute path of source file
		printf(INFO_HANDLE "SF:%s\n", $source_filename);

		# If requested, derive function coverage data from
		# line coverage data of the first line of a function
		if ($opt_derive_func_data) {
			@gcov_functions =
				derive_data(\@gcov_content, \@gcov_functions,
					    $graph->{$source_filename});
		}

		# Write function-related information
		if (defined($graph->{$source_filename}))
		{
			my $fn_data = $graph->{$source_filename};
			my $fn;

			# Emit FN: records ordered by the function's start line
			foreach $fn (sort
				{$fn_data->{$a}->[0] <=> $fn_data->{$b}->[0]}
				keys(%{$fn_data})) {
				my $ln_data = $fn_data->{$fn};
				my $line = $ln_data->[0];

				# Skip empty function
				if ($fn eq "") {
					next;
				}
				# Remove excluded functions
				if (!$no_markers) {
					my $gfn;
					my $found = 0;

					foreach $gfn (@gcov_functions) {
						if ($gfn eq $fn) {
							$found = 1;
							last;
						}
					}
					if (!$found) {
						next;
					}
				}

				# Normalize function name
				$fn = filter_fn_name($fn);

				print(INFO_HANDLE "FN:$line,$fn\n");
			}
		}

		#--
		#-- FNDA: <call-count>, <function-name>
		#-- FNF: overall count of functions
		#-- FNH: overall count of functions with non-zero call count
		#--
		$funcs_found = 0;
		$funcs_hit = 0;
		while (@gcov_functions)
		{
			my $count = shift(@gcov_functions);
			my $fn = shift(@gcov_functions);

			$fn = filter_fn_name($fn);
			printf(INFO_HANDLE "FNDA:$count,$fn\n");
			$funcs_found++;
			$funcs_hit++ if ($count > 0);
		}
		if ($funcs_found > 0) {
			printf(INFO_HANDLE "FNF:%s\n", $funcs_found);
			printf(INFO_HANDLE "FNH:%s\n", $funcs_hit);
		}

		# Write coverage information for each instrumented branch:
		#
		#   BRDA:<line number>,<block number>,<branch number>,<taken>
		#
		# where 'taken' is the number of times the branch was taken
		# or '-' if the block to which the branch belongs was never
		# executed
		$br_found = 0;
		$br_hit = 0;
		$num = br_gvec_len($gcov_branches);
		for ($i = 0; $i < $num; $i++) {
			my ($line, $block, $branch, $taken) =
				br_gvec_get($gcov_branches, $i);

			print(INFO_HANDLE "BRDA:$line,$block,$branch,$taken\n");
			$br_found++;
			$br_hit++ if ($taken ne '-' && $taken > 0);
		}
		if ($br_found > 0) {
			printf(INFO_HANDLE "BRF:%s\n", $br_found);
			printf(INFO_HANDLE "BRH:%s\n", $br_hit);
		}

		# Reset line counters
		$line_number = 0;
		$lines_found = 0;
		$lines_hit = 0;

		# Write coverage information for each instrumented line
		# Note: @gcov_content contains a list of (flag, count, source)
		# tuple for each source code line
		while (@gcov_content)
		{
			$line_number++;

			# Check for instrumented line
			if ($gcov_content[0])
			{
				$lines_found++;
				printf(INFO_HANDLE "DA:".$line_number.",".
				       $gcov_content[1].($checksum ?
				       ",". md5_base64($gcov_content[2]) : "").
				       "\n");

				# Increase $lines_hit in case of an execution
				# count>0
				if ($gcov_content[1] > 0) { $lines_hit++; }
			}

			# Remove already processed data from array
			splice(@gcov_content,0,3);
		}

		# Write line statistics and section separator
		printf(INFO_HANDLE "LF:%s\n", $lines_found);
		printf(INFO_HANDLE "LH:%s\n", $lines_hit);
		print(INFO_HANDLE "end_of_record\n");

		# Remove .gcov file after processing
		unlink($gcov_file);
	}

	# Check for files which show up in the graph file but were never
	# processed
	if (@unprocessed && @gcov_list)
	{
		foreach (@unprocessed)
		{
			warn("WARNING: no data found for $_\n");
		}
	}

	if (!($output_filename && ($output_filename eq "-")))
	{
		close(INFO_HANDLE);
	}

	# Change back to initial directory
	chdir($cwd);
}
#
# solve_relative_path(path, dir)
#
# Solve relative path components of DIR which, if not absolute, resides in PATH.
#
sub solve_relative_path($$)
{
	my ($path, $dir) = @_;
	my $result = $dir;

	# Prepend PATH if DIR is not absolute
	$result = "$path/$result" if ($result =~ /^[^\/]/);
	# Collapse duplicate slashes
	$result =~ s/\/\//\//g;
	# Drop "./" path components
	$result =~ s/\/\.\//\//g;
	# Resolve "component/../" sequences until none remain
	while ($result =~ s/\/[^\/]+\/\.\.\//\//)
	{
	}
	# Remove a leading "/../"
	$result =~ s/^\/\.\.\//\//g;

	return $result;
}
#
# match_filename(gcov_filename, list)
#
# Return a list of those entries of LIST which match the relative filename
# GCOV_FILENAME.
#
sub match_filename($@)
{
	# FILENAME: relative or absolute filename from the gcov output.
	# LIST: candidate absolute filenames from the graph file.
	# Returns all entries of LIST whose trailing path components match
	# FILENAME completely.
	my ($filename, @list) = @_;
	my ($vol, $dir, $file) = splitpath($filename);
	my @comp = splitdir($dir);
	my $comps = scalar(@comp);
	my $entry;
	my @result;

entry:
	foreach $entry (@list) {
		my ($evol, $edir, $efile) = splitpath($entry);
		my @ecomp;
		my $ecomps;
		my $i;

		# Filename component must match
		if ($efile ne $file) {
			next;
		}
		# Check directory components last to first for match
		@ecomp = splitdir($edir);
		$ecomps = scalar(@ecomp);
		if ($ecomps < $comps) {
			next;
		}
		for ($i = 0; $i < $comps; $i++) {
			if ($comp[$comps - $i - 1] ne
			    $ecomp[$ecomps - $i - 1]) {
				next entry;
			}
		}
		# Fixed: statement was terminated with ',' instead of ';'
		push(@result, $entry);
	}

	return @result;
}
#
# solve_ambiguous_match(rel_filename, matches_ref, gcov_content_ref)
#
# Try to solve ambiguous matches of mapping (gcov file) -> (source code) file
# by comparing source code provided in the GCOV file with that of the files
# in MATCHES. REL_FILENAME identifies the relative filename of the gcov
# file.
#
# Return the one real match or die if there is none.
#
sub solve_ambiguous_match($$$)
{
	my $rel_name = $_[0];	# relative filename of the .gcov file
	my $matches = $_[1];	# listref of candidate source filenames
	my $content = $_[2];	# listref of (flag, count, source) triples
	my $filename;
	my $index;
	my $no_match;
	local *SOURCE;
	# Check the list of matches
	foreach $filename (@$matches)
	{
		# Compare file contents
		open(SOURCE, $filename)
			or die("ERROR: cannot read $filename!\n");
		$no_match = 0;
		# Index 2, 5, 8, ... addresses the source-text slot of each
		# (flag, count, source) triple; <SOURCE> reads into $_
		for ($index = 2; <SOURCE>; $index += 3)
		{
			chomp;
			# Also remove CR from line-end
			s/\015$//;
			# Any differing line disqualifies this candidate
			if ($_ ne @$content[$index])
			{
				$no_match = 1;
				last;
			}
		}
		close(SOURCE);
		# First candidate whose contents fully match wins
		if (!$no_match)
		{
			info("Solved source file ambiguity for $rel_name\n");
			return $filename;
		}
	}
	die("ERROR: could not match gcov data for $rel_name!\n");
}
#
# split_filename(filename)
#
# Return (path, filename, extension) for a given FILENAME.
#
sub split_filename($)
{
	my ($full) = @_;

	# Separate directory part from the basename
	my @dirs = split('/', $full);
	# Split the basename at dots; the last part is the extension
	my @parts = split('\.', pop(@dirs));
	my $ext = pop(@parts);
	return (join("/", @dirs), join(".", @parts), $ext);
}
#
# read_gcov_header(gcov_filename)
#
# Parse file GCOV_FILENAME and return a list containing the following
# information:
#
# (source, object)
#
# where:
#
# source: complete relative path of the source code file (gcc >= 3.3 only)
# object: name of associated graph file
#
# Die on error.
#
sub read_gcov_header($)
{
	my ($gcov_file) = @_;
	my $source;
	my $object;
	local *INPUT;

	if (!open(INPUT, $gcov_file))
	{
		# Honor --ignore-errors gcov: warn instead of aborting
		if ($ignore_errors[$ERROR_GCOV])
		{
			warn("WARNING: cannot read $gcov_file!\n");
			return (undef,undef);
		}
		die("ERROR: cannot read $gcov_file!\n");
	}
	# Header entries appear at the top of the file; stop at the first
	# line that is neither a Source: nor an Object: entry
	while (my $line = <INPUT>)
	{
		chomp($line);
		# Also strip a DOS-style CR at line-end
		$line =~ s/\015$//;
		if ($line =~ /^\s+-:\s+0:Source:(.*)$/)
		{
			$source = $1;
		}
		elsif ($line =~ /^\s+-:\s+0:Object:(.*)$/)
		{
			$object = $1;
		}
		else
		{
			last;
		}
	}
	close(INPUT);
	return ($source, $object);
}
#
# br_gvec_len(vector)
#
# Return the number of entries in the branch coverage vector.
#
sub br_gvec_len($)
{
	my ($vec) = @_;

	# An undefined vector holds no entries
	return 0 unless defined($vec);
	# Each entry consists of $BR_VEC_ENTRIES fields of $BR_VEC_WIDTH bits
	return (length($vec) * 8 / $BR_VEC_WIDTH) / $BR_VEC_ENTRIES;
}
#
# br_gvec_get(vector, number)
#
# Return an entry from the branch coverage vector.
#
sub br_gvec_get($$)
{
	my ($vec, $num) = @_;
	my $base = $num * $BR_VEC_ENTRIES;

	# Fetch the four fields that make up one branch entry
	my $line = vec($vec, $base + $BR_LINE, $BR_VEC_WIDTH);
	my $block = vec($vec, $base + $BR_BLOCK, $BR_VEC_WIDTH);
	my $branch = vec($vec, $base + $BR_BRANCH, $BR_VEC_WIDTH);
	my $taken = vec($vec, $base + $BR_TAKEN, $BR_VEC_WIDTH);

	# Taken is stored incremented by one; zero encodes "-" (not taken)
	$taken = ($taken == 0) ? "-" : $taken - 1;
	return ($line, $block, $branch, $taken);
}
#
# br_gvec_push(vector, line, block, branch, taken)
#
# Add an entry to the branch coverage vector.
#
sub br_gvec_push($$$$$)
{
	my ($vec, $line, $block, $branch, $taken) = @_;

	$vec = "" unless defined($vec);
	my $base = br_gvec_len($vec) * $BR_VEC_ENTRIES;

	# Store taken incremented by one so that "-" can be encoded as zero
	$taken = ($taken eq "-") ? 0 : $taken + 1;
	# Append the four fields of the new entry
	vec($vec, $base + $BR_LINE, $BR_VEC_WIDTH) = $line;
	vec($vec, $base + $BR_BLOCK, $BR_VEC_WIDTH) = $block;
	vec($vec, $base + $BR_BRANCH, $BR_VEC_WIDTH) = $branch;
	vec($vec, $base + $BR_TAKEN, $BR_VEC_WIDTH) = $taken;
	return $vec;
}
#
# read_gcov_file(gcov_filename)
#
# Parse file GCOV_FILENAME (.gcov file format) and return the list:
# (reference to gcov_content, reference to gcov_branch, reference to gcov_func)
#
# gcov_content is a list of 3 elements
# (flag, count, source) for each source code line:
#
# $result[($line_number-1)*3+0] = instrumentation flag for line $line_number
# $result[($line_number-1)*3+1] = execution count for line $line_number
# $result[($line_number-1)*3+2] = source code text for line $line_number
#
# gcov_branch is a vector of 4 4-byte long elements for each branch:
# line number, block number, branch number, count + 1 or 0
#
# gcov_func is a list of 2 elements
# (number of calls, function name) for each function
#
# Die on error.
#
sub read_gcov_file($)
{
	my $filename = $_[0];
	my @result = ();	# flat list of (flag, count, source) per line
	my $branches = "";	# packed branch vector, see br_gvec_*
	my @functions = ();	# flat list of (call count, name) pairs
	my $number;
	my $exclude_flag = 0;	# inside an LCOV_EXCL_START section
	my $exclude_line = 0;	# current line carries an exclusion marker
	my $last_block = $UNNAMED_BLOCK;	# block to attribute branches to
	my $last_line = 0;	# line to attribute branches to
	local *INPUT;
	if (!open(INPUT, $filename)) {
		# Honor --ignore-errors gcov: warn instead of aborting
		if ($ignore_errors[$ERROR_GCOV])
		{
			warn("WARNING: cannot read $filename!\n");
			return (undef, undef, undef);
		}
		die("ERROR: cannot read $filename!\n");
	}
	if ($gcov_version < $GCOV_VERSION_3_3_0)
	{
		# Expect gcov format as used in gcc < 3.3
		while (<INPUT>)
		{
			chomp($_);
			# Also remove CR from line-end
			s/\015$//;
			if (/^branch\s+(\d+)\s+taken\s+=\s+(\d+)/) {
				# Branch with an execution count
				next if ($exclude_line);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, $2);
			} elsif (/^branch\s+(\d+)\s+never\s+executed/) {
				# Branch that was never evaluated
				next if ($exclude_line);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, '-');
			}
			elsif (/^call/ || /^function/)
			{
				# Function call return data
			}
			else
			{
				# Anything else is a source line
				$last_line++;
				# Check for exclusion markers
				if (!$no_markers) {
					if (/$EXCL_STOP/) {
						$exclude_flag = 0;
					} elsif (/$EXCL_START/) {
						$exclude_flag = 1;
					}
					if (/$EXCL_LINE/ || $exclude_flag) {
						$exclude_line = 1;
					} else {
						$exclude_line = 0;
					}
				}
				# Source code execution data
				if (/^\t\t(.*)$/)
				{
					# Uninstrumented line
					push(@result, 0);
					push(@result, 0);
					push(@result, $1);
					next;
				}
				# Count occupies a fixed 16-character column
				$number = (split(" ",substr($_, 0, 16)))[0];
				# Check for zero count which is indicated
				# by ######
				if ($number eq "######") { $number = 0; }
				if ($exclude_line) {
					# Register uninstrumented line instead
					push(@result, 0);
					push(@result, 0);
				} else {
					push(@result, 1);
					push(@result, $number);
				}
				# Source text starts after the count column
				push(@result, substr($_, 16));
			}
		}
	}
	else
	{
		# Expect gcov format as used in gcc >= 3.3
		while (<INPUT>)
		{
			chomp($_);
			# Also remove CR from line-end
			s/\015$//;
			if (/^\s*(\d+|\$+):\s*(\d+)-block\s+(\d+)\s*$/) {
				# Block information - used to group related
				# branches
				$last_line = $2;
				$last_block = $3;
			} elsif (/^branch\s+(\d+)\s+taken\s+(\d+)/) {
				next if ($exclude_line);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, $2);
			} elsif (/^branch\s+(\d+)\s+never\s+executed/) {
				next if ($exclude_line);
				$branches = br_gvec_push($branches, $last_line,
						$last_block, $1, '-');
			}
			elsif (/^function\s+(\S+)\s+called\s+(\d+)/)
			{
				# Function entry with its call count
				if ($exclude_line) {
					next;
				}
				push(@functions, $2, $1);
			}
			elsif (/^call/)
			{
				# Function call return data
			}
			elsif (/^\s*([^:]+):\s*([^:]+):(.*)$/)
			{
				my ($count, $line, $code) = ($1, $2, $3);
				$last_line = $line;
				$last_block = $UNNAMED_BLOCK;
				# Check for exclusion markers
				if (!$no_markers) {
					if (/$EXCL_STOP/) {
						$exclude_flag = 0;
					} elsif (/$EXCL_START/) {
						$exclude_flag = 1;
					}
					if (/$EXCL_LINE/ || $exclude_flag) {
						$exclude_line = 1;
					} else {
						$exclude_line = 0;
					}
				}
				# <exec count>:<line number>:<source code>
				if ($line eq "0")
				{
					# Extra data
				}
				elsif ($count eq "-")
				{
					# Uninstrumented line
					push(@result, 0);
					push(@result, 0);
					push(@result, $code);
				}
				else
				{
					if ($exclude_line) {
						# Register uninstrumented
						# line instead
						push(@result, 0);
						push(@result, 0);
					} else {
						# Check for zero count
						if ($count eq "#####") {
							$count = 0;
						}
						push(@result, 1);
						push(@result, $count);
					}
					push(@result, $code);
				}
			}
		}
	}
	close(INPUT);
	if ($exclude_flag) {
		warn("WARNING: unterminated exclusion section in $filename\n");
	}
	return(\@result, $branches, \@functions);
}
#
# Get the GCOV tool version. Return an integer number which represents the
# GCOV version. Version numbers can be compared using standard integer
# operations.
#
#
# Run "$gcov_tool -v" and encode the reported version as an integer
# (major << 16 | minor << 8 | sub) so versions compare numerically.
# Also enables hammer compatibility mode for known SUSE/Mandrake builds.
#
sub get_gcov_version()
{
	my $version_string;
	my $result;

	open(GCOV_PIPE, "$gcov_tool -v |")
		or die("ERROR: cannot retrieve gcov version!\n");
	$version_string = <GCOV_PIPE>;
	close(GCOV_PIPE);
	# Guard against an empty response (e.g. broken gcov binary):
	# avoids matching against an undefined value below
	$version_string = "" if (!defined($version_string));
	$result = 0;
	if ($version_string =~ /(\d+)\.(\d+)(\.(\d+))?/)
	{
		if (defined($4))
		{
			info("Found gcov version: $1.$2.$4\n");
			$result = $1 << 16 | $2 << 8 | $4;
		}
		else
		{
			info("Found gcov version: $1.$2\n");
			$result = $1 << 16 | $2 << 8;
		}
	}
	# GCC 3.3 (hammer) builds shipped by these distributions use a
	# modified .bbg format
	if ($version_string =~ /suse/i && $result == 0x30303 ||
	    $version_string =~ /mandrake/i && $result == 0x30302)
	{
		info("Using compatibility mode for GCC 3.3 (hammer)\n");
		$compatibility = $COMPAT_HAMMER;
	}
	return $result;
}
#
# info(printf_parameter)
#
# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
# is not set.
#
sub info(@)
{
	# Suppress all informational output when --quiet was given
	return if ($quiet);
	if (defined($output_filename) && ($output_filename eq "-"))
	{
		# .info data is written to STDOUT - keep messages on STDERR
		printf(STDERR @_);
	}
	else
	{
		printf(@_);
	}
}
#
# int_handler()
#
# Called when the script was interrupted by an INT signal (e.g. CTRl-C)
#
sub int_handler()
{
	# Restore the original working directory before exiting
	chdir($cwd) if ($cwd);
	info("Aborted.\n");
	exit(1);
}
#
# system_no_output(mode, parameters)
#
# Call an external program using PARAMETERS while suppressing depending on
# the value of MODE:
#
# MODE & 1: suppress STDOUT
# MODE & 2: suppress STDERR
#
# Return 0 on success, non-zero otherwise.
#
sub system_no_output($@)
{
	my $mode = shift;	# bit 1: suppress STDOUT, bit 2: suppress STDERR
	my $result;
	local *OLD_STDERR;
	local *OLD_STDOUT;
	# Save old stdout and stderr handles
	($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
	($mode & 2) && open(OLD_STDERR, ">>&STDERR");
	# Redirect to /dev/null
	($mode & 1) && open(STDOUT, ">/dev/null");
	($mode & 2) && open(STDERR, ">/dev/null");
	# Remaining arguments form the command line
	system(@_);
	# Preserve raw wait status ($?) before further handle operations
	$result = $?;
	# Close redirected handles
	($mode & 1) && close(STDOUT);
	($mode & 2) && close(STDERR);
	# Restore old handles
	($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
	($mode & 2) && open(STDERR, ">>&OLD_STDERR");
	return $result;
}
#
# read_config(filename)
#
# Read configuration file FILENAME and return a reference to a hash containing
# all valid key=value pairs found.
#
sub read_config($)
{
	my ($filename) = @_;
	my %result;
	local *HANDLE;

	if (!open(HANDLE, "<$filename"))
	{
		warn("WARNING: cannot read configuration file $filename\n");
		return undef;
	}
	# Parse "key = value" lines; '#' starts a comment
	while (my $line = <HANDLE>)
	{
		chomp($line);
		$line =~ s/#.*//;	# strip comments
		$line =~ s/^\s+//;	# strip leading blanks
		$line =~ s/\s+$//;	# strip trailing blanks
		next if ($line eq "");
		my ($key, $value) = split(/\s*=\s*/, $line, 2);
		if (defined($key) && defined($value))
		{
			$result{$key} = $value;
		}
		else
		{
			warn("WARNING: malformed statement in line $. ".
			     "of configuration file $filename\n");
		}
	}
	close(HANDLE);
	return \%result;
}
#
# apply_config(REF)
#
# REF is a reference to a hash containing the following mapping:
#
# key_string => var_ref
#
# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
# variable. If the global configuration hash CONFIG contains a value for
# keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
#
sub apply_config($)
{
	my ($map) = @_;

	# Copy each configured value into the variable it is mapped to
	foreach my $key (keys(%{$map}))
	{
		next if (!defined($config->{$key}));
		${$map->{$key}} = $config->{$key};
	}
}
#
# get_exclusion_data(filename)
#
# Scan specified source code file for exclusion markers and return
# linenumber -> 1
# for all lines which should be excluded.
#
sub get_exclusion_data($)
{
	my ($filename) = @_;
	my %excluded;
	my $in_block = 0;
	local *HANDLE;

	if (!open(HANDLE, "<$filename")) {
		warn("WARNING: could not open $filename\n");
		return undef;
	}
	while (<HANDLE>) {
		# Track LCOV_EXCL_START/STOP sections
		if (/$EXCL_STOP/) {
			$in_block = 0;
		} elsif (/$EXCL_START/) {
			$in_block = 1;
		}
		# Record the line if it carries a single-line marker or
		# lies inside an exclusion section ($. is the line number)
		$excluded{$.} = 1 if (/$EXCL_LINE/ || $in_block);
	}
	close(HANDLE);
	if ($in_block) {
		warn("WARNING: unterminated exclusion section in $filename\n");
	}
	return \%excluded;
}
#
# apply_exclusion_data(instr, graph)
#
# Remove lines from instr and graph data structures which are marked
# for exclusion in the source code file.
#
# Return adjusted (instr, graph).
#
# graph : file name -> function data
# function data : function name -> line data
# line data : [ line1, line2, ... ]
#
# instr : filename -> line data
# line data : [ line1, line2, ... ]
#
sub apply_exclusion_data($$)
{
	my ($instr, $graph) = @_;
	my $filename;
	my %excl_data;		# filename -> { line number -> 1 }
	my $excl_read_failed = 0;
	# Collect exclusion marker data
	foreach $filename (sort_uniq_lex(keys(%{$graph}), keys(%{$instr}))) {
		my $excl = get_exclusion_data($filename);
		# Skip and note if file could not be read
		if (!defined($excl)) {
			$excl_read_failed = 1;
			next;
		}
		# Add to collection if there are markers
		$excl_data{$filename} = $excl if (keys(%{$excl}) > 0);
	}
	# Warn if not all source files could be read
	if ($excl_read_failed) {
		warn("WARNING: some exclusion markers may be ignored\n");
	}
	# Skip if no markers were found
	return ($instr, $graph) if (keys(%excl_data) == 0);
	# Apply exclusion marker data to graph
	foreach $filename (keys(%excl_data)) {
		my $function_data = $graph->{$filename};
		my $excl = $excl_data{$filename};
		my $function;
		next if (!defined($function_data));
		foreach $function (keys(%{$function_data})) {
			my $line_data = $function_data->{$function};
			my $line;
			my @new_data;
			# To be consistent with exclusion parser in non-initial
			# case we need to remove a function if the first line
			# was excluded
			if ($excl->{$line_data->[0]}) {
				delete($function_data->{$function});
				next;
			}
			# Copy only lines which are not excluded
			foreach $line (@{$line_data}) {
				push(@new_data, $line) if (!$excl->{$line});
			}
			# Store modified list
			if (scalar(@new_data) > 0) {
				$function_data->{$function} = \@new_data;
			} else {
				# All of this function was excluded
				delete($function_data->{$function});
			}
		}
		# Check if all functions of this file were excluded
		if (keys(%{$function_data}) == 0) {
			delete($graph->{$filename});
		}
	}
	# Apply exclusion marker data to instr
	foreach $filename (keys(%excl_data)) {
		my $line_data = $instr->{$filename};
		my $excl = $excl_data{$filename};
		my $line;
		my @new_data;
		next if (!defined($line_data));
		# Copy only lines which are not excluded
		foreach $line (@{$line_data}) {
			push(@new_data, $line) if (!$excl->{$line});
		}
		# Store modified list
		if (scalar(@new_data) > 0) {
			$instr->{$filename} = \@new_data;
		} else {
			# All of this file was excluded
			delete($instr->{$filename});
		}
	}
	return ($instr, $graph);
}
#
# process_graphfile(file, dir)
#
# Read the graph file FILE (relative to DIR) and write an .info record with
# zero execution counts (FNDA:0, DA:<line>,0, LH:0) for every source file it
# references. NOTE(review): presumably called for graph files that have no
# accompanying .gcda data - confirm with the caller.
#
sub process_graphfile($$)
{
	my ($file, $dir) = @_;
	my $graph_filename = $file;
	my $graph_dir;
	my $graph_basename;
	my $source_dir;
	my $base_dir;
	my $graph;
	my $instr;
	my $filename;
	local *INFO_HANDLE;
	info("Processing %s\n", abs2rel($file, $dir));
	# Get path to data file in absolute and normalized form (begins with /,
	# contains no more ../ or ./)
	$graph_filename = solve_relative_path($cwd, $graph_filename);
	# Get directory and basename of data file
	($graph_dir, $graph_basename) = split_filename($graph_filename);
	# avoid files from .libs dirs
	if ($compat_libtool && $graph_dir =~ m/(.*)\/\.libs$/) {
		$source_dir = $1;
	} else {
		$source_dir = $graph_dir;
	}
	# Construct base_dir for current file
	if ($base_directory)
	{
		$base_dir = $base_directory;
	}
	else
	{
		$base_dir = $source_dir;
	}
	# Select the reader matching the gcc version that produced the file:
	# .bbg (hammer), .bb (gcc < 3.4) or .gcno (gcc >= 3.4)
	if ($gcov_version < $GCOV_VERSION_3_4_0)
	{
		if (defined($compatibility) && $compatibility eq $COMPAT_HAMMER)
		{
			($instr, $graph) = read_bbg($graph_filename, $base_dir);
		}
		else
		{
			($instr, $graph) = read_bb($graph_filename, $base_dir);
		}
	}
	else
	{
		($instr, $graph) = read_gcno($graph_filename, $base_dir);
	}
	if (!$no_markers) {
		# Apply exclusion marker data to graph file data
		($instr, $graph) = apply_exclusion_data($instr, $graph);
	}
	# Check whether we're writing to a single file
	if ($output_filename)
	{
		if ($output_filename eq "-")
		{
			*INFO_HANDLE = *STDOUT;
		}
		else
		{
			# Append to output file
			open(INFO_HANDLE, ">>$output_filename")
				or die("ERROR: cannot write to ".
				       "$output_filename!\n");
		}
	}
	else
	{
		# Open .info file for output
		open(INFO_HANDLE, ">$graph_filename.info")
			or die("ERROR: cannot create $graph_filename.info!\n");
	}
	# Write test name
	printf(INFO_HANDLE "TN:%s\n", $test_name);
	foreach $filename (sort(keys(%{$instr})))
	{
		my $funcdata = $graph->{$filename};
		my $line;
		my $linedata;
		print(INFO_HANDLE "SF:$filename\n");
		if (defined($funcdata)) {
			# Sort functions by their starting line number
			my @functions = sort {$funcdata->{$a}->[0] <=>
					      $funcdata->{$b}->[0]}
					keys(%{$funcdata});
			my $func;
			# Gather list of instrumented lines and functions
			foreach $func (@functions) {
				$linedata = $funcdata->{$func};
				# Print function name and starting line
				print(INFO_HANDLE "FN:".$linedata->[0].
				      ",".filter_fn_name($func)."\n");
			}
			# Print zero function coverage data
			foreach $func (@functions) {
				print(INFO_HANDLE "FNDA:0,".
				      filter_fn_name($func)."\n");
			}
			# Print function summary
			print(INFO_HANDLE "FNF:".scalar(@functions)."\n");
			print(INFO_HANDLE "FNH:0\n");
		}
		# Print zero line coverage data
		foreach $line (@{$instr->{$filename}}) {
			print(INFO_HANDLE "DA:$line,0\n");
		}
		# Print line summary
		print(INFO_HANDLE "LF:".scalar(@{$instr->{$filename}})."\n");
		print(INFO_HANDLE "LH:0\n");
		print(INFO_HANDLE "end_of_record\n");
	}
	# Leave STDOUT open when writing the single-file output there
	if (!($output_filename && ($output_filename eq "-")))
	{
		close(INFO_HANDLE);
	}
}
#
# filter_fn_name(name)
#
# Replace characters used internally as function name delimiters (',' and
# '=') with underscores and return the sanitized name.
#
sub filter_fn_name($)
{
	my ($name) = @_;

	(my $safe = $name) =~ tr/,=/_/;
	return $safe;
}
# Prefix every warning with the tool name.
sub warn_handler($)
{
	my ($msg) = @_;

	warn($tool_name.": ".$msg);
}
# Prefix every fatal message with the tool name.
sub die_handler($)
{
	my ($msg) = @_;

	die($tool_name.": ".$msg);
}
#
# graph_error(filename, message)
#
# Print message about error in graph file. If ignore_graph_error is set, return.
# Otherwise abort.
#
sub graph_error($$)
{
	my ($filename, $msg) = @_;
	# NOTE(review): this tests @ignore while other error paths in this
	# file test @ignore_errors - confirm @ignore is the intended array
	if ($ignore[$ERROR_GRAPH]) {
		warn("WARNING: $filename: $msg - skipping\n");
		return;
	}
	die("ERROR: $filename: $msg\n");
}
#
# graph_expect(description)
#
# If debug is set to a non-zero value, print the specified description of what
# is expected to be read next from the graph file.
#
sub graph_expect($)
{
	my ($msg) = @_;

	# Only trace when debugging is enabled and a description was given
	return if (!$debug || !defined($msg));
	print(STDERR "DEBUG: expecting $msg\n");
}
#
# graph_read(handle, bytes[, description])
#
# Read and return the specified number of bytes from handle. Return undef
# if the number of bytes could not be read.
#
sub graph_read(*$;$)
{
	my ($handle, $length, $desc) = @_;
	my $data;
	my $result;
	graph_expect($desc);
	$result = read($handle, $data, $length);
	# In debug mode, dump the bytes read as hex plus an ASCII column
	if ($debug) {
		my $ascii = "";
		my $hex = "";
		my $i;
		print(STDERR "DEBUG: read($length)=$result: ");
		for ($i = 0; $i < length($data); $i++) {
			my $c = substr($data, $i, 1);;
			my $n = ord($c);
			$hex .= sprintf("%02x ", $n);
			# Printable range; everything else shows as '.'
			if ($n >= 32 && $n <= 127) {
				$ascii .= $c;
			} else {
				$ascii .= ".";
			}
		}
		print(STDERR "$hex |$ascii|");
		print(STDERR "\n");
	}
	# A short read (including EOF and read errors) counts as failure
	if ($result != $length) {
		return undef;
	}
	return $data;
}
#
# graph_skip(handle, bytes[, description])
#
# Read and discard the specified number of bytes from handle. Return non-zero
# if bytes could be read, zero otherwise.
#
sub graph_skip(*$;$)
{
	my ($handle, $length, $desc) = @_;

	# Read and discard; success means all bytes were present
	return defined(graph_read($handle, $length, $desc)) ? 1 : 0;
}
#
# sort_uniq(list)
#
# Return list in numerically ascending order and without duplicate entries.
#
sub sort_uniq(@)
{
	my %seen;

	# Hash keys de-duplicate the input values
	@seen{@_} = ();
	return sort { $a <=> $b } keys(%seen);
}
#
# sort_uniq_lex(list)
#
# Return list in lexically ascending order and without duplicate entries.
#
sub sort_uniq_lex(@)
{
	my %seen;

	# Hash keys de-duplicate the input values
	@seen{@_} = ();
	return sort keys(%seen);
}
#
# graph_cleanup(graph)
#
# Remove entries for functions with no lines. Remove duplicate line numbers.
# Sort list of line numbers numerically ascending.
#
sub graph_cleanup($)
{
	my ($graph) = @_;

	foreach my $file (keys(%{$graph})) {
		my $functions = $graph->{$file};

		foreach my $fn (keys(%{$functions})) {
			my $lines = $functions->{$fn};

			# Drop functions that have no associated lines
			if (scalar(@$lines) == 0) {
				delete($functions->{$fn});
				next;
			}
			# De-duplicate and numerically sort line numbers
			$functions->{$fn} = [ sort_uniq(@$lines) ];
		}
		# Drop files left without any functions
		if (scalar(keys(%{$functions})) == 0) {
			delete($graph->{$file});
		}
	}
}
#
# graph_find_base(bb)
#
# Try to identify the filename which is the base source file for the
# specified bb data.
#
#
# Return the filename that contributes lines to the most functions in BB -
# this is assumed to be the base source file. Return undef if the choice is
# ambiguous (two or more files tie for the highest count).
#
sub graph_find_base($)
{
	my ($bb) = @_;
	my %file_count;
	my $basefile;
	my $file;
	my $func;
	my $filedata;
	my $count;
	# Count in how many functions each file occurs
	foreach $func (keys(%{$bb})) {
		$filedata = $bb->{$func};
		foreach $file (keys(%{$filedata})) {
			$count = $file_count{$file};
			# Count file occurrence
			$file_count{$file} = defined($count) ? $count + 1 : 1;
		}
	}
	$count = 0;
	# ($num removed: it was assigned but never read)
	foreach $file (keys(%file_count)) {
		if ($file_count{$file} > $count) {
			# The file that contains code for the most functions
			# is likely the base file
			$count = $file_count{$file};
			$basefile = $file;
		} elsif ($file_count{$file} == $count) {
			# If more than one file could be the basefile, we
			# don't have a basefile
			$basefile = undef;
		}
	}
	return $basefile;
}
#
# graph_from_bb(bb, fileorder, bb_filename)
#
# Convert data from bb to the graph format and list of instrumented lines.
# Returns (instr, graph).
#
# bb : function name -> file data
# : undef -> file order
# file data : filename -> line data
# line data : [ line1, line2, ... ]
#
# file order : function name -> [ filename1, filename2, ... ]
#
# graph : file name -> function data
# function data : function name -> line data
# line data : [ line1, line2, ... ]
#
# instr : filename -> line data
# line data : [ line1, line2, ... ]
#
sub graph_from_bb($$$)
{
	my ($bb, $fileorder, $bb_filename) = @_;
	my $graph = {};		# filename -> function -> [ lines ]
	my $instr = {};		# filename -> [ instrumented lines ]
	my $basefile;
	my $file;
	my $func;
	my $filedata;
	my $linedata;
	my $order;
	$basefile = graph_find_base($bb);
	# Create graph structure
	foreach $func (keys(%{$bb})) {
		$filedata = $bb->{$func};
		$order = $fileorder->{$func};
		# Account for lines in functions
		if (defined($basefile) && defined($filedata->{$basefile})) {
			# If the basefile contributes to this function,
			# account this function to the basefile.
			$graph->{$basefile}->{$func} = $filedata->{$basefile};
		} else {
			# If the basefile does not contribute to this function,
			# account this function to the first file contributing
			# lines.
			$graph->{$order->[0]}->{$func} =
				$filedata->{$order->[0]};
		}
		foreach $file (keys(%{$filedata})) {
			# Account for instrumented lines
			$linedata = $filedata->{$file};
			push(@{$instr->{$file}}, @$linedata);
		}
	}
	# Clean up array of instrumented lines
	foreach $file (keys(%{$instr})) {
		$instr->{$file} = [ sort_uniq(@{$instr->{$file}}) ];
	}
	return ($instr, $graph);
}
#
# graph_add_order(fileorder, function, filename)
#
# Add an entry for filename to the fileorder data set for function.
#
sub graph_add_order($$$)
{
	my ($fileorder, $function, $filename) = @_;
	my $known = $fileorder->{$function};

	# Nothing to do if the file is already recorded for this function
	foreach my $seen (@$known) {
		return if ($seen eq $filename);
	}
	push(@$known, $filename);
	$fileorder->{$function} = $known;
}
#
# read_bb_word(handle[, description])
#
# Read and return a word in .bb format from handle.
#
sub read_bb_word(*;$)
{
	# A .bb word is always four bytes wide
	return graph_read($_[0], 4, $_[1]);
}
#
# read_bb_value(handle[, description])
#
# Read a word in .bb format from handle and return the word and its integer
# value.
#
sub read_bb_value(*;$)
{
	my ($handle, $desc) = @_;
	my $word = read_bb_word($handle, $desc);

	return undef if (!defined($word));
	# .bb data is stored in little-endian byte order
	return ($word, unpack("V", $word));
}
#
# read_bb_string(handle, delimiter)
#
# Read and return a string in .bb format from handle up to the specified
# delimiter value.
#
sub read_bb_string(*$)
{
	my ($handle, $delimiter) = @_;
	my $result = "";

	graph_expect("string");
	# Accumulate words until the delimiter word is seen
	while (1) {
		my ($word, $value) =
			read_bb_value($handle, "string or delimiter");
		return undef if (!defined($value));
		last if ($value == $delimiter);
		$result .= $word;
	}
	# Strings are NUL-padded to word size - strip the padding
	$result =~ s/\0//g;
	return $result;
}
#
# read_bb(filename, base_dir)
#
# Read the contents of the specified .bb file and return (instr, graph), where:
#
# instr : filename -> line data
# line data : [ line1, line2, ... ]
#
# graph : filename -> file_data
# file_data : function name -> line_data
# line_data : [ line1, line2, ... ]
#
# Relative filenames are converted to absolute form using base_dir as
# base directory. See the gcov info pages of gcc 2.95 for a description of
# the .bb file format.
#
sub read_bb($$)
{
	my ($bb_filename, $base) = @_;
	my $minus_one = 0x80000001;	# delimits source file names
	my $minus_two = 0x80000002;	# delimits function names
	my $value;
	my $filename;
	my $function;
	my $bb = {};		# function -> filename -> [ lines ]
	my $fileorder = {};	# function -> [ filenames in order seen ]
	my $instr;
	my $graph;
	local *HANDLE;
	open(HANDLE, "<$bb_filename") or goto open_error;
	binmode(HANDLE);
	while (!eof(HANDLE)) {
		# read_bb_value returns (word, value); in scalar context
		# the assignment keeps the numeric value
		$value = read_bb_value(*HANDLE, "data word");
		goto incomplete if (!defined($value));
		if ($value == $minus_one) {
			# Source file name
			graph_expect("filename");
			$filename = read_bb_string(*HANDLE, $minus_one);
			goto incomplete if (!defined($filename));
			if ($filename ne "") {
				$filename = solve_relative_path($base,
								$filename);
			}
		} elsif ($value == $minus_two) {
			# Function name
			graph_expect("function name");
			$function = read_bb_string(*HANDLE, $minus_two);
			goto incomplete if (!defined($function));
		} elsif ($value > 0) {
			# Line number
			if (!defined($filename) || !defined($function)) {
				warn("WARNING: unassigned line number ".
				     "$value\n");
				next;
			}
			push(@{$bb->{$function}->{$filename}}, $value);
			graph_add_order($fileorder, $function, $filename);
		}
	}
	close(HANDLE);
	($instr, $graph) = graph_from_bb($bb, $fileorder, $bb_filename);
	graph_cleanup($graph);
	return ($instr, $graph);
open_error:
	graph_error($bb_filename, "could not open file");
	return undef;
incomplete:
	graph_error($bb_filename, "reached unexpected end of file");
	return undef;
}
#
# read_bbg_word(handle[, description])
#
# Read and return a word in .bbg format.
#
sub read_bbg_word(*;$)
{
	# A .bbg word is always four bytes wide
	return graph_read($_[0], 4, $_[1]);
}
#
# read_bbg_value(handle[, description])
#
# Read a word in .bbg format from handle and return its integer value.
#
sub read_bbg_value(*;$)
{
	my ($handle, $desc) = @_;
	my $word = read_bbg_word($handle, $desc);

	return undef if (!defined($word));
	# .bbg data is stored in big-endian byte order
	return unpack("N", $word);
}
#
# read_bbg_string(handle)
#
# Read and return a string in .bbg format.
#
sub read_bbg_string(*)
{
	my ($handle, $desc) = @_;	# $desc never passed (prototype takes one arg)
	my $length;
	my $string;
	graph_expect("string");
	# Read string length
	$length = read_bbg_value($handle, "string length");
	return undef if (!defined($length));
	if ($length == 0) {
		return "";
	}
	# Read string
	$string = graph_read($handle, $length, "string");
	return undef if (!defined($string));
	# Skip padding
	# NOTE(review): when $length is a multiple of 4 this skips a full
	# extra word; presumably .bbg string lengths are never word-aligned
	# here - confirm against the .bbg format description
	graph_skip($handle, 4 - $length % 4, "string padding") or return undef;
	return $string;
}
#
# read_bbg_lines_record(handle, bbg_filename, bb, fileorder, filename,
# function, base)
#
# Read a bbg format lines record from handle and add the relevant data to
# bb and fileorder. Return filename on success, undef on error.
#
sub read_bbg_lines_record(*$$$$$$)
{
	my ($handle, $bbg_filename, $bb, $fileorder, $filename, $function,
	    $base) = @_;
	my $string;
	my $lineno;
	graph_expect("lines record");
	# Skip basic block index
	graph_skip($handle, 4, "basic block index") or return undef;
	while (1) {
		# Read line number
		$lineno = read_bbg_value($handle, "line number");
		return undef if (!defined($lineno));
		if ($lineno == 0) {
			# A zero line number is a marker: the following
			# string is either a new filename or, if empty,
			# the end of the record
			graph_expect("filename");
			$string = read_bbg_string($handle);
			return undef if (!defined($string));
			# Check for end of record
			if ($string eq "") {
				return $filename;
			}
			$filename = solve_relative_path($base, $string);
			next;
		}
		# Got an actual line number
		if (!defined($filename)) {
			warn("WARNING: unassigned line number in ".
			     "$bbg_filename\n");
			next;
		}
		# Record the line for the current function and file
		push(@{$bb->{$function}->{$filename}}, $lineno);
		graph_add_order($fileorder, $function, $filename);
	}
}
#
# read_bbg(filename, base_dir)
#
# Read the contents of the specified .bbg file and return the following mapping:
# graph: filename -> file_data
# file_data: function name -> line_data
# line_data: [ line1, line2, ... ]
#
# Relative filenames are converted to absolute form using base_dir as
# base directory. See the gcov-io.h file in the SLES 9 gcc 3.3.3 source code
# for a description of the .bbg format.
#
sub read_bbg($$)
{
	my ($bbg_filename, $base) = @_;
	my $file_magic = 0x67626267;	# ASCII "gbbg"
	my $tag_function = 0x01000000;
	my $tag_lines = 0x01450000;
	my $word;
	my $tag;
	my $length;
	my $function;
	my $filename;
	my $bb = {};		# function -> filename -> [ lines ]
	my $fileorder = {};	# function -> [ filenames in order seen ]
	my $instr;
	my $graph;
	local *HANDLE;
	open(HANDLE, "<$bbg_filename") or goto open_error;
	binmode(HANDLE);
	# Read magic
	$word = read_bbg_value(*HANDLE, "file magic");
	goto incomplete if (!defined($word));
	# Check magic
	if ($word != $file_magic) {
		goto magic_error;
	}
	# Skip version
	graph_skip(*HANDLE, 4, "version") or goto incomplete;
	while (!eof(HANDLE)) {
		# Read record tag
		$tag = read_bbg_value(*HANDLE, "record tag");
		goto incomplete if (!defined($tag));
		# Read record length
		$length = read_bbg_value(*HANDLE, "record length");
		# Fix: previously re-checked $tag here, so a file truncated
		# after the tag word was not detected
		goto incomplete if (!defined($length));
		if ($tag == $tag_function) {
			graph_expect("function record");
			# Read function name
			graph_expect("function name");
			$function = read_bbg_string(*HANDLE);
			goto incomplete if (!defined($function));
			# Filenames are per-function; reset until the lines
			# record names one
			$filename = undef;
			# Skip function checksum
			graph_skip(*HANDLE, 4, "function checksum")
				or goto incomplete;
		} elsif ($tag == $tag_lines) {
			# Read lines record
			$filename = read_bbg_lines_record(HANDLE, $bbg_filename,
					$bb, $fileorder, $filename,
					$function, $base);
			goto incomplete if (!defined($filename));
		} else {
			# Skip record contents
			graph_skip(*HANDLE, $length, "unhandled record")
				or goto incomplete;
		}
	}
	close(HANDLE);
	($instr, $graph) = graph_from_bb($bb, $fileorder, $bbg_filename);
	graph_cleanup($graph);
	return ($instr, $graph);
open_error:
	graph_error($bbg_filename, "could not open file");
	return undef;
incomplete:
	graph_error($bbg_filename, "reached unexpected end of file");
	return undef;
magic_error:
	graph_error($bbg_filename, "found unrecognized bbg file magic");
	return undef;
}
#
# read_gcno_word(handle[, description])
#
# Read and return a word in .gcno format.
#
sub read_gcno_word(*;$)
{
	# A .gcno word is always four bytes wide
	return graph_read($_[0], 4, $_[1]);
}
#
# read_gcno_value(handle, big_endian[, description])
#
# Read a word in .gcno format from handle and return its integer value
# according to the specified endianness.
#
sub read_gcno_value(*$;$)
{
	my ($handle, $big_endian, $desc) = @_;
	my $word = read_gcno_word($handle, $desc);

	return undef if (!defined($word));
	# Byte order of a .gcno file depends on the producing host
	return unpack($big_endian ? "N" : "V", $word);
}
#
# read_gcno_string(handle, big_endian)
#
# Read and return a string in .gcno format.
#
sub read_gcno_string(*$)
{
	my ($handle, $big_endian) = @_;

	graph_expect("string");
	# Strings are preceded by their length
	my $length = read_gcno_value($handle, $big_endian, "string length");
	return undef if (!defined($length));
	return "" if ($length == 0);
	# The length is given in 4-byte words and includes NUL padding
	my $string = graph_read($handle, $length * 4, "string and padding");
	return undef if (!defined($string));
	$string =~ s/\0//g;
	return $string;
}
#
# read_gcno_lines_record(handle, gcno_filename, bb, fileorder, filename,
# function, base, big_endian)
#
# Read a gcno format lines record from handle and add the relevant data to
# bb and fileorder. Return filename on success, undef on error.
#
sub read_gcno_lines_record(*$$$$$$$)
{
	my ($handle, $gcno_filename, $bb, $fileorder, $filename, $function,
	    $base, $big_endian) = @_;
	my $string;
	my $lineno;
	graph_expect("lines record");
	# Skip basic block index
	graph_skip($handle, 4, "basic block index") or return undef;
	while (1) {
		# Read line number
		$lineno = read_gcno_value($handle, $big_endian, "line number");
		return undef if (!defined($lineno));
		if ($lineno == 0) {
			# A zero line number is a marker: the following
			# string is either a new filename or, if empty,
			# the end of the record
			graph_expect("filename");
			$string = read_gcno_string($handle, $big_endian);
			return undef if (!defined($string));
			# Check for end of record
			if ($string eq "") {
				return $filename;
			}
			$filename = solve_relative_path($base, $string);
			next;
		}
		# Got an actual line number
		if (!defined($filename)) {
			warn("WARNING: unassigned line number in ".
			     "$gcno_filename\n");
			next;
		}
		# Add to list
		push(@{$bb->{$function}->{$filename}}, $lineno);
		graph_add_order($fileorder, $function, $filename);
	}
}
#
# read_gcno_function_record(handle, bb, fileorder, base, big_endian)
#
# Read a gcno format function record from handle and add the relevant data
# to graph. Return (filename, function) on success, undef on error.
#
sub read_gcno_function_record(*$$$$)
{
	# Parse one "function" record: function name, source filename and
	# the function's first line number. Updates $bb and $fileorder.
	my ($handle, $bb, $fileorder, $base, $big_endian) = @_;
	my $filename;
	my $function;
	my $lineno;
	my $lines;
	graph_expect("function record");
	# Skip ident and checksum
	graph_skip($handle, 8, "function ident and checksum") or return undef;
	# Read function name
	graph_expect("function name");
	$function = read_gcno_string($handle, $big_endian);
	return undef if (!defined($function));
	# Read filename
	graph_expect("filename");
	$filename = read_gcno_string($handle, $big_endian);
	return undef if (!defined($filename));
	$filename = solve_relative_path($base, $filename);
	# Read first line number
	$lineno = read_gcno_value($handle, $big_endian, "initial line number");
	return undef if (!defined($lineno));
	# Associate the function's first line with its source file
	push(@{$bb->{$function}->{$filename}}, $lineno);
	graph_add_order($fileorder, $function, $filename);
	return ($filename, $function);
}
#
# read_gcno(filename, base_dir)
#
# Read the contents of the specified .gcno file and return the list
# (instr, graph), where graph is the following mapping:
# graph: filename -> file_data
# file_data: function name -> line_data
# line_data: [ line1, line2, ... ]
#
# Relative filenames are converted to absolute form using base_dir as
# base directory. See the gcov-io.h file in the gcc 3.3 source code
# for a description of the .gcno format.
#
sub read_gcno($$)
{
	# Read a .gcno file, dispatching on record tags, and return the
	# (instr, graph) data produced by graph_from_bb(). Returns undef
	# after reporting an error via graph_error().
	my ($gcno_filename, $base) = @_;
	# File magic: "gcno" in ASCII
	my $file_magic = 0x67636e6f;
	my $tag_function = 0x01000000;
	my $tag_lines = 0x01450000;
	my $big_endian;
	my $word;
	my $tag;
	my $length;
	my $filename;
	my $function;
	my $bb = {};
	my $fileorder = {};
	my $instr;
	my $graph;
	local *HANDLE;
	# Three-argument open so that filenames containing special
	# characters (leading '>', '|', whitespace) are taken literally
	open(HANDLE, "<", $gcno_filename) or goto open_error;
	binmode(HANDLE);
	# Read magic
	$word = read_gcno_word(*HANDLE, "file magic");
	goto incomplete if (!defined($word));
	# Determine file endianness by checking which byte order
	# reproduces the magic number
	if (unpack("N", $word) == $file_magic) {
		$big_endian = 1;
	} elsif (unpack("V", $word) == $file_magic) {
		$big_endian = 0;
	} else {
		goto magic_error;
	}
	# Skip version and stamp
	graph_skip(*HANDLE, 8, "version and stamp") or goto incomplete;
	while (!eof(HANDLE)) {
		my $next_pos;
		my $curr_pos;
		# Read record tag
		$tag = read_gcno_value(*HANDLE, $big_endian, "record tag");
		goto incomplete if (!defined($tag));
		# Read record length
		$length = read_gcno_value(*HANDLE, $big_endian,
					  "record length");
		goto incomplete if (!defined($length));
		# Convert length from words to bytes
		$length *= 4;
		# Calculate start of next record
		$next_pos = tell(HANDLE);
		goto tell_error if ($next_pos == -1);
		$next_pos += $length;
		# Process record
		if ($tag == $tag_function) {
			($filename, $function) = read_gcno_function_record(
				*HANDLE, $bb, $fileorder, $base, $big_endian);
			goto incomplete if (!defined($function));
		} elsif ($tag == $tag_lines) {
			# Read lines record
			$filename = read_gcno_lines_record(*HANDLE,
					$gcno_filename, $bb, $fileorder,
					$filename, $function, $base,
					$big_endian);
			goto incomplete if (!defined($filename));
		} else {
			# Skip record contents
			graph_skip(*HANDLE, $length, "unhandled record")
				or goto incomplete;
		}
		# Ensure that we are at the start of the next record
		$curr_pos = tell(HANDLE);
		goto tell_error if ($curr_pos == -1);
		next if ($curr_pos == $next_pos);
		goto record_error if ($curr_pos > $next_pos);
		graph_skip(*HANDLE, $next_pos - $curr_pos,
			   "unhandled record content")
			or goto incomplete;
	}
	close(HANDLE);
	# Convert raw basic-block data into (instr, graph) structures
	($instr, $graph) = graph_from_bb($bb, $fileorder, $gcno_filename);
	graph_cleanup($graph);
	return ($instr, $graph);
open_error:
	graph_error($gcno_filename, "could not open file");
	return undef;
incomplete:
	graph_error($gcno_filename, "reached unexpected end of file");
	return undef;
magic_error:
	graph_error($gcno_filename, "found unrecognized gcno file magic");
	return undef;
tell_error:
	graph_error($gcno_filename, "could not determine file position");
	return undef;
record_error:
	graph_error($gcno_filename, "found unrecognized record format");
	return undef;
}
sub debug($)
{
	# Emit a debug message on STDERR, but only when the global $debug
	# flag is set.
	my ($text) = @_;
	print(STDERR "DEBUG: $text") if ($debug);
}
#
# get_gcov_capabilities
#
# Determine the list of available gcov options.
#
sub get_gcov_capabilities()
{
	# Run "$gcov_tool --help" and scan the output for long options.
	# Returns a hash reference mapping each supported option name to 1.
	my $help = `$gcov_tool --help`;
	my %capabilities;
	foreach (split(/\n/, $help)) {
		# A supported option appears as --<name> in the help text
		next if (!/--(\S+)/);
		# Skip options that are always present or that geninfo
		# handles itself
		next if ($1 eq 'help');
		next if ($1 eq 'version');
		next if ($1 eq 'object-directory');
		$capabilities{$1} = 1;
		debug("gcov has capability '$1'\n");
	}
	return \%capabilities;
}
| zy901002-gpsr | utils/lcov/geninfo | Perl | gpl2 | 69,393 |
#!/usr/bin/perl -w
#
# Copyright (c) International Business Machines Corp., 2002,2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# genhtml
#
# This script generates HTML output from .info files as created by the
# geninfo script. Call it with --help and refer to the genhtml man page
# to get information on usage and available options.
#
#
# History:
# 2002-08-23 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
# IBM Lab Boeblingen
# based on code by Manoj Iyer <manjo@mail.utexas.edu> and
# Megan Bock <mbock@us.ibm.com>
# IBM Austin
# 2002-08-27 / Peter Oberparleiter: implemented frame view
# 2002-08-29 / Peter Oberparleiter: implemented test description filtering
# so that by default only descriptions for test cases which
# actually hit some source lines are kept
# 2002-09-05 / Peter Oberparleiter: implemented --no-sourceview
# 2002-09-05 / Mike Kobler: One of my source file paths includes a "+" in
# the directory name. I found that genhtml.pl died when it
# encountered it. I was able to fix the problem by modifying
# the string with the escape character before parsing it.
# 2002-10-26 / Peter Oberparleiter: implemented --num-spaces
# 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
# when trying to combine .info files containing data without
# a test name
# 2003-04-10 / Peter Oberparleiter: extended fix by Mike to also cover
# other special characters
# 2003-04-30 / Peter Oberparleiter: made info write to STDERR, not STDOUT
# 2003-07-10 / Peter Oberparleiter: added line checksum support
# 2004-08-09 / Peter Oberparleiter: added configuration file support
# 2005-03-04 / Cal Pierog: added legend to HTML output, fixed coloring of
# "good coverage" background
# 2006-03-18 / Marcus Boerger: added --custom-intro, --custom-outro and
# overwrite --no-prefix if --prefix is present
# 2006-03-20 / Peter Oberparleiter: changes to custom_* function (rename
# to html_prolog/_epilog, minor modifications to implementation),
# changed prefix/noprefix handling to be consistent with current
# logic
# 2006-03-20 / Peter Oberparleiter: added --html-extension option
# 2008-07-14 / Tom Zoerner: added --function-coverage command line option;
# added function table to source file page
# 2008-08-13 / Peter Oberparleiter: modified function coverage
# implementation (now enabled per default),
# introduced sorting option (enabled per default)
#
use strict;
use File::Basename;
use Getopt::Long;
use Digest::MD5 qw(md5_base64);
# Global constants
our $title = "LCOV - code coverage report";
our $lcov_version = 'LCOV version 1.9';
our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
our $tool_name = basename($0);
# Specify coverage rate limits (in %) for classifying file entries
# HI: $hi_limit <= rate <= 100 graph color: green
# MED: $med_limit <= rate < $hi_limit graph color: orange
# LO: 0 <= rate < $med_limit graph color: red
# For line coverage/all coverage types if not specified
our $hi_limit = 90;
our $med_limit = 75;
# For function coverage
our $fn_hi_limit;
our $fn_med_limit;
# For branch coverage
our $br_hi_limit;
our $br_med_limit;
# Width of overview image
our $overview_width = 80;
# Resolution of overview navigation: this number specifies the maximum
# difference in lines between the position a user selected from the overview
# and the position the source code window is scrolled to.
our $nav_resolution = 4;
# Clicking a line in the overview image should show the source code view at
# a position a bit further up so that the requested line is not the first
# line in the window. This number specifies that offset in lines.
our $nav_offset = 10;
# Clicking on a function name should show the source code at a position a
# few lines before the first line of code of that function. This number
# specifies that offset in lines.
our $func_offset = 2;
our $overview_title = "top level";
# Width for line coverage information in the source code view
our $line_field_width = 12;
# Width for branch coverage information in the source code view
our $br_field_width = 16;
# Internal Constants
# Header types
our $HDR_DIR = 0;
our $HDR_FILE = 1;
our $HDR_SOURCE = 2;
our $HDR_TESTDESC = 3;
our $HDR_FUNC = 4;
# Sort types
our $SORT_FILE = 0;
our $SORT_LINE = 1;
our $SORT_FUNC = 2;
our $SORT_BRANCH = 3;
# Fileview heading types
our $HEAD_NO_DETAIL = 1;
our $HEAD_DETAIL_HIDDEN = 2;
our $HEAD_DETAIL_SHOWN = 3;
# Offsets for storing branch coverage data in vectors
our $BR_BLOCK = 0;
our $BR_BRANCH = 1;
our $BR_TAKEN = 2;
our $BR_VEC_ENTRIES = 3;
our $BR_VEC_WIDTH = 32;
# Additional offsets used when converting branch coverage data to HTML
our $BR_LEN = 3;
our $BR_OPEN = 4;
our $BR_CLOSE = 5;
# Branch data combination types
our $BR_SUB = 0;
our $BR_ADD = 1;
# Data related prototypes
sub print_usage(*);
sub gen_html();
sub html_create($$);
sub process_dir($);
sub process_file($$$);
sub info(@);
sub read_info_file($);
sub get_info_entry($);
sub set_info_entry($$$$$$$$$;$$$$$$);
sub get_prefix(@);
sub shorten_prefix($);
sub get_dir_list(@);
sub get_relative_base_path($);
sub read_testfile($);
sub get_date_string();
sub create_sub_dir($);
sub subtract_counts($$);
sub add_counts($$);
sub apply_baseline($$);
sub remove_unused_descriptions();
sub get_found_and_hit($);
sub get_affecting_tests($$$);
sub combine_info_files($$);
sub merge_checksums($$$);
sub combine_info_entries($$$);
sub apply_prefix($$);
sub system_no_output($@);
sub read_config($);
sub apply_config($);
sub get_html_prolog($);
sub get_html_epilog($);
sub write_dir_page($$$$$$$$$$$$$$$$$);
sub classify_rate($$$$);
sub br_taken_add($$);
sub br_taken_sub($$);
sub br_ivec_len($);
sub br_ivec_get($$);
sub br_ivec_push($$$$);
sub combine_brcount($$$);
sub get_br_found_and_hit($);
sub warn_handler($);
sub die_handler($);
# HTML related prototypes
sub escape_html($);
sub get_bar_graph_code($$$);
sub write_png_files();
sub write_htaccess_file();
sub write_css_file();
sub write_description_file($$$$$$$);
sub write_function_table(*$$$$$$$$$$);
sub write_html(*$);
sub write_html_prolog(*$$);
sub write_html_epilog(*$;$);
sub write_header(*$$$$$$$$$$);
sub write_header_prolog(*$);
sub write_header_line(*@);
sub write_header_epilog(*$);
sub write_file_table(*$$$$$$$);
sub write_file_table_prolog(*$@);
sub write_file_table_entry(*$$$@);
sub write_file_table_detail_entry(*$@);
sub write_file_table_epilog(*);
sub write_test_table_prolog(*$);
sub write_test_table_entry(*$$);
sub write_test_table_epilog(*);
sub write_source($$$$$$$);
sub write_source_prolog(*);
sub write_source_line(*$$$$$$);
sub write_source_epilog(*);
sub write_frameset(*$$$);
sub write_overview_line(*$$$);
sub write_overview(*$$$$);
# External prototype (defined in genpng)
sub gen_png($$$@);
# Global variables & initialization
our %info_data; # Hash containing all data from .info file
our $dir_prefix; # Prefix to remove from all sub directories
our %test_description; # Hash containing test descriptions if available
our $date = get_date_string();
our @info_filenames; # List of .info files to use as data source
our $test_title; # Title for output as written to each page header
our $output_directory; # Name of directory in which to store output
our $base_filename; # Optional name of file containing baseline data
our $desc_filename; # Name of file containing test descriptions
our $css_filename; # Optional name of external stylesheet file to use
our $quiet; # If set, suppress information messages
our $help; # Help option flag
our $version; # Version option flag
our $show_details; # If set, generate detailed directory view
our $no_prefix; # If set, do not remove filename prefix
our $func_coverage = 1; # If set, generate function coverage statistics
our $no_func_coverage; # Disable func_coverage
our $br_coverage = 1; # If set, generate branch coverage statistics
our $no_br_coverage; # Disable br_coverage
our $sort = 1; # If set, provide directory listings with sorted entries
our $no_sort; # Disable sort
our $frames; # If set, use frames for source code view
our $keep_descriptions; # If set, do not remove unused test case descriptions
our $no_sourceview; # If set, do not create a source code view for each file
our $highlight; # If set, highlight lines covered by converted data only
our $legend; # If set, include legend in output
our $tab_size = 8; # Number of spaces to use in place of tab
our $config; # Configuration file contents
our $html_prolog_file; # Custom HTML prolog file (up to and including <body>)
our $html_epilog_file; # Custom HTML epilog file (from </body> onwards)
our $html_prolog; # Actual HTML prolog
our $html_epilog; # Actual HTML epilog
our $html_ext = "html"; # Extension for generated HTML files
our $html_gzip = 0; # Compress with gzip
our $demangle_cpp = 0; # Demangle C++ function names
our @fileview_sortlist;
our @fileview_sortname = ("", "-sort-l", "-sort-f", "-sort-b");
our @funcview_sortlist;
our @rate_name = ("Lo", "Med", "Hi");
our @rate_png = ("ruby.png", "amber.png", "emerald.png");
our $cwd = `pwd`; # Current working directory
chomp($cwd);
our $tool_dir = dirname($0); # Directory where genhtml tool is installed
#
# Code entry point
#
$SIG{__WARN__} = \&warn_handler;
$SIG{__DIE__} = \&die_handler;
# Prettify version string
$lcov_version =~ s/\$\s*Revision\s*:?\s*(\S+)\s*\$/$1/;
# Add current working directory if $tool_dir is not already an absolute path
if (! ($tool_dir =~ /^\/(.*)$/))
{
	$tool_dir = "$cwd/$tool_dir";
}
# Read configuration file if available; the per-user ~/.lcovrc takes
# precedence over the system-wide /etc/lcovrc
if (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
{
	$config = read_config($ENV{"HOME"}."/.lcovrc");
}
elsif (-r "/etc/lcovrc")
{
	$config = read_config("/etc/lcovrc");
}
if ($config)
{
	# Copy configuration file values to variables
	apply_config({
		"genhtml_css_file" => \$css_filename,
		"genhtml_hi_limit" => \$hi_limit,
		"genhtml_med_limit" => \$med_limit,
		"genhtml_line_field_width" => \$line_field_width,
		"genhtml_overview_width" => \$overview_width,
		"genhtml_nav_resolution" => \$nav_resolution,
		"genhtml_nav_offset" => \$nav_offset,
		"genhtml_keep_descriptions" => \$keep_descriptions,
		"genhtml_no_prefix" => \$no_prefix,
		"genhtml_no_source" => \$no_sourceview,
		"genhtml_num_spaces" => \$tab_size,
		"genhtml_highlight" => \$highlight,
		"genhtml_legend" => \$legend,
		"genhtml_html_prolog" => \$html_prolog_file,
		"genhtml_html_epilog" => \$html_epilog_file,
		"genhtml_html_extension" => \$html_ext,
		"genhtml_html_gzip" => \$html_gzip,
		"genhtml_function_hi_limit" => \$fn_hi_limit,
		"genhtml_function_med_limit" => \$fn_med_limit,
		"genhtml_function_coverage" => \$func_coverage,
		"genhtml_branch_hi_limit" => \$br_hi_limit,
		"genhtml_branch_med_limit" => \$br_med_limit,
		"genhtml_branch_coverage" => \$br_coverage,
		"genhtml_branch_field_width" => \$br_field_width,
		"genhtml_sort" => \$sort,
		});
}
# Copy limit values if not specified: function/branch rate limits fall
# back to the line coverage limits
$fn_hi_limit = $hi_limit if (!defined($fn_hi_limit));
$fn_med_limit = $med_limit if (!defined($fn_med_limit));
$br_hi_limit = $hi_limit if (!defined($br_hi_limit));
$br_med_limit = $med_limit if (!defined($br_med_limit));
# Parse command line options
if (!GetOptions("output-directory|o=s" => \$output_directory,
		"title|t=s" => \$test_title,
		"description-file|d=s" => \$desc_filename,
		"keep-descriptions|k" => \$keep_descriptions,
		"css-file|c=s" => \$css_filename,
		"baseline-file|b=s" => \$base_filename,
		"prefix|p=s" => \$dir_prefix,
		"num-spaces=i" => \$tab_size,
		"no-prefix" => \$no_prefix,
		"no-sourceview" => \$no_sourceview,
		"show-details|s" => \$show_details,
		"frames|f" => \$frames,
		"highlight" => \$highlight,
		"legend" => \$legend,
		"quiet|q" => \$quiet,
		"help|h|?" => \$help,
		"version|v" => \$version,
		"html-prolog=s" => \$html_prolog_file,
		"html-epilog=s" => \$html_epilog_file,
		"html-extension=s" => \$html_ext,
		"html-gzip" => \$html_gzip,
		"function-coverage" => \$func_coverage,
		"no-function-coverage" => \$no_func_coverage,
		"branch-coverage" => \$br_coverage,
		"no-branch-coverage" => \$no_br_coverage,
		"sort" => \$sort,
		"no-sort" => \$no_sort,
		"demangle-cpp" => \$demangle_cpp,
		))
{
	print(STDERR "Use $tool_name --help to get usage information\n");
	exit(1);
} else {
	# Merge options: the explicit --no-* options override both the
	# defaults and any configuration file setting
	if ($no_func_coverage) {
		$func_coverage = 0;
	}
	if ($no_br_coverage) {
		$br_coverage = 0;
	}
	# Merge sort options
	if ($no_sort) {
		$sort = 0;
	}
}
@info_filenames = @ARGV;
# Check for help option
if ($help)
{
	print_usage(*STDOUT);
	exit(0);
}
# Check for version option
if ($version)
{
	print("$tool_name: $lcov_version\n");
	exit(0);
}
# Check for info filename
if (!@info_filenames)
{
	die("No filename specified\n".
	    "Use $tool_name --help to get usage information\n");
}
# Generate a title if none is specified
if (!$test_title)
{
	if (scalar(@info_filenames) == 1)
	{
		# Only one filename specified, use it as title
		$test_title = basename($info_filenames[0]);
	}
	else
	{
		# More than one filename specified, used default title
		$test_title = "unnamed";
	}
}
# Make sure css_filename is an absolute path (in case we're changing
# directories)
if ($css_filename)
{
	if (!($css_filename =~ /^\/(.*)$/))
	{
		$css_filename = $cwd."/".$css_filename;
	}
}
# Make sure tab_size is within valid range
if ($tab_size < 1)
{
	print(STDERR "ERROR: invalid number of spaces specified: ".
		     "$tab_size!\n");
	exit(1);
}
# Get HTML prolog and epilog
$html_prolog = get_html_prolog($html_prolog_file);
$html_epilog = get_html_epilog($html_epilog_file);
# Issue a warning if --no-sourceview is enabled together with --frames
if ($no_sourceview && defined($frames))
{
	warn("WARNING: option --frames disabled because --no-sourceview ".
	     "was specified!\n");
	$frames = undef;
}
# Issue a warning if --no-prefix is enabled together with --prefix
if ($no_prefix && defined($dir_prefix))
{
	warn("WARNING: option --prefix disabled because --no-prefix was ".
	     "specified!\n");
	$dir_prefix = undef;
}
# File views are always sorted by name; additional sort keys are added
# only when sorting is enabled and the coverage type is active
@fileview_sortlist = ($SORT_FILE);
@funcview_sortlist = ($SORT_FILE);
if ($sort) {
	push(@fileview_sortlist, $SORT_LINE);
	push(@fileview_sortlist, $SORT_FUNC) if ($func_coverage);
	push(@fileview_sortlist, $SORT_BRANCH) if ($br_coverage);
	push(@funcview_sortlist, $SORT_LINE);
}
if ($frames)
{
	# Include genpng code needed for overview image generation
	do("$tool_dir/genpng");
}
# Ensure that the c++filt tool is available when using --demangle-cpp
if ($demangle_cpp)
{
	if (system_no_output(3, "c++filt", "--version")) {
		die("ERROR: could not find c++filt tool needed for ".
		    "--demangle-cpp\n");
	}
}
# Make sure output_directory exists, create it if necessary
if ($output_directory)
{
	stat($output_directory);
	if (! -e _)
	{
		create_sub_dir($output_directory);
	}
}
# Do something
gen_html();
exit(0);
#
# print_usage(handle)
#
# Print usage information.
#
sub print_usage(*)
{
	# Write the tool's usage text to the given file handle.
	local *HANDLE = $_[0];
	print(HANDLE <<END_OF_USAGE);
Usage: $tool_name [OPTIONS] INFOFILE(S)
Create HTML output for coverage data found in INFOFILE. Note that INFOFILE
may also be a list of filenames.
Misc:
  -h, --help                        Print this help, then exit
  -v, --version                     Print version number, then exit
  -q, --quiet                       Do not print progress messages
Operation:
  -o, --output-directory OUTDIR     Write HTML output to OUTDIR
  -s, --show-details                Generate detailed directory view
  -d, --description-file DESCFILE   Read test case descriptions from DESCFILE
  -k, --keep-descriptions           Do not remove unused test descriptions
  -b, --baseline-file BASEFILE      Use BASEFILE as baseline file
  -p, --prefix PREFIX               Remove PREFIX from all directory names
      --no-prefix                   Do not remove prefix from directory names
      --(no-)function-coverage      Enable (disable) function coverage display
      --(no-)branch-coverage        Enable (disable) branch coverage display
HTML output:
  -f, --frames                      Use HTML frames for source code view
  -t, --title TITLE                 Display TITLE in header of all pages
  -c, --css-file CSSFILE            Use external style sheet file CSSFILE
      --no-source                   Do not create source code view
      --num-spaces NUM              Replace tabs with NUM spaces in source view
      --highlight                   Highlight lines with converted-only data
      --legend                      Include color legend in HTML output
      --html-prolog FILE            Use FILE as HTML prolog for generated pages
      --html-epilog FILE            Use FILE as HTML epilog for generated pages
      --html-extension EXT          Use EXT as filename extension for pages
      --html-gzip                   Use gzip to compress HTML
      --(no-)sort                   Enable (disable) sorted coverage views
      --demangle-cpp                Demangle C++ function names
For more information see: $lcov_url
END_OF_USAGE
	;
}
#
# get_rate(found, hit)
#
# Return a relative value for the specified found&hit values
# which is used for sorting the corresponding entries in a
# file list.
#
sub get_rate($$)
{
	# Compute a sort key for a found/hit pair. Entries without any
	# instrumented items sort last (key 10000). Otherwise the key is
	# dominated by the coverage rate in per-mille, with a small
	# tie-breaker (1/found) so that among equal rates, entries based
	# on more items sort lower.
	my ($total, $covered) = @_;
	return 10000 if ($total == 0);
	my $permille = int($covered * 1000 / $total);
	return $permille * 10 + 2 - (1 / $total);
}
#
# get_overall_line(found, hit, name_singular, name_plural)
#
# Return a string containing overall information for the specified
# found/hit data.
#
sub get_overall_line($$$$)
{
	# Format a coverage summary such as "50.0% (2 of 4 lines)",
	# selecting the singular or plural noun as appropriate. Returns
	# "no data found" when no items were found.
	my ($found, $hit, $name_sn, $name_pl) = @_;
	return "no data found" if (!defined($found) || $found == 0);
	my $noun = ($found == 1) ? $name_sn : $name_pl;
	return sprintf("%.1f%% (%d of %d %s)",
		       $hit * 100 / $found, $hit, $found, $noun);
}
#
# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do
# br_found, br_hit)
#
# Print overall coverage rates for the specified coverage types.
#
sub print_overall_rate($$$$$$$$$)
{
	# Print the final summary lines via info(). The *_do flags select
	# which coverage types (line, function, branch) are reported.
	my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit,
	    $br_do, $br_found, $br_hit) = @_;
	info("Overall coverage rate:\n");
	info("  lines......: %s\n",
	     get_overall_line($ln_found, $ln_hit, "line", "lines"))
		if ($ln_do);
	info("  functions..: %s\n",
	     get_overall_line($fn_found, $fn_hit, "function", "functions"))
		if ($fn_do);
	info("  branches...: %s\n",
	     get_overall_line($br_found, $br_hit, "branch", "branches"))
		if ($br_do);
}
#
# gen_html()
#
# Generate a set of HTML pages from contents of .info file INFO_FILENAME.
# Files will be written to the current directory. If provided, test case
# descriptions will be read from .tests file TEST_FILENAME and included
# in output.
#
# Die on error.
#
sub gen_html()
{
	# Top-level driver: read all .info files, optionally subtract a
	# baseline, then generate the full HTML tree (css/png support
	# files, per-directory pages, overview and description pages)
	# inside $output_directory (or the current directory).
	local *HTML_HANDLE;
	my %overview;
	my %base_data;
	my $lines_found;
	my $lines_hit;
	my $fn_found;
	my $fn_hit;
	my $br_found;
	my $br_hit;
	my $overall_found = 0;
	my $overall_hit = 0;
	my $total_fn_found = 0;
	my $total_fn_hit = 0;
	my $total_br_found = 0;
	my $total_br_hit = 0;
	my $dir_name;
	my $link_name;
	my @dir_list;
	my %new_info;
	# Read in all specified .info files
	foreach (@info_filenames)
	{
		%new_info = %{read_info_file($_)};
		# Combine %new_info with %info_data
		%info_data = %{combine_info_files(\%info_data, \%new_info)};
	}
	info("Found %d entries.\n", scalar(keys(%info_data)));
	# Read and apply baseline data if specified
	if ($base_filename)
	{
		# Read baseline file
		info("Reading baseline file $base_filename\n");
		%base_data = %{read_info_file($base_filename)};
		info("Found %d entries.\n", scalar(keys(%base_data)));
		# Apply baseline
		info("Subtracting baseline data.\n");
		%info_data = %{apply_baseline(\%info_data, \%base_data)};
	}
	@dir_list = get_dir_list(keys(%info_data));
	if ($no_prefix)
	{
		# User requested that we leave filenames alone
		info("User asked not to remove filename prefix\n");
	}
	elsif (!defined($dir_prefix))
	{
		# Get prefix common to most directories in list
		$dir_prefix = get_prefix(@dir_list);
		if ($dir_prefix)
		{
			info("Found common filename prefix \"$dir_prefix\"\n");
		}
		else
		{
			info("No common filename prefix found!\n");
			$no_prefix=1;
		}
	}
	else
	{
		info("Using user-specified filename prefix \"".
		     "$dir_prefix\"\n");
	}
	# Read in test description file if specified
	if ($desc_filename)
	{
		info("Reading test description file $desc_filename\n");
		%test_description = %{read_testfile($desc_filename)};
		# Remove test descriptions which are not referenced
		# from %info_data if user didn't tell us otherwise
		if (!$keep_descriptions)
		{
			remove_unused_descriptions();
		}
	}
	# Change to output directory if specified
	if ($output_directory)
	{
		chdir($output_directory)
			or die("ERROR: cannot change to directory ".
			       "$output_directory!\n");
	}
	info("Writing .css and .png files.\n");
	write_css_file();
	write_png_files();
	if ($html_gzip)
	{
		info("Writing .htaccess file.\n");
		write_htaccess_file();
	}
	info("Generating output.\n");
	# Process each subdirectory and collect overview information
	foreach $dir_name (@dir_list)
	{
		($lines_found, $lines_hit, $fn_found, $fn_hit,
		 $br_found, $br_hit)
			= process_dir($dir_name);
		# Remove prefix if applicable
		if (!$no_prefix && $dir_prefix)
		{
			# Match directory names beginning with $dir_prefix
			$dir_name = apply_prefix($dir_name, $dir_prefix);
		}
		# Generate name for directory overview HTML page
		if ($dir_name =~ /^\/(.*)$/)
		{
			$link_name = substr($dir_name, 1)."/index.$html_ext";
		}
		else
		{
			$link_name = $dir_name."/index.$html_ext";
		}
		$overview{$dir_name} = [$lines_found, $lines_hit, $fn_found,
					$fn_hit, $br_found, $br_hit, $link_name,
					get_rate($lines_found, $lines_hit),
					get_rate($fn_found, $fn_hit),
					get_rate($br_found, $br_hit)];
		$overall_found += $lines_found;
		$overall_hit += $lines_hit;
		$total_fn_found += $fn_found;
		$total_fn_hit += $fn_hit;
		$total_br_found += $br_found;
		$total_br_hit += $br_hit;
	}
	# Generate overview page
	info("Writing directory view page.\n");
	# Create sorted pages
	foreach (@fileview_sortlist) {
		write_dir_page($fileview_sortname[$_], ".", "", $test_title,
			       undef, $overall_found, $overall_hit,
			       $total_fn_found, $total_fn_hit, $total_br_found,
			       $total_br_hit, \%overview, {}, {}, {}, 0, $_);
	}
	# Check if there are any test case descriptions to write out
	if (%test_description)
	{
		info("Writing test case description file.\n");
		write_description_file( \%test_description,
					$overall_found, $overall_hit,
					$total_fn_found, $total_fn_hit,
					$total_br_found, $total_br_hit);
	}
	print_overall_rate(1, $overall_found, $overall_hit,
			   $func_coverage, $total_fn_found, $total_fn_hit,
			   $br_coverage, $total_br_found, $total_br_hit);
	# Return to the directory we started in
	chdir($cwd);
}
#
# html_create(handle, filename)
#
sub html_create($$)
{
	# Open $filename for writing (through a gzip pipe when --html-gzip
	# is active) and associate it with the given handle. Dies when the
	# file cannot be opened.
	my $handle = $_[0];
	my $filename = $_[1];
	if ($html_gzip)
	{
		# NOTE(review): the filename is interpolated into a shell
		# command here, so filenames containing shell
		# metacharacters are not handled safely by this branch
		open($handle, "|gzip -c >$filename")
			or die("ERROR: cannot open $filename for writing ".
			       "(gzip)!\n");
	}
	else
	{
		# Three-argument open so that special characters in the
		# filename are not interpreted as open() modes
		open($handle, ">", $filename)
			or die("ERROR: cannot open $filename for writing!\n");
	}
}
sub write_dir_page($$$$$$$$$$$$$$$$$)
{
	# Write one overview page "index$name.$html_ext" in $rel_dir,
	# listing the entries of $overview. $name carries the sort-variant
	# suffix, $view_type distinguishes top-level from directory view,
	# and the test hashes provide optional per-test detail data.
	my ($name, $rel_dir, $base_dir, $title, $trunc_dir, $overall_found,
	    $overall_hit, $total_fn_found, $total_fn_hit, $total_br_found,
	    $total_br_hit, $overview, $testhash, $testfnchash, $testbrhash,
	    $view_type, $sort_type) = @_;
	# Generate directory overview page including details
	html_create(*HTML_HANDLE, "$rel_dir/index$name.$html_ext");
	if (!defined($trunc_dir)) {
		$trunc_dir = "";
	}
	write_html_prolog(*HTML_HANDLE, $base_dir, "LCOV - $title$trunc_dir");
	write_header(*HTML_HANDLE, $view_type, $trunc_dir, $rel_dir,
		     $overall_found, $overall_hit, $total_fn_found,
		     $total_fn_hit, $total_br_found, $total_br_hit, $sort_type);
	write_file_table(*HTML_HANDLE, $base_dir, $overview, $testhash,
			 $testfnchash, $testbrhash, $view_type, $sort_type);
	write_html_epilog(*HTML_HANDLE, $base_dir);
	close(*HTML_HANDLE);
}
#
# process_dir(dir_name)
#
sub process_dir($)
{
	# Generate all HTML pages for one source directory: a source view
	# per file (via process_file) plus sorted directory index pages.
	# Returns the directory's cumulative (line, function, branch)
	# found/hit counts.
	my $abs_dir = $_[0];
	my $trunc_dir;
	my $rel_dir = $abs_dir;
	my $base_dir;
	my $filename;
	my %overview;
	my $lines_found;
	my $lines_hit;
	my $fn_found;
	my $fn_hit;
	my $br_found;
	my $br_hit;
	my $overall_found=0;
	my $overall_hit=0;
	my $total_fn_found=0;
	my $total_fn_hit=0;
	my $total_br_found = 0;
	my $total_br_hit = 0;
	my $base_name;
	my $extension;
	my $testdata;
	my %testhash;
	my $testfncdata;
	my %testfnchash;
	my $testbrdata;
	my %testbrhash;
	my @sort_list;
	local *HTML_HANDLE;
	# Remove prefix if applicable
	if (!$no_prefix)
	{
		# Match directory name beginning with $dir_prefix
		$rel_dir = apply_prefix($rel_dir, $dir_prefix);
	}
	$trunc_dir = $rel_dir;
	# Remove leading /
	if ($rel_dir =~ /^\/(.*)$/)
	{
		$rel_dir = substr($rel_dir, 1);
	}
	$base_dir = get_relative_base_path($rel_dir);
	create_sub_dir($rel_dir);
	# Match filenames which specify files in this directory, not including
	# sub-directories
	foreach $filename (grep(/^\Q$abs_dir\E\/[^\/]*$/,keys(%info_data)))
	{
		my $page_link;
		my $func_link;
		($lines_found, $lines_hit, $fn_found, $fn_hit, $br_found,
		 $br_hit, $testdata, $testfncdata, $testbrdata) =
			process_file($trunc_dir, $rel_dir, $filename);
		$base_name = basename($filename);
		if ($no_sourceview) {
			$page_link = "";
		} elsif ($frames) {
			# Link to frameset page
			$page_link = "$base_name.gcov.frameset.$html_ext";
		} else {
			# Link directory to source code view page
			$page_link = "$base_name.gcov.$html_ext";
		}
		$overview{$base_name} = [$lines_found, $lines_hit, $fn_found,
					 $fn_hit, $br_found, $br_hit,
					 $page_link,
					 get_rate($lines_found, $lines_hit),
					 get_rate($fn_found, $fn_hit),
					 get_rate($br_found, $br_hit)];
		$testhash{$base_name} = $testdata;
		$testfnchash{$base_name} = $testfncdata;
		$testbrhash{$base_name} = $testbrdata;
		$overall_found += $lines_found;
		$overall_hit += $lines_hit;
		$total_fn_found += $fn_found;
		$total_fn_hit += $fn_hit;
		$total_br_found += $br_found;
		$total_br_hit += $br_hit;
	}
	# Create sorted pages
	foreach (@fileview_sortlist) {
		# Generate directory overview page (without details)
		write_dir_page($fileview_sortname[$_], $rel_dir, $base_dir,
			       $test_title, $trunc_dir, $overall_found,
			       $overall_hit, $total_fn_found, $total_fn_hit,
			       $total_br_found, $total_br_hit, \%overview, {},
			       {}, {}, 1, $_);
		if (!$show_details) {
			next;
		}
		# Generate directory overview page including details
		write_dir_page("-detail".$fileview_sortname[$_], $rel_dir,
			       $base_dir, $test_title, $trunc_dir,
			       $overall_found, $overall_hit, $total_fn_found,
			       $total_fn_hit, $total_br_found, $total_br_hit,
			       \%overview, \%testhash, \%testfnchash,
			       \%testbrhash, 1, $_);
	}
	# Calculate resulting line counts
	return ($overall_found, $overall_hit, $total_fn_found, $total_fn_hit,
		$total_br_found, $total_br_hit);
}
#
# get_converted_lines(testdata)
#
# Return hash of line numbers of those lines which were only covered in
# converted data sets.
#
sub get_converted_lines($)
{
	# Determine which line numbers were hit exclusively by converted
	# data sets (test names ending in ",diff"). Returns a hash
	# reference mapping each such line number to 1.
	my ($testdata) = @_;
	my %converted;
	my %nonconverted;
	my %result;

	# Collect the lines with positive counts, separately for converted
	# and original data sets
	foreach my $testcase (keys(%{$testdata})) {
		my $target = ($testcase =~ /,diff$/) ?
				\%converted : \%nonconverted;
		my $counts = $testdata->{$testcase};
		foreach my $line (keys(%{$counts})) {
			$target->{$line} = 1 if ($counts->{$line} > 0);
		}
	}
	# Keep only the lines that never appear in an original data set
	foreach my $line (keys(%converted)) {
		$result{$line} = 1 if (!defined($nonconverted{$line}));
	}
	return \%result;
}
sub write_function_page($$$$$$$$$$$$$$$$$$)
{
	# Write the per-file function coverage table page. $sort_type 0
	# produces the name-sorted page (".func"); any other value the
	# execution-count-sorted variant (".func-sort-c").
	my ($base_dir, $rel_dir, $trunc_dir, $base_name, $title,
	    $lines_found, $lines_hit, $fn_found, $fn_hit, $br_found, $br_hit,
	    $sumcount, $funcdata, $sumfnccount, $testfncdata, $sumbrcount,
	    $testbrdata, $sort_type) = @_;
	my $pagetitle;
	my $filename;
	# Generate function table for this file
	if ($sort_type == 0) {
		$filename = "$rel_dir/$base_name.func.$html_ext";
	} else {
		$filename = "$rel_dir/$base_name.func-sort-c.$html_ext";
	}
	html_create(*HTML_HANDLE, $filename);
	$pagetitle = "LCOV - $title - $trunc_dir/$base_name - functions";
	write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
	write_header(*HTML_HANDLE, 4, "$trunc_dir/$base_name",
		     "$rel_dir/$base_name", $lines_found, $lines_hit,
		     $fn_found, $fn_hit, $br_found, $br_hit, $sort_type);
	write_function_table(*HTML_HANDLE, "$base_name.gcov.$html_ext",
			     $sumcount, $funcdata,
			     $sumfnccount, $testfncdata, $sumbrcount,
			     $testbrdata, $base_name,
			     $base_dir, $sort_type);
	write_html_epilog(*HTML_HANDLE, $base_dir, 1);
	close(*HTML_HANDLE);
}
#
# process_file(trunc_dir, rel_dir, filename)
#
sub process_file($$$)
{
	# Generate the HTML pages for a single source file: source view,
	# optional function table pages and, with --frames, the frameset
	# and overview pages. Returns the file's coverage counts and
	# per-test data so process_dir() can aggregate them.
	info("Processing file ".apply_prefix($_[2], $dir_prefix)."\n");
	my $trunc_dir = $_[0];
	my $rel_dir = $_[1];
	my $filename = $_[2];
	my $base_name = basename($filename);
	my $base_dir = get_relative_base_path($rel_dir);
	my $testdata;
	my $testcount;
	my $sumcount;
	my $funcdata;
	my $checkdata;
	my $testfncdata;
	my $sumfnccount;
	my $testbrdata;
	my $sumbrcount;
	my $lines_found;
	my $lines_hit;
	my $fn_found;
	my $fn_hit;
	my $br_found;
	my $br_hit;
	my $converted;
	my @source;
	my $pagetitle;
	local *HTML_HANDLE;
	($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
	 $sumfnccount, $testbrdata, $sumbrcount, $lines_found, $lines_hit,
	 $fn_found, $fn_hit, $br_found, $br_hit)
		= get_info_entry($info_data{$filename});
	# Return after this point in case user asked us not to generate
	# source code view
	if ($no_sourceview)
	{
		return ($lines_found, $lines_hit, $fn_found, $fn_hit,
			$br_found, $br_hit, $testdata, $testfncdata,
			$testbrdata);
	}
	# Lines covered only by converted (",diff") data, for --highlight
	$converted = get_converted_lines($testdata);
	# Generate source code view for this file
	html_create(*HTML_HANDLE, "$rel_dir/$base_name.gcov.$html_ext");
	$pagetitle = "LCOV - $test_title - $trunc_dir/$base_name";
	write_html_prolog(*HTML_HANDLE, $base_dir, $pagetitle);
	write_header(*HTML_HANDLE, 2, "$trunc_dir/$base_name",
		     "$rel_dir/$base_name", $lines_found, $lines_hit,
		     $fn_found, $fn_hit, $br_found, $br_hit, 0);
	@source = write_source(*HTML_HANDLE, $filename, $sumcount, $checkdata,
			       $converted, $funcdata, $sumbrcount);
	write_html_epilog(*HTML_HANDLE, $base_dir, 1);
	close(*HTML_HANDLE);
	if ($func_coverage) {
		# Create function tables
		foreach (@funcview_sortlist) {
			write_function_page($base_dir, $rel_dir, $trunc_dir,
					    $base_name, $test_title,
					    $lines_found, $lines_hit,
					    $fn_found, $fn_hit, $br_found,
					    $br_hit, $sumcount,
					    $funcdata, $sumfnccount,
					    $testfncdata, $sumbrcount,
					    $testbrdata, $_);
		}
	}
	# Additional files are needed in case of frame output
	if (!$frames)
	{
		return ($lines_found, $lines_hit, $fn_found, $fn_hit,
			$br_found, $br_hit, $testdata, $testfncdata,
			$testbrdata);
	}
	# Create overview png file
	gen_png("$rel_dir/$base_name.gcov.png", $overview_width, $tab_size,
		@source);
	# Create frameset page
	html_create(*HTML_HANDLE,
		    "$rel_dir/$base_name.gcov.frameset.$html_ext");
	write_frameset(*HTML_HANDLE, $base_dir, $base_name, $pagetitle);
	close(*HTML_HANDLE);
	# Write overview frame
	html_create(*HTML_HANDLE,
		    "$rel_dir/$base_name.gcov.overview.$html_ext");
	write_overview(*HTML_HANDLE, $base_dir, $base_name, $pagetitle,
		       scalar(@source));
	close(*HTML_HANDLE);
	return ($lines_found, $lines_hit, $fn_found, $fn_hit, $br_found,
		$br_hit, $testdata, $testfncdata, $testbrdata);
}
#
# read_info_file(info_filename)
#
# Read in the contents of the .info file specified by INFO_FILENAME. Data will
# be returned as a reference to a hash containing the following mappings:
#
# %result: for each filename found in file -> \%data
#
# %data: "test" -> \%testdata
# "sum" -> \%sumcount
# "func" -> \%funcdata
# "found" -> $lines_found (number of instrumented lines found in file)
# "hit" -> $lines_hit (number of executed lines in file)
# "check" -> \%checkdata
# "testfnc" -> \%testfncdata
# "sumfnc" -> \%sumfnccount
# "testbr" -> \%testbrdata
# "sumbr" -> \%sumbrcount
#
# %testdata : name of test affecting this file -> \%testcount
# %testfncdata: name of test affecting this file -> \%testfnccount
# %testbrdata: name of test affecting this file -> \%testbrcount
#
# %testcount : line number -> execution count for a single test
# %testfnccount: function name -> execution count for a single test
# %testbrcount : line number -> branch coverage data for a single test
# %sumcount : line number -> execution count for all tests
# %sumfnccount : function name -> execution count for all tests
# %sumbrcount : line number -> branch coverage data for all tests
# %funcdata : function name -> line number
# %checkdata : line number -> checksum of source code line
# $brdata : vector of items: block, branch, taken
#
# Note that .info file sections referring to the same file and test name
# will automatically be combined by adding all execution counts.
#
# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
# is compressed using GZIP. If available, GUNZIP will be used to decompress
# this file.
#
# Die on error.
#
sub read_info_file($)
{
	my $tracefile = $_[0];		# Name of tracefile
	my %result;			# Resulting hash: file -> data
	my $data;			# Data handle for current entry
	my $testdata;			#       "             "
	my $testcount;			#       "             "
	my $sumcount;			#       "             "
	my $funcdata;			#       "             "
	my $checkdata;			#       "             "
	my $testfncdata;		# test name -> function call counts
	my $testfnccount;		# function call counts of current test
	my $sumfnccount;		# summed function call counts
	my $testbrdata;			# test name -> branch coverage data
	my $testbrcount;		# branch data of current test
	my $sumbrcount;			# summed branch coverage data
	my $line;			# Current line read from .info file
	my $testname;			# Current test name
	my $filename;			# Current filename
	my $hitcount;			# Count for lines hit
	my $count;			# Execution count of current line
	my $negative;			# If set, warn about negative counts
	my $changed_testname;		# If set, warn about changed testname
	my $line_checksum;		# Checksum of current line
	my $br_found;
	my $br_hit;
	local *INFO_HANDLE;		# Filehandle for .info file

	info("Reading data file $tracefile\n");

	# Check if file exists and is readable
	stat($_[0]);
	if (!(-r _))
	{
		die("ERROR: cannot read file $_[0]!\n");
	}

	# Check if this is really a plain file
	if (!(-f _))
	{
		die("ERROR: not a plain file: $_[0]!\n");
	}

	# Check for .gz extension
	if ($_[0] =~ /\.gz$/)
	{
		# Check for availability of GZIP tool
		system_no_output(1, "gunzip" ,"-h")
			and die("ERROR: gunzip command not available!\n");

		# Check integrity of compressed file
		system_no_output(1, "gunzip", "-t", $_[0])
			and die("ERROR: integrity check failed for ".
				"compressed file $_[0]!\n");

		# Open compressed file
		# NOTE(review): the filename is interpolated into a shell
		# pipeline; names containing spaces or shell metacharacters
		# will misbehave — verify callers sanitize paths.
		open(INFO_HANDLE, "gunzip -c $_[0]|")
			or die("ERROR: cannot start gunzip to decompress ".
			       "file $_[0]!\n");
	}
	else
	{
		# Open decompressed file
		open(INFO_HANDLE, $_[0])
			or die("ERROR: cannot read file $_[0]!\n");
	}

	$testname = "";
	while (<INFO_HANDLE>)
	{
		chomp($_);
		$line = $_;

		# Switch statement: dispatch on the record type prefix.
		foreach ($line)
		{
			/^TN:([^,]*)(,diff)?/ && do
			{
				# Test name information found
				$testname = defined($1) ? $1 : "";
				# Replace non-word characters; s///g returns
				# the substitution count, so this also flags
				# that the name was changed.
				if ($testname =~ s/\W/_/g)
				{
					$changed_testname = 1;
				}
				$testname .= $2 if (defined($2));
				last;
			};

			/^[SK]F:(.*)/ && do
			{
				# Filename information found
				# Retrieve data for new entry
				$filename = $1;

				$data = $result{$filename};
				($testdata, $sumcount, $funcdata, $checkdata,
				 $testfncdata, $sumfnccount, $testbrdata,
				 $sumbrcount) =
					get_info_entry($data);

				if (defined($testname))
				{
					$testcount = $testdata->{$testname};
					$testfnccount = $testfncdata->{$testname};
					$testbrcount = $testbrdata->{$testname};
				}
				else
				{
					$testcount = {};
					$testfnccount = {};
					$testbrcount = {};
				}
				last;
			};

			/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
			{
				# Fix negative counts
				$count = $2 < 0 ? 0 : $2;
				if ($2 < 0)
				{
					$negative = 1;
				}
				# Execution count found, add to structure
				# Add summary counts
				$sumcount->{$1} += $count;

				# Add test-specific counts
				if (defined($testname))
				{
					$testcount->{$1} += $count;
				}

				# Store line checksum if available
				if (defined($3))
				{
					# Strip the leading comma captured
					# with the checksum.
					$line_checksum = substr($3, 1);

					# Does it match a previous definition
					if (defined($checkdata->{$1}) &&
					    ($checkdata->{$1} ne
					     $line_checksum))
					{
						die("ERROR: checksum mismatch ".
						    "at $filename:$1\n");
					}

					$checkdata->{$1} = $line_checksum;
				}
				last;
			};

			/^FN:(\d+),([^,]+)/ && do
			{
				# Function data found, add to structure
				$funcdata->{$2} = $1;

				# Also initialize function call data
				if (!defined($sumfnccount->{$2})) {
					$sumfnccount->{$2} = 0;
				}
				if (defined($testname))
				{
					if (!defined($testfnccount->{$2})) {
						$testfnccount->{$2} = 0;
					}
				}
				last;
			};

			/^FNDA:(\d+),([^,]+)/ && do
			{
				# Function call count found, add to structure
				# Add summary counts
				$sumfnccount->{$2} += $1;

				# Add test-specific counts
				if (defined($testname))
				{
					$testfnccount->{$2} += $1;
				}
				last;
			};

			/^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do {
				# Branch coverage data found; "-" means the
				# branch's basic block was never executed.
				my ($line, $block, $branch, $taken) =
				   ($1, $2, $3, $4);

				$sumbrcount->{$line} =
					br_ivec_push($sumbrcount->{$line},
						     $block, $branch, $taken);

				# Add test-specific counts
				if (defined($testname)) {
					$testbrcount->{$line} =
						br_ivec_push(
							$testbrcount->{$line},
							$block, $branch,
							$taken);
				}
				last;
			};

			/^end_of_record/ && do
			{
				# Found end of section marker
				if ($filename)
				{
					# Store current section data
					if (defined($testname))
					{
						$testdata->{$testname} =
							$testcount;
						$testfncdata->{$testname} =
							$testfnccount;
						$testbrdata->{$testname} =
							$testbrcount;
					}

					set_info_entry($data, $testdata,
						       $sumcount, $funcdata,
						       $checkdata, $testfncdata,
						       $sumfnccount,
						       $testbrdata,
						       $sumbrcount);
					$result{$filename} = $data;
					last;
				}
			};

			# default
			last;
		}
	}
	close(INFO_HANDLE);

	# Calculate lines_found and lines_hit for each file
	foreach $filename (keys(%result))
	{
		$data = $result{$filename};

		($testdata, $sumcount, undef, undef, $testfncdata,
		 $sumfnccount, $testbrdata, $sumbrcount) =
			get_info_entry($data);

		# Filter out empty files
		if (scalar(keys(%{$sumcount})) == 0)
		{
			delete($result{$filename});
			next;
		}
		# Filter out empty test cases
		foreach $testname (keys(%{$testdata}))
		{
			if (!defined($testdata->{$testname}) ||
			    scalar(keys(%{$testdata->{$testname}})) == 0)
			{
				delete($testdata->{$testname});

				# NOTE(review): the matching entry in
				# $testbrdata is not removed here — confirm
				# whether that is intentional.
				delete($testfncdata->{$testname});
			}
		}

		$data->{"found"} = scalar(keys(%{$sumcount}));
		$hitcount = 0;

		foreach (keys(%{$sumcount}))
		{
			if ($sumcount->{$_} > 0) { $hitcount++; }
		}

		$data->{"hit"} = $hitcount;

		# Get found/hit values for function call data
		$data->{"f_found"} = scalar(keys(%{$sumfnccount}));
		$hitcount = 0;

		foreach (keys(%{$sumfnccount})) {
			if ($sumfnccount->{$_} > 0) {
				$hitcount++;
			}
		}
		$data->{"f_hit"} = $hitcount;

		# Get found/hit values for branch data
		($br_found, $br_hit) = get_br_found_and_hit($sumbrcount);

		$data->{"b_found"} = $br_found;
		$data->{"b_hit"} = $br_hit;
	}

	if (scalar(keys(%result)) == 0)
	{
		die("ERROR: no valid records found in tracefile $tracefile\n");
	}
	if ($negative)
	{
		warn("WARNING: negative counts found in tracefile ".
		     "$tracefile\n");
	}
	if ($changed_testname)
	{
		warn("WARNING: invalid characters removed from testname in ".
		     "tracefile $tracefile\n");
	}

	return(\%result);
}
#
# get_info_entry(hash_ref)
#
# Retrieve data from an entry of the structure generated by read_info_file().
# Return a list of references to hashes:
# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
# ref, testfncdata hash ref, sumfnccount hash ref, testbrdata hash ref,
# sumbrcount hash ref, lines found, lines hit, functions found,
# functions hit, branches found, branches hit)
#
sub get_info_entry($)
{
	my $entry = $_[0];

	# Field names in the exact order the callers expect them back.
	my @fields = ("test", "sum", "func", "check", "testfnc", "sumfnc",
		      "testbr", "sumbr", "found", "hit", "f_found", "f_hit",
		      "b_found", "b_hit");

	# Missing fields (or an undefined entry) simply yield undef values,
	# matching the behavior of direct hash element access.
	return map { $entry->{$_} } @fields;
}
#
# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
# checkdata_ref, testfncdata_ref, sumfcncount_ref,
# testbrdata_ref, sumbrcount_ref[,lines_found,
# lines_hit, f_found, f_hit, $b_found, $b_hit])
#
# Update the hash referenced by HASH_REF with the provided data references.
#
sub set_info_entry($$$$$$$$$;$$$$$$)
{
	my ($data_ref, @values) = @_;

	# Field names in positional order; the first eight are mandatory,
	# the remaining six are stored only when a defined value was given.
	my @fields = ("test", "sum", "func", "check", "testfnc", "sumfnc",
		      "testbr", "sumbr", "found", "hit", "f_found", "f_hit",
		      "b_found", "b_hit");

	for (my $i = 0; $i < scalar(@values); $i++) {
		next if ($i >= 8 && !defined($values[$i]));
		$data_ref->{$fields[$i]} = $values[$i];
	}
}
#
# add_counts(data1_ref, data2_ref)
#
# DATA1_REF and DATA2_REF are references to hashes containing a mapping
#
# line number -> execution count
#
# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
# is a reference to a hash containing the combined mapping in which
# execution counts are added.
#
sub add_counts($$)
{
	my ($ref1, $ref2) = @_;
	my %sum = %{$ref1};	# start from a copy of the first mapping
	my $found = 0;		# number of distinct instrumented lines
	my $hit = 0;		# lines with a combined count > 0

	# Fold the second mapping into the copy, adding counts for lines
	# present in both and copying lines unique to the second hash.
	foreach my $lineno (keys(%{$ref2})) {
		if (defined($sum{$lineno})) {
			$sum{$lineno} += $ref2->{$lineno};
		} else {
			$sum{$lineno} = $ref2->{$lineno};
		}
	}

	# Derive the found/hit statistics from the merged result.
	foreach my $count (values(%sum)) {
		$found++;
		$hit++ if ($count > 0);
	}

	return (\%sum, $found, $hit);
}
#
# merge_checksums(ref1, ref2, filename)
#
# REF1 and REF2 are references to hashes containing a mapping
#
# line number -> checksum
#
# Merge checksum lists defined in REF1 and REF2 and return reference to
# resulting hash. Die if a checksum for a line is defined in both hashes
# but does not match.
#
sub merge_checksums($$$)
{
	my ($first, $second, $fname) = @_;
	my %merged;

	# Take over all checksums from the first hash, verifying that they
	# do not contradict the second one.
	foreach my $lineno (keys(%{$first})) {
		if (defined($second->{$lineno}) &&
		    $first->{$lineno} ne $second->{$lineno}) {
			die("ERROR: checksum mismatch at $fname:$lineno\n");
		}
		$merged{$lineno} = $first->{$lineno};
	}

	# Entries from the second hash take precedence for all lines it
	# covers (values are known equal where both hashes overlap).
	@merged{keys(%{$second})} = values(%{$second});

	return \%merged;
}
#
# merge_func_data(funcdata1, funcdata2, filename)
#
sub merge_func_data($$$)
{
	my ($fd1, $fd2, $fname) = @_;
	my %merged = defined($fd1) ? %{$fd1} : ();

	# Overlay the second mapping; conflicting start lines for the same
	# function name are reported and the first value is kept.
	foreach my $name (keys(%{$fd2})) {
		my $new_line = $fd2->{$name};

		if (defined($merged{$name}) && $merged{$name} != $new_line) {
			warn("WARNING: function data mismatch at ".
			     "$fname:$new_line\n");
			next;
		}
		$merged{$name} = $new_line;
	}

	return \%merged;
}
#
# add_fnccount(fnccount1, fnccount2)
#
# Add function call count data. Return list (fnccount_added, f_found, f_hit)
#
sub add_fnccount($$)
{
	my ($count1, $count2) = @_;
	my %merged = defined($count1) ? %{$count1} : ();
	my $hit = 0;

	# Accumulate per-function call counts from the second hash.
	foreach my $name (keys(%{$count2})) {
		if (defined($merged{$name})) {
			$merged{$name} += $count2->{$name};
		} else {
			$merged{$name} = $count2->{$name};
		}
	}

	# A function counts as hit when it was called at least once.
	foreach my $calls (values(%merged)) {
		$hit++ if ($calls > 0);
	}

	return (\%merged, scalar(keys(%merged)), $hit);
}
#
# add_testfncdata(testfncdata1, testfncdata2)
#
# Add function call count data for several tests. Return reference to
# added_testfncdata.
#
sub add_testfncdata($$)
{
	my ($set1, $set2) = @_;
	my %merged;

	foreach my $name (keys(%{$set1})) {
		if (!defined($set2->{$name})) {
			# Unique to the first data set: take over as-is.
			$merged{$name} = $set1->{$name};
			next;
		}
		# Present in both data sets: add per-function call counts.
		my ($combined) = add_fnccount($set1->{$name},
					      $set2->{$name});
		$merged{$name} = $combined;
	}

	# Anything not covered so far is unique to the second data set.
	foreach my $name (keys(%{$set2})) {
		$merged{$name} = $set2->{$name} if (!defined($merged{$name}));
	}

	return \%merged;
}
#
# brcount_to_db(brcount)
#
# Convert brcount data to the following format:
#
# db: line number -> block hash
# block hash: block number -> branch hash
# branch hash: branch number -> taken value
#
sub brcount_to_db($)
{
	my ($brcount) = @_;
	my $db;

	# Unpack every per-line branch vector into the nested hash layout:
	# line -> block -> branch -> taken.
	foreach my $lineno (keys(%{$brcount})) {
		my $vec = $brcount->{$lineno};
		my $entries = br_ivec_len($vec);

		for (my $idx = 0; $idx < $entries; $idx++) {
			my ($block, $branch, $taken) =
				br_ivec_get($vec, $idx);

			$db->{$lineno}->{$block}->{$branch} = $taken;
		}
	}

	return $db;
}
#
# db_to_brcount(db)
#
# Convert branch coverage data back to brcount format.
#
sub db_to_brcount($)
{
	my ($db) = @_;
	my $result = {};
	my $found = 0;
	my $hit = 0;

	# Rebuild one branch vector per line. Iterate numerically sorted so
	# the resulting vectors are deterministic.
	foreach my $lineno (sort({$a <=> $b} keys(%{$db}))) {
		my $vec;

		foreach my $block (sort({$a <=> $b}
					keys(%{$db->{$lineno}}))) {
			my $bhash = $db->{$lineno}->{$block};

			foreach my $branch (sort({$a <=> $b}
						 keys(%{$bhash}))) {
				my $taken = $bhash->{$branch};

				# "-" marks a branch whose block never ran.
				$found++;
				if ($taken ne "-" && $taken > 0) {
					$hit++;
				}
				$vec = br_ivec_push($vec, $block, $branch,
						    $taken);
			}
		}
		$result->{$lineno} = $vec;
	}

	return ($result, $found, $hit);
}
#
# combine_brcount(brcount1, brcount2, type)
#
# If add is BR_ADD, add branch coverage data and return list (brcount_added,
# br_found, br_hit). If add is BR_SUB, subtract the taken values of brcount2
# from brcount1 and return (brcount_sub, br_found, br_hit).
#
sub combine_brcount($$$)
{
	my ($brcount1, $brcount2, $type) = @_;
	my $db;

	# Use the intermediate database layout for easy per-branch lookup.
	$db = brcount_to_db($brcount1);

	# Merge the second count into the database, adding or subtracting
	# taken values depending on TYPE.
	foreach my $lineno (keys(%{$brcount2})) {
		my $vec = $brcount2->{$lineno};
		my $entries = br_ivec_len($vec);

		for (my $idx = 0; $idx < $entries; $idx++) {
			my ($block, $branch, $taken) =
				br_ivec_get($vec, $idx);
			my $combined = $db->{$lineno}->{$block}->{$branch};

			if ($type == $BR_ADD) {
				$combined = br_taken_add($combined, $taken);
			} elsif ($type == $BR_SUB) {
				$combined = br_taken_sub($combined, $taken);
			}
			if (defined($combined)) {
				$db->{$lineno}->{$block}->{$branch} =
					$combined;
			}
		}
	}

	# Convert back to brcount format and recompute found/hit totals.
	return db_to_brcount($db);
}
#
# add_testbrdata(testbrdata1, testbrdata2)
#
# Add branch coverage data for several tests. Return reference to
# added_testbrdata.
#
sub add_testbrdata($$)
{
	my ($set1, $set2) = @_;
	my %merged;

	foreach my $name (keys(%{$set1})) {
		if (!defined($set2->{$name})) {
			# Unique to the first data set: take over as-is.
			$merged{$name} = $set1->{$name};
			next;
		}
		# Present in both data sets: combine the branch data.
		my ($combined) = combine_brcount($set1->{$name},
						 $set2->{$name}, $BR_ADD);
		$merged{$name} = $combined;
	}

	# Anything not covered so far is unique to the second data set.
	foreach my $name (keys(%{$set2})) {
		$merged{$name} = $set2->{$name} if (!defined($merged{$name}));
	}

	return \%merged;
}
#
# combine_info_entries(entry_ref1, entry_ref2, filename)
#
# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
# Return reference to resulting hash.
#
sub combine_info_entries($$$)
{
	# Merge two per-file .info entries (as produced by read_info_file):
	# checksums are merged (with mismatch detection), function start
	# lines are merged, and line/function/branch counts are added both
	# per test name and in the overall summaries. Returns a reference
	# to the combined entry hash.
	my $entry1 = $_[0];	# Reference to hash containing first entry
	my $testdata1;
	my $sumcount1;
	my $funcdata1;
	my $checkdata1;
	my $testfncdata1;
	my $sumfnccount1;
	my $testbrdata1;
	my $sumbrcount1;

	my $entry2 = $_[1];	# Reference to hash containing second entry
	my $testdata2;
	my $sumcount2;
	my $funcdata2;
	my $checkdata2;
	my $testfncdata2;
	my $sumfnccount2;
	my $testbrdata2;
	my $sumbrcount2;

	my %result;		# Hash containing combined entry
	my %result_testdata;
	my $result_sumcount = {};
	my $result_funcdata;
	my $result_testfncdata;
	my $result_sumfnccount;
	my $result_testbrdata;
	my $result_sumbrcount;
	my $lines_found;
	my $lines_hit;
	my $fn_found;
	my $fn_hit;
	my $br_found;
	my $br_hit;

	my $testname;
	my $filename = $_[2];	# Used only for diagnostics on mismatch

	# Retrieve data
	($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
	 $sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1);
	($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
	 $sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2);

	# Merge checksums (dies on per-line checksum mismatch)
	$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);

	# Combine funcdata
	$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);

	# Combine function call count data
	$result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
	($result_sumfnccount, $fn_found, $fn_hit) =
		add_fnccount($sumfnccount1, $sumfnccount2);

	# Combine branch coverage data
	$result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2);
	($result_sumbrcount, $br_found, $br_hit) =
		combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD);

	# Combine testdata
	foreach $testname (keys(%{$testdata1}))
	{
		if (defined($testdata2->{$testname}))
		{
			# testname is present in both entries, requires
			# combination
			($result_testdata{$testname}) =
				add_counts($testdata1->{$testname},
					   $testdata2->{$testname});
		}
		else
		{
			# testname only present in entry1, add to result
			$result_testdata{$testname} = $testdata1->{$testname};
		}

		# update sum count hash; lines_found/lines_hit are rebuilt
		# incrementally and keep the values of the last iteration
		($result_sumcount, $lines_found, $lines_hit) =
			add_counts($result_sumcount,
				   $result_testdata{$testname});
	}

	foreach $testname (keys(%{$testdata2}))
	{
		# Skip testnames already covered by previous iteration
		if (defined($testdata1->{$testname})) { next; }

		# testname only present in entry2, add to result hash
		$result_testdata{$testname} = $testdata2->{$testname};

		# update sum count hash
		($result_sumcount, $lines_found, $lines_hit) =
			add_counts($result_sumcount,
				   $result_testdata{$testname});
	}

	# Calculate resulting sumcount

	# Store result
	set_info_entry(\%result, \%result_testdata, $result_sumcount,
		       $result_funcdata, $checkdata1, $result_testfncdata,
		       $result_sumfnccount, $result_testbrdata,
		       $result_sumbrcount, $lines_found, $lines_hit,
		       $fn_found, $fn_hit, $br_found, $br_hit);

	return(\%result);
}
#
# combine_info_files(info_ref1, info_ref2)
#
# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
# reference to resulting hash.
#
sub combine_info_files($$)
{
	my %merged = %{$_[0]};	# start with a copy of the first data set
	my %other = %{$_[1]};

	foreach my $fname (keys(%other)) {
		if ($merged{$fname}) {
			# File is covered by both data sets: combine the
			# two entries into one.
			$merged{$fname} =
				combine_info_entries($merged{$fname},
						     $other{$fname},
						     $fname);
		} else {
			# File is unique to the second data set: copy its
			# entry over unchanged.
			$merged{$fname} = $other{$fname};
		}
	}

	return(\%merged);
}
#
# get_prefix(filename_list)
#
# Search FILENAME_LIST for a directory prefix which is common to as many
# list entries as possible, so that removing this prefix will minimize the
# sum of the lengths of all resulting shortened filenames.
#
sub get_prefix(@)
{
	my @files = @_;		# provided list of filenames
	my %len_sum;		# prefix -> total length after shortening
	my $candidate;

	# Collect every possible directory prefix of every filename.
	foreach my $name (@files)
	{
		# Work on a copy so the list entry itself is not modified.
		$candidate = shorten_prefix($name);
		while ($candidate = shorten_prefix($candidate))
		{
			# NOTE(review): the init value "0" is falsy, so this
			# early exit never triggers; kept as-is for exact
			# behavioral compatibility (exists() would actually
			# short-circuit here).
			last if ($len_sum{$candidate});
			$len_sum{$candidate} = "0";
		}
	}

	# For each prefix, sum up the lengths of all filenames after
	# removing the prefix from those it applies to.
	foreach $candidate (keys(%len_sum))
	{
		foreach my $name (@files)
		{
			$len_sum{$candidate} += length($name);
			if (substr($name, 0, length($candidate)) eq
			    $candidate)
			{
				$len_sum{$candidate} -= length($candidate);
			}
		}
	}

	# Return the prefix yielding the minimal total length.
	$candidate = (keys(%len_sum))[0];
	foreach my $prefix (keys(%len_sum))
	{
		if ($len_sum{$prefix} < $len_sum{$candidate})
		{
			$candidate = $prefix;
		}
	}

	return($candidate);
}
#
# shorten_prefix(prefix)
#
# Return PREFIX shortened by last directory component.
#
sub shorten_prefix($)
{
	my $path = $_[0];

	# Drop the last path component: split on "/", discard the final
	# element and rejoin. A component-less input yields "".
	my @components = split("/", $path);
	pop(@components);
	return join("/", @components);
}
#
# get_dir_list(filename_list)
#
# Return sorted list of directories for each entry in given FILENAME_LIST.
#
sub get_dir_list(@)
{
	# Map every filename to its directory; the hash removes duplicates.
	my %dirs = map { shorten_prefix($_) => "" } @_;

	return(sort(keys(%dirs)));
}
#
# get_relative_base_path(subdirectory)
#
# Return a relative path string which references the base path when applied
# in SUBDIRECTORY.
#
# Example: get_relative_base_path("fs/mm") -> "../../"
#
sub get_relative_base_path($)
{
	my $dir = $_[0];

	# The empty directory is the base path itself.
	return("") if (!$dir);

	# One "../" per path component, i.e. number of slashes plus one
	# (tr/// in scalar context counts the matched characters).
	return("../" x (($dir =~ tr/\///) + 1));
}
#
# read_testfile(test_filename)
#
# Read in file TEST_FILENAME which contains test descriptions in the format:
#
# TN:<whitespace><test name>
# TD:<whitespace><test description>
#
# for each test case. Return a reference to a hash containing a mapping
#
# test name -> test description.
#
# Die on error.
#
sub read_testfile($)
{
	# Parse a test-description file consisting of TN:/TD: lines and
	# return a hash reference mapping test name -> description text.
	# Dies if the file cannot be opened; warns if test names had to be
	# sanitized.
	my %result;
	my $test_name;
	my $changed_testname;
	local *TEST_HANDLE;

	open(TEST_HANDLE, "<".$_[0])
		or die("ERROR: cannot open $_[0]!\n");

	while (<TEST_HANDLE>)
	{
		chomp($_);

		# Match lines beginning with TN:<whitespace(s)>
		if (/^TN:\s+(.*?)\s*$/)
		{
			# Store name for later use
			$test_name = $1;
			# Sanitize with the same s/\W/_/g rule used by
			# read_info_file so both agree on the hash key.
			if ($test_name =~ s/\W/_/g)
			{
				$changed_testname = 1;
			}
		}

		# Match lines beginning with TD:<whitespace(s)>
		# NOTE(review): a TD: line appearing before any TN: line
		# leaves $test_name undefined — confirm input files always
		# start with TN:.
		if (/^TD:\s+(.*?)\s*$/)
		{
			# Check for empty line
			if ($1)
			{
				# Add description to hash
				$result{$test_name} .= " $1";
			}
			else
			{
				# Add empty line
				$result{$test_name} .= "\n\n";
			}
		}
	}

	close(TEST_HANDLE);

	if ($changed_testname)
	{
		warn("WARNING: invalid characters removed from testname in ".
		     "descriptions file $_[0]\n");
	}

	return \%result;
}
#
# escape_html(STRING)
#
# Return a copy of STRING in which all occurrences of HTML special characters
# are escaped.
#
sub escape_html($)
{
	# Return a copy of STRING with HTML special characters replaced by
	# their character entities, tabs expanded to spaces (honoring the
	# global $tab_size) and newlines converted to <br> tags.
	my $string = $_[0];

	# Only undefined input maps to ""; the previous truthiness test
	# (!$string) also dropped the valid string "0".
	if (!defined($string)) { return ""; }

	# Escape '&' first so entities introduced below are not re-escaped.
	$string =~ s/&/&amp;/g;		# & -> &amp;
	$string =~ s/</&lt;/g;		# < -> &lt;
	$string =~ s/>/&gt;/g;		# > -> &gt;
	$string =~ s/\"/&quot;/g;	# " -> &quot;

	# Expand each tab to the number of spaces needed to reach the next
	# tab stop, based on the column of the preceding text.
	while ($string =~ /^([^\t]*)(\t)/)
	{
		my $replacement = " "x($tab_size - (length($1) % $tab_size));

		$string =~ s/^([^\t]*)(\t)/$1$replacement/;
	}

	$string =~ s/\n/<br>/g;		# \n -> <br>

	return $string;
}
#
# get_date_string()
#
# Return the current date in the form: yyyy-mm-dd
#
sub get_date_string()
{
	# localtime() slice: index 5 = years since 1900, 4 = month (0-11),
	# 3 = day of month.
	my ($yr, $mon, $mday) = (localtime())[5, 4, 3];

	return sprintf("%d-%02d-%02d", $yr + 1900, $mon + 1, $mday);
}
#
# create_sub_dir(dir_name)
#
# Create subdirectory DIR_NAME if it does not already exist, including all its
# parent directories.
#
# Die on error.
#
sub create_sub_dir($)
{
	my ($dir) = @_;

	# "mkdir -p" creates the directory together with any missing
	# parents and succeeds if it already exists; system() returns a
	# non-zero status on failure.
	if (system("mkdir", "-p", $dir)) {
		die("ERROR: cannot create directory $dir!\n");
	}
}
#
# write_description_file(descriptions, overall_found, overall_hit,
# total_fn_found, total_fn_hit, total_br_found,
# total_br_hit)
#
# Write HTML file containing all test case descriptions. DESCRIPTIONS is a
# reference to a hash containing a mapping
#
# test case name -> test case description
#
# Die on error.
#
sub write_description_file($$$$$$$)
{
	# Write "descriptions.<ext>" containing an alphabetically sorted
	# table of all test case descriptions, preceded by a summary header
	# built from the supplied overall counters. Dies (via the html
	# helpers) if the file cannot be created.
	my %description = %{$_[0]};	# test name -> description text
	my $found = $_[1];		# overall lines found
	my $hit = $_[2];		# overall lines hit
	my $fn_found = $_[3];		# overall functions found
	my $fn_hit = $_[4];		# overall functions hit
	my $br_found = $_[5];		# overall branches found
	my $br_hit = $_[6];		# overall branches hit
	my $test_name;
	local *HTML_HANDLE;

	html_create(*HTML_HANDLE,"descriptions.$html_ext");
	write_html_prolog(*HTML_HANDLE, "", "LCOV - test case descriptions");
	write_header(*HTML_HANDLE, 3, "", "", $found, $hit, $fn_found,
		     $fn_hit, $br_found, $br_hit, 0);

	write_test_table_prolog(*HTML_HANDLE,
			"Test case descriptions - alphabetical list");

	foreach $test_name (sort(keys(%description)))
	{
		# Descriptions are HTML-escaped before being written out.
		write_test_table_entry(*HTML_HANDLE, $test_name,
				       escape_html($description{$test_name}));
	}

	write_test_table_epilog(*HTML_HANDLE);
	write_html_epilog(*HTML_HANDLE, "");

	close(*HTML_HANDLE);
}
#
# write_png_files()
#
# Create all necessary .png files for the HTML-output in the current
# directory. .png-files are used as bar graphs.
#
# Die on error.
#
sub write_png_files()
{
	# Write the hard-coded PNG images used by the HTML output into the
	# current directory (bar-graph colors plus, when sorting is enabled,
	# the up/down sort arrows). The byte arrays below are complete PNG
	# files; the IHDR fields show they are tiny (1x1 for the bar colors).
	# Dies if a file cannot be created.
	my %data;
	local *PNG_HANDLE;

	# Bar color: "ruby" (used for low coverage)
	$data{"ruby.png"} =
		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x18, 0x10, 0x5d, 0x57,
		 0x34, 0x6e, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0x35, 0x2f,
		 0x00, 0x00, 0x00, 0xd0, 0x33, 0x9a, 0x9d, 0x00, 0x00, 0x00,
		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
		 0x82];
	# Bar color: "amber" (used for medium coverage)
	$data{"amber.png"} =
		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x28, 0x04, 0x98, 0xcb,
		 0xd6, 0xe0, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xe0, 0x50,
		 0x00, 0x00, 0x00, 0xa2, 0x7a, 0xda, 0x7e, 0x00, 0x00, 0x00,
		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
		 0x82];
	# Bar color: "emerald" (used for high coverage)
	$data{"emerald.png"} =
		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x22, 0x2b, 0xc9, 0xf5,
		 0x03, 0x33, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0x1b, 0xea, 0x59,
		 0x0a, 0x0a, 0x0a, 0x0f, 0xba, 0x50, 0x83, 0x00, 0x00, 0x00,
		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
		 0x82];
	# Bar background: "snow"
	$data{"snow.png"} =
		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49, 0x4d,
		 0x45, 0x07, 0xd2, 0x07, 0x11, 0x0f, 0x1e, 0x1d, 0x75, 0xbc,
		 0xef, 0x55, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59, 0x73,
		 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01, 0xd2,
		 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff,
		 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00,
		 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0x63, 0x60, 0x00,
		 0x00, 0x00, 0x02, 0x00, 0x01, 0xe5, 0x27, 0xde, 0xfc, 0x00,
		 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60,
		 0x82];
	# Transparent-style image used for bar borders (has a tRNS chunk)
	$data{"glass.png"} =
		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01,
		 0x00, 0x00, 0x00, 0x01, 0x01, 0x03, 0x00, 0x00, 0x00, 0x25,
		 0xdb, 0x56, 0xca, 0x00, 0x00, 0x00, 0x04, 0x67, 0x41, 0x4d,
		 0x41, 0x00, 0x00, 0xb1, 0x8f, 0x0b, 0xfc, 0x61, 0x05, 0x00,
		 0x00, 0x00, 0x06, 0x50, 0x4c, 0x54, 0x45, 0xff, 0xff, 0xff,
		 0x00, 0x00, 0x00, 0x55, 0xc2, 0xd3, 0x7e, 0x00, 0x00, 0x00,
		 0x01, 0x74, 0x52, 0x4e, 0x53, 0x00, 0x40, 0xe6, 0xd8, 0x66,
		 0x00, 0x00, 0x00, 0x01, 0x62, 0x4b, 0x47, 0x44, 0x00, 0x88,
		 0x05, 0x1d, 0x48, 0x00, 0x00, 0x00, 0x09, 0x70, 0x48, 0x59,
		 0x73, 0x00, 0x00, 0x0b, 0x12, 0x00, 0x00, 0x0b, 0x12, 0x01,
		 0xd2, 0xdd, 0x7e, 0xfc, 0x00, 0x00, 0x00, 0x07, 0x74, 0x49,
		 0x4d, 0x45, 0x07, 0xd2, 0x07, 0x13, 0x0f, 0x08, 0x19, 0xc4,
		 0x40, 0x56, 0x10, 0x00, 0x00, 0x00, 0x0a, 0x49, 0x44, 0x41,
		 0x54, 0x78, 0x9c, 0x63, 0x60, 0x00, 0x00, 0x00, 0x02, 0x00,
		 0x01, 0x48, 0xaf, 0xa4, 0x71, 0x00, 0x00, 0x00, 0x00, 0x49,
		 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82];
	# Sort-arrow image; only written when sorting is enabled ($sort)
	$data{"updown.png"} =
		[0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00,
		 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x0a,
		 0x00, 0x00, 0x00, 0x0e, 0x08, 0x06, 0x00, 0x00, 0x00, 0x16,
		 0xa3, 0x8d, 0xab, 0x00, 0x00, 0x00, 0x3c, 0x49, 0x44, 0x41,
		 0x54, 0x28, 0xcf, 0x63, 0x60, 0x40, 0x03, 0xff, 0xa1, 0x00,
		 0x5d, 0x9c, 0x11, 0x5d, 0x11, 0x8a, 0x24, 0x23, 0x23, 0x23,
		 0x86, 0x42, 0x6c, 0xa6, 0x20, 0x2b, 0x66, 0xc4, 0xa7, 0x08,
		 0x59, 0x31, 0x23, 0x21, 0x45, 0x30, 0xc0, 0xc4, 0x30, 0x60,
		 0x80, 0xfa, 0x6e, 0x24, 0x3e, 0x78, 0x48, 0x0a, 0x70, 0x62,
		 0xa2, 0x90, 0x81, 0xd8, 0x44, 0x01, 0x00, 0xe9, 0x5c, 0x2f,
		 0xf5, 0xe2, 0x9d, 0x0f, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x49,
		 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82] if ($sort);
	foreach (keys(%data))
	{
		open(PNG_HANDLE, ">".$_)
			or die("ERROR: cannot create $_!\n");
		binmode(PNG_HANDLE);
		# Convert the byte values to characters and write as binary.
		print(PNG_HANDLE map(chr,@{$data{$_}}));
		close(PNG_HANDLE);
	}
}
#
# write_htaccess_file()
#
# Create a ".htaccess" file in the current directory which instructs the
# web server to serve the (gzip-compressed) .html report pages with
# "Content-Encoding: x-gzip".
#
# Dies when the file cannot be created.
#
sub write_htaccess_file()
{
	# Three-arg open with a lexical handle avoids the package-global
	# typeglob and any mode interpretation of the filename.
	open(my $htaccess_fh, ">", ".htaccess")
		or die("ERROR: cannot open .htaccess for writing!\n");
	print($htaccess_fh "AddEncoding x-gzip .html\n");
	close($htaccess_fh);
}
#
# write_css_file()
#
# Write the cascading style sheet file gcov.css to the current directory.
# This file defines basic layout attributes of all generated HTML pages.
#
# If the user specified an external style sheet via $css_filename, that
# file is copied instead of writing the built-in one.
#
sub write_css_file()
{
	local *CSS_HANDLE;

	# Check for a specified external style sheet file
	if ($css_filename)
	{
		# Simply copy that file ("and die": system() returns
		# non-zero on failure)
		system("cp", $css_filename, "gcov.css")
			and die("ERROR: cannot copy file $css_filename!\n");
		return;
	}
	# Three-arg open avoids interpreting characters in the filename
	open(CSS_HANDLE, ">", "gcov.css")
		or die ("ERROR: cannot open gcov.css for writing!\n");


	# *************************************************************

	# NOTE: the previous code assigned the heredoc through $_
	# (my $css_data = ($_=<<"...")), clobbering the caller's $_.
	# Assign directly instead.
	my $css_data = <<"END_OF_CSS";
/* All views: initial background and text color */
body
{
color: #000000;
background-color: #FFFFFF;
}
/* All views: standard link format*/
a:link
{
color: #284FA8;
text-decoration: underline;
}
/* All views: standard link - visited format */
a:visited
{
color: #00CB40;
text-decoration: underline;
}
/* All views: standard link - activated format */
a:active
{
color: #FF0040;
text-decoration: underline;
}
/* All views: main title format */
td.title
{
text-align: center;
padding-bottom: 10px;
font-family: sans-serif;
font-size: 20pt;
font-style: italic;
font-weight: bold;
}
/* All views: header item format */
td.headerItem
{
text-align: right;
padding-right: 6px;
font-family: sans-serif;
font-weight: bold;
vertical-align: top;
white-space: nowrap;
}
/* All views: header item value format */
td.headerValue
{
text-align: left;
color: #284FA8;
font-family: sans-serif;
font-weight: bold;
white-space: nowrap;
}
/* All views: header item coverage table heading */
td.headerCovTableHead
{
text-align: center;
padding-right: 6px;
padding-left: 6px;
padding-bottom: 0px;
font-family: sans-serif;
font-size: 80%;
white-space: nowrap;
}
/* All views: header item coverage table entry */
td.headerCovTableEntry
{
text-align: right;
color: #284FA8;
font-family: sans-serif;
font-weight: bold;
white-space: nowrap;
padding-left: 12px;
padding-right: 4px;
background-color: #DAE7FE;
}
/* All views: header item coverage table entry for high coverage rate */
td.headerCovTableEntryHi
{
text-align: right;
color: #000000;
font-family: sans-serif;
font-weight: bold;
white-space: nowrap;
padding-left: 12px;
padding-right: 4px;
background-color: #A7FC9D;
}
/* All views: header item coverage table entry for medium coverage rate */
td.headerCovTableEntryMed
{
text-align: right;
color: #000000;
font-family: sans-serif;
font-weight: bold;
white-space: nowrap;
padding-left: 12px;
padding-right: 4px;
background-color: #FFEA20;
}
/* All views: header item coverage table entry for low coverage rate */
td.headerCovTableEntryLo
{
text-align: right;
color: #000000;
font-family: sans-serif;
font-weight: bold;
white-space: nowrap;
padding-left: 12px;
padding-right: 4px;
background-color: #FF0000;
}
/* All views: header legend value for legend entry */
td.headerValueLeg
{
text-align: left;
color: #000000;
font-family: sans-serif;
font-size: 80%;
white-space: nowrap;
padding-top: 4px;
}
/* All views: color of horizontal ruler */
td.ruler
{
background-color: #6688D4;
}
/* All views: version string format */
td.versionInfo
{
text-align: center;
padding-top: 2px;
font-family: sans-serif;
font-style: italic;
}
/* Directory view/File view (all)/Test case descriptions:
table headline format */
td.tableHead
{
text-align: center;
color: #FFFFFF;
background-color: #6688D4;
font-family: sans-serif;
font-size: 120%;
font-weight: bold;
white-space: nowrap;
padding-left: 4px;
padding-right: 4px;
}
span.tableHeadSort
{
padding-right: 4px;
}
/* Directory view/File view (all): filename entry format */
td.coverFile
{
text-align: left;
padding-left: 10px;
padding-right: 20px;
color: #284FA8;
background-color: #DAE7FE;
font-family: monospace;
}
/* Directory view/File view (all): bar-graph entry format*/
td.coverBar
{
padding-left: 10px;
padding-right: 10px;
background-color: #DAE7FE;
}
/* Directory view/File view (all): bar-graph outline color */
td.coverBarOutline
{
background-color: #000000;
}
/* Directory view/File view (all): percentage entry for files with
high coverage rate */
td.coverPerHi
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #A7FC9D;
font-weight: bold;
font-family: sans-serif;
}
/* Directory view/File view (all): line count entry for files with
high coverage rate */
td.coverNumHi
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #A7FC9D;
white-space: nowrap;
font-family: sans-serif;
}
/* Directory view/File view (all): percentage entry for files with
medium coverage rate */
td.coverPerMed
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #FFEA20;
font-weight: bold;
font-family: sans-serif;
}
/* Directory view/File view (all): line count entry for files with
medium coverage rate */
td.coverNumMed
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #FFEA20;
white-space: nowrap;
font-family: sans-serif;
}
/* Directory view/File view (all): percentage entry for files with
low coverage rate */
td.coverPerLo
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #FF0000;
font-weight: bold;
font-family: sans-serif;
}
/* Directory view/File view (all): line count entry for files with
low coverage rate */
td.coverNumLo
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #FF0000;
white-space: nowrap;
font-family: sans-serif;
}
/* File view (all): "show/hide details" link format */
a.detail:link
{
color: #B8D0FF;
font-size:80%;
}
/* File view (all): "show/hide details" link - visited format */
a.detail:visited
{
color: #B8D0FF;
font-size:80%;
}
/* File view (all): "show/hide details" link - activated format */
a.detail:active
{
color: #FFFFFF;
font-size:80%;
}
/* File view (detail): test name entry */
td.testName
{
text-align: right;
padding-right: 10px;
background-color: #DAE7FE;
font-family: sans-serif;
}
/* File view (detail): test percentage entry */
td.testPer
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #DAE7FE;
font-family: sans-serif;
}
/* File view (detail): test lines count entry */
td.testNum
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #DAE7FE;
font-family: sans-serif;
}
/* Test case descriptions: test name format*/
dt
{
font-family: sans-serif;
font-weight: bold;
}
/* Test case descriptions: description table body */
td.testDescription
{
padding-top: 10px;
padding-left: 30px;
padding-bottom: 10px;
padding-right: 30px;
background-color: #DAE7FE;
}
/* Source code view: function entry */
td.coverFn
{
text-align: left;
padding-left: 10px;
padding-right: 20px;
color: #284FA8;
background-color: #DAE7FE;
font-family: monospace;
}
/* Source code view: function entry zero count*/
td.coverFnLo
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #FF0000;
font-weight: bold;
font-family: sans-serif;
}
/* Source code view: function entry nonzero count*/
td.coverFnHi
{
text-align: right;
padding-left: 10px;
padding-right: 10px;
background-color: #DAE7FE;
font-weight: bold;
font-family: sans-serif;
}
/* Source code view: source code format */
pre.source
{
font-family: monospace;
white-space: pre;
margin-top: 2px;
}
/* Source code view: line number format */
span.lineNum
{
background-color: #EFE383;
}
/* Source code view: format for lines which were executed */
td.lineCov,
span.lineCov
{
background-color: #CAD7FE;
}
/* Source code view: format for Cov legend */
span.coverLegendCov
{
padding-left: 10px;
padding-right: 10px;
padding-bottom: 2px;
background-color: #CAD7FE;
}
/* Source code view: format for lines which were not executed */
td.lineNoCov,
span.lineNoCov
{
background-color: #FF6230;
}
/* Source code view: format for NoCov legend */
span.coverLegendNoCov
{
padding-left: 10px;
padding-right: 10px;
padding-bottom: 2px;
background-color: #FF6230;
}
/* Source code view (function table): standard link - visited format */
td.lineNoCov > a:visited,
td.lineCov > a:visited
{
color: black;
text-decoration: underline;
}
/* Source code view: format for lines which were executed only in a
previous version */
span.lineDiffCov
{
background-color: #B5F7AF;
}
/* Source code view: format for branches which were executed
* and taken */
span.branchCov
{
background-color: #CAD7FE;
}
/* Source code view: format for branches which were executed
* but not taken */
span.branchNoCov
{
background-color: #FF6230;
}
/* Source code view: format for branches which were not executed */
span.branchNoExec
{
background-color: #FF6230;
}
/* Source code view: format for the source code heading line */
pre.sourceHeading
{
white-space: pre;
font-family: monospace;
font-weight: bold;
margin: 0px;
}
/* All views: header legend value for low rate */
td.headerValueLegL
{
font-family: sans-serif;
text-align: center;
white-space: nowrap;
padding-left: 4px;
padding-right: 2px;
background-color: #FF0000;
font-size: 80%;
}
/* All views: header legend value for med rate */
td.headerValueLegM
{
font-family: sans-serif;
text-align: center;
white-space: nowrap;
padding-left: 2px;
padding-right: 2px;
background-color: #FFEA20;
font-size: 80%;
}
/* All views: header legend value for hi rate */
td.headerValueLegH
{
font-family: sans-serif;
text-align: center;
white-space: nowrap;
padding-left: 2px;
padding-right: 4px;
background-color: #A7FC9D;
font-size: 80%;
}
/* All views except source code view: legend format for low coverage */
span.coverLegendCovLo
{
padding-left: 10px;
padding-right: 10px;
padding-top: 2px;
background-color: #FF0000;
}
/* All views except source code view: legend format for med coverage */
span.coverLegendCovMed
{
padding-left: 10px;
padding-right: 10px;
padding-top: 2px;
background-color: #FFEA20;
}
/* All views except source code view: legend format for hi coverage */
span.coverLegendCovHi
{
padding-left: 10px;
padding-right: 10px;
padding-top: 2px;
background-color: #A7FC9D;
}
END_OF_CSS

	# *************************************************************

	# Remove leading tab from all lines
	$css_data =~ s/^\t//gm;

	print(CSS_HANDLE $css_data);

	close(CSS_HANDLE);
}
#
# get_bar_graph_code(base_dir, cover_found, cover_hit)
#
# Build the HTML snippet rendering a horizontal bar graph for a coverage
# rate of cover_hit * 100 / cover_found.  Returns "" when no lines were
# instrumented.
#
sub get_bar_graph_code($$$)
{
	my ($base_dir, $found, $hit) = @_;
	my $graph_code;

	# Nothing to draw without instrumented lines
	return "" if ($found == 0);

	my $rate      = $hit * 100 / $found;
	my $alt       = sprintf("%.1f", $rate)."%";
	my $width     = sprintf("%.0f", $rate);
	my $remainder = sprintf("%d", 100 - $width);
	# Pick the bar color (.png) matching the low/med/hi classification
	my $png_name  = $rate_png[classify_rate($found, $hit, $med_limit,
						$hi_limit)];

	if ($width == 0)
	{
		# Zero coverage: only the "snow" filler bar is shown
		$graph_code = <<END_OF_HTML;
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="${base_dir}snow.png" width=100 height=10 alt="$alt"></td></tr></table>
END_OF_HTML
	}
	elsif ($width == 100)
	{
		# Full coverage: colored bar only
		$graph_code = <<END_OF_HTML;
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="${base_dir}$png_name" width=100 height=10 alt="$alt"></td></tr></table>
END_OF_HTML
	}
	else
	{
		# Partial coverage: colored bar followed by filler
		$graph_code = <<END_OF_HTML;
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="${base_dir}$png_name" width=$width height=10 alt="$alt"><img src="${base_dir}snow.png" width=$remainder height=10 alt="$alt"></td></tr></table>
END_OF_HTML
	}
	# Strip leading tabs and the trailing newline
	$graph_code =~ s/^\t+//gm;
	chomp($graph_code);

	return($graph_code);
}
#
# classify_rate(found, hit, med_limit, hi_limit)
#
# Classify a coverage rate: 0 = low, 1 = medium, 2 = high.
# A file without instrumented lines (found == 0) counts as high.
#
sub classify_rate($$$$)
{
	my ($found, $hit, $med, $hi) = @_;

	# Treat "nothing to cover" as fully covered
	return 2 if ($found == 0);

	my $rate = $hit * 100 / $found;

	return 0 if ($rate < $med);
	return 1 if ($rate < $hi);
	return 2;
}
#
# write_html(filehandle, html_code)
#
# Write out HTML_CODE to FILEHANDLE while removing a leading tabulator mark
# in each line of HTML_CODE.
#
# Dies (reporting $!) when the underlying print fails, e.g. on a full
# disk.
#
sub write_html(*$)
{
# Alias the caller's handle into a local glob so that the bareword
# HTML_HANDLE below refers to it; "local" restores the glob on exit.
local *HTML_HANDLE = $_[0];
# Work on a copy so the caller's string is left unmodified
my $html_code = $_[1];

# Remove leading tab from all lines
$html_code =~ s/^\t//gm;

print(HTML_HANDLE $html_code)
or die("ERROR: cannot write HTML data ($!)\n");
}
#
# write_html_prolog(filehandle, base_dir, pagetitle)
#
# Write an HTML prolog common to all HTML files to FILEHANDLE. PAGETITLE will
# be used as HTML page title. BASE_DIR contains a relative path which points
# to the base directory.
#
sub write_html_prolog(*$$)
{
	my ($handle, $basedir, $pagetitle) = @_;

	# Instantiate the global prolog template by filling in its
	# placeholders with the page-specific values.
	my $prolog = $html_prolog;
	$prolog =~ s/\@pagetitle\@/$pagetitle/g;
	$prolog =~ s/\@basedir\@/$basedir/g;

	write_html($handle, $prolog);
}
#
# write_header_prolog(filehandle, base_dir)
#
# Write beginning of page header HTML code.
#
# $_[0] is the output filehandle, $_[1] the relative path back to the
# report base directory (used to locate glass.png).  Interpolates the
# global $title.  The tables opened here are closed by
# write_header_epilog().
#
sub write_header_prolog(*$)
{
# *************************************************************

# The heredoc is emitted verbatim; $_[1] and $title interpolate
# directly into the HTML.
write_html($_[0], <<END_OF_HTML)
<table width="100%" border=0 cellspacing=0 cellpadding=0>
<tr><td class="title">$title</td></tr>
<tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
<tr>
<td width="100%">
<table cellpadding=1 border=0 width="100%">
END_OF_HTML
;

# *************************************************************
}
#
# write_header_line(handle, content)
#
# Write a header table row.  CONTENT is a list of cell descriptors, each
# an array ref of [width, class, text, colspan]; undefined fields are
# simply omitted from the generated <td> tag.
#
sub write_header_line(*@)
{
	my ($handle, @content) = @_;

	write_html($handle, " <tr>\n");
	foreach my $cell (@content) {
		my ($width, $class, $text, $colspan) = @{$cell};

		# Turn each defined field into its HTML attribute string
		my $width_attr   = defined($width)
				   ? " width=\"$width\"" : "";
		my $class_attr   = defined($class)
				   ? " class=\"$class\"" : "";
		my $colspan_attr = defined($colspan)
				   ? " colspan=\"$colspan\"" : "";
		$text = "" unless (defined($text));

		write_html($handle,
			   " <td$width_attr$class_attr$colspan_attr>$text</td>\n");
	}
	write_html($handle, " </tr>\n");
}
#
# write_header_epilog(filehandle, base_dir)
#
# Write end of page header HTML code.
#
# Closes the tables opened by write_header_prolog(); $_[1] is the
# relative path back to the report base directory (for glass.png).
#
sub write_header_epilog(*$)
{
# *************************************************************

write_html($_[0], <<END_OF_HTML)
<tr><td><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
</table>
</td>
</tr>
<tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
</table>
END_OF_HTML
;

# *************************************************************
}
#
# write_file_table_prolog(handle, file_heading, ([heading, num_cols], ...))
#
# Write heading for file table.  Each element of COLUMNS describes one
# heading cell and the number of value columns it spans.
#
sub write_file_table_prolog(*$@)
{
	my ($handle, $file_heading, @columns) = @_;
	my $num_columns = 0;
	my $width;

	# Per-column width shrinks as more column groups are shown
	my $n = scalar(@columns);
	$width = ($n == 1) ? 20 : ($n == 2) ? 10 : ($n > 2) ? 8 : undef;

	# Total number of value columns (a heading may span several)
	foreach my $column (@columns) {
		my ($heading, $cols) = @{$column};

		$num_columns += $cols;
	}
	my $file_width = 100 - $num_columns * $width;

	# Table definition
	write_html($handle, <<END_OF_HTML);
<center>
<table width="80%" cellpadding=1 cellspacing=1 border=0>
<tr>
<td width="$file_width%"><br></td>
END_OF_HTML
	# Empty first row
	foreach my $column (@columns) {
		my ($heading, $cols) = @{$column};

		for (; $cols > 0; $cols--) {
			write_html($handle, <<END_OF_HTML);
<td width="$width%"></td>
END_OF_HTML
		}
	}
	# Next row
	write_html($handle, <<END_OF_HTML);
</tr>
<tr>
<td class="tableHead">$file_heading</td>
END_OF_HTML
	# Heading row
	foreach my $column (@columns) {
		my ($heading, $cols) = @{$column};
		my $colspan = ($cols > 1) ? " colspan=$cols" : "";

		write_html($handle, <<END_OF_HTML);
<td class="tableHead"$colspan>$heading</td>
END_OF_HTML
	}
	write_html($handle, <<END_OF_HTML);
</tr>
END_OF_HTML
}
# write_file_table_entry(handle, base_dir, filename, page_link,
# ([ found, hit, med_limit, hi_limit, graph ], ..)
#
# Write an entry of the file table.  Each entry descriptor produces a
# rate cell and a count cell, optionally preceded by a bar graph.
#
sub write_file_table_entry(*$$$@)
{
	my ($handle, $base_dir, $filename, $page_link, @entries) = @_;

	# Link the filename to its detail page when a link was provided
	my $file_code = (defined($page_link) && $page_link ne "")
			? "<a href=\"$page_link\">$filename</a>"
			: $filename;

	# First column: filename
	write_html($handle, <<END_OF_HTML);
<tr>
<td class="coverFile">$file_code</td>
END_OF_HTML
	# Remaining columns as described by the entry list
	foreach my $item (@entries) {
		my ($found, $hit, $med, $hi, $graph) = @{$item};
		my ($rate, $class);

		if ($graph) {
			# Optional bar graph column
			my $bar_graph = get_bar_graph_code($base_dir,
							   $found, $hit);

			write_html($handle, <<END_OF_HTML);
<td class="coverBar" align="center">
$bar_graph
</td>
END_OF_HTML
		}
		# Rate text and color class; no instrumented lines counts
		# as "Hi"
		if ($found == 0) {
			$rate = "-";
			$class = "Hi";
		} else {
			$rate = sprintf("%.1f %%", $hit * 100 / $found);
			$class = $rate_name[classify_rate($found, $hit,
							  $med, $hi)];
		}
		write_html($handle, <<END_OF_HTML);
<td class="coverPer$class">$rate</td>
<td class="coverNum$class">$hit / $found</td>
END_OF_HTML
	}
	# End of row
	write_html($handle, <<END_OF_HTML);
</tr>
END_OF_HTML
}
#
# write_file_table_detail_entry(filehandle, test_name, ([found, hit], ...))
#
# Write entry for detail section in file table.
#
sub write_file_table_detail_entry(*$@)
{
	my ($handle, $test, @entries) = @_;

	# Prettify the test name: mark unnamed tests and tests whose data
	# was converted from a baseline (",diff" suffix)
	if ($test eq "") {
		$test = "<span style=\"font-style:italic\"><unnamed></span>";
	} elsif ($test =~ /^(.*),diff$/) {
		$test = $1." (converted)";
	}
	# Testname cell
	write_html($handle, <<END_OF_HTML);
<tr>
<td class="testName" colspan=2>$test</td>
END_OF_HTML
	# One rate cell and one count cell per entry
	foreach my $item (@entries) {
		my ($found, $hit) = @{$item};
		my $rate = ($found > 0)
			   ? sprintf("%.1f %%", $hit * 100 / $found)
			   : "-";

		write_html($handle, <<END_OF_HTML);
<td class="testPer">$rate</td>
<td class="testNum">$hit / $found</td>
END_OF_HTML
	}
	write_html($handle, <<END_OF_HTML);
</tr>
END_OF_HTML

	# *************************************************************
}
#
# write_file_table_epilog(filehandle)
#
# Write end of file table HTML code.
#
# Closes the table opened by write_file_table_prolog().
#
sub write_file_table_epilog(*)
{
# *************************************************************

write_html($_[0], <<END_OF_HTML)
</table>
</center>
<br>
END_OF_HTML
;

# *************************************************************
}
#
# write_test_table_prolog(filehandle, table_heading)
#
# Write heading for test case description table.
#
# $_[1] is the heading text; the <dl> opened here is closed by
# write_test_table_epilog().
#
sub write_test_table_prolog(*$)
{
# *************************************************************

write_html($_[0], <<END_OF_HTML)
<center>
<table width="80%" cellpadding=2 cellspacing=1 border=0>
<tr>
<td><br></td>
</tr>
<tr>
<td class="tableHead">$_[1]</td>
</tr>
<tr>
<td class="testDescription">
<dl>
END_OF_HTML
;

# *************************************************************
}
#
# write_test_table_entry(filehandle, test_name, test_description)
#
# Write entry for the test table.
#
# Emits a <dt>/<dd> pair; the test name doubles as an HTML anchor so
# other pages can link to this description.
#
sub write_test_table_entry(*$$)
{
# *************************************************************

write_html($_[0], <<END_OF_HTML)
<dt>$_[1]<a name="$_[1]"> </a></dt>
<dd>$_[2]<br><br></dd>
END_OF_HTML
;

# *************************************************************
}
#
# write_test_table_epilog(filehandle)
#
# Write end of test description table HTML code.
#
# Closes the <dl> and table opened by write_test_table_prolog().
#
sub write_test_table_epilog(*)
{
# *************************************************************

write_html($_[0], <<END_OF_HTML)
</dl>
</td>
</tr>
</table>
</center>
<br>
END_OF_HTML
;

# *************************************************************
}
#
# fmt_centered(width, text)
#
# Center TEXT within WIDTH characters by padding it with spaces on both
# sides; an odd amount of padding puts the extra space on the right.
#
sub fmt_centered($$)
{
	my ($width, $text) = @_;
	my $total_pad = $width - length($text);
	my $left_pad = int($total_pad / 2);
	my $right_pad = $total_pad - $left_pad;

	return sprintf("%s%s%s", " " x $left_pad, $text, " " x $right_pad);
}
#
# write_source_prolog(filehandle)
#
# Write start of source code table.
#
# The heading row shows "Line data" (and "Branch data" when branch
# coverage is enabled), centered within the global field widths.
#
sub write_source_prolog(*)
{
my $lineno_heading = " ";
my $branch_heading = "";
my $line_heading = fmt_centered($line_field_width, "Line data");
my $source_heading = " Source code";

# Only reserve a branch-data column when branch coverage was collected
if ($br_coverage) {
$branch_heading = fmt_centered($br_field_width, "Branch data").
" ";
}
# *************************************************************

write_html($_[0], <<END_OF_HTML)
<table cellpadding=0 cellspacing=0 border=0>
<tr>
<td><br></td>
</tr>
<tr>
<td>
<pre class="sourceHeading">${lineno_heading}${branch_heading}${line_heading} ${source_heading}</pre>
<pre class="source">
END_OF_HTML
;

# *************************************************************
}
#
# get_branch_blocks(brdata)
#
# Group branches that belong to the same basic block.
#
# Returns: [block1, block2, ...]
# block: [branch1, branch2, ...]
# branch: [block_num, branch_num, taken_count, text_length, open, close]
#
sub get_branch_blocks($)
{
	my ($brdata) = @_;
	my @blocks;
	my $current = [];
	my $prev_block_num;
	my $num = br_ivec_len($brdata);

	# Walk all branches in order; a change of block number starts a
	# new group
	foreach my $i (0 .. $num - 1) {
		my ($block_num, $branch, $taken) = br_ivec_get($brdata, $i);

		if (defined($prev_block_num) &&
		    $block_num != $prev_block_num) {
			push(@blocks, $current);
			$current = [];
		}
		# Display length starts at 3 (e.g. " + "); the open/close
		# brace flags are filled in below
		push(@{$current}, [$block_num, $branch, $taken, 3, 0, 0]);
		$prev_block_num = $block_num;
	}
	push(@blocks, $current) if (scalar(@{$current}) > 0);

	# Braces around each group add one character to its first and
	# last branch
	foreach my $group (@blocks) {
		my $first = $group->[0];
		my $last = $group->[scalar(@{$group}) - 1];

		$first->[$BR_OPEN] = 1;
		$first->[$BR_LEN]++;
		$last->[$BR_CLOSE] = 1;
		$last->[$BR_LEN]++;
	}
	return @blocks;
}
#
# get_block_len(block)
#
# Calculate total display text length of all branches in a block.
#
sub get_block_len($)
{
	my ($block) = @_;
	my $total = 0;

	$total += $_->[$BR_LEN] foreach (@{$block});

	return $total;
}
#
# get_branch_html(brdata)
#
# Return a list of HTML lines which represent the specified branch coverage
# data in source code view.
#
# Blocks of branches are packed into lines of at most $br_field_width
# characters; blocks too wide for a single line are split across lines.
# Each branch renders as "+" (taken), "-" (executed but not taken) or
# "#" (not executed), with a tooltip carrying the details.
#
sub get_branch_html($)
{
my ($brdata) = @_;
my @blocks = get_branch_blocks($brdata);
my $block;
my $branch;
my $line_len = 0;
my $line = []; # [branch2|" ", branch|" ", ...]
my @lines; # [line1, line2, ...]
my @result;

# Distribute blocks to lines
foreach $block (@blocks) {
my $block_len = get_block_len($block);

# Does this block fit into the current line?
if ($line_len + $block_len <= $br_field_width) {
# Add it
$line_len += $block_len;
push(@{$line}, @{$block});
next;
} elsif ($block_len <= $br_field_width) {
# It would fit if the line was empty - add it to new
# line
push(@lines, $line);
$line_len = $block_len;
$line = [ @{$block} ];
next;
}
# Split the block into several lines
foreach $branch (@{$block}) {
if ($line_len + $branch->[$BR_LEN] >= $br_field_width) {
# Start a new line
if (($line_len + 1 <= $br_field_width) &&
    scalar(@{$line}) > 0 &&
    !$line->[scalar(@$line) - 1]->[$BR_CLOSE]) {
# Try to align branch symbols to be in
# one # row
push(@{$line}, " ");
}
push(@lines, $line);
$line_len = 0;
$line = [];
}
push(@{$line}, $branch);
$line_len += $branch->[$BR_LEN];
}
}
push(@lines, $line);

# Convert to HTML
foreach $line (@lines) {
my $current = "";
my $current_len = 0;

foreach $branch (@$line) {
# Skip alignment space
if ($branch eq " ") {
$current .= " ";
$current_len++;
next;
}

my ($block_num, $br_num, $taken, $len, $open, $close) =
   @{$branch};
my $class;
my $title;
my $text;

# $taken is either '-' (never executed) or a count
if ($taken eq '-') {
$class	= "branchNoExec";
$text	= " # ";
$title	= "Branch $br_num was not executed";
} elsif ($taken == 0) {
$class	= "branchNoCov";
$text	= " - ";
$title	= "Branch $br_num was not taken";
} else {
$class	= "branchCov";
$text	= " + ";
$title	= "Branch $br_num was taken $taken ".
	  "time";
$title .= "s" if ($taken > 1);
}
# Surround each block with braces
$current .= "[" if ($open);
$current .= "<span class=\"$class\" title=\"$title\">";
$current .= $text."</span>";
$current .= "]" if ($close);
$current_len += $len;
}

# Right-align result text
if ($current_len < $br_field_width) {
$current = (" "x($br_field_width - $current_len)).
	   $current;
}
push(@result, $current);
}

return @result;
}
#
# format_count(count, width)
#
# Return a right-aligned representation of count that fits in width
# characters.  Counts too large for the field are abbreviated to a
# ">N*10^E" lower bound.
#
sub format_count($$)
{
	my ($count, $width) = @_;
	my $exp = 0;
	my $result = sprintf("%*.0f", $width, $count);

	# Keep dividing by ten until the abbreviated form fits (or the
	# count drops below ten)
	while (length($result) > $width && $count >= 10) {
		$exp++;
		$count = int($count / 10);
		$result = sprintf("%*s", $width, ">$count*10^$exp");
	}
	return $result;
}
#
# write_source_line(filehandle, line_num, source, hit_count, converted,
# brdata, add_anchor)
#
# Write formatted source code line. Return a line in a format as needed
# by gen_png()
#
# An undefined hit_count means the line was not instrumented; a count of
# zero marks it "not covered"; "converted" data (with $highlight set)
# gets the diff-coverage style.  ADD_ANCHOR emits a line-number anchor
# used by the overview image map.
#
sub write_source_line(*$$$$$$)
{
my ($handle, $line, $source, $count, $converted, $brdata,
    $add_anchor) = @_;
my $source_format;
my $count_format;
my $result;
my $anchor_start = "";
my $anchor_end = "";
my $count_field_width = $line_field_width - 1;
my @br_html;
my $html;

# Get branch HTML data for this line
@br_html = get_branch_html($brdata) if ($br_coverage);

# Pick count text and highlight span for this line's coverage state
if (!defined($count)) {
$result		= "";
$source_format	= "";
$count_format	= " "x$count_field_width;
}
elsif ($count == 0) {
$result		= $count;
$source_format	= '<span class="lineNoCov">';
$count_format	= format_count($count, $count_field_width);
}
elsif ($converted && defined($highlight)) {
# "*" prefix tells gen_png() this count came from converted data
$result		= "*".$count;
$source_format	= '<span class="lineDiffCov">';
$count_format	= format_count($count, $count_field_width);
}
else {
$result		= $count;
$source_format	= '<span class="lineCov">';
$count_format	= format_count($count, $count_field_width);
}
$result .= ":".$source;

# Write out a line number navigation anchor every $nav_resolution
# lines if necessary
if ($add_anchor)
{
$anchor_start	= "<a name=\"$_[1]\">";
$anchor_end	= "</a>";
}


# *************************************************************

$html = $anchor_start;
$html .= "<span class=\"lineNum\">".sprintf("%8d", $line)." </span>";
$html .= shift(@br_html).":" if ($br_coverage);
$html .= "$source_format$count_format : ";
$html .= escape_html($source);
$html .= "</span>" if ($source_format);
$html .= $anchor_end."\n";

write_html($handle, $html);

if ($br_coverage) {
# Add lines for overlong branch information
foreach (@br_html) {
write_html($handle, "<span class=\"lineNum\">".
	   " </span>$_\n");
}
}
# *************************************************************

return($result);
}
#
# write_source_epilog(filehandle)
#
# Write end of source code table.
#
# Closes the <pre> and table opened by write_source_prolog().
#
sub write_source_epilog(*)
{
# *************************************************************

write_html($_[0], <<END_OF_HTML)
</pre>
</td>
</tr>
</table>
<br>
END_OF_HTML
;

# *************************************************************
}
#
# write_html_epilog(filehandle, base_dir[, break_frames])
#
# Write HTML page footer to FILEHANDLE. BREAK_FRAMES should be set when
# this page is embedded in a frameset, clicking the URL link will then
# break this frameset.
#
# Also appends the user-supplied $html_epilog template with its
# \@basedir\@ placeholder expanded.
#
sub write_html_epilog(*$;$)
{
my $basedir = $_[1];
my $break_code = "";
my $epilog;

# target="_parent" makes the version link escape the frameset
if (defined($_[2]))
{
$break_code = " target=\"_parent\"";
}

# *************************************************************

write_html($_[0], <<END_OF_HTML)
<table width="100%" border=0 cellspacing=0 cellpadding=0>
<tr><td class="ruler"><img src="$_[1]glass.png" width=3 height=3 alt=""></td></tr>
<tr><td class="versionInfo">Generated by: <a href="$lcov_url"$break_code>$lcov_version</a></td></tr>
</table>
<br>
END_OF_HTML
;

$epilog = $html_epilog;
$epilog =~ s/\@basedir\@/$basedir/g;

write_html($_[0], $epilog);
}
#
# write_frameset(filehandle, basedir, basename, pagetitle)
#
# Write the frameset page for a source file: a narrow overview frame on
# the left and the source view on the right.  $_[1] is the relative
# path to the base directory (for gcov.css), $_[2] the source file's
# base name, $_[3] the page title.
#
sub write_frameset(*$$$)
{
my $frame_width = $overview_width + 40;

# *************************************************************

write_html($_[0], <<END_OF_HTML)
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN">

<html lang="en">

<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>$_[3]</title>
<link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
</head>

<frameset cols="$frame_width,*">
<frame src="$_[2].gcov.overview.$html_ext" name="overview">
<frame src="$_[2].gcov.$html_ext" name="source">
<noframes>
<center>Frames not supported by your browser!<br></center>
</noframes>
</frameset>

</html>
END_OF_HTML
;

# *************************************************************
}
#
# sub write_overview_line(filehandle, basename, line, link)
#
# Emit one image-map <area> covering $nav_resolution pixel rows of the
# overview image; clicking it jumps the "source" frame to anchor $_[3].
#
sub write_overview_line(*$$$)
{
my $y1 = $_[2] - 1;
my $y2 = $y1 + $nav_resolution - 1;
my $x2 = $overview_width - 1;

# *************************************************************

write_html($_[0], <<END_OF_HTML)
<area shape="rect" coords="0,$y1,$x2,$y2" href="$_[1].gcov.$html_ext#$_[3]" target="source" alt="overview">
END_OF_HTML
;

# *************************************************************
}
#
# write_overview(filehandle, basedir, basename, pagetitle, lines)
#
# Write the overview frame page: a clickable image map over the
# .gcov.png overview image, with one <area> per $nav_resolution source
# lines.  $_[4] is the total number of source lines.
#
sub write_overview(*$$$$)
{
my $index;
my $max_line = $_[4] - 1;
my $offset;

# *************************************************************

write_html($_[0], <<END_OF_HTML)
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html lang="en">

<head>
<title>$_[3]</title>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<link rel="stylesheet" type="text/css" href="$_[1]gcov.css">
</head>

<body>
<map name="overview">
END_OF_HTML
;

# *************************************************************

# Make $offset the next higher multiple of $nav_resolution
$offset = ($nav_offset + $nav_resolution - 1) / $nav_resolution;
$offset = sprintf("%d", $offset ) * $nav_resolution;

# Create image map for overview image
for ($index = 1; $index <= $_[4]; $index += $nav_resolution)
{
# Enforce nav_offset: early areas all link to anchor 1 so the
# target line lands below the top of the frame
if ($index < $offset + 1)
{
write_overview_line($_[0], $_[2], $index, 1);
}
else
{
write_overview_line($_[0], $_[2], $index, $index - $offset);
}
}

# *************************************************************

write_html($_[0], <<END_OF_HTML)
</map>

<center>
<a href="$_[2].gcov.$html_ext#top" target="source">Top</a><br><br>
<img src="$_[2].gcov.png" width=$overview_width height=$max_line alt="Overview" border=0 usemap="#overview">
</center>
</body>
</html>
END_OF_HTML
;

# *************************************************************
}
# format_rate(found, hit)
#
# Return formatted percent string for coverage rate, or "-" when no
# lines were instrumented.
#
sub format_rate($$)
{
	my ($found, $hit) = @_;

	return "-" if ($found == 0);
	return sprintf("%.1f", $hit * 100 / $found)." %";
}
#
# max(a, b)
#
# Return the larger of the two numeric arguments.
#
sub max($$)
{
	my ($x, $y) = @_;

	return ($x > $y) ? $x : $y;
}
#
# write_header(filehandle, type, trunc_file_name, rel_file_name, lines_found,
# lines_hit, funcs_found, funcs_hit, sort_type)
#
# Write a complete standard page header. TYPE may be (0, 1, 2, 3, 4)
# corresponding to (directory view header, file view header, source view
# header, test case description header, function view header)
#
# Write a complete standard page header to HTML_HANDLE.  TYPE is one of
# the $HDR_* constants (see the comment preceding this function) and
# selects the "current view" breadcrumb and which summary rows appear.
sub write_header(*$$$$$$$$$$)
{
    local *HTML_HANDLE = $_[0];
    my $type = $_[1];           # one of the $HDR_* view constants
    my $trunc_name = $_[2];     # display (possibly truncated) name
    my $rel_filename = $_[3];
    my $lines_found = $_[4];
    my $lines_hit = $_[5];
    my $fn_found = $_[6];
    my $fn_hit = $_[7];
    my $br_found = $_[8];
    my $br_hit = $_[9];
    my $sort_type = $_[10];     # NOTE(review): currently unused in this body
    my $base_dir;
    my $view;
    my $test;
    my $base_name;
    my $style;
    my $rate;
    my @row_left;
    my @row_right;
    my $num_rows;
    my $i;

    $base_name = basename($rel_filename);

    # Prepare text for "current view" field
    if ($type == $HDR_DIR)
    {
        # Main overview
        $base_dir = "";
        $view = $overview_title;
    }
    elsif ($type == $HDR_FILE)
    {
        # Directory overview
        $base_dir = get_relative_base_path($rel_filename);
        $view = "<a href=\"$base_dir"."index.$html_ext\">".
            "$overview_title</a> - $trunc_name";
    }
    elsif ($type == $HDR_SOURCE || $type == $HDR_FUNC)
    {
        # File view
        my $dir_name = dirname($rel_filename);

        $base_dir = get_relative_base_path($dir_name);
        if ($frames)
        {
            # Need to break frameset when clicking any of these
            # links
            $view = "<a href=\"$base_dir"."index.$html_ext\" ".
                "target=\"_parent\">$overview_title</a> - ".
                "<a href=\"index.$html_ext\" target=\"_parent\">".
                "$dir_name</a> - $base_name";
        }
        else
        {
            $view = "<a href=\"$base_dir"."index.$html_ext\">".
                "$overview_title</a> - ".
                "<a href=\"index.$html_ext\">".
                "$dir_name</a> - $base_name";
        }

        # Add function suffix: cross-links between the source view and
        # the function table view of the same file
        if ($func_coverage) {
            $view .= "<span style=\"font-size: 80%;\">";
            if ($type == $HDR_SOURCE) {
                $view .= " (source / <a href=\"$base_name.func.$html_ext\">functions</a>)";
            } elsif ($type == $HDR_FUNC) {
                $view .= " (<a href=\"$base_name.gcov.$html_ext\">source</a> / functions)";
            }
            $view .= "</span>";
        }
    }
    elsif ($type == $HDR_TESTDESC)
    {
        # Test description header
        $base_dir = "";
        $view = "<a href=\"$base_dir"."index.$html_ext\">".
            "$overview_title</a> - test case descriptions";
    }

    # Prepare text for "test" field
    $test = escape_html($test_title);

    # Append link to test description page if available
    if (%test_description && ($type != $HDR_TESTDESC))
    {
        if ($frames && ($type == $HDR_SOURCE || $type == $HDR_FUNC))
        {
            # Need to break frameset when clicking this link
            $test .= " ( <span style=\"font-size:80%;\">".
                 "<a href=\"$base_dir".
                 "descriptions.$html_ext\" target=\"_parent\">".
                 "view descriptions</a></span> )";
        }
        else
        {
            $test .= " ( <span style=\"font-size:80%;\">".
                 "<a href=\"$base_dir".
                 "descriptions.$html_ext\">".
                 "view descriptions</a></span> )";
        }
    }

    # Write header
    write_header_prolog(*HTML_HANDLE, $base_dir);

    # Left column: current view / test title / date
    push(@row_left, [[ "10%", "headerItem", "Current view:" ],
             [ "35%", "headerValue", $view ]]);
    push(@row_left, [[undef, "headerItem", "Test:"],
             [undef, "headerValue", $test]]);
    push(@row_left, [[undef, "headerItem", "Date:"],
             [undef, "headerValue", $date]]);

    # Optional legend row (added to the LEFT column, despite preceding
    # the right-column setup below)
    if ($legend && ($type == $HDR_SOURCE || $type == $HDR_FUNC)) {
        my $text = <<END_OF_HTML;
Lines:
<span class="coverLegendCov">hit</span>
<span class="coverLegendNoCov">not hit</span>
END_OF_HTML
        if ($br_coverage) {
            $text .= <<END_OF_HTML;
| Branches:
<span class="coverLegendCov">+</span> taken
<span class="coverLegendNoCov">-</span> not taken
<span class="coverLegendNoCov">#</span> not executed
END_OF_HTML
        }
        push(@row_left, [[undef, "headerItem", "Legend:"],
                 [undef, "headerValueLeg", $text]]);
    } elsif ($legend && ($type != $HDR_TESTDESC)) {
        my $text = <<END_OF_HTML;
Rating:
<span class="coverLegendCovLo" title="Coverage rates below $med_limit % are classified as low">low: < $med_limit %</span>
<span class="coverLegendCovMed" title="Coverage rates between $med_limit % and $hi_limit % are classified as medium">medium: >= $med_limit %</span>
<span class="coverLegendCovHi" title="Coverage rates of $hi_limit % and more are classified as high">high: >= $hi_limit %</span>
END_OF_HTML
        push(@row_left, [[undef, "headerItem", "Legend:"],
                 [undef, "headerValueLeg", $text]]);
    }

    # Right column: coverage summary table heading
    if ($type == $HDR_TESTDESC) {
        push(@row_right, [[ "55%" ]]);
    } else {
        push(@row_right, [["15%", undef, undef ],
                  ["10%", "headerCovTableHead", "Hit" ],
                  ["10%", "headerCovTableHead", "Total" ],
                  ["15%", "headerCovTableHead", "Coverage"]]);
    }
    # Line coverage
    $style = $rate_name[classify_rate($lines_found, $lines_hit,
                      $med_limit, $hi_limit)];
    $rate = format_rate($lines_found, $lines_hit);
    push(@row_right, [[undef, "headerItem", "Lines:"],
              [undef, "headerCovTableEntry", $lines_hit],
              [undef, "headerCovTableEntry", $lines_found],
              [undef, "headerCovTableEntry$style", $rate]])
            if ($type != $HDR_TESTDESC);
    # Function coverage
    if ($func_coverage) {
        $style = $rate_name[classify_rate($fn_found, $fn_hit,
                          $fn_med_limit, $fn_hi_limit)];
        $rate = format_rate($fn_found, $fn_hit);
        push(@row_right, [[undef, "headerItem", "Functions:"],
                  [undef, "headerCovTableEntry", $fn_hit],
                  [undef, "headerCovTableEntry", $fn_found],
                  [undef, "headerCovTableEntry$style", $rate]])
                if ($type != $HDR_TESTDESC);
    }
    # Branch coverage
    if ($br_coverage) {
        $style = $rate_name[classify_rate($br_found, $br_hit,
                          $br_med_limit, $br_hi_limit)];
        $rate = format_rate($br_found, $br_hit);
        push(@row_right, [[undef, "headerItem", "Branches:"],
                  [undef, "headerCovTableEntry", $br_hit],
                  [undef, "headerCovTableEntry", $br_found],
                  [undef, "headerCovTableEntry$style", $rate]])
                if ($type != $HDR_TESTDESC);
    }

    # Print rows; the shorter column list is padded with empty cells
    $num_rows = max(scalar(@row_left), scalar(@row_right));
    for ($i = 0; $i < $num_rows; $i++) {
        my $left = $row_left[$i];
        my $right = $row_right[$i];

        if (!defined($left)) {
            $left = [[undef, undef, undef], [undef, undef, undef]];
        }
        if (!defined($right)) {
            $right = [];
        }
        write_header_line(*HTML_HANDLE, @{$left},
                  [ $i == 0 ? "5%" : undef, undef, undef],
                  @{$right});
    }

    # Fourth line
    write_header_epilog(*HTML_HANDLE, $base_dir);
}
#
# get_sorted_keys(hash_ref, sort_type)
#
# Return the keys of HASH_REF ordered according to SORT_TYPE: by key
# name, or numerically by the line/function/branch coverage rate stored
# at index 7/8/9 of each hash value.
#
sub get_sorted_keys($$)
{
    my ($hash, $type) = @_;
    my @names = keys(%{$hash});

    return sort(@names) if ($type == $SORT_FILE);
    return sort({$hash->{$a}[7] <=> $hash->{$b}[7]} @names)
        if ($type == $SORT_LINE);
    return sort({$hash->{$a}[8] <=> $hash->{$b}[8]} @names)
        if ($type == $SORT_FUNC);
    return sort({$hash->{$a}[9] <=> $hash->{$b}[9]} @names)
        if ($type == $SORT_BRANCH);
}
# get_sort_code(link, alt, base)
#
# Return the HTML snippet for a table-heading sort button.  Without a
# LINK the inactive "glass" icon is shown; otherwise the "updown" icon
# is wrapped in a hyperlink to LINK.  BASE is prepended to image paths.
sub get_sort_code($$$)
{
    my ($link, $alt, $base) = @_;
    my ($png, $pre, $post);

    if (defined($link)) {
        $png = "updown.png";
        $pre = '<a href="'.$link.'">';
        $post = "</a>";
    } else {
        $png = "glass.png";
        $pre = "";
        $post = "";
    }
    return ' <span class="tableHeadSort">'.$pre.
           '<img src="'.$base.$png.'" width=10 height=14 '.
           'alt="'.$alt.'" title="'.$alt.'" border=0>'.$post.'</span>';
}
# get_file_code(type, text, sort_button, base)
#
# Return the HTML heading for the filename/directory column, optionally
# followed by a "sort by name" button linking back to the name-sorted
# index page.
sub get_file_code($$$$)
{
    my ($type, $text, $sort_button, $base) = @_;
    my $link;

    if ($sort_button) {
        $link = ($type == $HEAD_NO_DETAIL) ? "index.$html_ext"
                           : "index-detail.$html_ext";
    }
    return $text.get_sort_code($link, "Sort by name", $base);
}
# get_line_code(type, sort_type, text, sort_button, base)
#
# Return the HTML heading for the line coverage column, including the
# show/hide-details link where applicable and an optional sort button.
sub get_line_code($$$$$)
{
    my ($type, $sort_type, $text, $sort_button, $base) = @_;
    my $result = $text;
    my $sort_link;

    if ($type == $HEAD_DETAIL_HIDDEN) {
        # Heading plus a link that reveals per-test details
        $result .= ' ( <a class="detail" href="index-detail'.
               $fileview_sortname[$sort_type].'.'.$html_ext.
               '">show details</a> )';
        $sort_link = "index-sort-l.$html_ext" if ($sort_button);
    } elsif ($type == $HEAD_NO_DETAIL) {
        # Plain heading
        $sort_link = "index-sort-l.$html_ext" if ($sort_button);
    } else {
        # Details are shown; link back to the standard view
        $result .= ' ( <a class="detail" href="index'.
               $fileview_sortname[$sort_type].'.'.$html_ext.
               '">hide details</a> )';
        $sort_link = "index-detail-sort-l.$html_ext" if ($sort_button);
    }
    # Add sort button
    return $result.get_sort_code($sort_link, "Sort by line coverage",
                     $base);
}
# get_func_code(type, text, sort_button, base)
#
# Return the HTML heading for the function coverage column with an
# optional "sort by function coverage" button.
sub get_func_code($$$$)
{
    my ($type, $text, $sort_button, $base) = @_;
    my $link;

    if ($sort_button) {
        $link = ($type == $HEAD_NO_DETAIL)
            ? "index-sort-f.$html_ext"
            : "index-detail-sort-f.$html_ext";
    }
    return $text.get_sort_code($link, "Sort by function coverage", $base);
}
# get_br_code(type, text, sort_button, base)
#
# Return the HTML heading for the branch coverage column with an
# optional "sort by branch coverage" button.
sub get_br_code($$$$)
{
    my ($type, $text, $sort_button, $base) = @_;
    my $link;

    if ($sort_button) {
        $link = ($type == $HEAD_NO_DETAIL)
            ? "index-sort-b.$html_ext"
            : "index-detail-sort-b.$html_ext";
    }
    return $text.get_sort_code($link, "Sort by branch coverage", $base);
}
#
# write_file_table(filehandle, base_dir, overview, testhash, testfnchash,
# testbrhash, fileview, sort_type)
#
# Write a complete file table. OVERVIEW is a reference to a hash containing
# the following mapping:
#
# filename -> "lines_found,lines_hit,funcs_found,funcs_hit,page_link,
# func_link"
#
# TESTHASH is a reference to the following hash:
#
# filename -> \%testdata
# %testdata: name of test affecting this file -> \%testcount
# %testcount: line number -> execution count for a single test
#
# Heading of first column is "Filename" if FILEVIEW is true, "Directory name"
# otherwise.
#
# Write a complete file table to HTML_HANDLE.  See the comment preceding
# this function for the meaning of OVERVIEW and TESTHASH.
sub write_file_table(*$$$$$$$)
{
    local *HTML_HANDLE = $_[0];
    my $base_dir = $_[1];
    my $overview = $_[2];
    my $testhash = $_[3];
    my $testfnchash = $_[4];
    my $testbrhash = $_[5];
    my $fileview = $_[6];       # true: per-file table, false: per-directory
    my $sort_type = $_[7];
    my $filename;
    my $bar_graph;              # NOTE(review): unused in this body
    my $hit;
    my $found;
    my $fn_found;
    my $fn_hit;
    my $br_found;
    my $br_hit;
    my $page_link;
    my $testname;
    my $testdata;
    my $testfncdata;
    my $testbrdata;
    my %affecting_tests;
    my $line_code = "";
    my $func_code;
    my $br_code;
    my $file_code;
    my @head_columns;

    # Determine HTML code for column headings
    if (($base_dir ne "") && $show_details)
    {
        # $detailed: number of files with per-test data (0 = no details)
        my $detailed = keys(%{$testhash});

        $file_code = get_file_code($detailed ? $HEAD_DETAIL_HIDDEN :
                    $HEAD_NO_DETAIL,
                    $fileview ? "Filename" : "Directory",
                    $sort && $sort_type != $SORT_FILE,
                    $base_dir);
        $line_code = get_line_code($detailed ? $HEAD_DETAIL_SHOWN :
                    $HEAD_DETAIL_HIDDEN,
                    $sort_type,
                    "Line Coverage",
                    $sort && $sort_type != $SORT_LINE,
                    $base_dir);
        $func_code = get_func_code($detailed ? $HEAD_DETAIL_HIDDEN :
                    $HEAD_NO_DETAIL,
                    "Functions",
                    $sort && $sort_type != $SORT_FUNC,
                    $base_dir);
        $br_code = get_br_code($detailed ? $HEAD_DETAIL_HIDDEN :
                    $HEAD_NO_DETAIL,
                    "Branches",
                    $sort && $sort_type != $SORT_BRANCH,
                    $base_dir);
    } else {
        $file_code = get_file_code($HEAD_NO_DETAIL,
                    $fileview ? "Filename" : "Directory",
                    $sort && $sort_type != $SORT_FILE,
                    $base_dir);
        $line_code = get_line_code($HEAD_NO_DETAIL, $sort_type, "Line Coverage",
                    $sort && $sort_type != $SORT_LINE,
                    $base_dir);
        $func_code = get_func_code($HEAD_NO_DETAIL, "Functions",
                    $sort && $sort_type != $SORT_FUNC,
                    $base_dir);
        $br_code = get_br_code($HEAD_NO_DETAIL, "Branches",
                    $sort && $sort_type != $SORT_BRANCH,
                    $base_dir);
    }
    # Column spans: line coverage takes 3 cells, functions/branches 2
    push(@head_columns, [ $line_code, 3 ]);
    push(@head_columns, [ $func_code, 2]) if ($func_coverage);
    push(@head_columns, [ $br_code, 2]) if ($br_coverage);

    write_file_table_prolog(*HTML_HANDLE, $file_code, @head_columns);

    foreach $filename (get_sorted_keys($overview, $sort_type))
    {
        my @columns;

        ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit,
         $page_link) = @{$overview->{$filename}};
        # Line coverage (last element 1: draw the bar graph)
        push(@columns, [$found, $hit, $med_limit, $hi_limit, 1]);
        # Function coverage
        if ($func_coverage) {
            push(@columns, [$fn_found, $fn_hit, $fn_med_limit,
                    $fn_hi_limit, 0]);
        }
        # Branch coverage
        if ($br_coverage) {
            push(@columns, [$br_found, $br_hit, $br_med_limit,
                    $br_hi_limit, 0]);
        }
        write_file_table_entry(*HTML_HANDLE, $base_dir, $filename,
                       $page_link, @columns);

        $testdata = $testhash->{$filename};
        $testfncdata = $testfnchash->{$filename};
        $testbrdata = $testbrhash->{$filename};

        # Check whether we should write test specific coverage
        # as well
        if (!($show_details && $testdata)) { next; }

        # Filter out those tests that actually affect this file
        %affecting_tests = %{ get_affecting_tests($testdata,
                    $testfncdata, $testbrdata) };

        # Does any of the tests affect this file at all?
        if (!%affecting_tests) { next; }

        foreach $testname (keys(%affecting_tests))
        {
            my @results;
            ($found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) =
                split(",", $affecting_tests{$testname});

            # Insert link to description of available
            if ($test_description{$testname})
            {
                $testname = "<a href=\"$base_dir".
                        "descriptions.$html_ext#$testname\">".
                        "$testname</a>";
            }
            push(@results, [$found, $hit]);
            push(@results, [$fn_found, $fn_hit]) if ($func_coverage);
            push(@results, [$br_found, $br_hit]) if ($br_coverage);
            write_file_table_detail_entry(*HTML_HANDLE, $testname,
                              @results);
        }
    }

    write_file_table_epilog(*HTML_HANDLE);
}
#
# get_found_and_hit(hash)
#
# Return the number of entries (found) and the number of entries with an
# execution count greater than zero (hit) in a hash
# (line number -> execution count) as a list (found, hit).
#
sub get_found_and_hit($)
{
    my ($hash) = @_;
    my $found = 0;
    my $hit = 0;

    # The original copied the whole hash and zero-initialized the
    # counters twice; iterate the values directly instead.
    foreach my $count (values(%{$hash})) {
        $found++;
        $hit++ if ($count > 0);
    }
    return ($found, $hit);
}
#
# get_func_found_and_hit(sumfnccount)
#
# Return (fn_found, fn_hit) for the given hash of function name ->
# call count.
#
sub get_func_found_and_hit($)
{
    my ($sumfnccount) = @_;
    my $fn_found = scalar(keys(%{$sumfnccount}));
    my $fn_hit = scalar(grep { $_ > 0 } values(%{$sumfnccount}));

    return ($fn_found, $fn_hit);
}
#
# br_taken_to_num(taken)
#
# Convert a "branch taken" value from .info format ('-' or a count) to
# the numeric storage format (0, or count + 1).
#
sub br_taken_to_num($)
{
    my ($taken) = @_;

    return ($taken eq '-') ? 0 : $taken + 1;
}
#
# br_num_to_taken(taken)
#
# Convert a "branch taken" value from numeric storage format back to
# .info format ('-' for 0, otherwise value - 1).
#
sub br_num_to_taken($)
{
    my ($num) = @_;

    return ($num == 0) ? '-' : $num - 1;
}
#
# br_taken_add(taken1, taken2)
#
# Return the sum of two 'branch taken' values.  An undefined operand
# yields the other operand; '-' (not executed) yields the other operand
# as well.  The guard order matters for mixed undef/'-' inputs.
#
sub br_taken_add($$)
{
    my ($t1, $t2) = @_;

    return $t1 if (!defined($t2));
    return $t2 if (!defined($t1));
    return $t1 if ($t2 eq '-');
    return $t2 if ($t1 eq '-');
    return $t1 + $t2;
}
#
# br_taken_sub(taken1, taken2)
#
# Return taken1 - taken2 for 'branch taken' values, clamped at zero.
# An undefined subtrahend leaves taken1 unchanged; an undefined minuend
# yields undef; '-' operands leave taken1 unchanged.
#
sub br_taken_sub($$)
{
    my ($minuend, $subtrahend) = @_;

    return $minuend if (!defined($subtrahend));
    return undef if (!defined($minuend));
    return $minuend if ($minuend eq '-');
    return $minuend if ($subtrahend eq '-');
    # Never produce a negative taken count
    return ($subtrahend > $minuend) ? 0 : $minuend - $subtrahend;
}
#
# br_ivec_len(vector)
#
# Return the number of branch entries stored in VECTOR (0 for undef).
#
sub br_ivec_len($)
{
    my ($vec) = @_;

    return 0 unless (defined($vec));
    # total bits / bits per field / fields per entry
    return length($vec) * 8 / $BR_VEC_WIDTH / $BR_VEC_ENTRIES;
}
#
# br_ivec_get(vector, number)
#
# Return entry NUMBER of the branch coverage vector as the list
# (block, branch, taken), with taken converted back to .info format.
#
sub br_ivec_get($$)
{
    my ($vec, $num) = @_;
    my $base = $num * $BR_VEC_ENTRIES;

    # Extract the three fields stored for this entry
    my $block = vec($vec, $base + $BR_BLOCK, $BR_VEC_WIDTH);
    my $branch = vec($vec, $base + $BR_BRANCH, $BR_VEC_WIDTH);
    my $taken = br_num_to_taken(vec($vec, $base + $BR_TAKEN,
                    $BR_VEC_WIDTH));

    return ($block, $branch, $taken);
}
#
# br_ivec_push(vector, block, branch, taken)
#
# Add an entry to the branch coverage vector. If an entry with the same
# block/branch ID already exists, add the corresponding taken values.
# Returns the (possibly newly allocated) vector.
#
sub br_ivec_push($$$$)
{
    my ($vec, $block, $branch, $taken) = @_;
    my $offset;
    my $num = br_ivec_len($vec);
    my $i;

    $vec = "" if (!defined($vec));

    # Check if branch already exists in vector
    for ($i = 0; $i < $num; $i++) {
        my ($v_block, $v_branch, $v_taken) = br_ivec_get($vec, $i);

        next if ($v_block != $block || $v_branch != $branch);

        # Add taken counts
        $taken = br_taken_add($taken, $v_taken);
        last;
    }

    # After the loop, $i is either the index of the matching entry or
    # $num, i.e. the append position at the end of the vector
    $offset = $i * $BR_VEC_ENTRIES;
    $taken = br_taken_to_num($taken);

    # Add to vector (or overwrite the matching entry in place)
    vec($vec, $offset + $BR_BLOCK, $BR_VEC_WIDTH) = $block;
    vec($vec, $offset + $BR_BRANCH, $BR_VEC_WIDTH) = $branch;
    vec($vec, $offset + $BR_TAKEN, $BR_VEC_WIDTH) = $taken;

    return $vec;
}
#
# get_br_found_and_hit(sumbrcount)
#
# Return (br_found, br_hit) for the given per-line branch coverage data.
#
sub get_br_found_and_hit($)
{
    my ($sumbrcount) = @_;
    my $found = 0;
    my $hit = 0;

    foreach my $line (keys(%{$sumbrcount})) {
        my $brdata = $sumbrcount->{$line};
        my $entries = br_ivec_len($brdata);

        for (my $i = 0; $i < $entries; $i++) {
            my (undef, undef, $taken) = br_ivec_get($brdata, $i);

            $found++;
            # '-' marks a branch that was never executed
            $hit++ if ($taken ne "-" && $taken > 0);
        }
    }
    return ($found, $hit);
}
#
# get_affecting_tests(testdata, testfncdata, testbrdata)
#
# TESTDATA maps test name -> (line number -> execution count) for one
# file; TESTFNCDATA and TESTBRDATA hold the corresponding per-test
# function and branch data.  Return a reference to a hash mapping
# test name -> "lines found, lines hit, functions found, functions hit,
# branches found, branches hit" for every test that hit at least one
# line in this file.
#
sub get_affecting_tests($$$)
{
    my ($testdata, $testfncdata, $testbrdata) = @_;
    my $testname;
    my $testcount;
    my $testfnccount;
    my $testbrcount;
    my %result;
    my $found;
    my $hit;
    my $fn_found;
    my $fn_hit;
    my $br_found;
    my $br_hit;

    foreach $testname (keys(%{$testdata}))
    {
        # Get (line number -> count) hash for this test case
        $testcount = $testdata->{$testname};
        $testfnccount = $testfncdata->{$testname};
        $testbrcount = $testbrdata->{$testname};

        # Calculate sum
        ($found, $hit) = get_found_and_hit($testcount);
        ($fn_found, $fn_hit) = get_func_found_and_hit($testfnccount);
        ($br_found, $br_hit) = get_br_found_and_hit($testbrcount);

        # Only tests that hit at least one line count as "affecting"
        if ($hit>0)
        {
            $result{$testname} = "$found,$hit,$fn_found,$fn_hit,".
                         "$br_found,$br_hit";
        }
    }

    return(\%result);
}
# get_hash_reverse(hash_ref)
#
# Return a reference to a hash mapping the values of HASH_REF back to
# their keys.  Duplicate values keep an arbitrary one of their keys.
sub get_hash_reverse($)
{
    my ($hash) = @_;
    my %reversed;

    while (my ($key, $value) = each(%{$hash})) {
        $reversed{$value} = $key;
    }
    return \%reversed;
}
#
# write_source(filehandle, source_filename, count_data, checksum_data,
#              converted_data, func_data, sumbrcount)
#
# Write an HTML view of source file SOURCE_FILENAME to FILEHANDLE,
# annotating each line with execution count, conversion status and
# branch data.  Returns the per-line result list needed by gen_png().
#
# Die on error (unreadable source file or checksum mismatch).
#
sub write_source($$$$$$$)
{
    local *HTML_HANDLE = $_[0];
    local *SOURCE_HANDLE;
    my $source_filename = $_[1];
    my %count_data;
    my $line_number;
    my @result;
    my $checkdata = $_[3];
    my $converted = $_[4];
    my $funcdata = $_[5];
    my $sumbrcount = $_[6];
    # Reverse map: start line (plus $func_offset) -> function name
    my $datafunc = get_hash_reverse($funcdata);
    my $add_anchor;

    if ($_[2])
    {
        %count_data = %{$_[2]};
    }

    open(SOURCE_HANDLE, "<".$source_filename)
        or die("ERROR: cannot open $source_filename for reading!\n");

    write_source_prolog(*HTML_HANDLE);

    for ($line_number = 1; <SOURCE_HANDLE> ; $line_number++)
    {
        chomp($_);

        # Also remove CR from line-end
        s/\015$//;

        # Source code matches coverage data?
        if (defined($checkdata->{$line_number}) &&
            ($checkdata->{$line_number} ne md5_base64($_)))
        {
            die("ERROR: checksum mismatch at $source_filename:".
                "$line_number\n");
        }

        # Emit a navigation anchor every $nav_resolution lines when
        # frames are enabled
        $add_anchor = 0;
        if ($frames) {
            if (($line_number - 1) % $nav_resolution == 0) {
                $add_anchor = 1;
            }
        }
        # Also anchor the first line and each function start so the
        # function table can link here
        if ($func_coverage) {
            if ($line_number == 1) {
                $add_anchor = 1;
            } elsif (defined($datafunc->{$line_number +
                         $func_offset})) {
                $add_anchor = 1;
            }
        }
        # NOTE(review): bareword HTML_HANDLE (without *) differs from
        # the glob syntax used elsewhere but names the same filehandle
        push (@result,
              write_source_line(HTML_HANDLE, $line_number,
                    $_, $count_data{$line_number},
                    $converted->{$line_number},
                    $sumbrcount->{$line_number}, $add_anchor));
    }

    close(SOURCE_HANDLE);
    write_source_epilog(*HTML_HANDLE);
    return(@result);
}
# funcview_get_func_code(name, base, type)
#
# Return the "Function Name" column heading for the function view; a
# sort-by-name link is added when sorting is enabled and the view is
# currently sorted by hit count (type 1).
sub funcview_get_func_code($$$)
{
    my ($name, $base, $type) = @_;
    my $link;

    $link = "$name.func.$html_ext" if ($sort && $type == 1);
    return "Function Name".
           get_sort_code($link, "Sort by function name", $base);
}
# funcview_get_count_code(name, base, type)
#
# Return the "Hit count" column heading for the function view; a
# sort-by-count link is added when sorting is enabled and the view is
# currently sorted by name (type 0).
sub funcview_get_count_code($$$)
{
    my ($name, $base, $type) = @_;
    my $link;

    $link = "$name.func-sort-c.$html_ext" if ($sort && $type == 0);
    return "Hit count".
           get_sort_code($link, "Sort by hit count", $base);
}
#
# funcview_get_sorted(funcdata, sumfncdata, sort_type)
#
# Return the list of function names sorted by name (type 0) or by
# descending call count (any other type).
#
sub funcview_get_sorted($$$)
{
    my ($funcdata, $sumfncdata, $type) = @_;

    return sort(keys(%{$funcdata})) if ($type == 0);
    return sort({$sumfncdata->{$b} <=> $sumfncdata->{$a}}
            keys(%{$sumfncdata}));
}
#
# write_function_table(filehandle, source_file, sumcount, funcdata,
# sumfnccount, testfncdata, sumbrcount, testbrdata,
# base_name, base_dir, sort_type)
#
# Write an HTML table listing all functions in a source file, including
# also function call counts and line coverages inside of each function.
#
# Die on error.
#
# Write an HTML table of all functions in SOURCE with per-function hit
# counts.  See the comment preceding this function for the parameters.
sub write_function_table(*$$$$$$$$$$)
{
    local *HTML_HANDLE = $_[0];
    my $source = $_[1];
    my $sumcount = $_[2];
    my $funcdata = $_[3];
    my $sumfncdata = $_[4];
    my $testfncdata = $_[5];
    my $sumbrcount = $_[6];
    my $testbrdata = $_[7];
    my $name = $_[8];
    my $base = $_[9];
    my $type = $_[10];          # sort type: 0 by name, 1 by hit count
    my $func;
    my $func_code;
    my $count_code;

    # Get HTML code for headings
    $func_code = funcview_get_func_code($name, $base, $type);
    $count_code = funcview_get_count_code($name, $base, $type);
    write_html(*HTML_HANDLE, <<END_OF_HTML)
<center>
<table width="60%" cellpadding=1 cellspacing=1 border=0>
<tr><td><br></td></tr>
<tr>
<td width="80%" class="tableHead">$func_code</td>
<td width="20%" class="tableHead">$count_code</td>
</tr>
END_OF_HTML
    ;

    # Get a sorted table
    foreach $func (funcview_get_sorted($funcdata, $sumfncdata, $type)) {
        if (!defined($funcdata->{$func}))
        {
            next;
        }

        my $startline = $funcdata->{$func} - $func_offset;
        my $name = $func;
        my $count = $sumfncdata->{$name};
        my $countstyle;

        # Demangle C++ function names if requested
        # NOTE(review): $name is interpolated into a shell command
        # here; mangled identifiers are normally shell-safe, but
        # verify this is never fed untrusted data
        if ($demangle_cpp) {
            $name = `c++filt "$name"`;
            chomp($name);
        }
        # Escape any remaining special characters
        $name = escape_html($name);
        # Clamp anchor target to the first line
        if ($startline < 1) {
            $startline = 1;
        }
        if ($count == 0) {
            $countstyle = "coverFnLo";
        } else {
            $countstyle = "coverFnHi";
        }

        write_html(*HTML_HANDLE, <<END_OF_HTML)
<tr>
<td class="coverFn"><a href="$source#$startline">$name</a></td>
<td class="$countstyle">$count</td>
</tr>
END_OF_HTML
        ;
    }
    write_html(*HTML_HANDLE, <<END_OF_HTML)
</table>
<br>
</center>
END_OF_HTML
    ;
}
#
# info(printf_parameter)
#
# Forward PRINTF_PARAMETER to printf() on stdout unless the global
# $quiet flag is set.
#
sub info(@)
{
    return if ($quiet);
    # Print info string
    printf(@_);
}
#
# subtract_counts(data_ref, base_ref)
#
# Subtract the per-line execution counts in BASE_REF from those in
# DATA_REF, clamping results at zero.  Return (new_data_ref, found, hit).
#
sub subtract_counts($$)
{
    my ($data_ref, $base_ref) = @_;
    my %result = %{$data_ref};
    my $found = 0;
    my $hit = 0;

    foreach my $line (keys(%result)) {
        my $count = $result{$line};

        $found++;
        if (defined($base_ref->{$line})) {
            $count -= $base_ref->{$line};
            # Make sure we don't get negative numbers
            $count = 0 if ($count < 0);
            $result{$line} = $count;
        }
        $hit++ if ($count > 0);
    }
    return (\%result, $found, $hit);
}
#
# subtract_fnccounts(data, base)
#
# Subtract the function call counts in BASE from those in DATA, clamping
# results at zero.  Either argument may be undef.
# Return (new_data_ref, fn_found, fn_hit).
#
sub subtract_fnccounts($$)
{
    my ($data_ref, $base_ref) = @_;
    my %result;
    my $fn_found = 0;
    my $fn_hit = 0;

    %result = %{$data_ref} if (defined($data_ref));
    foreach my $func (keys(%result)) {
        my $count = $result{$func};

        $fn_found++;
        if (defined($base_ref) && defined($base_ref->{$func})) {
            $count -= $base_ref->{$func};
            # Make sure we don't get negative numbers
            $count = 0 if ($count < 0);
            $result{$func} = $count;
        }
        $fn_hit++ if ($count > 0);
    }
    return (\%result, $fn_found, $fn_hit);
}
#
# apply_baseline(data_ref, baseline_ref)
#
# Subtract the execution counts found in the baseline hash referenced by
# BASELINE_REF from actual data in DATA_REF.
#
# Subtract the baseline coverage in BASE_HASH (filename -> info entry)
# from the data in DATA_HASH and return a reference to the adjusted
# copy.  Files without a baseline entry are left untouched.
sub apply_baseline($$)
{
    my %data_hash = %{$_[0]};
    my %base_hash = %{$_[1]};
    my $filename;
    my $testname;
    my $data;
    my $data_testdata;
    my $data_funcdata;
    my $data_checkdata;
    my $data_testfncdata;
    my $data_testbrdata;
    my $data_count;
    my $data_testfnccount;
    my $data_testbrcount;
    my $base;
    my $base_checkdata;
    my $base_sumfnccount;
    my $base_sumbrcount;
    my $base_count;
    my $sumcount;
    my $sumfnccount;
    my $sumbrcount;
    my $found;
    my $hit;
    my $fn_found;
    my $fn_hit;
    my $br_found;
    my $br_hit;

    foreach $filename (keys(%data_hash))
    {
        # Get data set for data and baseline
        $data = $data_hash{$filename};
        $base = $base_hash{$filename};

        # Skip data entries for which no base entry exists
        if (!defined($base))
        {
            next;
        }

        # Get set entries for data and baseline
        ($data_testdata, undef, $data_funcdata, $data_checkdata,
         $data_testfncdata, undef, $data_testbrdata) =
            get_info_entry($data);
        (undef, $base_count, undef, $base_checkdata, undef,
         $base_sumfnccount, undef, $base_sumbrcount) =
            get_info_entry($base);

        # Check for compatible checksums
        merge_checksums($data_checkdata, $base_checkdata, $filename);

        # sumcount has to be calculated anew
        $sumcount = {};
        $sumfnccount = {};
        $sumbrcount = {};

        # For each test case, subtract test specific counts
        foreach $testname (keys(%{$data_testdata}))
        {
            # Get counts of both data and baseline
            $data_count = $data_testdata->{$testname};
            $data_testfnccount = $data_testfncdata->{$testname};
            $data_testbrcount = $data_testbrdata->{$testname};

            ($data_count, undef, $hit) =
                subtract_counts($data_count, $base_count);
            ($data_testfnccount) =
                subtract_fnccounts($data_testfnccount,
                           $base_sumfnccount);
            ($data_testbrcount) =
                combine_brcount($data_testbrcount,
                        $base_sumbrcount, $BR_SUB);

            # Check whether this test case did hit any line at all
            if ($hit > 0)
            {
                # Write back resulting hash
                $data_testdata->{$testname} = $data_count;
                $data_testfncdata->{$testname} =
                    $data_testfnccount;
                $data_testbrdata->{$testname} =
                    $data_testbrcount;
            }
            else
            {
                # Delete test case which did not impact this
                # file
                delete($data_testdata->{$testname});
                delete($data_testfncdata->{$testname});
                delete($data_testbrdata->{$testname});
            }

            # Add counts to sum of counts
            ($sumcount, $found, $hit) =
                add_counts($sumcount, $data_count);
            ($sumfnccount, $fn_found, $fn_hit) =
                add_fnccount($sumfnccount, $data_testfnccount);
            ($sumbrcount, $br_found, $br_hit) =
                combine_brcount($sumbrcount, $data_testbrcount,
                        $BR_ADD);
        }

        # Write back resulting entry
        # NOTE(review): $found/$hit etc. hold the totals of the last
        # loop iteration; if the test loop did not run they keep stale
        # values from a previous file -- presumably never hit in
        # practice, verify against callers
        set_info_entry($data, $data_testdata, $sumcount, $data_funcdata,
                   $data_checkdata, $data_testfncdata, $sumfnccount,
                   $data_testbrdata, $sumbrcount, $found, $hit,
                   $fn_found, $fn_hit, $br_found, $br_hit);

        $data_hash{$filename} = $data;
    }

    return (\%data_hash);
}
#
# remove_unused_descriptions()
#
# Drop entries from the global %test_description hash that do not
# correspond to any test name present in the global %info_data.
#
sub remove_unused_descriptions()
{
    my %known_tests;
    my $before = scalar(keys(%test_description));

    # Collect the names of all tests that contributed coverage data
    foreach my $filename (keys(%info_data))
    {
        my ($test_data) = get_info_entry($info_data{$filename});

        $known_tests{$_} = 1 foreach (keys(%{$test_data}));
    }
    # Prune descriptions for tests outside the collected set
    foreach my $name (keys(%test_description))
    {
        delete($test_description{$name})
            if (!exists($known_tests{$name}));
    }
    my $after = scalar(keys(%test_description));
    if ($after < $before)
    {
        info("Removed ".($before - $after).
             " unused descriptions, $after remaining.\n");
    }
}
#
# apply_prefix(filename, prefix)
#
# Strip PREFIX plus the following slash from the start of FILENAME.
# Return FILENAME unchanged when PREFIX is unset, empty or does not
# match.
#
sub apply_prefix($$)
{
    my ($filename, $prefix) = @_;

    if (defined($prefix) && ($prefix ne "") &&
        $filename =~ /^\Q$prefix\E\/(.*)$/)
    {
        return $1;
    }
    return $filename;
}
#
# system_no_output(mode, parameters)
#
# Call an external program using PARAMETERS while suppressing depending on
# the value of MODE:
#
# MODE & 1: suppress STDOUT
# MODE & 2: suppress STDERR
#
# Return 0 on success, non-zero otherwise.
#
# Run an external command (see the comment preceding this function for
# the meaning of MODE) while optionally silencing stdout/stderr.
sub system_no_output($@)
{
    my $mode = shift;
    my $result;
    local *OLD_STDERR;
    local *OLD_STDOUT;

    # Save old stdout and stderr handles
    ($mode & 1) && open(OLD_STDOUT, ">>&STDOUT");
    ($mode & 2) && open(OLD_STDERR, ">>&STDERR");

    # Redirect to /dev/null
    ($mode & 1) && open(STDOUT, ">/dev/null");
    ($mode & 2) && open(STDERR, ">/dev/null");

    system(@_);
    # Raw wait() status of the child ($?), not just the exit code
    $result = $?;

    # Close redirected handles
    ($mode & 1) && close(STDOUT);
    ($mode & 2) && close(STDERR);

    # Restore old handles
    ($mode & 1) && open(STDOUT, ">>&OLD_STDOUT");
    ($mode & 2) && open(STDERR, ">>&OLD_STDERR");
    # NOTE(review): the OLD_* duplicates are left open here; they are
    # local globs and never explicitly closed

    return $result;
}
#
# read_config(filename)
#
# Parse configuration file FILENAME consisting of "key = value" lines.
# Comments (#) and surrounding whitespace are ignored.  Return a hash
# reference of all valid pairs, or undef if the file is unreadable.
#
sub read_config($)
{
    my ($filename) = @_;
    my %result;
    local *HANDLE;

    if (!open(HANDLE, "<$filename"))
    {
        warn("WARNING: cannot read configuration file $filename\n");
        return undef;
    }
    while (my $line = <HANDLE>)
    {
        chomp($line);
        $line =~ s/#.*//;       # strip comments
        $line =~ s/^\s+//;      # strip leading blanks
        $line =~ s/\s+$//;      # strip trailing blanks
        next unless length($line);
        my ($key, $value) = split(/\s*=\s*/, $line, 2);
        if (defined($key) && defined($value))
        {
            $result{$key} = $value;
        }
        else
        {
            warn("WARNING: malformed statement in line $. ".
                 "of configuration file $filename\n");
        }
    }
    close(HANDLE);
    return \%result;
}
#
# apply_config(REF)
#
# REF maps keyword strings to references of variables.  For every
# keyword which has a value in the global configuration hash $config,
# assign that value to the referenced variable.
#
sub apply_config($)
{
    my ($ref) = @_;

    foreach my $keyword (keys(%{$ref}))
    {
        ${$ref->{$keyword}} = $config->{$keyword}
            if (defined($config->{$keyword}));
    }
}
#
# get_html_prolog(FILENAME)
#
# Return the contents of FILENAME when defined, otherwise the built-in
# default HTML prolog.  Die when the file cannot be opened.
#
sub get_html_prolog($)
{
    my ($filename) = @_;

    if (defined($filename))
    {
        local *HANDLE;
        local $/;       # slurp mode

        open(HANDLE, "<".$filename)
            or die("ERROR: cannot open html prolog $filename!\n");
        my $content = <HANDLE>;
        close(HANDLE);
        $content = "" if (!defined($content));
        return $content;
    }
    return <<END_OF_HTML
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>\@pagetitle\@</title>
<link rel="stylesheet" type="text/css" href="\@basedir\@gcov.css">
</head>
<body>
END_OF_HTML
;
}
#
# get_html_epilog(FILENAME)
#
# Return the contents of FILENAME when defined, otherwise the built-in
# default HTML epilog.  Die when the file cannot be opened.
#
sub get_html_epilog($)
{
    my ($filename) = @_;

    if (defined($filename))
    {
        local *HANDLE;
        local $/;       # slurp mode

        open(HANDLE, "<".$filename)
            or die("ERROR: cannot open html epilog $filename!\n");
        my $content = <HANDLE>;
        close(HANDLE);
        $content = "" if (!defined($content));
        return $content;
    }
    return <<END_OF_HTML
</body>
</html>
END_OF_HTML
;
}
# Warn handler: prefix all warnings with the tool name.
sub warn_handler($)
{
    my ($msg) = @_;

    warn($tool_name.": ".$msg);
}
# Die handler: prefix all fatal messages with the tool name.
sub die_handler($)
{
    my ($msg) = @_;

    die($tool_name.": ".$msg);
}
| zy901002-gpsr | utils/lcov/genhtml | Perl | gpl2 | 133,465 |
/**
* \ingroup utils
* \defgroup CheckStyle check-style.py
*
* The check-style.py script will test and reformat code according to the
* ns-3 coding style posted at http://www.nsnam.org/codingstyle.html
* It requires that you install 'uncrustify'
*
* It has multiple levels of conformance:
* - level=0: the default: merely checks indentation
* - level=1: checks also for missing spaces before parentheses
* - level=2: checks also for missing newlines and braces around single-line statements
* - level=3: checks also for missing trailing whitespaces
*
* Examples:
*
* check a single file (level 0 by default):
\verbatim
./check-style.py -f src/core/object.h
\endverbatim
*
* fix the style of a single file:
\verbatim
./check-style.py --level=2 --in-place -f src/core/object.h
\endverbatim
*
* look at the changes needed for a single file:
\verbatim
./check-style.py --diff --level=1 -f src/core/object.h | less
\endverbatim
*
* look at the status of all files modified in your mercurial repository:
\verbatim
./check-style.py --check-hg
\endverbatim
*
* look at the changes needed for all modified files in your mercurial
* repository:
\verbatim
./check-style.py --check-hg --diff |less
\endverbatim
*
* Enable this script to run as a 'commit' hook in your repository and
* disallow commits which contain files with invalid style:
*
\verbatim
cat hgrc (can be appended to .hg/hgrc or ~/.hg/hgrc or /etc/hg/hgrc
[hooks]
# uncomment below line to enable: works only with mercurial >= 1.3
#pretxncommit.indent = path-to-binary/check-indent.py --check-hg-hook
# uncomment below line to enable: works with all (?) versions
# of mercurial but requires that PYTHONPATH is defined to point to
# the directory which contains check-indent.py
#pretxncommit.indent = python:check-indent.run_as_hg_hook
\endverbatim
*
* Usage:
\verbatim
Usage: check-style.py [options]
Options:
-h, --help show this help message and exit
--debug Output some debugging information
-l LEVEL, --level=LEVEL
Level of style conformance: higher levels include all
lower levels. level=0: re-indent only. level=1: add
extra spaces. level=2: insert extra newlines and extra
braces around single-line statements. level=3: remove
all trailing spaces
--check-hg-hook Get the list of files to check from mercurial's list
of modified and added files and assume that the script
runs as a pretxncommit mercurial hook
--check-hg Get the list of files to check from mercurial's list
of modified and added files
-f FILE, --check-file=FILE
Check a single file
--diff Generate a diff on stdout of the indented files
-i, --in-place Indent the input files in-place
\endverbatim
*/
| zy901002-gpsr | utils/utils.h | C | gpl2 | 2,975 |
#!/usr/bin/env python
import os
import subprocess
import tempfile
import sys
import filecmp
import optparse
import shutil
import difflib
import re
def hg_modified_files():
    """Return the names of the files mercurial reports as modified or added."""
    # 'hg st -nma': -n bare filenames, -m modified files, -a added files
    pipe = os.popen ('hg st -nma')
    names = []
    for name in pipe:
        names.append(name.strip())
    return names
def copy_file(filename):
    """Copy *filename* into a fresh temporary file and return its path.

    The caller is responsible for deleting the temporary file.
    """
    (fd, pathname) = tempfile.mkstemp()
    # mkstemp returns an already-open OS-level fd; close it here instead of
    # leaking it (we re-open the path below with a buffered file object)
    os.close(fd)
    src = open(filename, 'r')
    dst = open(pathname, 'w')
    for line in src:
        dst.write(line)
    dst.close()
    src.close()
    return pathname
# generate a temporary configuration file
def uncrustify_config_file(level):
    """Write a temporary uncrustify configuration for the given conformance
    *level* (0 = re-indent only, 1 = extra spaces, 2 = extra newlines/braces)
    and return its path.  Higher levels include all lower levels."""
    level2 = """
nl_collapse_empty_body=False
nl_if_brace=Add
nl_brace_else=Add
nl_elseif_brace=Add
nl_else_brace=Add
nl_while_brace=Add
nl_do_brace=Add
nl_for_brace=Add
nl_brace_while=Add
nl_switch_brace=Add
nl_after_case=True
nl_namespace_brace=Remove
nl_after_brace_open=True
nl_class_leave_one_liners=False
nl_enum_leave_one_liners=False
nl_func_leave_one_liners=False
nl_if_leave_one_liners=False
nl_class_colon=Ignore
nl_after_access_spec=1
nl_after_semicolon=True
pos_class_colon=Lead
pos_class_comma=Trail
pos_bool=Lead
nl_class_init_args=Add
nl_template_class=Add
nl_class_brace=Add
# does not work very well
nl_func_type_name=Ignore
nl_func_scope_name=Ignore
nl_func_type_name_class=Ignore
nl_func_proto_type_name=Ignore
# function\\n(
nl_func_paren=Remove
nl_fdef_brace=Add
nl_struct_brace=Add
nl_enum_brace=Add
nl_union_brace=Add
mod_full_brace_do=Add
mod_full_brace_for=Add
mod_full_brace_if=Add
mod_full_brace_while=Add
mod_full_brace_for=Add
mod_remove_extra_semicolon=True
# max code width
#code_width=128
#ls_for_split_full=True
#ls_func_split_full=True
"""
    level1 = """
# extra spaces here and there
sp_brace_typedef=Add
sp_enum_assign=Add
sp_before_sparen=Add
sp_after_semi_for=Add
sp_arith=Add
sp_assign=Add
sp_compare=Add
sp_func_class_paren=Add
sp_after_type=Add
sp_type_func=Add
sp_angle_paren=Add
"""
    level0 = """
sp_func_proto_paren=Add
sp_func_def_paren=Add
sp_func_call_paren=Add
sp_after_semi_for=Ignore
sp_before_sparen=Ignore
sp_type_func=Ignore
sp_after_type=Ignore
nl_class_leave_one_liners=True
nl_enum_leave_one_liners=True
nl_func_leave_one_liners=True
nl_assign_leave_one_liners=True
#nl_collapse_empty_body=False
nl_getset_leave_one_liners=True
nl_if_leave_one_liners=True
nl_fdef_brace=Ignore
# finally, indentation configuration
indent_with_tabs=0
indent_namespace=false
indent_columns=2
indent_brace=2
indent_case_brace=2
indent_class=true
indent_class_colon=True
# alignment
indent_align_assign=False
align_left_shift=True
# comment reformating disabled
cmt_reflow_mode=1 # do not touch comments at all
cmt_indent_multi=False # really, do not touch them
"""
    (fd, pathname) = tempfile.mkstemp()
    # wrap the fd returned by mkstemp instead of leaking it and re-opening
    dst = os.fdopen(fd, 'w')
    dst.write(level0)
    if level >= 1:
        dst.write(level1)
    if level >= 2:
        dst.write(level2)
    dst.close()
    return pathname
class PatchChunkLine:
    """One line of a unified-diff chunk: removed (SRC), added (DST), or
    context shared by both sides (BOTH)."""
    SRC = 1
    DST = 2
    BOTH = 3
    def __init__(self):
        self.__kind = 0
        self.__text = ''
    def set_src(self, line):
        self.__kind = self.SRC
        self.__text = line
    def set_dst(self, line):
        self.__kind = self.DST
        self.__text = line
    def set_both(self, line):
        self.__kind = self.BOTH
        self.__text = line
    def append_to_line(self, s):
        self.__text += s
    def line(self):
        return self.__text
    def is_src(self):
        # context lines belong to the source side too
        return self.__kind in (self.SRC, self.BOTH)
    def is_dst(self):
        # context lines belong to the destination side too
        return self.__kind in (self.DST, self.BOTH)
    def write(self, f):
        prefixes = {self.SRC: '-', self.DST: '+', self.BOTH: ' '}
        if self.__kind not in prefixes:
            raise Exception('invalid patch')
        f.write('%s%s\n' % (prefixes[self.__kind], self.__text))
class PatchChunk:
    """One '@@' chunk of a patch: an ordered list of PatchChunkLine objects
    plus the source and destination start positions."""
    def __init__(self, src_pos, dst_pos):
        self.__lines = []
        self.__src_pos = int(src_pos)
        self.__dst_pos = int(dst_pos)
    def src_start(self):
        return self.__src_pos
    def add_line(self, line):
        self.__lines.append(line)
    def src(self):
        """Lines belonging to the source side (removed + context)."""
        return [l for l in self.__lines if l.is_src()]
    def dst(self):
        """Lines belonging to the destination side (added + context)."""
        return [l for l in self.__lines if l.is_dst()]
    def src_len(self):
        return len(self.src())
    def dst_len(self):
        return len(self.dst())
    def write(self, f):
        f.write('@@ -%d,%d +%d,%d @@\n' % (self.__src_pos, self.src_len(),
                                           self.__dst_pos, self.dst_len()))
        for line in self.__lines:
            line.write(f)
class Patch:
    """A patch for a single file: source/destination names plus chunks."""
    def __init__(self):
        self.__src = ''
        self.__dst = ''
        self.__chunks = []
    def add_chunk(self, chunk):
        self.__chunks.append(chunk)
    def chunks(self):
        return self.__chunks
    def set_src(self, src):
        self.__src = src
    def set_dst(self, dst):
        self.__dst = dst
    def apply(self, filename):
        # XXX: not implemented
        return
    def write(self, f):
        header = '--- %s\n+++ %s\n' % (self.__src, self.__dst)
        f.write(header)
        for chunk in self.__chunks:
            chunk.write(f)
def parse_patchset(generator):
    """Parse unified-diff text (an iterable of lines, without trailing
    newlines mattering) into a list of Patch objects.

    Raises Exception on any line matching none of the diff patterns.
    """
    # NOTE: '---'/'+++' must be tested before the single '-'/'+' patterns
    re_src_file = re.compile(r'^--- (.*)$')
    re_dst_file = re.compile(r'^\+\+\+ (.*)$')
    re_chunk_start = re.compile(r'^@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@')
    re_removed = re.compile(r'^-(.*)$')
    re_added = re.compile(r'^\+(.*)$')
    re_context = re.compile(r'^ (.*)$')
    patchset = []
    current_patch = None
    current_chunk = None
    for line in generator:
        m = re_src_file.search(line)
        if m is not None:
            current_patch = Patch()
            patchset.append(current_patch)
            current_patch.set_src(m.group(1))
            continue
        m = re_dst_file.search(line)
        if m is not None:
            current_patch.set_dst(m.group(1))
            continue
        m = re_chunk_start.search(line)
        if m is not None:
            current_chunk = PatchChunk(m.group(1), m.group(3))
            current_patch.add_chunk(current_chunk)
            continue
        m = re_removed.search(line)
        if m is not None:
            chunk_line = PatchChunkLine()
            chunk_line.set_src(m.group(1))
            current_chunk.add_line(chunk_line)
            continue
        m = re_added.search(line)
        if m is not None:
            chunk_line = PatchChunkLine()
            chunk_line.set_dst(m.group(1))
            current_chunk.add_line(chunk_line)
            continue
        m = re_context.search(line)
        if m is not None:
            chunk_line = PatchChunkLine()
            chunk_line.set_both(m.group(1))
            current_chunk.add_line(chunk_line)
            continue
        # a line we do not understand: refuse to guess
        raise Exception()
    return patchset
def remove_trailing_whitespace_changes(patch_generator):
    """Parse a patch and copy trailing whitespace from each removed line back
    onto the matching added line, so that edits consisting only of trailing
    whitespace removal disappear from the patch.  Returns the parsed patchset.

    Chunks whose removed/added line counts differ cannot be paired
    line-by-line; when that happens we stop early and return what we have.
    """
    whitespace = re.compile('^(.*)([ \t]+)$')
    patchset = parse_patchset(patch_generator)
    for patch in patchset:
        for chunk in patch.chunks():
            src = chunk.src()
            dst = chunk.dst()
            try:
                for i in range(0,len(src)):
                    s = src[i]
                    d = dst[i]
                    m = whitespace.search(s.line())
                    if m is not None and m.group(1) == d.line():
                        # only trailing whitespace differs: restore it on
                        # the destination line so the diff line vanishes
                        d.append_to_line(m.group(2))
            except IndexError:
                # dst is shorter than src: lines cannot be paired up
                return patchset
    return patchset
def indent(source, debug, level):
    """Run uncrustify on *source* at the given conformance *level* and return
    the path of a temporary file holding the re-indented result.

    For level < 3, changes that only strip trailing whitespace are filtered
    back out of the uncrustify diff before it is applied, so trailing spaces
    are preserved.  Raises Exception when uncrustify is not installed.
    """
    # NOTE(review): tempfile.mkstemp() also returns an open fd which is
    # discarded here (and twice more below) -- those fds are leaked
    output = tempfile.mkstemp()[1]
    # apply uncrustify
    cfg = uncrustify_config_file(level)
    if debug:
        sys.stderr.write('original file=' + source + '\n')
        sys.stderr.write('uncrustify config file=' + cfg + '\n')
        sys.stderr.write('temporary file=' + output + '\n')
    try:
        uncrust = subprocess.Popen(['uncrustify', '-c', cfg, '-f', source, '-o', output],
                                   stdin = subprocess.PIPE,
                                   stdout = subprocess.PIPE,
                                   stderr = subprocess.PIPE)
        (out, err) = uncrust.communicate('')
        if debug:
            sys.stderr.write(out)
            sys.stderr.write(err)
    except OSError:
        raise Exception ('uncrustify not installed')
    # generate a diff file
    src = open(source, 'r')
    dst = open(output, 'r')
    diff = difflib.unified_diff(src.readlines(), dst.readlines(),
                                fromfile=source, tofile=output)
    src.close()
    dst.close()
    if debug:
        initial_diff = tempfile.mkstemp()[1]
        sys.stderr.write('initial diff file=' + initial_diff + '\n')
        tmp = open(initial_diff, 'w')
        tmp.writelines(diff)
        tmp.close()
    final_diff = tempfile.mkstemp()[1]
    if level < 3:
        # filter trailing-whitespace-only changes out of the diff
        patchset = remove_trailing_whitespace_changes(diff);
        dst = open(final_diff, 'w')
        if len(patchset) != 0:
            patchset[0].write(dst)
        dst.close()
    else:
        # level 3: keep the raw diff, trailing spaces get removed too
        dst = open(final_diff, 'w')
        dst.writelines(diff)
        dst.close()
    # apply diff file
    if debug:
        sys.stderr.write('final diff file=' + final_diff + '\n')
    # re-create 'output' from the original plus the filtered diff
    shutil.copyfile(source,output)
    patch = subprocess.Popen(['patch', '-p1', '-i', final_diff, output],
                             stdin = subprocess.PIPE,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE)
    (out, err) = patch.communicate('')
    if debug:
        sys.stderr.write(out)
        sys.stderr.write(err)
    return output
def indent_files(files, diff=False, debug=False, level=0, inplace=False):
    """Indent every file in *files*.

    With inplace=True the files are overwritten and True is returned.
    Otherwise returns True when all files are already correctly indented;
    on failure either lists the offending files or, with diff=True,
    prints a unified diff of the required changes to stdout.
    """
    output = []
    for f in files:
        dst = indent(f, debug=debug, level=level)
        output.append([f,dst])
    # First, copy to inplace
    if inplace:
        for src,dst in output:
            shutil.copyfile(dst,src)
        return True
    # now, compare
    failed = []
    for src,dst in output:
        # filecmp.cmp returns True when files match; '== 0' keeps the
        # files that differ
        if filecmp.cmp(src,dst) == 0:
            failed.append([src, dst])
    if len(failed) > 0:
        if not diff:
            print 'Found %u badly indented files:' % len(failed)
            for src,dst in failed:
                print '  ' + src
        else:
            for src,dst in failed:
                s = open(src, 'r').readlines()
                d = open(dst, 'r').readlines()
                for line in difflib.unified_diff(s, d, fromfile=src, tofile=dst):
                    sys.stdout.write(line)
        return False
    return True
def run_as_hg_hook(ui, repo, **kwargs):
    """pretxncommit mercurial hook entry point: return True (= refuse the
    commit) when any modified or added file is badly indented."""
    # hack to work around mercurial < 1.3 bug
    from mercurial import lock, error
    lock.LockError = error.LockError
    # actually do the work
    files = hg_modified_files()
    if not indent_files(files, inplace=False):
        return True
    return False
def run_as_main():
    """Command-line entry point: parse options and dispatch to the
    hg-hook, hg-status, or single-file checking mode."""
    parser = optparse.OptionParser()
    parser.add_option('--debug', action='store_true', dest='debug', default=False,
                      help='Output some debugging information')
    parser.add_option('-l', '--level', type='int', dest='level', default=0,
                      help="Level of style conformance: higher levels include all lower levels. "
                      "level=0: re-indent only. level=1: add extra spaces. level=2: insert extra newlines and "
                      "extra braces around single-line statements. level=3: remove all trailing spaces")
    parser.add_option('--check-hg-hook', action='store_true', dest='hg_hook', default=False,
                      help='Get the list of files to check from mercurial\'s list of modified '
                      'and added files and assume that the script runs as a pretxncommit mercurial hook')
    parser.add_option('--check-hg', action='store_true', dest='hg', default=False,
                      help="Get the list of files to check from mercurial\'s list of modified and added files")
    parser.add_option('-f', '--check-file', action='store', dest='file', default='',
                      help="Check a single file")
    parser.add_option('--diff', action='store_true', dest='diff', default=False,
                      help="Generate a diff on stdout of the indented files")
    parser.add_option('-i', '--in-place', action='store_true', dest='in_place', default=False,
                      help="Indent the input files in-place")
    (options,args) = parser.parse_args()
    debug = options.debug
    if options.hg_hook:
        # hook mode: a non-zero exit status aborts the commit
        files = hg_modified_files()
        if not indent_files(files, debug=options.debug,
                            level=options.level,
                            inplace=False):
            sys.exit(1)
    elif options.hg:
        files = hg_modified_files()
        indent_files(files, diff=options.diff,
                     debug=options.debug,
                     level=options.level,
                     inplace=options.in_place)
    elif options.file != '':
        file = options.file
        if not os.path.exists(file) or \
                not os.path.isfile(file):
            print 'file %s does not exist' % file
            sys.exit(1)
        indent_files([file], diff=options.diff,
                     debug=options.debug,
                     level=options.level,
                     inplace=options.in_place)
    sys.exit(0)
# Run directly as a script.  The exception-swallowing wrapper below was
# disabled so that full stack traces reach the user.
if __name__ == '__main__':
#    try:
        run_as_main()
#    except Exception, e:
#        sys.stderr.write(str(e) + '\n')
#        sys.exit(1)
| zy901002-gpsr | utils/check-style.py | Python | gpl2 | 13,569 |
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2006 INRIA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
*/
#include "ns3/system-wall-clock-ms.h"
#include "ns3/packet.h"
#include "ns3/packet-metadata.h"
#include <iostream>
#include <sstream>
#include <string>
#include <stdlib.h> // for exit ()
#include <string.h> // for strncmp () and strlen () used in main ()
using namespace ns3;
/**
 * Test header of fixed serialized size N, used to benchmark packet
 * header add/remove operations.  Serialize writes N bytes each holding
 * the value N; Deserialize checks that pattern (result via IsOk).
 */
template <int N>
class BenchHeader : public Header
{
public:
  BenchHeader ();
  /** \return true if Deserialize ran and every byte carried the value N */
  bool IsOk (void) const;
  static TypeId GetTypeId (void);
  virtual TypeId GetInstanceTypeId (void) const;
  virtual void Print (std::ostream &os) const;
  virtual uint32_t GetSerializedSize (void) const;
  virtual void Serialize (Buffer::Iterator start) const;
  virtual uint32_t Deserialize (Buffer::Iterator start);
private:
  /** \return the instantiated type name, e.g. "ns3::BenchHeader<8>" */
  static std::string GetTypeName (void);
  bool m_ok; // result of the last Deserialize byte-pattern check
};
// Start un-validated: m_ok becomes true only after a successful Deserialize.
template <int N>
BenchHeader<N>::BenchHeader ()
  : m_ok (false)
{}
// True when the last Deserialize found the expected byte pattern.
template <int N>
bool
BenchHeader<N>::IsOk (void) const
{
  return m_ok;
}
// Build the instantiated type name, e.g. "ns3::BenchHeader<25>".
template <int N>
std::string
BenchHeader<N>::GetTypeName (void)
{
  std::ostringstream name;
  name << "ns3::BenchHeader<";
  name << N;
  name << ">";
  return name.str ();
}
template <int N>
TypeId
BenchHeader<N>::GetTypeId (void)
{
  // one TypeId per template instantiation, keyed by the generated name
  static TypeId tid = TypeId (GetTypeName ().c_str ())
    .SetParent<Header> ()
  ;
  return tid;
}
// Required Header override: an instance reports its class TypeId.
template <int N>
TypeId
BenchHeader<N>::GetInstanceTypeId (void) const
{
  return GetTypeId ();
}
// Never expected to be called by this benchmark; abort if it is.
template <int N>
void
BenchHeader<N>::Print (std::ostream &os) const
{
  NS_ASSERT (false);
}
// The serialized size is the template parameter itself.
template <int N>
uint32_t
BenchHeader<N>::GetSerializedSize (void) const
{
  return N;
}
template <int N>
void
BenchHeader<N>::Serialize (Buffer::Iterator start) const
{
  start.WriteU8 (N, N); // write the byte value N, repeated N times
}
// Read back the N bytes written by Serialize and record in m_ok whether
// every one of them carries the expected value N.  Always consumes and
// returns N bytes.
template <int N>
uint32_t
BenchHeader<N>::Deserialize (Buffer::Iterator start)
{
  bool ok = true;
  for (int i = 0; i < N; i++)
    {
      if (start.ReadU8 () != N)
        {
          ok = false;
        }
    }
  m_ok = ok;
  return N;
}
/**
 * Test tag of fixed serialized size N, used to benchmark packet tag
 * add/remove operations.  Serialize writes N bytes of value N;
 * Deserialize just consumes them without checking.
 */
template <int N>
class BenchTag : public Tag
{
public:
  /** \return the instantiated type name, e.g. "anon::BenchTag<16>" */
  static std::string GetName (void) {
    std::ostringstream oss;
    oss << "anon::BenchTag<" << N << ">";
    return oss.str ();
  }
  static TypeId GetTypeId (void) {
    static TypeId tid = TypeId (GetName ().c_str ())
      .SetParent<Tag> ()
      .AddConstructor<BenchTag > ()
      .HideFromDocumentation ()
    ;
    return tid;
  }
  virtual TypeId GetInstanceTypeId (void) const {
    return GetTypeId ();
  }
  virtual uint32_t GetSerializedSize (void) const {
    return N;
  }
  virtual void Serialize (TagBuffer buf) const {
    for (uint32_t i = 0; i < N; ++i)
      {
        buf.WriteU8 (N);
      }
  }
  virtual void Deserialize (TagBuffer buf) {
    // consume N bytes; content is not validated
    for (uint32_t i = 0; i < N; ++i)
      {
        buf.ReadU8 ();
      }
  }
  virtual void Print (std::ostream &os) const {
    os << "N=" << N;
  }
  BenchTag ()
    : Tag () {}
};
// Benchmark D: headers plus packet tags.  Each iteration adds/removes two
// tags and two headers, including a packet copy, to exercise the
// copy-on-write tag and metadata machinery.
static void
benchD (uint32_t n)
{
  BenchHeader<25> ipv4;
  BenchHeader<8> udp;
  BenchTag<16> tag1;
  BenchTag<17> tag2;
  for (uint32_t i = 0; i < n; i++) {
      Ptr<Packet> p = Create<Packet> (2000);
      p->AddPacketTag (tag1);
      p->AddHeader (udp);
      p->RemovePacketTag (tag1);
      p->AddPacketTag (tag2);
      p->AddHeader (ipv4);
      Ptr<Packet> o = p->Copy ();
      o->RemoveHeader (ipv4);
      p->RemovePacketTag (tag2);
      o->RemoveHeader (udp);
    }
}
static void
benchA (uint32_t n)
{
BenchHeader<25> ipv4;
BenchHeader<8> udp;
for (uint32_t i = 0; i < n; i++) {
Ptr<Packet> p = Create<Packet> (2000);
p->AddHeader (udp);
p->AddHeader (ipv4);
Ptr<Packet> o = p->Copy ();
o->RemoveHeader (ipv4);
o->RemoveHeader (udp);
}
}
static void
benchB (uint32_t n)
{
BenchHeader<25> ipv4;
BenchHeader<8> udp;
for (uint32_t i = 0; i < n; i++) {
Ptr<Packet> p = Create<Packet> (2000);
p->AddHeader (udp);
p->AddHeader (ipv4);
}
}
// Helper for benchC: strip the inner (udp-sized) header.
static void
C2 (Ptr<Packet> p)
{
  BenchHeader<8> udp;
  p->RemoveHeader (udp);
}
// Helper for benchC: strip the outer (ipv4-sized) header, then the inner one.
static void
C1 (Ptr<Packet> p)
{
  BenchHeader<25> ipv4;
  p->RemoveHeader (ipv4);
  C2 (p);
}
static void
benchC (uint32_t n)
{
BenchHeader<25> ipv4;
BenchHeader<8> udp;
for (uint32_t i = 0; i < n; i++) {
Ptr<Packet> p = Create<Packet> (2000);
p->AddHeader (udp);
p->AddHeader (ipv4);
C1 (p);
}
}
// Run one benchmark function over n packets and print its throughput
// in packets per second on stdout.
static void
runBench (void (*bench) (uint32_t), uint32_t n, char const *name)
{
  SystemWallClockMs time;
  time.Start ();
  (*bench) (n);
  uint64_t deltaMs = time.End ();
  if (deltaMs == 0)
    {
      // avoid dividing by zero when the bench completes within the
      // timer resolution; ask the user for a bigger n instead
      std::cout << name << "=too fast to measure, increase --n" << std::endl;
      return;
    }
  double ps = n;
  ps *= 1000;
  ps /= deltaMs;
  std::cout << name << "=" << ps << " packets/s" << std::endl;
}
// Entry point.  Recognized arguments:
//   --n=<count>          number of packets per bench (required, > 0)
//   --enable-printing    enable packet metadata printing support
int main (int argc, char *argv[])
{
  uint32_t n = 0;
  // skip the program name so only real arguments are inspected below
  argc--;
  argv++;
  while (argc > 0) {
      if (strncmp ("--n=", argv[0],strlen ("--n=")) == 0)
        {
          char const *nAscii = argv[0] + strlen ("--n=");
          std::istringstream iss;
          iss.str (nAscii);
          iss >> n;
        }
      if (strncmp ("--enable-printing", argv[0], strlen ("--enable-printing")) == 0)
        {
          Packet::EnablePrinting ();
        }
      argc--;
      argv++;
    }
  if (n == 0)
    {
      std::cerr << "Error-- number of packets must be specified " <<
        "by command-line argument --n=(number of packets)" << std::endl;
      exit (1);
    }
  std::cout << "Running bench-packets with n=" << n << std::endl;
  runBench (&benchA, n, "a");
  runBench (&benchB, n, "b");
  runBench (&benchC, n, "c");
  runBench (&benchD, n, "d");
  return 0;
}
| zy901002-gpsr | utils/bench-packets.cc | C++ | gpl2 | 6,034 |
import unittest
from ns.core import Simulator, Seconds, Config, int64x64_t
import ns.core
import ns.network
import ns.internet
import ns.mobility
import ns.csma
class TestSimulator(unittest.TestCase):
    """Smoke tests for the ns-3 Python bindings: scheduling, time
    arithmetic, attributes, sockets, and command-line parsing.

    Uses assertTrue instead of the deprecated assert_ alias, which was
    removed in Python 3.12.
    """
    def testScheduleNow(self):
        """An event scheduled 'now' fires at t=0 with its arguments."""
        def callback(args):
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        Simulator.ScheduleNow(callback, "args")
        Simulator.Run()
        self.assertEqual(self._args_received, "args")
        self.assertEqual(self._cb_time.GetSeconds(), 0.0)
    def testSchedule(self):
        """An event scheduled at t=123s fires at that time."""
        def callback(args):
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        Simulator.Schedule(Seconds(123), callback, "args")
        Simulator.Run()
        self.assertEqual(self._args_received, "args")
        self.assertEqual(self._cb_time.GetSeconds(), 123.0)
    def testScheduleDestroy(self):
        """A ScheduleDestroy event fires during Simulator.Destroy."""
        def callback(args):
            self._args_received = args
            self._cb_time = Simulator.Now()
        Simulator.Destroy()
        self._args_received = None
        self._cb_time = None
        def null(): pass
        Simulator.Schedule(Seconds(123), null)
        Simulator.ScheduleDestroy(callback, "args")
        Simulator.Run()
        Simulator.Destroy()
        self.assertEqual(self._args_received, "args")
        self.assertEqual(self._cb_time.GetSeconds(), 123.0)
    def testTimeComparison(self):
        """Time values support the full set of comparison operators."""
        self.assertTrue(Seconds(123) == Seconds(123))
        self.assertTrue(Seconds(123) >= Seconds(123))
        self.assertTrue(Seconds(123) <= Seconds(123))
        self.assertTrue(Seconds(124) > Seconds(123))
        self.assertTrue(Seconds(123) < Seconds(124))
    def testTimeNumericOperations(self):
        """Time and int64x64_t values support arithmetic."""
        self.assertEqual(Seconds(10) + Seconds(5), Seconds(15))
        self.assertEqual(Seconds(10) - Seconds(5), Seconds(5))
        v1 = int64x64_t(5.0)*int64x64_t(10)
        self.assertEqual(v1, int64x64_t(50))
    def testConfig(self):
        """Config.SetDefault accepts a wrapped attribute value."""
        Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.UintegerValue(123))
        # hm.. no Config.Get?
    def testSocket(self):
        """A UDP packet sent to loopback arrives at the bound sink."""
        node = ns.network.Node()
        internet = ns.internet.InternetStackHelper()
        internet.Install(node)
        self._received_packet = None
        def rx_callback(socket):
            assert self._received_packet is None
            self._received_packet = socket.Recv()
        sink = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName("ns3::UdpSocketFactory"))
        sink.Bind(ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), 80))
        sink.SetRecvCallback(rx_callback)
        source = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName("ns3::UdpSocketFactory"))
        source.SendTo(ns.network.Packet(19), 0, ns.network.InetSocketAddress(ns.network.Ipv4Address("127.0.0.1"), 80))
        Simulator.Run()
        self.assertTrue(self._received_packet is not None)
        self.assertEqual(self._received_packet.GetSize(), 19)
    def testAttributes(self):
        """Attributes round-trip through Set/GetAttribute wrappers."""
        ##
        ## Yes, I know, the GetAttribute interface for Python is
        ## horrible, we should fix this soon, I hope.
        ##
        queue = ns.network.DropTailQueue()
        queue.SetAttribute("MaxPackets", ns.core.UintegerValue(123456))
        limit = ns.core.UintegerValue()
        queue.GetAttribute("MaxPackets", limit)
        self.assertEqual(limit.Get(), 123456)
        ## -- object pointer values
        mobility = ns.mobility.RandomWaypointMobilityModel()
        ptr = ns.core.PointerValue()
        mobility.GetAttribute("PositionAllocator", ptr)
        self.assertEqual(ptr.GetObject(), None)
        pos = ns.mobility.ListPositionAllocator()
        mobility.SetAttribute("PositionAllocator", ns.core.PointerValue(pos))
        ptr = ns.core.PointerValue()
        mobility.GetAttribute("PositionAllocator", ptr)
        self.assertTrue(ptr.GetObject() is not None)
    def testIdentity(self):
        """Repeated GetChannel calls return the same Python wrapper."""
        csma = ns.csma.CsmaNetDevice()
        channel = ns.csma.CsmaChannel()
        csma.Attach(channel)
        c1 = csma.GetChannel()
        c2 = csma.GetChannel()
        self.assertTrue(c1 is c2)
    def testTypeId(self):
        """TypeId lookup succeeds for known names, raises for unknown."""
        typeId1 = ns.core.TypeId.LookupByNameFailSafe("ns3::UdpSocketFactory")
        self.assertEqual(typeId1.GetName (), "ns3::UdpSocketFactory")
        self.assertRaises(KeyError, ns.core.TypeId.LookupByNameFailSafe, "__InvalidTypeName__")
    def testCommandLine(self):
        """CommandLine stores parsed values on itself or a namespace."""
        cmd = ns.core.CommandLine()
        cmd.AddValue("Test1", "this is a test option")
        cmd.AddValue("Test2", "this is a test option")
        cmd.AddValue("Test3", "this is a test option", variable="test_xxx")
        cmd.Test1 = None
        cmd.Test2 = None
        cmd.test_xxx = None
        class Foo:
            pass
        foo = Foo()
        foo.test_foo = None
        cmd.AddValue("Test4", "this is a test option", variable="test_foo", namespace=foo)
        cmd.Parse(["python", "--Test1=value1", "--Test2=value2", "--Test3=123", "--Test4=xpto"])
        self.assertEqual(cmd.Test1, "value1")
        self.assertEqual(cmd.Test2, "value2")
        self.assertEqual(cmd.test_xxx, "123")
        self.assertEqual(foo.test_foo, "xpto")
    def testSubclass(self):
        """ns.network.Node can be subclassed from Python."""
        class MyNode(ns.network.Node):
            def __init__(self):
                super(MyNode, self).__init__()
        node = MyNode()
# Allow running this test suite directly from the command line.
if __name__ == '__main__':
    unittest.main()
| zy901002-gpsr | utils/python-unit-tests.py | Python | gpl2 | 5,687 |
#!/usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import cairo
import sys
import re
import gtk
class DataRange:
    """A value holding over a closed time interval [start, end]."""
    def __init__(self, start = 0, end = 0, value = ''):
        self.start, self.end, self.value = start, end, value
class EventString:
    """A string-valued event occurring at time 'at'."""
    def __init__(self, at = 0, value = ''):
        self.at, self.value = at, value
class EventFloat:
    """A float-valued event occurring at time 'at'."""
    def __init__(self, at = 0, value = 0.0):
        self.at, self.value = at, value
class EventInt:
    """An integer-valued event occurring at time 'at'."""
    def __init__(self, at = 0, value = 0.0):
        self.at, self.value = at, value
def ranges_cmp(a, b):
    """Three-way compare two ranges by their start time (old cmp protocol)."""
    if a.start < b.start:
        return -1
    if a.start > b.start:
        return +1
    return 0
def events_cmp(a, b):
    """Three-way compare two events by their timestamp (old cmp protocol)."""
    if a.at < b.at:
        return -1
    if a.at > b.at:
        return +1
    return 0
class TimelineDataRange:
    """A named, time-sorted collection of DataRange-like objects with
    binary-search lookup of the ranges overlapping a time window."""
    def __init__(self, name = ''):
        self.name = name
        self.ranges = []
        return
    def __search(self, key):
        # binary search: index of the range whose [start, end] contains
        # key, or -1 when no range does (assumes self.ranges is sorted)
        l = 0
        u = len(self.ranges)-1
        while l <= u:
            i = int((l + u) / 2)
            if key >= self.ranges[i].start and key <= self.ranges[i].end:
                return i
            elif key < self.ranges[i].start:
                u = i - 1
            else:
                # key > self.ranges[i].end
                l = i + 1
        return -1
    def add_range(self, range):
        self.ranges.append(range)
    def get_all(self):
        return self.ranges
    def get_ranges(self, start, end):
        """Return the ranges overlapping the window [start, end]."""
        s = self.__search(start)
        e = self.__search(end)
        if s == -1 and e == -1:
            return []
        elif s == -1:
            return self.ranges[0:e + 1]
        elif e == -1:
            return self.ranges[s:len(self.ranges)]
        else:
            return self.ranges[s:e + 1]
    def get_ranges_bounds(self, start, end):
        """Return (lo, hi) slice indices of the ranges in [start, end]."""
        s = self.__search(start)
        e = self.__search(end)
        if s == -1 and e == -1:
            return(0, 0)
        elif s == -1:
            return(0, e + 1)
        elif e == -1:
            return(s, len(self.ranges))
        else:
            return(s, e + 1)
    def sort(self):
        # list.sort(cmp_function) is Python-2 only; sort by start time
        # via key= instead (same ordering as ranges_cmp, still stable)
        self.ranges.sort(key=lambda r: r.start)
    def get_bounds(self):
        """Return (earliest start, latest end), or (0, 0) when empty."""
        if len(self.ranges) > 0:
            lo = self.ranges[0].start
            hi = self.ranges[len(self.ranges)-1].end
            return(lo, hi)
        else:
            return(0, 0)
class TimelineEvent:
    """A named, time-sorted collection of event objects with binary-search
    lookup of the events inside a time window."""
    def __init__(self, name = ''):
        self.name = name
        self.events = []
    def __search(self, key):
        # binary search: index of the event at time 'key', or the
        # insertion point when absent (assumes self.events is sorted)
        l = 0
        u = len(self.events)-1
        while l <= u:
            i = int((l + u) / 2)
            if key == self.events[i].at:
                return i
            elif key < self.events[i].at:
                u = i - 1
            else:
                # key > self.events[i].at
                l = i + 1
        return l
    def add_event(self, event):
        self.events.append(event)
    def get_events(self, start, end):
        """Return the events falling inside the window [start, end]."""
        s = self.__search(start)
        e = self.__search(end)
        return self.events[s:e + 1]
    def get_events_bounds(self, start, end):
        """Return (lo, hi) slice indices of the events in [start, end]."""
        s = self.__search(start)
        e = self.__search(end)
        return(s, e + 1)
    def sort(self):
        # list.sort(cmp_function) is Python-2 only; sort by timestamp
        # via key= instead (same ordering as events_cmp, still stable)
        self.events.sort(key=lambda ev: ev.at)
    def get_bounds(self):
        """Return (first event time, last event time), or (0, 0) when empty."""
        if len(self.events) > 0:
            lo = self.events[0].at
            hi = self.events[-1].at
            return(lo, hi)
        else:
            return(0, 0)
class Timeline:
    """All data tracked for one named timeline: data-range series plus
    string-valued and int-valued event series."""
    def __init__(self, name = ''):
        self.ranges = []
        self.event_str = []
        self.event_int = []
        self.name = name
    def get_range(self, name):
        """Return the range series called *name*, creating it if needed."""
        for existing in self.ranges:
            if existing.name == name:
                return existing
        created = TimelineDataRange(name)
        self.ranges.append(created)
        return created
    def get_event_str(self, name):
        """Return the string-event series called *name*, creating it if needed."""
        for existing in self.event_str:
            if existing.name == name:
                return existing
        created = TimelineEvent(name)
        self.event_str.append(created)
        return created
    def get_event_int(self, name):
        """Return the int-event series called *name*, creating it if needed."""
        for existing in self.event_int:
            if existing.name == name:
                return existing
        created = TimelineEvent(name)
        self.event_int.append(created)
        return created
    def get_ranges(self):
        return self.ranges
    def get_events_str(self):
        return self.event_str
    def get_events_int(self):
        return self.event_int
    def sort(self):
        """Time-sort every series held by this timeline."""
        for series in self.ranges + self.event_int + self.event_str:
            series.sort()
    def get_bounds(self):
        """Return the (lo, hi) time span covering every series; starts
        from (0, 0) so the result never shrinks below zero."""
        lo = 0
        hi = 0
        for series in self.ranges + self.event_str + self.event_int:
            (series_lo, series_hi) = series.get_bounds()
            lo = min(lo, series_lo)
            hi = max(hi, series_hi)
        return(lo, hi)
class Timelines:
    """The set of all timelines, looked up (and lazily created) by name."""
    def __init__(self):
        self.timelines = []
    def get(self, name):
        """Return the timeline called *name*, creating it if needed."""
        for timeline in self.timelines:
            if timeline.name == name:
                return timeline
        timeline = Timeline(name)
        self.timelines.append(timeline)
        return timeline
    def get_all(self):
        return self.timelines
    def sort(self):
        for timeline in self.timelines:
            timeline.sort()
    def get_bounds(self):
        """Return the (lo, hi) time span covering every timeline."""
        lo = 0
        hi = 0
        for timeline in self.timelines:
            (t_lo, t_hi) = timeline.get_bounds()
            if t_lo < lo:
                lo = t_lo
            if t_hi > hi:
                hi = t_hi
        return(lo, hi)
    def get_all_range_values(self):
        """Return the distinct values used by every data range, as a list."""
        range_values = {}
        for timeline in self.timelines:
            for ranges in timeline.get_ranges():
                for ran in ranges.get_all():
                    range_values[ran.value] = 1
        # wrap in list(): keeps the Python-2 return type (a real list)
        # under Python 3, where .keys() is only a view
        return list(range_values.keys())
class Color:
    """An RGB color; components are floats in [0.0, 1.0]."""
    def __init__(self, r = 0.0, g = 0.0, b = 0.0):
        self.set(r, g, b)
    def set(self, r, g, b):
        """Replace all three components at once."""
        self.r = r
        self.g = g
        self.b = b
class Colors:
    """Maps range-value names to display colors, assigning a color from
    the default palette on first lookup of an unknown name."""
    # XXX add more
    default_colors = [Color(1, 0, 0), Color(0, 1, 0), Color(0, 0, 1), Color(1, 1, 0), Color(1, 0, 1), Color(0, 1, 1)]
    def __init__(self):
        self.__colors = {}
    def add(self, name, color):
        self.__colors[name] = color
    def lookup(self, name):
        """Return the color for *name*, assigning one on first use."""
        # dict.has_key() was removed in Python 3; use 'in' instead
        if name not in self.__colors:
            # NOTE(review): pop() mutates the class-level palette, so
            # colors are consumed across ALL Colors instances and the
            # palette can run out -- confirm this sharing is intended
            self.add(name, self.default_colors.pop())
        return self.__colors.get(name)
class TopLegendRenderer:
    """Renders the color legend strip shown above the timelines: a wrapped
    row of [color swatch + label] items.  Call set_legends(), then layout()
    to compute the height, then draw()."""
    def __init__(self):
        self.__padding = 10
    def set_padding(self, padding):
        self.__padding = padding
    def set_legends(self, legends, colors):
        # legends[i] is labeled with colors[i]; the two lists run parallel
        self.__legends = legends
        self.__colors = colors
    def layout(self, width):
        """Compute self.__height by simulating the line-wrapping performed
        in draw() for the given available *width*."""
        self.__width = width
        # measure text with a throwaway 1x1 surface; only extents matter
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
        ctx = cairo.Context(surface)
        line_height = 0
        total_height = self.__padding
        line_used = self.__padding
        for legend in self.__legends:
            (t_width, t_height) = ctx.text_extents(legend)[2:4]
            # swatch + gap + label + trailing gap
            item_width = self.__padding + self.__padding + t_width + self.__padding
            item_height = t_height + self.__padding
            if item_height > line_height:
                line_height = item_height
            if line_used + item_width > self.__width:
                # wrap to a new line
                line_used = self.__padding + item_width
                total_height += line_height
            else:
                line_used += item_width
            # x is recomputed in draw(); unused here
            x = line_used - item_width
        total_height += line_height
        self.__height = total_height
    def get_height(self):
        return self.__height
    def draw(self, ctx):
        """Draw the legend items, wrapping lines exactly as layout() did."""
        i = 0
        line_height = 0
        total_height = self.__padding
        line_used = self.__padding
        for legend in self.__legends:
            (t_width, t_height) = ctx.text_extents(legend)[2:4]
            item_width = self.__padding + self.__padding + t_width + self.__padding
            item_height = t_height + self.__padding
            if item_height > line_height:
                line_height = item_height
            if line_used + item_width > self.__width:
                line_used = self.__padding + item_width
                total_height += line_height
            else:
                line_used += item_width
            x = line_used - item_width
            # outlined, color-filled swatch followed by its label
            ctx.rectangle(x, total_height, self.__padding, self.__padding)
            ctx.set_source_rgb(0, 0, 0)
            ctx.set_line_width(2)
            ctx.stroke_preserve()
            ctx.set_source_rgb(self.__colors[i].r,
                               self.__colors[i].g,
                               self.__colors[i].b)
            ctx.fill()
            ctx.move_to(x + self.__padding*2, total_height + t_height)
            ctx.set_source_rgb(0, 0, 0)
            ctx.show_text(legend)
            i += 1
        return
class TimelinesRenderer:
    """Renders a Timelines collection: a left column of timeline names,
    a right column of series names, and the time-scaled data area."""
    def __init__(self):
        self.padding = 10
        return
    def get_height(self):
        # valid only after layout() has run
        return self.height
    def set_timelines(self, timelines, colors):
        self.timelines = timelines
        self.colors = colors
    def set_render_range(self, start, end):
        # the [start, end] time window mapped onto the data area width
        self.start = start
        self.end = end
    def get_data_x_start(self):
        """X coordinate where the time-scaled data area begins."""
        return self.padding / 2 + self.left_width + self.padding + self.right_width + self.padding / 2
    def layout(self, width):
        """Measure label columns and compute the total height needed.
        Sets left_width, right_width, max_text_height, width and height."""
        # measure text with a throwaway 1x1 surface; only extents matter
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
        ctx = cairo.Context(surface)
        max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
        left_width = 0
        right_width = 0
        left_n_lines = 0
        range_n = 0
        eventint_n = 0
        eventstr_n = 0
        for timeline in self.timelines.get_all():
            # left column: one line per timeline name
            left_n_lines += 1
            t_width = ctx.text_extents(timeline.name)[2]
            left_width = max(left_width, t_width)
            # right column: one line per series name
            for rang in timeline.get_ranges():
                t_width = ctx.text_extents(rang.name)[2]
                right_width = max(right_width, t_width)
                range_n += 1
            for events_int in timeline.get_events_int():
                t_width = ctx.text_extents(events_int.name)[2]
                right_width = max(right_width, t_width)
                eventint_n += 1
            for events_str in timeline.get_events_str():
                t_width = ctx.text_extents(events_str.name)[2]
                right_width = max(right_width, t_width)
                eventstr_n += 1
        left_height = left_n_lines * max_text_height + (left_n_lines - 1) * self.padding
        right_n_lines = range_n + eventint_n + eventstr_n
        right_height = (right_n_lines - 1) * self.padding + right_n_lines * max_text_height
        # data rows: event rows are text-height + 5, range rows are 10 high
        right_data_height = (eventint_n + eventstr_n) * (max_text_height + 5) + range_n * 10
        right_data_height += (right_n_lines - 1) * self.padding
        # the tallest of the three columns wins
        height = max(left_height, right_height)
        height = max(height, right_data_height)
        self.left_width = left_width
        self.right_width = right_width
        self.max_text_height = max_text_height
        self.width = width
        self.height = height + self.padding
    def draw_line(self, ctx, x, y, width, height):
        """Draw a thin black separator line from (x, y) spanning
        (width, height)."""
        ctx.move_to(x, y)
        ctx.rel_line_to(width, height)
        ctx.close_path()
        ctx.set_operator(cairo.OPERATOR_SOURCE)
        ctx.set_line_width(1.0)
        ctx.set_source_rgb(0, 0, 0)
        ctx.stroke()
    def draw_events(self, ctx, events, x, y, width, height):
        """Draw one event series as red ticks with their value printed
        underneath, inside the horizontal band at (x, y, width, height).
        Alternating rows get a light grey background."""
        if (self.grey_background % 2) == 0:
            ctx.rectangle(x, y - self.padding / 2,
                          width, height + self.padding)
            ctx.set_source_rgb(0.9, 0.9, 0.9)
            ctx.fill()
        last_x_drawn = int(x)
        (lo, hi) = events.get_events_bounds(self.start, self.end)
        for event in events.events[lo:hi]:
            # map the event time into the pixel range of the data area
            real_x = int(x + (event.at - self.start) * width / (self.end - self.start))
            if real_x > last_x_drawn + 2:
                # skip events closer than 2px to the previous drawn one
                ctx.rectangle(real_x, y, 1, 1)
                ctx.set_source_rgb(1, 0, 0)
                ctx.stroke()
                ctx.move_to(real_x, y + self.max_text_height)
                ctx.set_source_rgb(0, 0, 0)
                ctx.show_text(str(event.value))
                last_x_drawn = real_x
        self.grey_background += 1
    def draw_ranges(self, ctx, ranges, x, y, width, height):
        """Draw one range series as outlined boxes filled with the color
        assigned to each range's value, inside the band at (x, y, width,
        height).  Alternating rows get a light grey background."""
        if (self.grey_background % 2) == 0:
            ctx.rectangle(x, y - self.padding / 2,
                          width, height + self.padding)
            ctx.set_source_rgb(0.9, 0.9, 0.9)
            ctx.fill()
        last_x_drawn = int(x - 1)
        (lo, hi) = ranges.get_ranges_bounds(self.start, self.end)
        for data_range in ranges.ranges[lo:hi]:
            # clip the range to the visible window, then map to pixels
            s = max(data_range.start, self.start)
            e = min(data_range.end, self.end)
            x_start = int(x + (s - self.start) * width / (self.end - self.start))
            x_end = int(x + (e - self.start) * width / (self.end - self.start))
            if x_end > last_x_drawn:
                ctx.rectangle(x_start, y, x_end - x_start, 10)
                ctx.set_source_rgb(0, 0, 0)
                ctx.stroke_preserve()
                color = self.colors.lookup(data_range.value)
                ctx.set_source_rgb(color.r, color.g, color.b)
                ctx.fill()
                last_x_drawn = x_end
        self.grey_background += 1
    def draw(self, ctx):
        # Paint the whole timelines pane: a left column of timeline names, a
        # middle column of per-series names, and a right data area where each
        # series is drawn by draw_events/draw_ranges.  Horizontal separator
        # lines are drawn after every series and every timeline.
        timeline_top = 0  # NOTE(review): appears unused; kept as-is.
        top_y = self.padding / 2
        # Column x coordinates, computed from the widths set by layout().
        left_x_start = self.padding / 2
        left_x_end = left_x_start + self.left_width
        right_x_start = left_x_end + self.padding
        right_x_end = right_x_start + self.right_width
        data_x_start = right_x_end + self.padding / 2
        data_x_end = self.width
        data_width = data_x_end - data_x_start
        cur_y = top_y
        # Top border.
        self.draw_line(ctx, 0, 0, self.width, 0)
        # Start at 1 so the first data row is not grey (rows alternate).
        self.grey_background = 1
        for timeline in self.timelines.get_all():
            # Timeline name in the left column, baseline-aligned.
            (y_bearing, t_width, t_height) = ctx.text_extents(timeline.name)[1:4]
            ctx.move_to(left_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
            ctx.show_text(timeline.name);
            # Integer-valued event series.
            for events_int in timeline.get_events_int():
                (y_bearing, t_width, t_height) = ctx.text_extents(events_int.name)[1:4]
                ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
                ctx.show_text(events_int.name)
                self.draw_events(ctx, events_int, data_x_start, cur_y, data_width, self.max_text_height + 5)
                cur_y += self.max_text_height + 5 + self.padding
                self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
                               self.right_width + self.padding, 0)
            # String-valued event series.
            for events_str in timeline.get_events_str():
                (y_bearing, t_width, t_height) = ctx.text_extents(events_str.name)[1:4]
                ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
                ctx.show_text(events_str.name)
                self.draw_events(ctx, events_str, data_x_start, cur_y, data_width, self.max_text_height + 5)
                cur_y += self.max_text_height + 5 + self.padding
                self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
                               self.right_width + self.padding, 0)
            # Range series (10px-tall colored bars).
            for ranges in timeline.get_ranges():
                (y_bearing, t_width, t_height) = ctx.text_extents(ranges.name)[1:4]
                ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
                ctx.show_text(ranges.name)
                self.draw_ranges(ctx, ranges, data_x_start, cur_y, data_width, 10)
                cur_y += self.max_text_height + self.padding
                self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
                               self.right_width + self.padding, 0)
            # Full-width separator after each timeline.
            self.draw_line(ctx, 0, cur_y - self.padding / 2,
                           self.width, 0)
        bot_y = cur_y - self.padding / 2
        # Vertical separators between the three columns.
        self.draw_line(ctx, left_x_end + self.padding / 2, 0,
                       0, bot_y)
        self.draw_line(ctx, right_x_end + self.padding / 2, 0,
                       0, bot_y)
        return
class ScaleRenderer:
    """Render a horizontal tick scale (major ticks with numeric labels plus
    minor sub-ticks) for a [lo, hi] data interval mapped onto a pixel width.

    The scale can draw its ticks and labels above the axis (set_top) or
    below it (set_bot).  Call set_bounds() and layout() before draw() or
    get_position()."""
    def __init__(self):
        self.__top = 0
        return
    def set_bounds(self, lo, hi):
        """Set the data-space interval [lo, hi] displayed by this scale."""
        self.__lo = lo
        self.__hi = hi
    def get_position(self, x):
        """Map data coordinate x to a pixel offset.  Requires layout()."""
        real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
        return real_x
    def set_top(self):
        """Draw ticks and labels above the axis line."""
        self.__top = 1
    def set_bot(self):
        """Draw ticks and labels below the axis line."""
        self.__top = 0
    def layout(self, width):
        """Compute the tick step and the pixel height needed by this scale
        when rendered over the given pixel width."""
        # A throwaway 1x1 surface is enough to measure text extents.
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
        ctx = cairo.Context(surface)
        # Calculate the scale delta: the largest power of ten below the data
        # span, refined when the span barely exceeds it.
        data_delta = self.__hi - self.__lo
        closest = 1
        while (closest*10) < data_delta:
            closest *= 10
        if (data_delta / closest) == 0:
            delta = closest
        elif(data_delta / closest) == 1:
            delta = closest / 10
        else:
            delta = closest
        # (The tick start/end positions are recomputed in draw(); they were
        # previously also computed here but never used.)
        self.__delta = delta
        self.__width = width
        # Text height: measured over a pangram-ish sample covering ascenders
        # and descenders so the tallest label fits.
        max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
        self.max_text_height = max_text_height
        height = max_text_height + 10
        self.__height = height
    def get_height(self):
        """Pixel height computed by the last layout() call."""
        return self.__height
    def draw(self, ctx):
        """Draw the scale at the current origin of ctx."""
        delta = self.__delta
        # First/last major tick positions, aligned on multiples of delta.
        start = self.__lo - (self.__lo % delta) + delta
        end = self.__hi - (self.__hi % delta)
        # s flips the tick/label direction: -1 draws upward (top scale).
        if self.__top == 1:
            s = -1
        else:
            s = 1
        # Major ticks (5px) with numeric labels.
        ctx.set_source_rgb(0, 0, 0)
        ctx.set_line_width(1.0)
        ticks = range(int(start), int(end + delta), int(delta))
        for x in ticks:
            real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
            ctx.move_to(real_x, 0)
            ctx.line_to(real_x, 5*s)
            ctx.close_path()
            ctx.stroke()
            (t_y_bearing, t_width, t_height) = ctx.text_extents(str(x))[1:4]
            # Baseline offset differs depending on which side the label is on.
            if self.__top:
                text_delta = t_height + t_y_bearing
            else:
                text_delta = -t_y_bearing
            ctx.move_to(real_x - t_width / 2, (5 + 5 + text_delta)*s)
            ctx.show_text(str(x))
        # Minor sub-ticks (3px) at one tenth of the major step.
        delta /= 10
        if delta > 0:
            start = self.__lo - (self.__lo % delta) + delta
            end = self.__hi - (self.__hi % delta)
            for x in range(int(start), int(end + delta), int(delta)):
                real_x = (x - self.__lo ) * self.__width / (self.__hi - self.__lo)
                ctx.move_to(real_x, 0)
                ctx.line_to(real_x, 3*s)
                ctx.close_path()
                ctx.stroke()
class GraphicRenderer:
    """Top-level renderer composing the whole picture: a legend at the top,
    the timelines data pane, a zoomed scale under the data, and a full-range
    bottom scale with a draggable selection showing the zoom window.

    Coordinates: [__start, __end] is the full data interval; [__r_start,
    __r_end] is the currently rendered (zoomed) sub-interval."""
    def __init__(self, start, end):
        self.__start = float(start)
        self.__end = float(end)
        # Scale above the bottom selection area, showing the zoomed range.
        self.__mid_scale = ScaleRenderer()
        self.__mid_scale.set_top()
        # Bottom scale, fixed to the full data range.
        self.__bot_scale = ScaleRenderer()
        self.__bot_scale.set_bounds(start, end)
        self.__bot_scale.set_bot()
        self.__width = 1
        self.__height = 1
    def get_width(self):
        return self.__width
    def get_height(self):
        return self.__height
    # return x, y, width, height
    def get_data_rectangle(self):
        """Pixel rectangle occupied by the data pane."""
        y_start = self.__top_legend.get_height()
        x_start = self.__data.get_data_x_start()
        return(x_start, y_start, self.__width - x_start, self.__data.get_height())
    def scale_data(self, x):
        """Convert a pixel delta inside the data pane to a data-space delta
        relative to the current zoomed range."""
        x_start = self.__data.get_data_x_start()
        x_scaled = x / (self.__width - x_start) * (self.__r_end - self.__r_start)
        return x_scaled
    # return x, y, width, height
    def get_selection_rectangle(self):
        """Pixel rectangle of the selection on the bottom scale."""
        y_start = self.__top_legend.get_height() + self.__data.get_height() + self.__mid_scale.get_height() + 20
        y_height = self.__bot_scale.get_height() + 20
        x_start = self.__bot_scale.get_position(self.__r_start)
        x_end = self.__bot_scale.get_position(self.__r_end)
        return(x_start, y_start, x_end - x_start, y_height)
    def scale_selection(self, x):
        """Convert a pixel delta on the bottom scale to a data-space delta
        relative to the full range."""
        x_scaled = x / self.__width * (self.__end - self.__start)
        return x_scaled
    def set_range(self, start, end):
        """Set the zoomed range, clamped to the full data interval, and
        propagate it to the data pane and middle scale."""
        s = min(start, end)
        e = max(start, end)
        start = max(self.__start, s)
        end = min(self.__end, e)
        self.__r_start = start
        self.__r_end = end
        self.__data.set_render_range(start, end)
        self.__mid_scale.set_bounds(start, end)
        self.layout(self.__width, self.__height)
    def get_range(self):
        """Current zoomed (start, end) tuple."""
        return(self.__r_start, self.__r_end)
    def set_data(self, data):
        self.__data = data
    def set_top_legend(self, top_legend):
        self.__top_legend = top_legend
    def layout(self, width, height):
        """Record the target size and lay out every sub-renderer."""
        self.__width = width
        self.__height = height
        self.__top_legend.layout(width)
        self.__data.layout(width)
        self.__mid_scale.layout(width - self.__data.get_data_x_start())
        self.__bot_scale.layout(width)
        return
    def __x_pixel(self, x, width):
        # NOTE(review): currently unreferenced helper; kept for completeness.
        new_x = (x - self.__start) * width / (self.__end - self.__start)
        return new_x
    def draw(self, ctx):
        """Render the complete composition into the cairo context ctx."""
        # default background is white
        ctx.save()
        ctx.set_source_rgb(1, 1, 1)
        ctx.set_operator(cairo.OPERATOR_SOURCE)
        ctx.rectangle(0, 0, self.__width, self.__height)
        ctx.fill()
        # top legend
        ctx.save()
        self.__top_legend.draw(ctx)
        top_legend_height = self.__top_legend.get_height()
        ctx.restore()
        # separation line
        ctx.move_to(0, top_legend_height)
        ctx.line_to(self.__width, top_legend_height)
        ctx.close_path()
        ctx.set_line_width(2)
        ctx.set_source_rgb(0, 0, 0)
        ctx.stroke()
        # data
        ctx.save()
        ctx.translate(0,
                      top_legend_height)
        self.__data.draw(ctx)
        ctx.restore()
        # scale below data
        ctx.save()
        ctx.translate(self.__data.get_data_x_start(),
                      top_legend_height + self.__data.get_height() + self.__mid_scale.get_height())
        self.__mid_scale.draw(ctx)
        ctx.restore()
        height_used = top_legend_height + self.__data.get_height() + self.__mid_scale.get_height()
        # separation between scale and left pane
        ctx.move_to(self.__data.get_data_x_start(), height_used)
        ctx.rel_line_to(0, -self.__mid_scale.get_height())
        ctx.close_path()
        ctx.set_source_rgb(0, 0, 0)
        ctx.set_line_width(2)
        ctx.stroke()
        # separation below scale
        ctx.move_to(0, height_used)
        ctx.line_to(self.__width, height_used)
        ctx.close_path()
        ctx.set_line_width(2)
        ctx.set_source_rgb(0, 0, 0)
        ctx.stroke()
        select_start = self.__bot_scale.get_position(self.__r_start)
        select_end = self.__bot_scale.get_position(self.__r_end)
        # left connection between top scale and bottom scale
        ctx.move_to(0, height_used)
        ctx.line_to(self.__data.get_data_x_start(), height_used)
        ctx.line_to(select_start, height_used + 20)
        ctx.line_to(0, height_used + 20)
        ctx.line_to(0, height_used)
        ctx.set_source_rgb(0, 0, 0)
        ctx.set_line_width(1)
        ctx.stroke_preserve()
        ctx.set_source_rgb(0.9, 0.9, 0.9)
        ctx.fill()
        # right connection between top scale and bottom scale
        ctx.move_to(self.__width, height_used)
        ctx.line_to(self.__width, height_used + 20)
        ctx.line_to(select_end, height_used + 20)
        ctx.line_to(self.__width, height_used)
        ctx.set_source_rgb(0, 0, 0)
        ctx.set_line_width(1)
        ctx.stroke_preserve()
        ctx.set_source_rgb(0.9, 0.9, 0.9)
        ctx.fill()
        height_used += 20
        # unused area background
        unused_start = self.__bot_scale.get_position(self.__r_start)
        unused_end = self.__bot_scale.get_position(self.__r_end)
        unused_height = self.__bot_scale.get_height() + 20
        ctx.rectangle(0, height_used,
                      unused_start,
                      unused_height)
        ctx.rectangle(unused_end,
                      height_used,
                      self.__width - unused_end,
                      unused_height)
        ctx.set_source_rgb(0.9, 0.9, 0.9)
        ctx.fill()
        # border line around bottom scale
        ctx.move_to(unused_end, height_used)
        ctx.line_to(self.__width, height_used)
        ctx.line_to(self.__width, height_used + unused_height)
        ctx.line_to(0, height_used + unused_height)
        ctx.line_to(0, height_used)
        ctx.line_to(unused_start, height_used)
        ctx.close_path()
        ctx.set_line_width(2)
        ctx.set_source_rgb(0, 0, 0)
        ctx.stroke()
        # erase the border segment across the selected interval
        ctx.move_to(unused_start, height_used)
        ctx.line_to(unused_end, height_used)
        ctx.close_path()
        ctx.set_line_width(1)
        ctx.set_source_rgb(0.9, 0.9, 0.9)
        ctx.stroke()
        # unused area dot borders
        ctx.save()
        ctx.move_to(max(unused_start, 2), height_used)
        ctx.rel_line_to(0, unused_height)
        ctx.move_to(min(unused_end, self.__width - 2), height_used)
        ctx.rel_line_to(0, unused_height)
        ctx.set_dash([5], 0)
        ctx.set_source_rgb(0, 0, 0)
        ctx.set_line_width(1)
        ctx.stroke()
        ctx.restore()
        # bottom scale
        ctx.save()
        ctx.translate(0, height_used)
        self.__bot_scale.draw(ctx)
        ctx.restore()
class GtkGraphicRenderer(gtk.DrawingArea):
    """GTK widget wrapping a GraphicRenderer.

    Handles zoom buttons, PNG export, and mouse interaction: dragging the
    left/right edges of the bottom-scale selection resizes the zoom window,
    dragging inside it (or inside the data pane) pans it.  The rendered
    picture is cached in an off-screen surface and only re-rendered when
    __force_full_redraw is set."""
    def __init__(self, data):
        super(GtkGraphicRenderer, self).__init__()
        self.__data = data
        # Drag-state flags for the current mouse interaction.
        self.__moving_left = False
        self.__moving_right = False
        self.__moving_both = False
        self.__moving_top = False
        self.__force_full_redraw = True
        self.add_events(gtk.gdk.POINTER_MOTION_MASK)
        self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("expose_event", self.expose)
        self.connect('size-allocate', self.size_allocate)
        self.connect('motion-notify-event', self.motion_notify)
        self.connect('button-press-event', self.button_press)
        self.connect('button-release-event', self.button_release)
    def set_smaller_zoom(self):
        """Double the rendered range (zoom out)."""
        (start, end) = self.__data.get_range()
        self.__data.set_range(start, start + (end - start)*2)
        self.__force_full_redraw = True
        self.queue_draw()
    def set_bigger_zoom(self):
        """Halve the rendered range (zoom in)."""
        (start, end) = self.__data.get_range()
        self.__data.set_range(start, start + (end - start) / 2)
        self.__force_full_redraw = True
        self.queue_draw()
    def output_png(self, filename):
        """Render the current picture into a fresh surface and save it.

        Bug fix: the cairo context used to be created on the cached
        __buffer_surface (which may not even exist before the first expose)
        while the empty new surface was written out; draw into the surface
        that is actually saved."""
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
                                     self.__data.get_width(),
                                     self.__data.get_height())
        ctx = cairo.Context(surface)
        self.__data.draw(ctx)
        surface.write_to_png(filename)
    def button_press(self, widget, event):
        """Classify a click: selection edge (resize), selection body or data
        pane (pan).  Returns True when the event is consumed."""
        (x, y, width, height) = self.__data.get_selection_rectangle()
        (d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
        if event.y > y and event.y < y + height:
            if abs(event.x - x) < 5:
                self.__moving_left = True
                return True
            if abs(event.x - (x + width)) < 5:
                self.__moving_right = True
                return True
            if event.x > x and event.x < x + width:
                self.__moving_both = True
                self.__moving_both_start = event.x
                self.__moving_both_cur = event.x
                return True
        if event.y > d_y and event.y < (d_y + d_height):
            if event.x > d_x and event.x < (d_x + d_width):
                self.__moving_top = True
                self.__moving_top_start = event.x
                self.__moving_top_cur = event.x
                return True
        return False
    def button_release(self, widget, event):
        """Commit the drag gesture started in button_press."""
        if self.__moving_left:
            self.__moving_left = False
            left = self.__data.scale_selection(self.__moving_left_cur)
            right = self.__data.get_range()[1]
            self.__data.set_range(left, right)
            self.__force_full_redraw = True
            self.queue_draw()
            return True
        if self.__moving_right:
            self.__moving_right = False
            right = self.__data.scale_selection(self.__moving_right_cur)
            left = self.__data.get_range()[0]
            self.__data.set_range(left, right)
            self.__force_full_redraw = True
            self.queue_draw()
            return True
        if self.__moving_both:
            self.__moving_both = False
            delta = self.__data.scale_selection(self.__moving_both_cur - self.__moving_both_start)
            (left, right) = self.__data.get_range()
            self.__data.set_range(left + delta, right + delta)
            self.__force_full_redraw = True
            self.queue_draw()
            return True
        if self.__moving_top:
            # Panning in the data pane is applied live in motion_notify;
            # nothing left to commit here.
            self.__moving_top = False
        return False
    def motion_notify(self, widget, event):
        """Track the pointer during a drag and update cursors when idle."""
        (x, y, width, height) = self.__data.get_selection_rectangle()
        if self.__moving_left:
            # Clamp the left handle between the widget edge and the right handle.
            if event.x <= 0:
                self.__moving_left_cur = 0
            elif event.x >= x + width:
                self.__moving_left_cur = x + width
            else:
                self.__moving_left_cur = event.x
            self.queue_draw_area(0, int(y), int(self.__width), int(height))
            return True
        if self.__moving_right:
            # Clamp the right handle between the left handle and the edge.
            # Bug fix: the clamped value used to be stored into the
            # __moving_right *flag* instead of __moving_right_cur, leaving
            # the cursor position unset (AttributeError on release/expose).
            if event.x >= self.__width:
                self.__moving_right_cur = self.__width
            elif event.x < x:
                self.__moving_right_cur = x
            else:
                self.__moving_right_cur = event.x
            self.queue_draw_area(0, int(y), int(self.__width), int(height))
            return True
        if self.__moving_both:
            # Clamp so the whole selection stays inside the widget.
            cur_e = self.__width - (x + width - self.__moving_both_start)
            cur_s = (self.__moving_both_start - x)
            if event.x < cur_s:
                self.__moving_both_cur = cur_s
            elif event.x > cur_e:
                self.__moving_both_cur = cur_e
            else:
                self.__moving_both_cur = event.x
            self.queue_draw_area(0, int(y), int(self.__width), int(height))
            return True
        if self.__moving_top:
            # Live panning: shift the rendered range by the pixel delta.
            self.__moving_top_cur = event.x
            delta = self.__data.scale_data(self.__moving_top_start - self.__moving_top_cur)
            (left, right) = self.__data.get_range()
            self.__data.set_range(left + delta, right + delta)
            self.__force_full_redraw = True
            self.__moving_top_start = event.x
            self.queue_draw()
            return True
        # Not dragging: pick a cursor shape matching what a click would do.
        (d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
        if event.y > y and event.y < y + height:
            if abs(event.x - x) < 5 or abs(event.x - (x + width)) < 5:
                widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW))
                return True
            if event.x > x and event.x < x + width:
                widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
                return True
        if event.y > d_y and event.y < (d_y + d_height):
            if event.x > d_x and event.x < (d_x + d_width):
                widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
                return True
        widget.window.set_cursor(None)
        return False
    def size_allocate(self, widget, allocation):
        """React to a widget resize: re-layout and force a full re-render."""
        self.__width = allocation.width
        self.__height = allocation.height
        self.__data.layout(allocation.width, allocation.height)
        self.__force_full_redraw = True
        self.queue_draw()
    def expose(self, widget, event):
        """Blit the cached rendering (re-rendering it first if needed) and
        overlay live drag feedback lines."""
        if self.__force_full_redraw:
            self.__buffer_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
                                                       self.__data.get_width(),
                                                       self.__data.get_height())
            ctx = cairo.Context(self.__buffer_surface)
            self.__data.draw(ctx)
            self.__force_full_redraw = False
        ctx = widget.window.cairo_create()
        ctx.rectangle(event.area.x, event.area.y,
                      event.area.width, event.area.height)
        ctx.clip()
        ctx.set_source_surface(self.__buffer_surface)
        ctx.paint()
        (x, y, width, height) = self.__data.get_selection_rectangle()
        if self.__moving_left:
            # Vertical guide at the dragged left handle.
            ctx.move_to(max(self.__moving_left_cur, 2), y)
            ctx.rel_line_to(0, height)
            ctx.close_path()
            ctx.set_line_width(1)
            ctx.set_source_rgb(0, 0, 0)
            ctx.stroke()
        if self.__moving_right:
            # Vertical guide at the dragged right handle.
            ctx.move_to(min(self.__moving_right_cur, self.__width - 2), y)
            ctx.rel_line_to(0, height)
            ctx.close_path()
            ctx.set_line_width(1)
            ctx.set_source_rgb(0, 0, 0)
            ctx.stroke()
        if self.__moving_both:
            # Two guides showing where the whole selection would land.
            delta_x = self.__moving_both_cur - self.__moving_both_start
            ctx.move_to(x + delta_x, y)
            ctx.rel_line_to(0, height)
            ctx.close_path()
            ctx.move_to(x + width + delta_x, y)
            ctx.rel_line_to(0, height)
            ctx.close_path()
            ctx.set_source_rgb(0, 0, 0)
            ctx.set_line_width(1)
            ctx.stroke()
        return False
class MainWindow:
    # Top-level GTK window hosting a GtkGraphicRenderer plus a row of
    # buttons (zoom out / zoom in / PNG export).
    def __init__(self):
        return
    def run(self, graphic):
        # Build the widget tree and enter the GTK main loop (blocks until
        # the window is destroyed).
        window = gtk.Window()
        self.__window = window
        window.set_default_size(200, 200)
        vbox = gtk.VBox()
        window.add(vbox)
        render = GtkGraphicRenderer(graphic)
        self.__render = render
        vbox.pack_end(render, True, True, 0)
        hbox = gtk.HBox()
        vbox.pack_start(hbox, False, False, 0)
        smaller_zoom = gtk.Button("Zoom Out")
        smaller_zoom.connect("clicked", self.__set_smaller_cb)
        hbox.pack_start(smaller_zoom)
        bigger_zoom = gtk.Button("Zoom In")
        bigger_zoom.connect("clicked", self.__set_bigger_cb)
        hbox.pack_start(bigger_zoom)
        output_png = gtk.Button("Output Png")
        output_png.connect("clicked", self.__output_png_cb)
        hbox.pack_start(output_png)
        window.connect('destroy', gtk.main_quit)
        window.show_all()
        #gtk.bindings_activate(gtk.main_quit, 'q', 0)
        gtk.main()
    def __set_smaller_cb(self, widget):
        # "Zoom Out" button handler.
        self.__render.set_smaller_zoom()
    def __set_bigger_cb(self, widget):
        # "Zoom In" button handler.
        self.__render.set_bigger_zoom()
    def __output_png_cb(self, widget):
        # "Output Png" button handler: ask for a destination file name.
        # Response id 1 is bound to the "Save" button below.
        dialog = gtk.FileChooserDialog("Output Png", self.__window,
                                       gtk.FILE_CHOOSER_ACTION_SAVE, ("Save", 1))
        self.__dialog = dialog
        dialog.set_default_response(1)
        dialog.connect("response", self.__dialog_response_cb)
        dialog.show()
        return
    def __dialog_response_cb(self, widget, response):
        # Export the PNG when the user pressed "Save" (response == 1),
        # then hide the dialog in all cases.
        if response == 1:
            filename = self.__dialog.get_filename()
            self.__render.output_png(filename)
        widget.hide()
        return
def read_data(filename):
    """Parse a trace file into a (Colors, Timelines) pair.

    Recognized line formats (unknown lines are silently ignored):
      range <timeline> <series> <value> <start> <end>
      event-str <timeline> <series> <value> <at>
      event-int <timeline> <series> <value> <at>
      color <value> #rrggbb
    """
    timelines = Timelines()
    colors = Colors()
    m1 = re.compile('range ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
    m2 = re.compile('event-str ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+)')
    m3 = re.compile('event-int ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
    m4 = re.compile('color ([^ ]+) #([a-fA-F0-9]{2,2})([a-fA-F0-9]{2,2})([a-fA-F0-9]{2,2})')
    # 'with' guarantees the file is closed (it previously leaked), and
    # iterating the handle streams lines instead of reading them all at once.
    with open(filename) as fh:
        for line in fh:
            m = m1.match(line)
            if m:
                timeline = timelines.get(m.group(1))
                rang = timeline.get_range(m.group(2))
                data_range = DataRange()
                data_range.value = m.group(3)
                data_range.start = int(m.group(4))
                data_range.end = int(m.group(5))
                rang.add_range(data_range)
                continue
            m = m2.match(line)
            if m:
                timeline = timelines.get(m.group(1))
                ev = timeline.get_event_str(m.group(2))
                event = EventString()
                event.value = m.group(3)
                event.at = int(m.group(4))
                ev.add_event(event)
                continue
            m = m3.match(line)
            if m:
                timeline = timelines.get(m.group(1))
                ev = timeline.get_event_int(m.group(2))
                event = EventInt()
                event.value = int(m.group(3))
                event.at = int(m.group(4))
                ev.add_event(event)
                continue
            m = m4.match(line)
            if m:
                r = int(m.group(2), 16)
                g = int(m.group(3), 16)
                b = int(m.group(4), 16)
                # Divide by 255.0 so channels land in [0.0, 1.0] even on
                # Python 2, where r / 255 with ints truncates to 0 or 1.
                color = Color(r / 255.0, g / 255.0, b / 255.0)
                colors.add(m.group(1), color)
                continue
    timelines.sort()
    return (colors, timelines)
def main():
    """Entry point: load the trace file named on the command line, build
    the renderer stack (top legend + timelines pane + scales), select an
    initial view one tenth of the data span wide, and start the GUI."""
    (colors, timelines) = read_data(sys.argv[1])
    (lower_bound, upper_bound) = timelines.get_bounds()
    graphic = GraphicRenderer(lower_bound, upper_bound)
    # Legend at the top: one colored entry per distinct range value.
    top_legend = TopLegendRenderer()
    range_values = timelines.get_all_range_values()
    range_colors = [colors.lookup(value) for value in range_values]
    top_legend.set_legends(range_values, range_colors)
    graphic.set_top_legend(top_legend)
    # Main data pane.
    data = TimelinesRenderer()
    data.set_timelines(timelines, colors)
    graphic.set_data(data)
    # default range
    range_mid = (upper_bound - lower_bound) / 2
    range_width = (upper_bound - lower_bound) / 10
    range_lo = range_mid - range_width / 2
    range_hi = range_mid + range_width / 2
    graphic.set_range(range_lo, range_hi)
    main_window = MainWindow()
    main_window.run(graphic)
if __name__ == '__main__':
    # Guard so the module can be imported (e.g. for testing) without
    # launching the GUI; running it as a script behaves exactly as before.
    main()
| zy901002-gpsr | utils/grid.py | Python | gpl2 | 39,708 |
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2006 INRIA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
*/
#include "ns3/core-module.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <string.h>
using namespace ns3;
// When true (enabled via the --debug flag), every simulated event is
// traced to std::cerr.
bool g_debug = false;
/**
 * Benchmark driver for the ns-3 event scheduler: schedules events whose
 * inter-arrival delays come from a user-supplied distribution and measures
 * insert and hold costs (see RunBench).
 */
class Bench 
{
public:
  Bench ();
  /// Read whitespace-separated delays (seconds) from \p istream.
  void ReadDistribution (std::istream &istream);
  /// Set the number of events to run per benchmark pass.
  void SetTotal (uint32_t total);
  /// Schedule the distribution, run the simulator, print timing stats.
  void RunBench (void);
private:
  /// Event callback: reschedules itself using the next distribution entry.
  void Cb (void);
  std::vector<uint64_t> m_distribution; // delays, in nanoseconds
  std::vector<uint64_t>::const_iterator m_current; // next delay to schedule
  uint32_t m_n;     // events executed so far
  uint32_t m_total; // event budget per run
};
// Start with zero events executed and an empty event budget.
Bench::Bench ()
  : m_n (0),
    m_total (0)
{}
void
Bench::SetTotal (uint32_t total)
{
  // Upper bound on the number of events executed per RunBench pass.
  m_total = total;
}
void
Bench::ReadDistribution (std::istream &input)
{
  // Read whitespace-separated floating-point delays (in seconds) and store
  // them as nanoseconds.  A token that fails to parse as a double is
  // skipped: clear the stream's fail state and consume one word, then
  // continue until end-of-file.
  double data;
  while (!input.eof ())
    {
      if (input >> data)
        {
          uint64_t ns = (uint64_t) (data * 1000000000);
          m_distribution.push_back (ns);
        }
      else
        {
          input.clear ();
          std::string line;
          input >> line;
        }
    }
}
void
Bench::RunBench (void)
{
  // Two phases are timed separately with a wall clock:
  //  1. "init": scheduling every distribution entry (insert cost),
  //  2. "simu": running the simulator until the event chain started by
  //     Cb() stops (hold cost).
  // SystemWallClockMs reports milliseconds; divide by 1000 for seconds.
  SystemWallClockMs time;
  double init, simu;
  time.Start ();
  for (std::vector<uint64_t>::const_iterator i = m_distribution.begin ();
       i != m_distribution.end (); i++)
    {
      Simulator::Schedule (NanoSeconds (*i), &Bench::Cb, this);
    }
  init = time.End ();
  init /= 1000;
  m_current = m_distribution.begin ();
  time.Start ();
  Simulator::Run ();
  simu = time.End ();
  simu /= 1000;
  // Report totals plus per-operation averages for both phases.
  std::cout <<
    "init n=" << m_distribution.size () << ", time=" << init << "s" << std::endl <<
    "simu n=" << m_n << ", time=" <<simu << "s" << std::endl <<
    "init " << ((double)m_distribution.size ()) / init << " insert/s, avg insert=" <<
    init / ((double)m_distribution.size ())<< "s" << std::endl <<
    "simu " << ((double)m_n) / simu<< " hold/s, avg hold=" <<
    simu / ((double)m_n) << "s" << std::endl
    ;
}
void
Bench::Cb (void)
{
  // Self-rescheduling event callback: keeps one event in flight, cycling
  // through the distribution (wrapping at the end) until m_total events
  // have executed, at which point the chain stops and Simulator::Run
  // returns.
  if (m_n > m_total)
    {
      return;
    }
  if (m_current == m_distribution.end ())
    {
      m_current = m_distribution.begin ();
    }
  if (g_debug)
    {
      std::cerr << "event at " << Simulator::Now ().GetSeconds () << "s" << std::endl;
    }
  Simulator::Schedule (NanoSeconds (*m_current), &Bench::Cb, this);
  m_current++;
  m_n++;
}
/**
 * Print command-line usage to stdout.
 *
 * Fixes the "cheduler" typo and documents the --calendar, --total= and
 * --n= options which main() already parses but the help text omitted.
 */
void
PrintHelp (void)
{
  std::cout << "bench-simulator filename [options]" << std::endl;
  std::cout << "  filename: a string which identifies the input distribution. \"-\" represents stdin." << std::endl;
  std::cout << "  Options:" << std::endl;
  std::cout << "      --list: use std::list scheduler" << std::endl;
  std::cout << "      --map: use std::map scheduler" << std::endl;
  std::cout << "      --heap: use Binary Heap scheduler" << std::endl;
  std::cout << "      --calendar: use Calendar Queue scheduler" << std::endl;
  std::cout << "      --total=<n>: number of events to run per benchmark pass" << std::endl;
  std::cout << "      --n=<n>: number of benchmark passes to run" << std::endl;
  std::cout << "      --debug: enable some debugging" << std::endl;
}
int main (int argc, char *argv[])
{
char const *filename = argv[1];
std::istream *input;
uint32_t n = 1;
uint32_t total = 20000;
if (argc == 1)
{
PrintHelp ();
return 0;
}
argc-=2;
argv+= 2;
if (strcmp (filename, "-") == 0)
{
input = &std::cin;
}
else
{
input = new std::ifstream (filename);
}
while (argc > 0)
{
ObjectFactory factory;
if (strcmp ("--list", argv[0]) == 0)
{
factory.SetTypeId ("ns3::ListScheduler");
Simulator::SetScheduler (factory);
}
else if (strcmp ("--heap", argv[0]) == 0)
{
factory.SetTypeId ("ns3::HeapScheduler");
Simulator::SetScheduler (factory);
}
else if (strcmp ("--map", argv[0]) == 0)
{
factory.SetTypeId ("ns3::HeapScheduler");
Simulator::SetScheduler (factory);
}
else if (strcmp ("--calendar", argv[0]) == 0)
{
factory.SetTypeId ("ns3::CalendarScheduler");
Simulator::SetScheduler (factory);
}
else if (strcmp ("--debug", argv[0]) == 0)
{
g_debug = true;
}
else if (strncmp ("--total=", argv[0], strlen("--total=")) == 0)
{
total = atoi (argv[0]+strlen ("--total="));
}
else if (strncmp ("--n=", argv[0], strlen("--n=")) == 0)
{
n = atoi (argv[0]+strlen ("--n="));
}
argc--;
argv++;
}
Bench *bench = new Bench ();
bench->ReadDistribution (*input);
bench->SetTotal (total);
for (uint32_t i = 0; i < n; i++)
{
bench->RunBench ();
}
return 0;
}
| zy901002-gpsr | utils/bench-simulator.cc | C++ | gpl2 | 5,201 |
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import os.path
def build(bld):
    """waf build hook: declare the utility programs of this directory."""
    env = bld.env
    test_runner = bld.create_ns3_program('test-runner', ['core'])
    test_runner.install_path = None # do not install
    test_runner.source = 'test-runner.cc'
    # Set the libraries the testrunner depends on equal to the list of
    # enabled modules plus the list of enabled module test libraries.
    test_runner.use = [mod for mod in (env['NS3_ENABLED_MODULES'] + env['NS3_ENABLED_MODULE_TEST_LIBRARIES'])]
    obj = bld.create_ns3_program('bench-simulator', ['core'])
    obj.source = 'bench-simulator.cc'
    # Because the list of enabled modules must be set before
    # test-runner can be built, this directory is parsed by the top
    # level wscript file after all of the other program module
    # dependencies have been handled.
    #
    # So, make sure that the network module is enabled before building
    # these programs.
    if 'ns3-network' in env['NS3_ENABLED_MODULES']:
        obj = bld.create_ns3_program('bench-packets', ['network'])
        obj.source = 'bench-packets.cc'
        obj = bld.create_ns3_program('print-introspected-doxygen', ['network'])
        obj.source = 'print-introspected-doxygen.cc'
        # print-introspected-doxygen introspects every enabled module.
        obj.use = [mod for mod in env['NS3_ENABLED_MODULES']]
| zy901002-gpsr | utils/wscript | Python | gpl2 | 1,339 |
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2009 University of Washington
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ns3/test.h"
int main (int argc, char *argv[])
{
  // Delegate entirely to the ns-3 test framework; its return value
  // becomes the process exit status.
  return ns3::TestRunner::Run (argc, argv);
}
| zy901002-gpsr | utils/test-runner.cc | C++ | gpl2 | 877 |
#include <iostream>
#include <algorithm>
#include <map>
#include "ns3/object.h"
#include "ns3/pointer.h"
#include "ns3/object-vector.h"
#include "ns3/config.h"
#include "ns3/log.h"
#include "ns3/global-value.h"
#include "ns3/string.h"
#include "ns3/node-container.h"
using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("PrintIntrospectedDoxygen");
namespace
{
  // Markup fragments used when formatting the generated documentation.
  // Their values are assigned elsewhere (not visible in this chunk),
  // presumably switching between Doxygen markup and a plain-text
  // equivalent -- NOTE(review): confirm against the rest of the file.
  std::string anchor;
  std::string boldStart;
  std::string boldStop;
  std::string breakBoth;
  std::string breakHtmlOnly;
  std::string breakTextOnly;
  std::string brief;
  std::string commentStart;
  std::string commentStop;
  std::string defgroupAttributeListStart;
  std::string defgroupAttributeListStop;
  std::string defgroupGlobalValueListStart;
  std::string defgroupGlobalValueListStop;
  std::string defgroupTraceSourceListStart;
  std::string defgroupTraceSourceListStop;
  std::string functionStart;
  std::string functionStop;
  std::string indentHtmlOnly;
  std::string ingroupConstructs;
  std::string listStart;
  std::string listStop;
  std::string listLineStart;
  std::string listLineStop;
  std::string reference;
  std::string temporaryCharacter;
} // anonymous namespace
// Emit the attribute list registered on tid to os as a nested list:
// for each attribute its name, help text, checker class, underlying type
// (when available), initial value (when constructible) and access flags.
void
PrintAttributes (TypeId tid, std::ostream &os)
{
  os << listStart << std::endl;
  for (uint32_t j = 0; j < tid.GetAttributeN (); j++)
    {
      struct TypeId::AttributeInformation info = tid.GetAttribute(j);
      os << listLineStart << boldStart << info.name << boldStop << ": "
         << info.help << std::endl;
      // Nested sub-list with the attribute's type information.
      os << "  " << listStart << std::endl 
         << "    " << listLineStart << "Set with class: " << reference << info.checker->GetValueTypeName () << listLineStop << std::endl;
      if (info.checker->HasUnderlyingTypeInformation ())
        {
          os << "    " << listLineStart << "Underlying type: " << reference << info.checker->GetUnderlyingTypeInformation () << listLineStop << std::endl;
        }
      // The initial value is only meaningful for writable construct-time
      // attributes.
      if (info.flags & TypeId::ATTR_CONSTRUCT && info.accessor->HasSetter ())
        {
          os << "    " << listLineStart << "Initial value: " << info.initialValue->SerializeToString (info.checker) << listLineStop << std::endl;
        }
      // Access flags: only report capabilities the accessor really has.
      os << "    " << listLineStart << "Flags: ";
      if (info.flags & TypeId::ATTR_CONSTRUCT && info.accessor->HasSetter ())
        {
          os << "construct ";
        }
      if (info.flags & TypeId::ATTR_SET && info.accessor->HasSetter ())
        {
          os << "write ";
        }
      if (info.flags & TypeId::ATTR_GET && info.accessor->HasGetter ())
        {
          os << "read ";
        }
      os << listLineStop << std::endl;
      os << "  " << listStop << " " << std::endl;
    }
  os << listStop << std::endl;
}
/**
 * Emit the list of trace sources registered on \p tid to \p os, one list
 * line per source with its name in bold followed by its help text.
 */
void
PrintTraceSources (TypeId tid, std::ostream &os)
{
  uint32_t sourceCount = tid.GetTraceSourceN ();
  os << listStart << std::endl;
  for (uint32_t idx = 0; idx < sourceCount; ++idx)
    {
      struct TypeId::TraceSourceInformation source = tid.GetTraceSource (idx);
      os << listLineStart << boldStart << source.name << boldStop << ": " << source.help << std::endl;
      os << listLineStop << std::endl;
    }
  os << listStop << std::endl;
}
/**
 * Gathers configuration-path information for registered TypeIds by
 * walking attributes (pointers and object containers), subclasses and
 * recorded aggregation relationships, recording every config path under
 * which each TypeId is reachable.
 */
class StaticInformation
{
public:
  /// Record that the types named \p a and \p b are aggregated together.
  void RecordAggregationInfo (std::string a, std::string b);
  /// Walk the type graph rooted at \p tid; deduplicates the results.
  void Gather (TypeId tid);
  /// Print every recorded (TypeId, path) pair to std::cout.
  void Print (void) const;
  /// \return every recorded config path for \p tid.
  std::vector<std::string> Get (TypeId tid);
private:
  /// Join the current traversal path components into "/a/b/c" form.
  std::string GetCurrentPath (void) const;
  /// Recursive worker behind Gather().
  void DoGather (TypeId tid);
  /// Record \p tid as reachable at the current traversal path.
  void RecordOutput (TypeId tid);
  /// Cycle guard: true if \p tid is already on the current DFS stack.
  bool HasAlreadyBeenProcessed (TypeId tid) const;
  /// Replace every occurrence of \p find in \p source with \p replace.
  void find_and_replace (std::string &source, const std::string find, std::string replace );
  std::vector<std::pair<TypeId,std::string> > m_output; // (type, path) pairs
  std::vector<std::string> m_currentPath; // components of the current path
  std::vector<TypeId> m_alreadyProcessed; // DFS cycle guard
  std::vector<std::pair<TypeId,TypeId> > m_aggregates; // aggregation pairs
};
void
StaticInformation::RecordAggregationInfo (std::string a, std::string b)
{
  // Resolve both type names to TypeIds now and remember the pair so that
  // DoGather can traverse the aggregation in either direction.
  m_aggregates.push_back (std::make_pair (TypeId::LookupByName (a), TypeId::LookupByName (b)));
}
/**
 * Dump each recorded (TypeId, config path) pair to std::cout,
 * one "name -> path" line per entry.
 */
void
StaticInformation::Print (void) const
{
  for (uint32_t i = 0; i < m_output.size (); ++i)
    {
      std::cout << m_output[i].first.GetName () << " -> " << m_output[i].second << std::endl;
    }
}
/**
 * \return the current traversal path joined with '/' separators,
 * e.g. "/a/b/c" (empty string when no components are on the stack).
 */
std::string
StaticInformation::GetCurrentPath (void) const
{
  std::ostringstream path;
  for (uint32_t i = 0; i < m_currentPath.size (); ++i)
    {
      path << "/" << m_currentPath[i];
    }
  return path.str ();
}
void
StaticInformation::RecordOutput (TypeId tid)
{
  // Remember that tid is reachable at the current traversal path.
  m_output.push_back (std::make_pair (tid, GetCurrentPath ()));
}
/**
 * \return true if \p tid is already being processed by DoGather.
 *
 * Serves as the cycle guard for the recursive type-graph traversal.
 * Uses std::find (from <algorithm>, already included by this file) in
 * place of the former hand-rolled index loop; TypeId provides
 * operator==, so behavior is identical.
 */
bool
StaticInformation::HasAlreadyBeenProcessed (TypeId tid) const
{
  return std::find (m_alreadyProcessed.begin (), m_alreadyProcessed.end (), tid)
    != m_alreadyProcessed.end ();
}
/**
 * \return every config path recorded for \p tid, in recording order.
 */
std::vector<std::string>
StaticInformation::Get (TypeId tid)
{
  std::vector<std::string> matches;
  for (std::vector<std::pair<TypeId,std::string> >::const_iterator it = m_output.begin ();
       it != m_output.end (); ++it)
    {
      if (it->first == tid)
        {
          matches.push_back (it->second);
        }
    }
  return matches;
}
void
StaticInformation::Gather (TypeId tid)
{
  // Walk the type graph rooted at tid, then canonicalize the results:
  // sort + unique + erase removes duplicate (TypeId, path) pairs.
  DoGather (tid);
  std::sort (m_output.begin (), m_output.end ());
  m_output.erase (std::unique (m_output.begin (), m_output.end ()), m_output.end ());
}
void
StaticInformation::DoGather (TypeId tid)
{
  NS_LOG_FUNCTION (this);
  // Depth-first walk over the TypeId graph.  The m_alreadyProcessed
  // stack breaks cycles: a TypeId currently on the recursion path is
  // not re-entered.
  if (HasAlreadyBeenProcessed (tid))
    {
      return;
    }
  RecordOutput (tid);
  // 1) Follow attributes that point at other objects: Pointer
  //    attributes lead directly to the pointee type, ObjectPtrContainer
  //    attributes lead to the item type with an "/[i]" index component.
  for (uint32_t i = 0; i < tid.GetAttributeN (); ++i)
    {
      struct TypeId::AttributeInformation info = tid.GetAttribute(i);
      const PointerChecker *ptrChecker = dynamic_cast<const PointerChecker *> (PeekPointer (info.checker));
      if (ptrChecker != 0)
        {
          TypeId pointee = ptrChecker->GetPointeeTypeId ();
          m_currentPath.push_back (info.name);
          m_alreadyProcessed.push_back (tid);
          DoGather (pointee);
          m_alreadyProcessed.pop_back ();
          m_currentPath.pop_back ();
          continue;
        }
      // attempt to cast to an object vector.
      const ObjectPtrContainerChecker *vectorChecker = dynamic_cast<const ObjectPtrContainerChecker *> (PeekPointer (info.checker));
      if (vectorChecker != 0)
        {
          TypeId item = vectorChecker->GetItemTypeId ();
          m_currentPath.push_back (info.name + "/[i]");
          m_alreadyProcessed.push_back (tid);
          DoGather (item);
          m_alreadyProcessed.pop_back ();
          m_currentPath.pop_back ();
          continue;
        }
    }
  // 2) Recurse into every registered subclass of tid; the path
  //    component is the escaped class name (see note below for the
  //    "$" / temporaryCharacter escaping used in the Doxygen output).
  for (uint32_t j = 0; j < TypeId::GetRegisteredN (); j++)
    {
      TypeId child = TypeId::GetRegistered (j);
      if (child.IsChildOf (tid))
        {
          //please take a look at the following note for an explanation
          std::string childName = "$" + temporaryCharacter + child.GetName ();
          std::string replaceWith = "::" + temporaryCharacter;
          find_and_replace(childName,"::",replaceWith);
          m_currentPath.push_back (childName);
          m_alreadyProcessed.push_back (tid);
          DoGather (child);
          m_alreadyProcessed.pop_back ();
          m_currentPath.pop_back ();
        }
    }
  // 3) Recurse into types recorded (via RecordAggregationInfo) as
  //    aggregated with tid, in either direction of the pair.
  for (uint32_t k = 0; k < m_aggregates.size (); ++k)
    {
      std::pair<TypeId,TypeId> tmp = m_aggregates[k];
      if (tmp.first == tid || tmp.second == tid)
        {
          TypeId other;
          if (tmp.first == tid)
            {
              other = tmp.second;
            }
          if (tmp.second == tid)
            {
              other = tmp.first;
            }
          /**
           * Note: for the Doxygen version only, we insert a % in the
           * path below to ensure that doxygen does not attempt to
           * resolve the typeid names included in the string. if the
           * name contains ::, using the % sign will remove that sign
           * resulting for instance in $ns3MobilityModel instead of
           * $ns3::MobilityModel hence the output must be in the form
           * $%ns3::%MobilityModel in order to show correctly
           * $ns3::MobilityModel We add at the beginning of the name
           * $% and we replace all the :: in the string by ::%.
           */
          std::string name = "$" + temporaryCharacter + other.GetName ();
          //finding and replacing :: by ::% (for Doxygen version only).
          std::string replaceWith = "::" + temporaryCharacter;
          find_and_replace(name,"::",replaceWith);
          m_currentPath.push_back (name);
          m_alreadyProcessed.push_back (tid);
          DoGather (other);
          m_alreadyProcessed.pop_back ();
          m_currentPath.pop_back ();
        }
    }
}
void
StaticInformation::find_and_replace( std::string &source, const std::string find, std::string replace )
{
  // Replace every occurrence of 'find' in 'source' with 'replace'.
  //
  // Fix: resume the search *after* the text just inserted.  The
  // previous code resumed at j+1, which re-scans the replacement and
  // loops forever whenever 'replace' itself starts with 'find'
  // (e.g. find="a", replace="aa").  The callers in this file use
  // find="::" and replace="::%", for which both variants produce the
  // same output, so behavior for existing callers is unchanged.
  if (find.empty ())
    {
      // An empty needle would match at every position; nothing to do.
      return;
    }
  size_t j;
  j = source.find (find);
  while (j != std::string::npos )
    {
      source.replace (j, find.length (),replace);
      j = source.find (find, j + replace.length ());
    }
}
// Print command-line usage for this tool to stdout.
// \param program_name argv[0], echoed in the usage line.
void
PrintHelp (const char *program_name)
{
  std::cout << "Usage: " << program_name << " [options]" << std::endl
            << std::endl
            << "Options:" << std::endl
            << "  --help        : print these options" << std::endl
            << "  --output-text : format output as plain text" << std::endl;
}
// Entry point: introspects every registered ns-3 TypeId and prints
// Doxygen markup (default) or plain text (--output-text) describing
// each type's attributes, trace sources and config paths.
int main (int argc, char *argv[])
{
  bool outputText = false;
  char *programName = argv[0];
  argv++;
  // Hand-rolled argument parsing: any unrecognized argument prints
  // usage and exits.
  while (*argv != 0)
    {
      char *arg = *argv;
      if (strcmp (arg, "--help") == 0)
        {
          PrintHelp (programName);
          return 0;
        }
      else if (strcmp(arg, "--output-text") == 0)
        {
          outputText = true;
        }
      else
        {
          // un-recognized command-line argument
          PrintHelp (programName);
          return 0;
        }
      argv++;
    }
  // Select the markup fragments used throughout the output: empty or
  // newline strings for plain text, Doxygen/HTML tokens otherwise.
  if (outputText)
    {
      anchor = "";
      boldStart = "";
      boldStop = "";
      breakBoth = "\n";
      breakHtmlOnly = "";
      breakTextOnly = "\n";
      brief = "";
      commentStart = "===============================================================\n";
      commentStop = "";
      defgroupAttributeListStart = "";
      defgroupAttributeListStop = "\n";
      defgroupGlobalValueListStart = "";
      defgroupGlobalValueListStop = "";
      defgroupTraceSourceListStart = "";
      defgroupTraceSourceListStop = "\n";
      functionStart = "";
      functionStop = "\n\n";
      indentHtmlOnly = "";
      ingroupConstructs = "";
      listStart = "";
      listStop = "";
      listLineStart = "    * ";
      listLineStop = "";
      reference = "";
      temporaryCharacter = "";
    }
  else
    {
      anchor = "\\anchor ";
      boldStart = "<b>";
      boldStop = "</b>";
      breakBoth = "<br>";
      breakHtmlOnly = "<br>";
      breakTextOnly = "";
      brief = "\\brief ";
      commentStart = "/*!";
      commentStop = "*/";
      defgroupAttributeListStart = "\\defgroup AttributeList ";
      defgroupAttributeListStop = "";
      defgroupGlobalValueListStart = "\\defgroup GlobalValueList ";
      defgroupGlobalValueListStop = "";
      defgroupTraceSourceListStart = "\\defgroup TraceSourceList ";
      defgroupTraceSourceListStop = "";
      functionStart = "\\fn ";
      functionStop = "";
      indentHtmlOnly = "  ";
      ingroupConstructs = "\\ingroup constructs\n";
      listStart = "<ul>";
      listStop = "</ul>";
      listLineStart = "<li>";
      listLineStop = "</li>";
      reference = "\\ref ";
      temporaryCharacter = "%";
    }
  // A single Node is created so that the Config root namespace has at
  // least one object to introspect from.
  NodeContainer c; c.Create (1);
  StaticInformation info;
  // Seed the aggregation relationships that cannot be discovered by
  // walking attributes alone.
  info.RecordAggregationInfo ("ns3::Node", "ns3::TcpSocketFactory");
  info.RecordAggregationInfo ("ns3::Node", "ns3::UdpSocketFactory");
  info.RecordAggregationInfo ("ns3::Node", "ns3::PacketSocketFactory");
  info.RecordAggregationInfo ("ns3::Node", "ns3::olsr::RoutingProtocol");
  info.RecordAggregationInfo ("ns3::Node", "ns3::MobilityModel");
  info.RecordAggregationInfo ("ns3::Node", "ns3::Ipv4L3Protocol");
  info.RecordAggregationInfo ("ns3::Node", "ns3::ArpL3Protocol");
  for (uint32_t i = 0; i < Config::GetRootNamespaceObjectN (); ++i)
    {
      Ptr<Object> object = Config::GetRootNamespaceObject (i);
      info.Gather (object->GetInstanceTypeId ());
    }
  std::map< std::string, uint32_t> nameMap;
  std::map< std::string, uint32_t>::const_iterator nameMapIterator;
  // Create a map from the class names to their index in the vector of
  // TypeId's so that the names will end up in alphabetical order.
  for (uint32_t i = 0; i < TypeId::GetRegisteredN (); i++)
    {
      TypeId tid = TypeId::GetRegistered (i);
      if (tid.MustHideFromDocumentation ())
        {
          continue;
        }
      // Capitalize all of letters in the name so that it sorts
      // correctly in the map.
      std::string name = tid.GetName ();
      for (uint32_t j = 0; j < name.length (); j++)
        {
          name[j] = toupper (name[j]);
        }
      // Save this name's index.
      nameMap[name] = i;
    }
  // Iterate over the map, which will print the class names in
  // alphabetical order.
  for (nameMapIterator = nameMap.begin (); nameMapIterator != nameMap.end (); nameMapIterator++)
    {
      // Get the class's index out of the map;
      uint32_t i = nameMapIterator->second;
      std::cout << commentStart << std::endl;
      TypeId tid = TypeId::GetRegistered (i);
      // NOTE(review): this check looks redundant -- hidden TypeIds were
      // already skipped when nameMap was built above -- and if it ever
      // did trigger, the commentStart just printed would be left
      // unterminated.  Confirm before removing.
      if (tid.MustHideFromDocumentation ())
        {
          continue;
        }
      std::cout << functionStart << "static TypeId " << tid.GetName () << "::GetTypeId (void)" << functionStop << std::endl;
      std::cout << brief << "This method returns the TypeId associated to " << reference << tid.GetName () << "."
                << std::endl << std::endl;
      std::vector<std::string> paths = info.Get (tid);
      if (!paths.empty ())
        {
          std::cout << "This object is accessible through the following paths with Config::Set and Config::Connect:"
                    << std::endl;
          std::cout << listStart << std::endl;
          for (uint32_t k = 0; k < paths.size (); ++k)
            {
              std::string path = paths[k];
              std::cout << listLineStart << path << listLineStop << breakTextOnly << std::endl;
            }
          std::cout << listStop << std::endl;
        }
      if (tid.GetAttributeN () == 0)
        {
          std::cout << "No Attributes defined for this type." << breakBoth << std::endl;
        }
      else
        {
          std::cout << "Attributes defined for this type:" << breakHtmlOnly << std::endl;
          PrintAttributes (tid, std::cout);
        }
      // Also report attributes inherited from every ancestor class,
      // walking up until the root (whose parent is itself).
      {
        TypeId tmp = tid.GetParent ();
        while (tmp.GetParent () != tmp)
          {
            if (tmp.GetAttributeN () != 0)
              {
                std::cout << "Attributes defined in parent class " << tmp.GetName () << ":" << breakHtmlOnly << std::endl;
                PrintAttributes (tmp, std::cout);
              }
            tmp = tmp.GetParent ();
          }
      }
      if (tid.GetTraceSourceN () == 0)
        {
          std::cout << "No TraceSources defined for this type." << breakBoth << std::endl;
        }
      else
        {
          std::cout << "TraceSources defined for this type:" << breakHtmlOnly << std::endl;
          PrintTraceSources (tid, std::cout);
        }
      // Same ancestor walk for trace sources.
      {
        TypeId tmp = tid.GetParent ();
        while (tmp.GetParent () != tmp)
          {
            if (tmp.GetTraceSourceN () != 0)
              {
                std::cout << "TraceSources defined in parent class " << tmp.GetName () << ":" << breakHtmlOnly << std::endl;
                PrintTraceSources (tmp, std::cout);
              }
            tmp = tmp.GetParent ();
          }
      }
      std::cout << commentStop << std::endl;
    }
  // Global index: all trace sources of all registered (visible) types.
  std::cout << commentStart << std::endl
            << ingroupConstructs
            << defgroupTraceSourceListStart << "The list of all trace sources." << defgroupTraceSourceListStop << std::endl;
  for (uint32_t i = 0; i < TypeId::GetRegisteredN (); ++i)
    {
      TypeId tid = TypeId::GetRegistered (i);
      if (tid.GetTraceSourceN () == 0 ||
          tid.MustHideFromDocumentation ())
        {
          continue;
        }
      std::cout << boldStart << tid.GetName () << boldStop << breakHtmlOnly << std::endl
                << listStart << std::endl;
      for (uint32_t j = 0; j < tid.GetTraceSourceN (); ++j)
        {
          struct TypeId::TraceSourceInformation info = tid.GetTraceSource(j);
          std::cout << listLineStart << info.name << ": " << info.help << listLineStop << std::endl;
        }
      std::cout << listStop << std::endl;
    }
  std::cout << commentStop << std::endl;
  // Global index: all attributes of all registered (visible) types.
  std::cout << commentStart << std::endl
            << ingroupConstructs
            << defgroupAttributeListStart << "The list of all attributes." << defgroupAttributeListStop << std::endl;
  for (uint32_t i = 0; i < TypeId::GetRegisteredN (); ++i)
    {
      TypeId tid = TypeId::GetRegistered (i);
      if (tid.GetAttributeN () == 0 ||
          tid.MustHideFromDocumentation ())
        {
          continue;
        }
      std::cout << boldStart << tid.GetName () << boldStop << breakHtmlOnly << std::endl
                << listStart << std::endl;
      for (uint32_t j = 0; j < tid.GetAttributeN (); ++j)
        {
          struct TypeId::AttributeInformation info = tid.GetAttribute(j);
          std::cout << listLineStart << info.name << ": " << info.help << listLineStop << std::endl;
        }
      std::cout << listStop << std::endl;
    }
  std::cout << commentStop << std::endl;
  // Global index: all GlobalValues, with their current value appended.
  std::cout << commentStart << std::endl
            << ingroupConstructs
            << defgroupGlobalValueListStart << "The list of all global values." << defgroupGlobalValueListStop << std::endl
            << listStart << std::endl;
  for (GlobalValue::Iterator i = GlobalValue::Begin (); i != GlobalValue::End (); ++i)
    {
      StringValue val;
      (*i)->GetValue (val);
      std::cout << indentHtmlOnly << listLineStart << boldStart << anchor << "GlobalValue" << (*i)->GetName () << " " << (*i)->GetName () << boldStop << ": " << (*i)->GetHelp () << "(" << val.Get () << ")" << listLineStop << std::endl;
    }
  std::cout << listStop << std::endl
            << commentStop << std::endl;
  return 0;
}
| zy901002-gpsr | utils/print-introspected-doxygen.cc | C++ | gpl2 | 18,399 |
#!/usr/bin/env perl
# Emit 10000 samples drawn uniformly at random between 0 and 10000000,
# one value per line on stdout.  Requires the CPAN Math::Random module.
use Math::Random qw(:all);
@values = random_uniform (10000, 0, 10000000);
foreach $value (@values) {
    print $value . "\n";
}
| zy901002-gpsr | utils/generate-distributions.pl | Perl | gpl2 | 151 |
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# python lib modules
import sys
import shutil
import types
import optparse
import os.path
import re
import shlex
import textwrap
# WAF modules
import subprocess
import Options
import Logs
import TaskGen
import Task
import Utils
import Build
import Configure
import Scripting
from waflib.Errors import WafError
from utils import read_config_file
# By default, all modules will be enabled, examples will be disabled,
# and tests will be disabled.
modules_enabled = ['all_modules']
examples_enabled = False
tests_enabled = False
# Get the information out of the NS-3 configuration file.
config_file_exists = False
(config_file_exists, modules_enabled, examples_enabled, tests_enabled) = read_config_file()
# Temporarily put waf-tools first on sys.path so that our cflags
# module shadows waf's own.
sys.path.insert(0, os.path.abspath('waf-tools'))
try:
    import cflags # override the build profiles from waf
finally:
    sys.path.pop(0)
cflags.profiles = {
    # profile name: [optimization_level, warnings_level, debug_level]
    'debug': [0, 2, 3],
    'optimized': [3, 2, 1],
    'release': [3, 2, 0],
    }
cflags.default_profile = 'debug'
# local modules
import wutils
Configure.autoconfig = 0
# until http://code.google.com/p/waf/issues/detail?id=1039 gets fixed...
wutils.monkey_patch_Runner_start()
# the following two variables are used by the target "waf dist"
# NOTE: file() is the Python 2 built-in; this script is Python 2 only.
VERSION = file("VERSION", "rt").read().strip()
APPNAME = 'ns'
wutils.VERSION = VERSION
wutils.APPNAME = APPNAME
# note: here we disable the VNUM for OSX since it causes problems (bug #1251)
wutils.VNUM = None
if sys.platform != 'darwin' and re.match(r"^\d+\.\d+(\.\d+)?$", VERSION) is not None:
    wutils.VNUM = VERSION
# these variables are mandatory ('/' are converted automatically)
top = '.'
out = 'build'
def load_env():
    """Load and return the persisted waf build environment.

    NOTE(review): 'srcdir' and 'blddir' are not defined anywhere in
    this file (they are the old waf 1.5 names for 'top'/'out'); this
    helper looks stale and would raise NameError if called -- confirm
    against callers before relying on it.
    """
    bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context)
    bld_ctx = bld_cls()
    bld_ctx.load_dirs(os.path.abspath(os.path.join (srcdir,'..')),
                      os.path.abspath(os.path.join (srcdir,'..', blddir)))
    bld_ctx.load_envs()
    env = bld_ctx.get_env()
    return env
def get_files(base_dir):
    """Recursively list files under base_dir, skipping anything under
    a mercurial (.hg) directory.

    Returns a list of [absolute_path, relative_path] pairs, where the
    relative path is taken from the parent directory of base_dir.
    """
    file_list = []
    parent = os.path.dirname(base_dir)
    for dirpath, subdirs, filenames in os.walk(base_dir):
        if '.hg' in dirpath:
            continue
        for fname in filenames:
            if '.hg' in fname:
                continue
            full = os.path.join(dirpath, fname)
            # we can't use os.path.relpath because it's new in python 2.6
            rel = full.replace(parent + '/', '')
            file_list.append([full, rel])
    return file_list
def dist_hook():
    """Prune generated artifacts before 'waf dist' creates the tarball.

    Removes the doxygen output and the NSC sources; errors (e.g. the
    directories not existing) are ignored via rmtree's second argument.
    Note: the previous 'import tarfile' was unused and has been removed.
    """
    shutil.rmtree("doc/html", True)
    shutil.rmtree("doc/latex", True)
    shutil.rmtree("nsc", True)
# Print the sorted list of module names in columns.
# NOTE: sorts the caller's list in place (Python 2 print statements).
def print_module_names(names):
    # Sort the list of module names.
    names.sort()
    # Print the list of module names in 3 columns.
    i = 1
    for name in names:
        print name.ljust(25),
        if i == 3:
            print
            i = 0
        i = i+1
    # Terminate the last, possibly partial, row.
    if i != 1:
        print
def options(opt):
    """Register all ns-3 command-line options with waf."""
    # options provided by the modules
    opt.load('compiler_c')
    opt.load('compiler_cxx')
    opt.load('cflags')
    opt.load('gnu_dirs')
    # -- run / debug helpers --------------------------------------------
    opt.add_option('--cwd',
                   help=('Set the working directory for a program.'),
                   action="store", type="string", default=None,
                   dest='cwd_launch')
    opt.add_option('--enable-gcov',
                   help=('Enable code coverage analysis.'
                         ' WARNING: this option only has effect '
                         'with the configure command.'),
                   action="store_true", default=False,
                   dest='enable_gcov')
    opt.add_option('--no-task-lines',
                   help=("Don't print task lines, i.e. messages saying which tasks are being executed by WAF."
                         " Coupled with a single -v will cause WAF to output only the executed commands,"
                         " just like 'make' does by default."),
                   action="store_true", default=False,
                   dest='no_task_lines')
    opt.add_option('--lcov-report',
                   help=('Generate a code coverage report '
                         '(use this option at build time, not in configure)'),
                   action="store_true", default=False,
                   dest='lcov_report')
    opt.add_option('--run',
                   help=('Run a locally built program; argument can be a program name,'
                         ' or a command starting with the program name.'),
                   type="string", default='', dest='run')
    opt.add_option('--visualize',
                   help=('Modify --run arguments to enable the visualizer'),
                   action="store_true", default=False, dest='visualize')
    opt.add_option('--command-template',
                   help=('Template of the command used to run the program given by --run;'
                         ' It should be a shell command string containing %s inside,'
                         ' which will be replaced by the actual program.'),
                   type="string", default=None, dest='command_template')
    opt.add_option('--pyrun',
                   help=('Run a python program using locally built ns3 python module;'
                         ' argument is the path to the python program, optionally followed'
                         ' by command-line options that are passed to the program.'),
                   type="string", default='', dest='pyrun')
    opt.add_option('--valgrind',
                   help=('Change the default command template to run programs and unit tests with valgrind'),
                   action="store_true", default=False,
                   dest='valgrind')
    opt.add_option('--shell',
                   help=('DEPRECATED (run ./waf shell)'),
                   action="store_true", default=False,
                   dest='shell')
    opt.add_option('--enable-sudo',
                   help=('Use sudo to setup suid bits on ns3 executables.'),
                   dest='enable_sudo', action='store_true',
                   default=False)
    # -- feature toggles (tests / examples / static / mpi) --------------
    opt.add_option('--enable-tests',
                   help=('Build the ns-3 tests.'),
                   dest='enable_tests', action='store_true',
                   default=False)
    opt.add_option('--disable-tests',
                   help=('Do not build the ns-3 tests.'),
                   dest='disable_tests', action='store_true',
                   default=False)
    opt.add_option('--enable-examples',
                   help=('Build the ns-3 examples.'),
                   dest='enable_examples', action='store_true',
                   default=False)
    opt.add_option('--disable-examples',
                   help=('Do not build the ns-3 examples.'),
                   dest='disable_examples', action='store_true',
                   default=False)
    opt.add_option('--check',
                   help=('DEPRECATED (run ./test.py)'),
                   default=False, dest='check', action="store_true")
    opt.add_option('--enable-static',
                   help=('Compile NS-3 statically: works only on linux, without python'),
                   dest='enable_static', action='store_true',
                   default=False)
    opt.add_option('--enable-mpi',
                   help=('Compile NS-3 with MPI and distributed simulation support'),
                   dest='enable_mpi', action='store_true',
                   default=False)
    opt.add_option('--doxygen-no-build',
                   help=('Run doxygen to generate html documentation from source comments, '
                         'but do not wait for ns-3 to finish the full build.'),
                   action="store_true", default=False,
                   dest='doxygen_no_build')
    # options provided in subdirectories
    opt.sub_options('src')
    opt.sub_options('bindings/python')
    opt.sub_options('src/internet')
def _check_compilation_flag(conf, flag, mode='cxx', linkflags=None):
    """
    Checks if the C++ compiler accepts a certain compilation flag or flags
    flag: can be a string or a list of strings

    Returns True if a trivial program compiles (and links, when
    linkflags are given) with the flag(s) applied, False otherwise.
    """
    l = []
    if flag:
        l.append(flag)
    if isinstance(linkflags, list):
        l.extend(linkflags)
    else:
        if linkflags:
            l.append(linkflags)
    if len(l) > 1:
        flag_str = 'flags ' + ' '.join(l)
    else:
        flag_str = 'flag ' + ' '.join(l)
    # Truncate long flag lists in the progress message.  Bug fix: the
    # original compared the string itself to 28 ("flag_str > 28"),
    # which is meaningless in Python 2 and a TypeError in Python 3;
    # the intent is clearly a length check.
    if len(flag_str) > 28:
        flag_str = flag_str[:28] + "..."
    conf.start_msg('Checking for compilation %s support' % (flag_str,))
    env = conf.env.copy()
    if mode == 'cc':
        mode = 'c'
    if mode == 'cxx':
        fname = 'test.cc'
        env.append_value('CXXFLAGS', flag)
    else:
        fname = 'test.c'
        env.append_value('CFLAGS', flag)
    if linkflags is not None:
        env.append_value("LINKFLAGS", linkflags)
    try:
        retval = conf.run_c_code(code='#include <stdio.h>\nint main() { return 0; }\n',
                                 env=env, compile_filename=fname,
                                 features=[mode, mode+'program'], execute=False)
    except Configure.ConfigurationError:
        ok = False
    else:
        ok = (retval == 0)
    conf.end_msg(ok)
    return ok
def report_optional_feature(conf, name, caption, was_enabled, reason_not_enabled):
    # Record one (name, caption, enabled?, reason) tuple; the summary
    # printed at the end of configure() iterates over this list.
    conf.env.append_value('NS3_OPTIONAL_FEATURES', [(name, caption, was_enabled, reason_not_enabled)])
# starting with waf 1.6, conf.check() becomes fatal by default if the
# test fails, this alternative method makes the test non-fatal, as it
# was in waf <= 1.5
def _check_nonfatal(conf, *args, **kwargs):
    # Returns conf.check()'s result, or None when the check fails.
    try:
        return conf.check(*args, **kwargs)
    except conf.errors.ConfigurationError:
        return None
def configure(conf):
    """Waf configure step: detect compilers and optional features,
    decide which modules/examples/tests get built, and print a summary
    of all optional features at the end."""
    conf.check_tool("relocation", ["waf-tools"])
    # attach some extra methods
    conf.check_nonfatal = types.MethodType(_check_nonfatal, conf)
    conf.check_compilation_flag = types.MethodType(_check_compilation_flag, conf)
    conf.report_optional_feature = types.MethodType(report_optional_feature, conf)
    conf.env['NS3_OPTIONAL_FEATURES'] = []
    conf.check_tool('compiler_c')
    conf.check_tool('compiler_cxx')
    conf.check_tool('cflags', ['waf-tools'])
    try:
        conf.check_tool('pkgconfig', ['waf-tools'])
    except Configure.ConfigurationError:
        pass
    conf.check_tool('command', ['waf-tools'])
    conf.check_tool('gnu_dirs')
    env = conf.env
    # Code-coverage instrumentation (--enable-gcov).
    if Options.options.enable_gcov:
        env['GCOV_ENABLED'] = True
        env.append_value('CCFLAGS', '-fprofile-arcs')
        env.append_value('CCFLAGS', '-ftest-coverage')
        env.append_value('CXXFLAGS', '-fprofile-arcs')
        env.append_value('CXXFLAGS', '-ftest-coverage')
        env.append_value('LINKFLAGS', '-lgcov')
    if Options.options.build_profile == 'debug':
        env.append_value('DEFINES', 'NS3_ASSERT_ENABLE')
        env.append_value('DEFINES', 'NS3_LOG_ENABLE')
    env['PLATFORM'] = sys.platform
    # gcc/icc specific tuning and platform link flags.
    if conf.env['CXX_NAME'] in ['gcc', 'icc']:
        if Options.options.build_profile == 'release':
            env.append_value('CXXFLAGS', '-fomit-frame-pointer')
            if conf.check_compilation_flag('-march=native'):
                env.append_value('CXXFLAGS', '-march=native')
        if sys.platform == 'win32':
            env.append_value("LINKFLAGS", "-Wl,--enable-runtime-pseudo-reloc")
        elif sys.platform == 'cygwin':
            env.append_value("LINKFLAGS", "-Wl,--enable-auto-import")
        # Locate libstdc++ next to the compiler so that it can be found
        # at run time via NS3_MODULE_PATH.
        cxx, = env['CXX']
        p = subprocess.Popen([cxx, '-print-file-name=libstdc++.so'], stdout=subprocess.PIPE)
        libstdcxx_location = os.path.dirname(p.stdout.read().strip())
        p.wait()
        if libstdcxx_location:
            conf.env.append_value('NS3_MODULE_PATH', libstdcxx_location)
        if Options.platform in ['linux']:
            if conf.check_compilation_flag('-Wl,--soname=foo'):
                env['WL_SONAME_SUPPORTED'] = True
    # Static build support (--enable-static); the required linker flags
    # differ between Darwin and everything else.
    env['ENABLE_STATIC_NS3'] = False
    if Options.options.enable_static:
        if Options.platform == 'darwin':
            if conf.check_compilation_flag(flag=[], linkflags=['-Wl,-all_load']):
                conf.report_optional_feature("static", "Static build", True, '')
                env['ENABLE_STATIC_NS3'] = True
            else:
                conf.report_optional_feature("static", "Static build", False,
                                             "Link flag -Wl,-all_load does not work")
        else:
            if conf.check_compilation_flag(flag=[], linkflags=['-Wl,--whole-archive,-Bstatic', '-Wl,-Bdynamic,--no-whole-archive']):
                conf.report_optional_feature("static", "Static build", True, '')
                env['ENABLE_STATIC_NS3'] = True
            else:
                conf.report_optional_feature("static", "Static build", False,
                                             "Link flag -Wl,--whole-archive,-Bstatic does not work")
    conf.env['MODULES_NOT_BUILT'] = []
    conf.sub_config('src')
    # Set the list of enabled modules.
    if Options.options.enable_modules:
        # Use the modules explicitly enabled.
        conf.env['NS3_ENABLED_MODULES'] = ['ns3-'+mod for mod in
                                           Options.options.enable_modules.split(',')]
    else:
        # Use the enabled modules list from the ns3 configuration file.
        if modules_enabled[0] == 'all_modules':
            # Enable all modules if requested.
            conf.env['NS3_ENABLED_MODULES'] = conf.env['NS3_MODULES']
        else:
            # Enable the modules from the list.
            conf.env['NS3_ENABLED_MODULES'] = ['ns3-'+mod for mod in
                                               modules_enabled]
    # Add the template module to the list of enabled modules that
    # should not be built if this is a static build on Darwin. They
    # don't work there for the template module, and this is probably
    # because the template module has no source files.
    if conf.env['ENABLE_STATIC_NS3'] and sys.platform == 'darwin':
        conf.env['MODULES_NOT_BUILT'].append('template')
    # Remove these modules from the list of enabled modules.
    for not_built in conf.env['MODULES_NOT_BUILT']:
        not_built_name = 'ns3-' + not_built
        if not_built_name in conf.env['NS3_ENABLED_MODULES']:
            conf.env['NS3_ENABLED_MODULES'].remove(not_built_name)
            if not conf.env['NS3_ENABLED_MODULES']:
                raise WafError('Exiting because the ' + not_built + ' module can not be built and it was the only one enabled.')
    conf.sub_config('bindings/python')
    conf.sub_config('src/mpi')
    # for suid bits
    try:
        conf.find_program('sudo', var='SUDO')
    except WafError:
        pass
    why_not_sudo = "because we like it"
    if Options.options.enable_sudo and conf.env['SUDO']:
        env['ENABLE_SUDO'] = True
    else:
        env['ENABLE_SUDO'] = False
        if Options.options.enable_sudo:
            why_not_sudo = "program sudo not found"
        else:
            why_not_sudo = "option --enable-sudo not selected"
    conf.report_optional_feature("ENABLE_SUDO", "Use sudo to set suid bit", env['ENABLE_SUDO'], why_not_sudo)
    # Decide if tests will be built or not.
    if Options.options.enable_tests:
        # Tests were explicitly enabled.
        env['ENABLE_TESTS'] = True
        why_not_tests = "option --enable-tests selected"
    elif Options.options.disable_tests:
        # Tests were explicitly disabled.
        env['ENABLE_TESTS'] = False
        why_not_tests = "option --disable-tests selected"
    else:
        # Enable tests based on the ns3 configuration file.
        env['ENABLE_TESTS'] = tests_enabled
        if config_file_exists:
            why_not_tests = "based on configuration file"
        elif tests_enabled:
            why_not_tests = "defaults to enabled"
        else:
            why_not_tests = "defaults to disabled"
    conf.report_optional_feature("ENABLE_TESTS", "Build tests", env['ENABLE_TESTS'], why_not_tests)
    # Decide if examples will be built or not.
    if Options.options.enable_examples:
        # Examples were explicitly enabled.
        env['ENABLE_EXAMPLES'] = True
        why_not_examples = "option --enable-examples selected"
    elif Options.options.disable_examples:
        # Examples were explicitly disabled.
        env['ENABLE_EXAMPLES'] = False
        why_not_examples = "option --disable-examples selected"
    else:
        # Enable examples based on the ns3 configuration file.
        env['ENABLE_EXAMPLES'] = examples_enabled
        if config_file_exists:
            why_not_examples = "based on configuration file"
        elif examples_enabled:
            why_not_examples = "defaults to enabled"
        else:
            why_not_examples = "defaults to disabled"
    env['EXAMPLE_DIRECTORIES'] = []
    for dir in os.listdir('examples'):
        if dir.startswith('.') or dir == 'CVS':
            continue
        if os.path.isdir(os.path.join('examples', dir)):
            env['EXAMPLE_DIRECTORIES'].append(dir)
    conf.report_optional_feature("ENABLE_EXAMPLES", "Build examples", env['ENABLE_EXAMPLES'],
                                 why_not_examples)
    try:
        conf.find_program('valgrind', var='VALGRIND')
    except WafError:
        pass
    # These flags are used for the implicitly dependent modules.
    if env['ENABLE_STATIC_NS3']:
        if sys.platform == 'darwin':
            env.STATICLIB_MARKER = '-Wl,-all_load'
        else:
            env.STATICLIB_MARKER = '-Wl,--whole-archive,-Bstatic'
            env.SHLIB_MARKER = '-Wl,-Bdynamic,--no-whole-archive'
    # Optional GNU Scientific Library support via pkg-config.
    have_gsl = conf.pkg_check_modules('GSL', 'gsl', mandatory=False)
    conf.env['ENABLE_GSL'] = have_gsl
    conf.report_optional_feature("GSL", "GNU Scientific Library (GSL)",
                                 conf.env['ENABLE_GSL'],
                                 "GSL not found")
    if have_gsl:
        conf.env.append_value('DEFINES', "ENABLE_GSL")
    # for compiling C code, copy over the CXX* flags
    conf.env.append_value('CCFLAGS', conf.env['CXXFLAGS'])
    def add_gcc_flag(flag):
        # Add flag to both C and C++ flags, but only when the user has
        # not overridden the flags via the environment.
        if env['COMPILER_CXX'] == 'g++' and 'CXXFLAGS' not in os.environ:
            if conf.check_compilation_flag(flag, mode='cxx'):
                env.append_value('CXXFLAGS', flag)
        if env['COMPILER_CC'] == 'gcc' and 'CCFLAGS' not in os.environ:
            if conf.check_compilation_flag(flag, mode='cc'):
                env.append_value('CCFLAGS', flag)
    add_gcc_flag('-Wno-error=deprecated-declarations')
    add_gcc_flag('-fstrict-aliasing')
    add_gcc_flag('-Wstrict-aliasing')
    try:
        conf.find_program('doxygen', var='DOXYGEN')
    except WafError:
        pass
    # append user defined flags after all our ones
    for (confvar, envvar) in [['CCFLAGS', 'CCFLAGS_EXTRA'],
                              ['CXXFLAGS', 'CXXFLAGS_EXTRA'],
                              ['LINKFLAGS', 'LINKFLAGS_EXTRA'],
                              ['LINKFLAGS', 'LDFLAGS_EXTRA']]:
        if envvar in os.environ:
            value = shlex.split(os.environ[envvar])
            conf.env.append_value(confvar, value)
    # Write a summary of optional features status
    print "---- Summary of optional NS-3 features:"
    for (name, caption, was_enabled, reason_not_enabled) in conf.env['NS3_OPTIONAL_FEATURES']:
        if was_enabled:
            status = 'enabled'
        else:
            status = 'not enabled (%s)' % reason_not_enabled
        print "%-30s: %s" % (caption, status)
class SuidBuild_task(Task.TaskBase):
    """task that makes a binary Suid

    Runs after the link step and uses sudo to chown the executable to
    root and set its setuid bit (Python 2 syntax throughout).
    """
    after = 'link'
    def __init__(self, *args, **kwargs):
        super(SuidBuild_task, self).__init__(*args, **kwargs)
        self.m_display = 'build-suid'
        try:
            program_obj = wutils.find_program(self.generator.target, self.generator.env)
        except ValueError, ex:
            raise WafError(str(ex))
        # Resolve the on-disk path of the linked program once, up front.
        program_node = program_obj.path.find_or_declare(program_obj.target)
        self.filename = program_node.abspath()
    def run(self):
        # Returns non-zero (task failure) if either sudo command fails.
        print >> sys.stderr, 'setting suid bit on executable ' + self.filename
        if subprocess.Popen(['sudo', 'chown', 'root', self.filename]).wait():
            return 1
        if subprocess.Popen(['sudo', 'chmod', 'u+s', self.filename]).wait():
            return 1
        return 0
    def runnable_status(self):
        "RUN_ME SKIP_ME or ASK_LATER"
        # Skip when the binary is already owned by root (suid already set).
        st = os.stat(self.filename)
        if st.st_uid == 0:
            return Task.SKIP_ME
        else:
            return Task.RUN_ME
def create_suid_program(bld, name):
    """Create an ns-3 program task generator whose binary gets the suid
    bit set (via a SuidBuild task) when --enable-sudo was configured."""
    grp = bld.current_group
    bld.add_group() # this to make sure no two sudo tasks run at the same time
    program = bld.new_task_gen(features=['cxx', 'cxxprogram'])
    program.is_ns3_program = True
    program.module_deps = list()
    program.name = name
    program.target = name
    if bld.env['ENABLE_SUDO']:
        program.create_task("SuidBuild")
    bld.set_group(grp)
    return program
def create_ns3_program(bld, name, dependencies=('core',)):
    """Create a task generator for an ns-3 example/program.

    dependencies is a sequence of ns-3 module names (without the
    'ns3-' prefix); each one becomes a library this program links
    against.  Returns the waf task generator.
    """
    program = bld.new_task_gen(features=['cxx', 'cxxprogram'])
    program.is_ns3_program = True
    program.name = name
    program.target = program.name
    # Each of the modules this program depends on has its own library.
    program.ns3_module_dependencies = ['ns3-'+dep for dep in dependencies]
    program.includes = "# #/.."
    program.use = program.ns3_module_dependencies
    # Static builds need whole-archive link markers around the ns-3 libs.
    if program.env['ENABLE_STATIC_NS3']:
        if sys.platform == 'darwin':
            program.env.STLIB_MARKER = '-Wl,-all_load'
        else:
            program.env.STLIB_MARKER = '-Wl,--whole-archive,-Bstatic'
            program.env.SHLIB_MARKER = '-Wl,-Bdynamic,--no-whole-archive'
    return program
def register_ns3_script(bld, name, dependencies=('core',)):
    # Record a python script and the ns-3 module libraries it needs so
    # that 'waf --pyrun' can set up the environment for it later.
    ns3_module_dependencies = ['ns3-'+dep for dep in dependencies]
    bld.env.append_value('NS3_SCRIPT_DEPENDENCIES', [(name, ns3_module_dependencies)])
def add_examples_programs(bld):
    """Recurse into every subdirectory of examples/ when examples are
    enabled; hidden directories and CVS are skipped."""
    if not bld.env['ENABLE_EXAMPLES']:
        return
    for subdir in os.listdir('examples'):
        if subdir.startswith('.') or subdir == 'CVS':
            continue
        example_path = os.path.join('examples', subdir)
        if os.path.isdir(example_path):
            bld.add_subdirs(example_path)
def add_scratch_programs(bld):
    """Turn every entry of scratch/ into an ns-3 program linked against
    all enabled modules: a subdirectory builds all its sources, a bare
    .cc file builds a single-source program of the same name."""
    all_modules = [mod[len("ns3-"):] for mod in bld.env['NS3_ENABLED_MODULES']]
    for filename in os.listdir("scratch"):
        if filename.startswith('.') or filename == 'CVS':
            continue
        if os.path.isdir(os.path.join("scratch", filename)):
            obj = bld.create_ns3_program(filename, all_modules)
            obj.path = obj.path.find_dir('scratch').find_dir(filename)
            obj.find_sources_in_dirs('.')
            obj.target = filename
            obj.name = obj.target
        elif filename.endswith(".cc"):
            name = filename[:-len(".cc")]
            obj = bld.create_ns3_program(name, all_modules)
            obj.path = obj.path.find_dir('scratch')
            obj.source = filename
            obj.target = name
            obj.name = obj.target
def _get_all_task_gen(self):
for group in self.groups:
for taskgen in group:
yield taskgen
# ok, so WAF does not provide an API to prevent an
# arbitrary taskgen from running; we have to muck around with
# WAF internal state, something that might stop working if
# WAF is upgraded...
def _exclude_taskgen(self, taskgen):
for group in self.groups:
for tg1 in group:
if tg1 is taskgen:
group.remove(tg1)
break
else:
continue
break
def build(bld):
    """Main waf build command for ns-3: resolve module dependencies,
    register example/scratch programs, and prune task generators that
    belong to disabled modules."""
    env = bld.env
    # If the --enable-modules option was given, then print a warning
    # message and exit this function.
    if Options.options.enable_modules:
        Logs.warn("No modules were built. Use waf configure --enable-modules to enable modules.")
        return
    # Per-build bookkeeping lists, filled in while task generators are created.
    bld.env['NS3_MODULES_WITH_TEST_LIBRARIES'] = []
    bld.env['NS3_ENABLED_MODULE_TEST_LIBRARIES'] = []
    bld.env['NS3_SCRIPT_DEPENDENCIES'] = []
    bld.env['NS3_RUNNABLE_PROGRAMS'] = []
    bld.env['NS3_RUNNABLE_SCRIPTS'] = []
    wutils.bld = bld
    if Options.options.no_task_lines:
        import Runner
        # Silence WAF's per-task progress output.
        def null_printout(s):
            pass
        Runner.printout = null_printout
    Options.cwd_launch = bld.path.abspath()
    # Bind the ns-3 specific helpers as methods on the build context.
    bld.create_ns3_program = types.MethodType(create_ns3_program, bld)
    bld.register_ns3_script = types.MethodType(register_ns3_script, bld)
    bld.create_suid_program = types.MethodType(create_suid_program, bld)
    bld.__class__.all_task_gen = property(_get_all_task_gen)
    bld.exclude_taskgen = types.MethodType(_exclude_taskgen, bld)
    # process subfolders from here
    bld.add_subdirs('src')
    # If modules have been enabled, then set lists of enabled modules
    # and enabled module test libraries.
    if env['NS3_ENABLED_MODULES']:
        modules = env['NS3_ENABLED_MODULES']
        # Find out about additional modules that need to be enabled
        # due to dependency constraints.  Fixed-point iteration: keep
        # adding dependencies until no new module appears.
        changed = True
        while changed:
            changed = False
            for module in modules:
                module_obj = bld.get_tgen_by_name(module)
                if module_obj is None:
                    raise ValueError("module %s not found" % module)
                # Each enabled module has its own library.
                for dep in module_obj.use:
                    if not dep.startswith('ns3-'):
                        continue
                    if dep not in modules:
                        modules.append(dep)
                        changed = True
        env['NS3_ENABLED_MODULES'] = modules
        # If tests are being built, then set the list of the enabled
        # module test libraries.
        if env['ENABLE_TESTS']:
            for (mod, testlib) in bld.env['NS3_MODULES_WITH_TEST_LIBRARIES']:
                if mod in bld.env['NS3_ENABLED_MODULES']:
                    bld.env.append_value('NS3_ENABLED_MODULE_TEST_LIBRARIES', testlib)
    add_examples_programs(bld)
    add_scratch_programs(bld)
    if env['NS3_ENABLED_MODULES']:
        modules = env['NS3_ENABLED_MODULES']
        # Exclude the programs and other misc task gens that depend on disabled modules
        for obj in list(bld.all_task_gen):
            # check for ns3moduleheader_taskgen
            if 'ns3moduleheader' in getattr(obj, "features", []):
                if ("ns3-%s" % obj.module) not in modules:
                    obj.mode = 'remove' # tell it to remove headers instead of installing
            # check for programs
            if hasattr(obj, 'ns3_module_dependencies'):
                # this is an NS-3 program (bld.create_ns3_program)
                program_built = True
                for dep in obj.ns3_module_dependencies:
                    if dep not in modules: # prog. depends on a module that isn't enabled?
                        bld.exclude_taskgen(obj)
                        program_built = False
                        break
                # Add this program to the list if all of its
                # dependencies will be built.
                if program_built:
                    bld.env.append_value('NS3_RUNNABLE_PROGRAMS', obj.name)
            # disable the modules themselves
            if hasattr(obj, "is_ns3_module") and obj.name not in modules:
                bld.exclude_taskgen(obj) # kill the module
            # disable the module test libraries
            if hasattr(obj, "is_ns3_module_test_library"):
                if not env['ENABLE_TESTS'] or (obj.module_name not in modules):
                    bld.exclude_taskgen(obj) # kill the module test library
            # disable the ns3header_taskgen
            if 'ns3header' in getattr(obj, "features", []):
                if ("ns3-%s" % obj.module) not in modules:
                    obj.mode = 'remove' # tell it to remove headers instead of installing
    if env['NS3_ENABLED_MODULES']:
        env['NS3_ENABLED_MODULES'] = list(modules)
    # Determine which scripts will be runnable.
    # NOTE(review): this loop reads `modules`, which is only bound inside the
    # `if env['NS3_ENABLED_MODULES']` branches above -- confirm that
    # NS3_ENABLED_MODULES is always non-empty when scripts are registered.
    for (script, dependencies) in bld.env['NS3_SCRIPT_DEPENDENCIES']:
        script_runnable = True
        for dep in dependencies:
            if dep not in modules:
                script_runnable = False
                break
        # Add this script to the list if all of its dependencies will
        # be built.
        if script_runnable:
            bld.env.append_value('NS3_RUNNABLE_SCRIPTS', script)
    bld.add_subdirs('bindings/python')
    # Process this subfolder here after the lists of enabled modules
    # and module test libraries have been set.
    bld.add_subdirs('utils')
    if Options.options.run:
        # Check that the requested program name is valid
        program_name, dummy_program_argv = wutils.get_run_program(Options.options.run, wutils.get_command_template(env))
        # When --run'ing a program, tell WAF to only build that program,
        # nothing more; this greatly speeds up compilation when all you
        # want to do is run a test program.
        Options.options.targets += ',' + os.path.basename(program_name)
        # Still post the header installation task generators so headers
        # are in place for the single targeted program.
        for gen in bld.all_task_gen:
            if type(gen).__name__ in ['ns3header_taskgen', 'ns3moduleheader_taskgen']:
                gen.post()
    if Options.options.doxygen_no_build:
        _doxygen(bld)
        raise SystemExit(0)
def shutdown(ctx):
    """Post-build hook: report built modules, write build-status.py, and
    dispatch the --run/--pyrun/--lcov-report command-line actions."""
    bld = wutils.bld
    # Nothing to do if build() never ran (e.g. configure-only invocation).
    if wutils.bld is None:
        return
    env = bld.env
    # Don't print the lists if a program is being run, a Python
    # program is being run, this a clean, or this is a distribution
    # clean.
    if ((not Options.options.run)
        and (not Options.options.pyrun)
        and ('clean' not in Options.commands)
        and ('distclean' not in Options.commands)
        and ('shell' not in Options.commands)):
        # Print the list of built modules.
        print
        print 'Modules built:'
        names_without_prefix =[name[len('ns3-'):] for name in env['NS3_ENABLED_MODULES']]
        print_module_names(names_without_prefix)
        print
        # Print the list of enabled modules that were not built.
        if env['MODULES_NOT_BUILT']:
            print 'Modules not built:'
            print_module_names(env['MODULES_NOT_BUILT'])
            print
        # Write the build status file, consumed by test.py and other
        # helper scripts to learn what can be run.
        build_status_file = os.path.join(bld.out_dir, 'build-status.py')
        out = open(build_status_file, 'w')
        out.write('#! /usr/bin/env python\n')
        out.write('\n')
        out.write('# Programs that are runnable.\n')
        out.write('ns3_runnable_programs = ' + str(env['NS3_RUNNABLE_PROGRAMS']) + '\n')
        out.write('\n')
        out.write('# Scripts that are runnable.\n')
        out.write('ns3_runnable_scripts = ' + str(env['NS3_RUNNABLE_SCRIPTS']) + '\n')
        out.write('\n')
        out.close()
    if Options.options.lcov_report:
        lcov_report(bld)
    # --run / --pyrun terminate the process with the program's exit status.
    if Options.options.run:
        wutils.run_program(Options.options.run, env, wutils.get_command_template(env),
                           visualize=Options.options.visualize)
        raise SystemExit(0)
    if Options.options.pyrun:
        wutils.run_python_program(Options.options.pyrun, env,
                                  visualize=Options.options.visualize)
        raise SystemExit(0)
    # These two options were replaced by dedicated commands/scripts.
    if Options.options.shell:
        raise WafError("Please run `./waf shell' now, instead of `./waf --shell'")
    if Options.options.check:
        raise WafError("Please run `./test.py' now, instead of `./waf --check'")
    check_shell(bld)
from waflib import Context, Build
class CheckContext(Context.Context):
    """run the equivalent of the old ns-3 unit tests using test.py"""
    cmd = 'check'
    def execute(self):
        # first we execute the build
        bld = Context.create_context("build")
        bld.options = Options.options # provided for convenience
        bld.cmd = "build"
        bld.execute()
        # Make the finished build context visible to the helpers in wutils,
        # then delegate the actual test run to test.py (core suite only).
        wutils.bld = bld
        wutils.run_python_program("test.py -n -c core", bld.env)
class print_introspected_doxygen_task(Task.TaskBase):
    """WAF task that runs the print-introspected-doxygen program to generate
    doc/introspected-doxygen.h and doc/ns3-object.txt."""
    after = 'cc cxx link'  # run only once compilation/linking is done
    color = 'BLUE'
    def __init__(self, bld):
        self.bld = bld
        super(print_introspected_doxygen_task, self).__init__(generator=self)
    def __str__(self):
        return 'print-introspected-doxygen\n'
    def runnable_status(self):
        # Always (re)run; output freshness is not tracked.
        return Task.RUN_ME
    def run(self):
        ## generate the trace sources list docs
        env = wutils.bld.env
        proc_env = wutils.get_proc_env()
        try:
            program_obj = wutils.find_program('print-introspected-doxygen', env)
        except ValueError: # could happen if print-introspected-doxygen is
                           # not built because of waf configure
                           # --enable-modules=xxx
            pass
        else:
            # NOTE(review): abspath(env) is the old WAF 1.5 Node API; newer
            # waflib Node.abspath() takes no argument -- confirm against the
            # bundled waf version.
            prog = program_obj.path.find_or_declare(ccroot.get_target_name(program_obj)).abspath(env)
            # Create a header file with the introspected information.
            doxygen_out = open(os.path.join('doc', 'introspected-doxygen.h'), 'w')
            if subprocess.Popen([prog], stdout=doxygen_out, env=proc_env).wait():
                raise SystemExit(1)
            doxygen_out.close()
            # Create a text file with the introspected information.
            text_out = open(os.path.join('doc', 'ns3-object.txt'), 'w')
            if subprocess.Popen([prog, '--output-text'], stdout=text_out, env=proc_env).wait():
                raise SystemExit(1)
            text_out.close()
class run_python_unit_tests_task(Task.TaskBase):
    """WAF task that runs utils/python-unit-tests.py with the build's
    Python interpreter after everything has been linked."""
    after = 'cc cxx link'  # run only once compilation/linking is done
    color = 'BLUE'
    def __init__(self, bld):
        self.bld = bld
        super(run_python_unit_tests_task, self).__init__(generator=self)
    def __str__(self):
        return 'run-python-unit-tests\n'
    def runnable_status(self):
        # Always (re)run; there is no dependency tracking for this task.
        return Task.RUN_ME
    def run(self):
        proc_env = wutils.get_proc_env()
        # Valgrind is forced off: it does not cope well with CPython.
        wutils.run_argv([self.bld.env['PYTHON'], os.path.join("..", "utils", "python-unit-tests.py")],
                        self.bld.env, proc_env, force_no_valgrind=True)
def check_shell(bld):
    # When running inside a `./waf shell`, verify that the environment the
    # shell was started with still matches the current build configuration;
    # raise a WafError with an explanation if it does not.
    if ('NS3_MODULE_PATH' not in os.environ) or ('NS3_EXECUTABLE_PATH' not in os.environ):
        return
    env = bld.env
    expected_modpath = os.pathsep.join(env['NS3_MODULE_PATH'])
    expected_execpath = os.pathsep.join(env['NS3_EXECUTABLE_PATH'])
    actual_modpath = os.environ['NS3_MODULE_PATH']
    actual_execpath = os.environ['NS3_EXECUTABLE_PATH']
    if actual_modpath == expected_modpath and actual_execpath == expected_execpath:
        return
    msg = ("Detected shell (./waf shell) with incorrect configuration\n"
           "=========================================================\n"
           "Possible reasons for this problem:\n"
           "  1. You switched to another ns-3 tree from inside this shell\n"
           "  2. You switched ns-3 debug level (waf configure --debug)\n"
           "  3. You modified the list of built ns-3 modules\n"
           "You should correct this situation before running any program.  Possible solutions:\n"
           "  1. Exit this shell, and start a new one\n"
           "  2. Run a new nested shell")
    raise WafError(msg)
from waflib import Context, Build
class Ns3ShellContext(Context.Context):
    """run a shell with an environment suitably modified to run locally built programs"""
    cmd = 'shell'
    def execute(self):
        # first we execute the build
        bld = Context.create_context("build")
        bld.options = Options.options # provided for convenience
        bld.cmd = "build"
        bld.execute()
        # Pick the user's shell: COMSPEC/cmd.exe on Windows, $SHELL/sh elsewhere.
        if sys.platform == 'win32':
            shell = os.environ.get("COMSPEC", "cmd.exe")
        else:
            shell = os.environ.get("SHELL", "/bin/sh")
        env = bld.env
        # Export the module/executable search paths so programs built in
        # this tree can be run directly from the spawned shell
        # (check_shell() later validates these against the build config).
        os_env = {
            'NS3_MODULE_PATH': os.pathsep.join(env['NS3_MODULE_PATH']),
            'NS3_EXECUTABLE_PATH': os.pathsep.join(env['NS3_EXECUTABLE_PATH']),
        }
        wutils.run_argv([shell], env, os_env)
def _doxygen(bld):
    """Generate the doxygen API documentation.

    Runs print-introspected-doxygen to produce
    doc/introspected-doxygen.h and doc/ns3-object.txt, then invokes
    doxygen on doc/doxygen.conf.  Terminates the process via
    SystemExit(1) on any failure.
    """
    env = wutils.bld.env
    proc_env = wutils.get_proc_env()
    if not env['DOXYGEN']:
        Logs.error("waf configure did not detect doxygen in the system -> cannot build api docs.")
        raise SystemExit(1)
    try:
        program_obj = wutils.find_program('print-introspected-doxygen', env)
    except ValueError:
        Logs.warn("print-introspected-doxygen does not exist")
        raise SystemExit(1)
    prog = program_obj.path.find_or_declare(program_obj.target).abspath()
    if not os.path.exists(prog):
        Logs.error("print-introspected-doxygen has not been built yet."
                   " You need to build ns-3 at least once before "
                   "generating doxygen docs...")
        raise SystemExit(1)
    # Create a header file with the introspected information.
    doxygen_out = open(os.path.join('doc', 'introspected-doxygen.h'), 'w')
    if subprocess.Popen([prog], stdout=doxygen_out, env=proc_env).wait():
        raise SystemExit(1)
    doxygen_out.close()
    # Create a text file with the introspected information.
    text_out = open(os.path.join('doc', 'ns3-object.txt'), 'w')
    if subprocess.Popen([prog, '--output-text'], stdout=text_out, env=proc_env).wait():
        raise SystemExit(1)
    text_out.close()
    doxygen_config = os.path.join('doc', 'doxygen.conf')
    if subprocess.Popen([env['DOXYGEN'], doxygen_config]).wait():
        raise SystemExit(1)
from waflib import Context, Build
class Ns3DoxygenContext(Context.Context):
    """do a full build, generate the introspected doxygen and then the doxygen"""
    cmd = 'doxygen'
    def execute(self):
        # first we execute the build
        bld = Context.create_context("build")
        bld.options = Options.options # provided for convenience
        bld.cmd = "build"
        bld.execute()
        # With the build done, run the actual doxygen generation step.
        _doxygen(bld)
def lcov_report(bld):
    """Generate an lcov HTML coverage report into <build>/lcov-report.

    Requires the build to have been configured with --enable-gcov.
    """
    env = bld.env
    if not env['GCOV_ENABLED']:
        raise WafError("project not configured for code coverage;"
                       " reconfigure with --enable-gcov")
    # 'out' is the waf top-level build output directory variable (supplied
    # by waf at wscript scope) -- it is not defined in this function.
    os.chdir(out)
    try:
        # Recreate a clean report directory.
        lcov_report_dir = 'lcov-report'
        create_dir_command = "rm -rf " + lcov_report_dir
        create_dir_command += " && mkdir " + lcov_report_dir + ";"
        if subprocess.Popen(create_dir_command, shell=True).wait():
            raise SystemExit(1)
        # Collect coverage counters from the build tree into report.info.
        info_file = os.path.join(lcov_report_dir, 'report.info')
        lcov_command = "../utils/lcov/lcov -c -d . -o " + info_file
        lcov_command += " -b " + os.getcwd()
        if subprocess.Popen(lcov_command, shell=True).wait():
            raise SystemExit(1)
        # Render the collected data as browsable HTML.
        genhtml_command = "../utils/lcov/genhtml -o " + lcov_report_dir
        genhtml_command += " " + info_file
        if subprocess.Popen(genhtml_command, shell=True).wait():
            raise SystemExit(1)
    finally:
        os.chdir("..")
| zy901002-gpsr | wscript | Python | gpl2 | 39,310 |
REM Windows launcher for the waf build script: run waf through Python,
REM forwarding all command-line arguments.  The -x flag makes Python skip
REM the first line of the waf file (its Unix "#!" line).
@python -x waf %* & exit /b
| zy901002-gpsr | waf.bat | Batchfile | gpl2 | 28 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* An {@link OutputStream} that starts buffering to a byte array, but
* switches to file buffering once the data reaches a configurable size.
*
* <p>This class is thread-safe.
*
* @author Chris Nokleberg
* @since 1.0
*/
@Beta
public final class FileBackedOutputStream extends OutputStream {
  private final int fileThreshold;
  private final boolean resetOnFinalize;
  private final ByteSource source;

  // Current sink for writes: 'memory' until the threshold is crossed,
  // then the FileOutputStream that replaces it.
  private OutputStream out;
  // In-memory buffer; set to null once the data has spilled to 'file'.
  private MemoryOutput memory;
  // Backing file; null while the data still fits in memory.
  private File file;

  /** ByteArrayOutputStream that exposes its internals. */
  private static class MemoryOutput extends ByteArrayOutputStream {
    // Direct access to the internal buffer avoids the array copy that
    // toByteArray() would make.
    byte[] getBuffer() {
      return buf;
    }

    int getCount() {
      return count;
    }
  }

  /** Returns the file holding the data (possibly null). */
  @VisibleForTesting synchronized File getFile() {
    return file;
  }

  /**
   * Creates a new instance that uses the given file threshold, and does
   * not reset the data when the {@link ByteSource} returned by
   * {@link #asByteSource} is finalized.
   *
   * @param fileThreshold the number of bytes before the stream should
   *     switch to buffering to a file
   */
  public FileBackedOutputStream(int fileThreshold) {
    this(fileThreshold, false);
  }

  /**
   * Creates a new instance that uses the given file threshold, and
   * optionally resets the data when the {@link ByteSource} returned
   * by {@link #asByteSource} is finalized.
   *
   * @param fileThreshold the number of bytes before the stream should
   *     switch to buffering to a file
   * @param resetOnFinalize if true, the {@link #reset} method will
   *     be called when the {@link ByteSource} returned by {@link
   *     #asByteSource} is finalized
   */
  public FileBackedOutputStream(int fileThreshold, boolean resetOnFinalize) {
    this.fileThreshold = fileThreshold;
    this.resetOnFinalize = resetOnFinalize;
    memory = new MemoryOutput();
    out = memory;

    if (resetOnFinalize) {
      // This ByteSource resets (and deletes any backing file) when it is
      // garbage collected.
      source = new ByteSource() {
        @Override
        public InputStream openStream() throws IOException {
          return openInputStream();
        }

        @Override protected void finalize() {
          try {
            reset();
          } catch (Throwable t) {
            t.printStackTrace(System.err);
          }
        }
      };
    } else {
      source = new ByteSource() {
        @Override
        public InputStream openStream() throws IOException {
          return openInputStream();
        }
      };
    }
  }

  /**
   * Returns a readable {@link ByteSource} view of the data that has been
   * written to this stream.
   *
   * @since 15.0
   */
  public ByteSource asByteSource() {
    return source;
  }

  // Reads from whichever buffer currently holds the data: the backing
  // file if we have spilled, otherwise the in-memory byte array.
  private synchronized InputStream openInputStream() throws IOException {
    if (file != null) {
      return new FileInputStream(file);
    } else {
      return new ByteArrayInputStream(
          memory.getBuffer(), 0, memory.getCount());
    }
  }

  /**
   * Calls {@link #close} if not already closed, and then resets this
   * object back to its initial state, for reuse. If data was buffered
   * to a file, it will be deleted.
   *
   * @throws IOException if an I/O error occurred while deleting the file buffer
   */
  public synchronized void reset() throws IOException {
    try {
      close();
    } finally {
      // Rebuild the initial in-memory state even if close() threw.
      if (memory == null) {
        memory = new MemoryOutput();
      } else {
        memory.reset();
      }
      out = memory;
      if (file != null) {
        File deleteMe = file;
        file = null;
        if (!deleteMe.delete()) {
          throw new IOException("Could not delete: " + deleteMe);
        }
      }
    }
  }

  @Override public synchronized void write(int b) throws IOException {
    update(1);
    out.write(b);
  }

  @Override public synchronized void write(byte[] b) throws IOException {
    write(b, 0, b.length);
  }

  @Override public synchronized void write(byte[] b, int off, int len)
      throws IOException {
    update(len);
    out.write(b, off, len);
  }

  @Override public synchronized void close() throws IOException {
    out.close();
  }

  @Override public synchronized void flush() throws IOException {
    out.flush();
  }

  /**
   * Checks if writing {@code len} bytes would go over threshold, and
   * switches to file buffering if so.  Callers hold the monitor: every
   * caller in this class is a synchronized method.
   */
  private void update(int len) throws IOException {
    if (file == null && (memory.getCount() + len > fileThreshold)) {
      File temp = File.createTempFile("FileBackedOutputStream", null);
      if (resetOnFinalize) {
        // Finalizers are not guaranteed to be called on system shutdown;
        // this is insurance.
        temp.deleteOnExit();
      }
      FileOutputStream transfer = new FileOutputStream(temp);
      transfer.write(memory.getBuffer(), 0, memory.getCount());
      transfer.flush();

      // We've successfully transferred the data; switch to writing to file
      out = transfer;
      file = temp;
      memory = null;
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/FileBackedOutputStream.java | Java | asf20 | 5,981 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/**
* Utility methods for working with {@link Closeable} objects.
*
* @author Michael Lancaster
* @since 1.0
*/
@Beta
public final class Closeables {
  @VisibleForTesting static final Logger logger
      = Logger.getLogger(Closeables.class.getName());

  private Closeables() {}

  /**
   * Closes a {@link Closeable}, with control over whether an {@code IOException} may be thrown.
   * This is primarily useful in a finally block, where a thrown exception needs to be logged but
   * not propagated (otherwise the original exception will be lost).
   *
   * <p>If {@code swallowIOException} is true then we never throw {@code IOException} but merely log
   * it.
   *
   * <p>Example: <pre>   {@code
   *
   *   public void useStreamNicely() throws IOException {
   *     SomeStream stream = new SomeStream("foo");
   *     boolean threw = true;
   *     try {
   *       // ... code which does something with the stream ...
   *       threw = false;
   *     } finally {
   *       // If an exception occurs, rethrow it only if threw==false:
   *       Closeables.close(stream, threw);
   *     }
   *   }}</pre>
   *
   * @param closeable the {@code Closeable} object to be closed, or null, in which case this method
   *     does nothing
   * @param swallowIOException if true, don't propagate IO exceptions thrown by the {@code close}
   *     methods
   * @throws IOException if {@code swallowIOException} is false and {@code close} throws an
   *     {@code IOException}.
   */
  public static void close(@Nullable Closeable closeable,
      boolean swallowIOException) throws IOException {
    // A null closeable is explicitly allowed and is a no-op.
    if (closeable == null) {
      return;
    }
    try {
      closeable.close();
    } catch (IOException e) {
      if (!swallowIOException) {
        throw e;
      }
      logger.log(Level.WARNING,
          "IOException thrown while closing Closeable.", e);
    }
  }

  /**
   * Closes the given {@link InputStream}, logging any {@code IOException} that's thrown rather
   * than propagating it.
   *
   * <p>While it's not safe in the general case to ignore exceptions that are thrown when closing
   * an I/O resource, it should generally be safe in the case of a resource that's being used only
   * for reading, such as an {@code InputStream}. Unlike with writable resources, there's no
   * chance that a failure that occurs when closing the stream indicates a meaningful problem such
   * as a failure to flush all bytes to the underlying resource.
   *
   * @param inputStream the input stream to be closed, or {@code null} in which case this method
   *     does nothing
   * @since 17.0
   */
  public static void closeQuietly(@Nullable InputStream inputStream) {
    try {
      close(inputStream, true);
    } catch (IOException impossible) {
      // close(..., true) never propagates IOException.
      throw new AssertionError(impossible);
    }
  }

  /**
   * Closes the given {@link Reader}, logging any {@code IOException} that's thrown rather than
   * propagating it.
   *
   * <p>While it's not safe in the general case to ignore exceptions that are thrown when closing
   * an I/O resource, it should generally be safe in the case of a resource that's being used only
   * for reading, such as a {@code Reader}. Unlike with writable resources, there's no chance that
   * a failure that occurs when closing the reader indicates a meaningful problem such as a failure
   * to flush all bytes to the underlying resource.
   *
   * @param reader the reader to be closed, or {@code null} in which case this method does nothing
   * @since 17.0
   */
  public static void closeQuietly(@Nullable Reader reader) {
    try {
      close(reader, true);
    } catch (IOException impossible) {
      // close(..., true) never propagates IOException.
      throw new AssertionError(impossible);
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/Closeables.java | Java | asf20 | 4,674 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.FilenameFilter;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import javax.annotation.Nullable;
/**
* File name filter that only accepts files matching a regular expression. This
* class is thread-safe and immutable.
*
* @author Apple Chow
* @since 1.0
*/
@Beta
public final class PatternFilenameFilter implements FilenameFilter {

  private final Pattern pattern;

  /**
   * Constructs a pattern file name filter object.
   * @param patternStr the pattern string on which to filter file names
   *
   * @throws PatternSyntaxException if pattern compilation fails (runtime)
   */
  public PatternFilenameFilter(String patternStr) {
    // Delegate to the Pattern-based constructor after compiling the string.
    this(Pattern.compile(patternStr));
  }

  /**
   * Constructs a pattern file name filter object.
   * @param pattern the pattern on which to filter file names
   */
  public PatternFilenameFilter(Pattern pattern) {
    this.pattern = Preconditions.checkNotNull(pattern);
  }

  @Override public boolean accept(@Nullable File dir, String fileName) {
    // The directory component is ignored; only the file name is matched,
    // and the whole name must match (matches(), not find()).
    return pattern.matcher(fileName).matches();
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/PatternFilenameFilter.java | Java | asf20 | 1,839 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import java.io.Flushable;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Utility methods for working with {@link Flushable} objects.
*
* @author Michael Lancaster
* @since 1.0
*/
@Beta
public final class Flushables {
  private static final Logger logger
      = Logger.getLogger(Flushables.class.getName());

  private Flushables() {}

  /**
   * Flush a {@link Flushable}, with control over whether an
   * {@code IOException} may be thrown.
   *
   * <p>If {@code swallowIOException} is true, then we don't rethrow
   * {@code IOException}, but merely log it.
   *
   * @param flushable the {@code Flushable} object to be flushed.
   * @param swallowIOException if true, don't propagate IO exceptions
   *     thrown by the {@code flush} method
   * @throws IOException if {@code swallowIOException} is false and
   *     {@link Flushable#flush} throws an {@code IOException}.
   * @see Closeables#close
   */
  public static void flush(Flushable flushable, boolean swallowIOException)
      throws IOException {
    try {
      flushable.flush();
    } catch (IOException e) {
      if (!swallowIOException) {
        throw e;
      }
      logger.log(Level.WARNING,
          "IOException thrown while flushing Flushable.", e);
    }
  }

  /**
   * Equivalent to calling {@code flush(flushable, true)}, but with no
   * {@code IOException} in the signature.
   *
   * @param flushable the {@code Flushable} object to be flushed.
   */
  public static void flushQuietly(Flushable flushable) {
    try {
      flush(flushable, true);
    } catch (IOException e) {
      // flush(..., true) never propagates IOException.
      logger.log(Level.SEVERE, "IOException should not have been thrown.", e);
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/Flushables.java | Java | asf20 | 2,411 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.io.GwtWorkarounds.asCharInput;
import static com.google.common.io.GwtWorkarounds.asCharOutput;
import static com.google.common.io.GwtWorkarounds.asInputStream;
import static com.google.common.io.GwtWorkarounds.asOutputStream;
import static com.google.common.io.GwtWorkarounds.stringBuilderOutput;
import static com.google.common.math.IntMath.divide;
import static com.google.common.math.IntMath.log2;
import static java.math.RoundingMode.CEILING;
import static java.math.RoundingMode.FLOOR;
import static java.math.RoundingMode.UNNECESSARY;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.base.Ascii;
import com.google.common.base.CharMatcher;
import com.google.common.io.GwtWorkarounds.ByteInput;
import com.google.common.io.GwtWorkarounds.ByteOutput;
import com.google.common.io.GwtWorkarounds.CharInput;
import com.google.common.io.GwtWorkarounds.CharOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.util.Arrays;
import javax.annotation.CheckReturnValue;
import javax.annotation.Nullable;
/**
* A binary encoding scheme for reversibly translating between byte sequences and printable ASCII
* strings. This class includes several constants for encoding schemes specified by <a
* href="http://tools.ietf.org/html/rfc4648">RFC 4648</a>. For example, the expression:
*
* <pre> {@code
* BaseEncoding.base32().encode("foo".getBytes(Charsets.US_ASCII))}</pre>
*
* <p>returns the string {@code "MZXW6==="}, and <pre> {@code
* byte[] decoded = BaseEncoding.base32().decode("MZXW6===");}</pre>
*
* <p>...returns the ASCII bytes of the string {@code "foo"}.
*
* <p>By default, {@code BaseEncoding}'s behavior is relatively strict and in accordance with
* RFC 4648. Decoding rejects characters in the wrong case, though padding is optional.
* To modify encoding and decoding behavior, use configuration methods to obtain a new encoding
* with modified behavior:
*
* <pre> {@code
* BaseEncoding.base16().lowerCase().decode("deadbeef");}</pre>
*
* <p>Warning: BaseEncoding instances are immutable. Invoking a configuration method has no effect
* on the receiving instance; you must store and use the new encoding instance it returns, instead.
*
* <pre> {@code
* // Do NOT do this
* BaseEncoding hex = BaseEncoding.base16();
* hex.lowerCase(); // does nothing!
* return hex.decode("deadbeef"); // throws an IllegalArgumentException}</pre>
*
* <p>It is guaranteed that {@code encoding.decode(encoding.encode(x))} is always equal to
* {@code x}, but the reverse does not necessarily hold.
*
* <p>
* <table>
* <tr>
* <th>Encoding
* <th>Alphabet
* <th>{@code char:byte} ratio
* <th>Default padding
* <th>Comments
* <tr>
* <td>{@link #base16()}
* <td>0-9 A-F
* <td>2.00
* <td>N/A
* <td>Traditional hexadecimal. Defaults to upper case.
* <tr>
* <td>{@link #base32()}
* <td>A-Z 2-7
* <td>1.60
* <td>=
* <td>Human-readable; no possibility of mixing up 0/O or 1/I. Defaults to upper case.
* <tr>
* <td>{@link #base32Hex()}
* <td>0-9 A-V
* <td>1.60
* <td>=
* <td>"Numerical" base 32; extended from the traditional hex alphabet. Defaults to upper case.
* <tr>
* <td>{@link #base64()}
* <td>A-Z a-z 0-9 + /
* <td>1.33
* <td>=
* <td>
* <tr>
* <td>{@link #base64Url()}
* <td>A-Z a-z 0-9 - _
* <td>1.33
* <td>=
* <td>Safe to use as filenames, or to pass in URLs without escaping
* </table>
*
* <p>
* All instances of this class are immutable, so they may be stored safely as static constants.
*
* @author Louis Wasserman
* @since 14.0
*/
@Beta
@GwtCompatible(emulated = true)
public abstract class BaseEncoding {
// TODO(user): consider adding encodeTo(Appendable, byte[], [int, int])
BaseEncoding() {}
/**
* Exception indicating invalid base-encoded input encountered while decoding.
*
* @author Louis Wasserman
* @since 15.0
*/
  public static final class DecodingException extends IOException {
    // Constructed with a description of the malformed input.
    DecodingException(String message) {
      super(message);
    }
    // Constructed by wrapping an underlying cause.
    DecodingException(Throwable cause) {
      super(cause);
    }
  }
/**
* Encodes the specified byte array, and returns the encoded {@code String}.
*/
public String encode(byte[] bytes) {
return encode(checkNotNull(bytes), 0, bytes.length);
}
/**
* Encodes the specified range of the specified byte array, and returns the encoded
* {@code String}.
*/
public final String encode(byte[] bytes, int off, int len) {
checkNotNull(bytes);
checkPositionIndexes(off, off + len, bytes.length);
CharOutput result = stringBuilderOutput(maxEncodedSize(len));
ByteOutput byteOutput = encodingStream(result);
try {
for (int i = 0; i < len; i++) {
byteOutput.write(bytes[off + i]);
}
byteOutput.close();
} catch (IOException impossible) {
throw new AssertionError("impossible");
}
return result.toString();
}
/**
* Returns an {@code OutputStream} that encodes bytes using this encoding into the specified
* {@code Writer}. When the returned {@code OutputStream} is closed, so is the backing
* {@code Writer}.
*/
@GwtIncompatible("Writer,OutputStream")
public final OutputStream encodingStream(Writer writer) {
return asOutputStream(encodingStream(asCharOutput(writer)));
}
/**
* Returns a {@code ByteSink} that writes base-encoded bytes to the specified {@code CharSink}.
*/
@GwtIncompatible("ByteSink,CharSink")
public final ByteSink encodingSink(final CharSink encodedSink) {
checkNotNull(encodedSink);
return new ByteSink() {
@Override
public OutputStream openStream() throws IOException {
return encodingStream(encodedSink.openStream());
}
};
}
// TODO(user): document the extent of leniency, probably after adding ignore(CharMatcher)
private static byte[] extract(byte[] result, int length) {
if (length == result.length) {
return result;
} else {
byte[] trunc = new byte[length];
System.arraycopy(result, 0, trunc, 0, length);
return trunc;
}
}
/**
* Decodes the specified character sequence, and returns the resulting {@code byte[]}.
* This is the inverse operation to {@link #encode(byte[])}.
*
* @throws IllegalArgumentException if the input is not a valid encoded string according to this
* encoding.
*/
public final byte[] decode(CharSequence chars) {
try {
return decodeChecked(chars);
} catch (DecodingException badInput) {
throw new IllegalArgumentException(badInput);
}
}
/**
* Decodes the specified character sequence, and returns the resulting {@code byte[]}.
* This is the inverse operation to {@link #encode(byte[])}.
*
* @throws DecodingException if the input is not a valid encoded string according to this
* encoding.
*/
final byte[] decodeChecked(CharSequence chars) throws DecodingException {
chars = padding().trimTrailingFrom(chars);
ByteInput decodedInput = decodingStream(asCharInput(chars));
byte[] tmp = new byte[maxDecodedSize(chars.length())];
int index = 0;
try {
for (int i = decodedInput.read(); i != -1; i = decodedInput.read()) {
tmp[index++] = (byte) i;
}
} catch (DecodingException badInput) {
throw badInput;
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
return extract(tmp, index);
}
/**
* Returns an {@code InputStream} that decodes base-encoded input from the specified
* {@code Reader}. The returned stream throws a {@link DecodingException} upon decoding-specific
* errors.
*/
@GwtIncompatible("Reader,InputStream")
public final InputStream decodingStream(Reader reader) {
return asInputStream(decodingStream(asCharInput(reader)));
}
/**
* Returns a {@code ByteSource} that reads base-encoded bytes from the specified
* {@code CharSource}.
*/
@GwtIncompatible("ByteSource,CharSource")
public final ByteSource decodingSource(final CharSource encodedSource) {
checkNotNull(encodedSource);
return new ByteSource() {
@Override
public InputStream openStream() throws IOException {
return decodingStream(encodedSource.openStream());
}
};
}
// Implementations for encoding/decoding
abstract int maxEncodedSize(int bytes);
abstract ByteOutput encodingStream(CharOutput charOutput);
abstract int maxDecodedSize(int chars);
abstract ByteInput decodingStream(CharInput charInput);
abstract CharMatcher padding();
// Modified encoding generators
/**
* Returns an encoding that behaves equivalently to this encoding, but omits any padding
* characters as specified by <a href="http://tools.ietf.org/html/rfc4648#section-3.2">RFC 4648
* section 3.2</a>, Padding of Encoded Data.
*/
@CheckReturnValue
public abstract BaseEncoding omitPadding();
/**
* Returns an encoding that behaves equivalently to this encoding, but uses an alternate character
* for padding.
*
* @throws IllegalArgumentException if this padding character is already used in the alphabet or a
* separator
*/
@CheckReturnValue
public abstract BaseEncoding withPadChar(char padChar);
/**
* Returns an encoding that behaves equivalently to this encoding, but adds a separator string
* after every {@code n} characters. Any occurrences of any characters that occur in the separator
* are skipped over in decoding.
*
* @throws IllegalArgumentException if any alphabet or padding characters appear in the separator
* string, or if {@code n <= 0}
* @throws UnsupportedOperationException if this encoding already uses a separator
*/
@CheckReturnValue
public abstract BaseEncoding withSeparator(String separator, int n);
/**
* Returns an encoding that behaves equivalently to this encoding, but encodes and decodes with
* uppercase letters. Padding and separator characters remain in their original case.
*
* @throws IllegalStateException if the alphabet used by this encoding contains mixed upper- and
* lower-case characters
*/
@CheckReturnValue
public abstract BaseEncoding upperCase();
/**
* Returns an encoding that behaves equivalently to this encoding, but encodes and decodes with
* lowercase letters. Padding and separator characters remain in their original case.
*
* @throws IllegalStateException if the alphabet used by this encoding contains mixed upper- and
* lower-case characters
*/
@CheckReturnValue
public abstract BaseEncoding lowerCase();
private static final BaseEncoding BASE64 = new StandardBaseEncoding(
"base64()", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", '=');
/**
* The "base64" base encoding specified by <a
* href="http://tools.ietf.org/html/rfc4648#section-4">RFC 4648 section 4</a>, Base 64 Encoding.
* (This is the same as the base 64 encoding from <a
* href="http://tools.ietf.org/html/rfc3548#section-3">RFC 3548</a>.)
*
* <p>The character {@code '='} is used for padding, but can be {@linkplain #omitPadding()
* omitted} or {@linkplain #withPadChar(char) replaced}.
*
* <p>No line feeds are added by default, as per <a
* href="http://tools.ietf.org/html/rfc4648#section-3.1"> RFC 4648 section 3.1</a>, Line Feeds in
* Encoded Data. Line feeds may be added using {@link #withSeparator(String, int)}.
*/
public static BaseEncoding base64() {
return BASE64;
}
private static final BaseEncoding BASE64_URL = new StandardBaseEncoding(
"base64Url()", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", '=');
/**
* The "base64url" encoding specified by <a
* href="http://tools.ietf.org/html/rfc4648#section-5">RFC 4648 section 5</a>, Base 64 Encoding
* with URL and Filename Safe Alphabet, also sometimes referred to as the "web safe Base64."
* (This is the same as the base 64 encoding with URL and filename safe alphabet from <a
* href="http://tools.ietf.org/html/rfc3548#section-4">RFC 3548</a>.)
*
* <p>The character {@code '='} is used for padding, but can be {@linkplain #omitPadding()
* omitted} or {@linkplain #withPadChar(char) replaced}.
*
* <p>No line feeds are added by default, as per <a
* href="http://tools.ietf.org/html/rfc4648#section-3.1"> RFC 4648 section 3.1</a>, Line Feeds in
* Encoded Data. Line feeds may be added using {@link #withSeparator(String, int)}.
*/
public static BaseEncoding base64Url() {
return BASE64_URL;
}
private static final BaseEncoding BASE32 =
new StandardBaseEncoding("base32()", "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567", '=');
/**
* The "base32" encoding specified by <a
* href="http://tools.ietf.org/html/rfc4648#section-6">RFC 4648 section 6</a>, Base 32 Encoding.
* (This is the same as the base 32 encoding from <a
* href="http://tools.ietf.org/html/rfc3548#section-5">RFC 3548</a>.)
*
* <p>The character {@code '='} is used for padding, but can be {@linkplain #omitPadding()
* omitted} or {@linkplain #withPadChar(char) replaced}.
*
* <p>No line feeds are added by default, as per <a
* href="http://tools.ietf.org/html/rfc4648#section-3.1"> RFC 4648 section 3.1</a>, Line Feeds in
* Encoded Data. Line feeds may be added using {@link #withSeparator(String, int)}.
*/
public static BaseEncoding base32() {
return BASE32;
}
private static final BaseEncoding BASE32_HEX =
new StandardBaseEncoding("base32Hex()", "0123456789ABCDEFGHIJKLMNOPQRSTUV", '=');
/**
* The "base32hex" encoding specified by <a
* href="http://tools.ietf.org/html/rfc4648#section-7">RFC 4648 section 7</a>, Base 32 Encoding
* with Extended Hex Alphabet. There is no corresponding encoding in RFC 3548.
*
* <p>The character {@code '='} is used for padding, but can be {@linkplain #omitPadding()
* omitted} or {@linkplain #withPadChar(char) replaced}.
*
* <p>No line feeds are added by default, as per <a
* href="http://tools.ietf.org/html/rfc4648#section-3.1"> RFC 4648 section 3.1</a>, Line Feeds in
* Encoded Data. Line feeds may be added using {@link #withSeparator(String, int)}.
*/
public static BaseEncoding base32Hex() {
return BASE32_HEX;
}
private static final BaseEncoding BASE16 =
new StandardBaseEncoding("base16()", "0123456789ABCDEF", null);
/**
* The "base16" encoding specified by <a
* href="http://tools.ietf.org/html/rfc4648#section-8">RFC 4648 section 8</a>, Base 16 Encoding.
* (This is the same as the base 16 encoding from <a
* href="http://tools.ietf.org/html/rfc3548#section-6">RFC 3548</a>.) This is commonly known as
* "hexadecimal" format.
*
* <p>No padding is necessary in base 16, so {@link #withPadChar(char)} and
* {@link #omitPadding()} have no effect.
*
* <p>No line feeds are added by default, as per <a
* href="http://tools.ietf.org/html/rfc4648#section-3.1"> RFC 4648 section 3.1</a>, Line Feeds in
* Encoded Data. Line feeds may be added using {@link #withSeparator(String, int)}.
*/
public static BaseEncoding base16() {
return BASE16;
}
private static final class Alphabet extends CharMatcher {
private final String name;
// this is meant to be immutable -- don't modify it!
private final char[] chars;
final int mask;
final int bitsPerChar;
final int charsPerChunk;
final int bytesPerChunk;
private final byte[] decodabet;
private final boolean[] validPadding;
Alphabet(String name, char[] chars) {
this.name = checkNotNull(name);
this.chars = checkNotNull(chars);
try {
this.bitsPerChar = log2(chars.length, UNNECESSARY);
} catch (ArithmeticException e) {
throw new IllegalArgumentException("Illegal alphabet length " + chars.length, e);
}
/*
* e.g. for base64, bitsPerChar == 6, charsPerChunk == 4, and bytesPerChunk == 3. This makes
* for the smallest chunk size that still has charsPerChunk * bitsPerChar be a multiple of 8.
*/
int gcd = Math.min(8, Integer.lowestOneBit(bitsPerChar));
this.charsPerChunk = 8 / gcd;
this.bytesPerChunk = bitsPerChar / gcd;
this.mask = chars.length - 1;
byte[] decodabet = new byte[Ascii.MAX + 1];
Arrays.fill(decodabet, (byte) -1);
for (int i = 0; i < chars.length; i++) {
char c = chars[i];
checkArgument(CharMatcher.ASCII.matches(c), "Non-ASCII character: %s", c);
checkArgument(decodabet[c] == -1, "Duplicate character: %s", c);
decodabet[c] = (byte) i;
}
this.decodabet = decodabet;
boolean[] validPadding = new boolean[charsPerChunk];
for (int i = 0; i < bytesPerChunk; i++) {
validPadding[divide(i * 8, bitsPerChar, CEILING)] = true;
}
this.validPadding = validPadding;
}
char encode(int bits) {
return chars[bits];
}
boolean isValidPaddingStartPosition(int index) {
return validPadding[index % charsPerChunk];
}
int decode(char ch) throws IOException {
if (ch > Ascii.MAX || decodabet[ch] == -1) {
throw new DecodingException("Unrecognized character: " + ch);
}
return decodabet[ch];
}
private boolean hasLowerCase() {
for (char c : chars) {
if (Ascii.isLowerCase(c)) {
return true;
}
}
return false;
}
private boolean hasUpperCase() {
for (char c : chars) {
if (Ascii.isUpperCase(c)) {
return true;
}
}
return false;
}
Alphabet upperCase() {
if (!hasLowerCase()) {
return this;
} else {
checkState(!hasUpperCase(), "Cannot call upperCase() on a mixed-case alphabet");
char[] upperCased = new char[chars.length];
for (int i = 0; i < chars.length; i++) {
upperCased[i] = Ascii.toUpperCase(chars[i]);
}
return new Alphabet(name + ".upperCase()", upperCased);
}
}
Alphabet lowerCase() {
if (!hasUpperCase()) {
return this;
} else {
checkState(!hasLowerCase(), "Cannot call lowerCase() on a mixed-case alphabet");
char[] lowerCased = new char[chars.length];
for (int i = 0; i < chars.length; i++) {
lowerCased[i] = Ascii.toLowerCase(chars[i]);
}
return new Alphabet(name + ".lowerCase()", lowerCased);
}
}
@Override
public boolean matches(char c) {
return CharMatcher.ASCII.matches(c) && decodabet[c] != -1;
}
@Override
public String toString() {
return name;
}
}
static final class StandardBaseEncoding extends BaseEncoding {
// TODO(user): provide a useful toString
private final Alphabet alphabet;
@Nullable
private final Character paddingChar;
StandardBaseEncoding(String name, String alphabetChars, @Nullable Character paddingChar) {
this(new Alphabet(name, alphabetChars.toCharArray()), paddingChar);
}
StandardBaseEncoding(Alphabet alphabet, @Nullable Character paddingChar) {
this.alphabet = checkNotNull(alphabet);
checkArgument(paddingChar == null || !alphabet.matches(paddingChar),
"Padding character %s was already in alphabet", paddingChar);
this.paddingChar = paddingChar;
}
@Override
CharMatcher padding() {
return (paddingChar == null) ? CharMatcher.NONE : CharMatcher.is(paddingChar.charValue());
}
@Override
int maxEncodedSize(int bytes) {
return alphabet.charsPerChunk * divide(bytes, alphabet.bytesPerChunk, CEILING);
}
@Override
ByteOutput encodingStream(final CharOutput out) {
checkNotNull(out);
return new ByteOutput() {
int bitBuffer = 0;
int bitBufferLength = 0;
int writtenChars = 0;
@Override
public void write(byte b) throws IOException {
bitBuffer <<= 8;
bitBuffer |= b & 0xFF;
bitBufferLength += 8;
while (bitBufferLength >= alphabet.bitsPerChar) {
int charIndex = (bitBuffer >> (bitBufferLength - alphabet.bitsPerChar))
& alphabet.mask;
out.write(alphabet.encode(charIndex));
writtenChars++;
bitBufferLength -= alphabet.bitsPerChar;
}
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
if (bitBufferLength > 0) {
int charIndex = (bitBuffer << (alphabet.bitsPerChar - bitBufferLength))
& alphabet.mask;
out.write(alphabet.encode(charIndex));
writtenChars++;
if (paddingChar != null) {
while (writtenChars % alphabet.charsPerChunk != 0) {
out.write(paddingChar.charValue());
writtenChars++;
}
}
}
out.close();
}
};
}
@Override
int maxDecodedSize(int chars) {
return (int) ((alphabet.bitsPerChar * (long) chars + 7L) / 8L);
}
@Override
ByteInput decodingStream(final CharInput reader) {
checkNotNull(reader);
return new ByteInput() {
int bitBuffer = 0;
int bitBufferLength = 0;
int readChars = 0;
boolean hitPadding = false;
final CharMatcher paddingMatcher = padding();
@Override
public int read() throws IOException {
while (true) {
int readChar = reader.read();
if (readChar == -1) {
if (!hitPadding && !alphabet.isValidPaddingStartPosition(readChars)) {
throw new DecodingException("Invalid input length " + readChars);
}
return -1;
}
readChars++;
char ch = (char) readChar;
if (paddingMatcher.matches(ch)) {
if (!hitPadding
&& (readChars == 1 || !alphabet.isValidPaddingStartPosition(readChars - 1))) {
throw new DecodingException("Padding cannot start at index " + readChars);
}
hitPadding = true;
} else if (hitPadding) {
throw new DecodingException(
"Expected padding character but found '" + ch + "' at index " + readChars);
} else {
bitBuffer <<= alphabet.bitsPerChar;
bitBuffer |= alphabet.decode(ch);
bitBufferLength += alphabet.bitsPerChar;
if (bitBufferLength >= 8) {
bitBufferLength -= 8;
return (bitBuffer >> bitBufferLength) & 0xFF;
}
}
}
}
@Override
public void close() throws IOException {
reader.close();
}
};
}
@Override
public BaseEncoding omitPadding() {
return (paddingChar == null) ? this : new StandardBaseEncoding(alphabet, null);
}
@Override
public BaseEncoding withPadChar(char padChar) {
if (8 % alphabet.bitsPerChar == 0 ||
(paddingChar != null && paddingChar.charValue() == padChar)) {
return this;
} else {
return new StandardBaseEncoding(alphabet, padChar);
}
}
@Override
public BaseEncoding withSeparator(String separator, int afterEveryChars) {
checkNotNull(separator);
checkArgument(padding().or(alphabet).matchesNoneOf(separator),
"Separator cannot contain alphabet or padding characters");
return new SeparatedBaseEncoding(this, separator, afterEveryChars);
}
private transient BaseEncoding upperCase;
private transient BaseEncoding lowerCase;
@Override
public BaseEncoding upperCase() {
BaseEncoding result = upperCase;
if (result == null) {
Alphabet upper = alphabet.upperCase();
result = upperCase =
(upper == alphabet) ? this : new StandardBaseEncoding(upper, paddingChar);
}
return result;
}
@Override
public BaseEncoding lowerCase() {
BaseEncoding result = lowerCase;
if (result == null) {
Alphabet lower = alphabet.lowerCase();
result = lowerCase =
(lower == alphabet) ? this : new StandardBaseEncoding(lower, paddingChar);
}
return result;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder("BaseEncoding.");
builder.append(alphabet.toString());
if (8 % alphabet.bitsPerChar != 0) {
if (paddingChar == null) {
builder.append(".omitPadding()");
} else {
builder.append(".withPadChar(").append(paddingChar).append(')');
}
}
return builder.toString();
}
}
static CharInput ignoringInput(final CharInput delegate, final CharMatcher toIgnore) {
checkNotNull(delegate);
checkNotNull(toIgnore);
return new CharInput() {
@Override
public int read() throws IOException {
int readChar;
do {
readChar = delegate.read();
} while (readChar != -1 && toIgnore.matches((char) readChar));
return readChar;
}
@Override
public void close() throws IOException {
delegate.close();
}
};
}
static CharOutput separatingOutput(
final CharOutput delegate, final String separator, final int afterEveryChars) {
checkNotNull(delegate);
checkNotNull(separator);
checkArgument(afterEveryChars > 0);
return new CharOutput() {
int charsUntilSeparator = afterEveryChars;
@Override
public void write(char c) throws IOException {
if (charsUntilSeparator == 0) {
for (int i = 0; i < separator.length(); i++) {
delegate.write(separator.charAt(i));
}
charsUntilSeparator = afterEveryChars;
}
delegate.write(c);
charsUntilSeparator--;
}
@Override
public void flush() throws IOException {
delegate.flush();
}
@Override
public void close() throws IOException {
delegate.close();
}
};
}
static final class SeparatedBaseEncoding extends BaseEncoding {
private final BaseEncoding delegate;
private final String separator;
private final int afterEveryChars;
private final CharMatcher separatorChars;
SeparatedBaseEncoding(BaseEncoding delegate, String separator, int afterEveryChars) {
this.delegate = checkNotNull(delegate);
this.separator = checkNotNull(separator);
this.afterEveryChars = afterEveryChars;
checkArgument(
afterEveryChars > 0, "Cannot add a separator after every %s chars", afterEveryChars);
this.separatorChars = CharMatcher.anyOf(separator).precomputed();
}
@Override
CharMatcher padding() {
return delegate.padding();
}
@Override
int maxEncodedSize(int bytes) {
int unseparatedSize = delegate.maxEncodedSize(bytes);
return unseparatedSize + separator.length()
* divide(Math.max(0, unseparatedSize - 1), afterEveryChars, FLOOR);
}
@Override
ByteOutput encodingStream(final CharOutput output) {
return delegate.encodingStream(separatingOutput(output, separator, afterEveryChars));
}
@Override
int maxDecodedSize(int chars) {
return delegate.maxDecodedSize(chars);
}
@Override
ByteInput decodingStream(final CharInput input) {
return delegate.decodingStream(ignoringInput(input, separatorChars));
}
@Override
public BaseEncoding omitPadding() {
return delegate.omitPadding().withSeparator(separator, afterEveryChars);
}
@Override
public BaseEncoding withPadChar(char padChar) {
return delegate.withPadChar(padChar).withSeparator(separator, afterEveryChars);
}
@Override
public BaseEncoding withSeparator(String separator, int afterEveryChars) {
throw new UnsupportedOperationException("Already have a separator");
}
@Override
public BaseEncoding upperCase() {
return delegate.upperCase().withSeparator(separator, afterEveryChars);
}
@Override
public BaseEncoding lowerCase() {
return delegate.lowerCase().withSeparator(separator, afterEveryChars);
}
@Override
public String toString() {
return delegate.toString() +
".withSeparator(\"" + separator + "\", " + afterEveryChars + ")";
}
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/BaseEncoding.java | Java | asf20 | 29,695 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import java.io.IOException;
/**
 * A factory for readable streams of bytes or characters.
 *
 * @param <T> the type of object supplied by this factory, typically a
 *     {@code Reader} or {@code InputStream}
 * @author Chris Nokleberg
 * @since 1.0
 * @deprecated For {@code InputSupplier<? extends InputStream>}, use
 *     {@link ByteSource} instead. For {@code InputSupplier<? extends Reader>},
 *     use {@link CharSource}. Implementations of {@code InputSupplier} that
 *     don't fall into one of those categories do not benefit from any of the
 *     methods in {@code common.io} and should use a different interface. This
 *     interface is scheduled for removal in June 2015.
 */
@Deprecated
public interface InputSupplier<T> {
  /**
   * Returns an object that encapsulates a readable resource.
   * <p>
   * Like {@link Iterable#iterator}, this method may be called repeatedly to
   * get independent channels to the same underlying resource.
   * <p>
   * Where the channel maintains a position within the resource, moving that
   * cursor within one channel should not affect the starting position of
   * channels returned by other calls.
   *
   * @return a newly opened readable resource
   * @throws IOException if the resource cannot be opened
   */
  T getInput() throws IOException;
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/InputSupplier.java | Java | asf20 | 1,719 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains utility methods and classes for working with Java I/O;
* for example input streams, output streams, readers, writers, and files.
*
* <p>At the core of this package are the Source/Sink types:
* {@link com.google.common.io.ByteSource ByteSource},
* {@link com.google.common.io.CharSource CharSource},
* {@link com.google.common.io.ByteSink ByteSink} and
* {@link com.google.common.io.CharSink CharSink}. They are factories for I/O streams that
* provide many convenience methods that handle both opening and closing streams for you.
*
* <p>This package is a part of the open-source
* <a href="http://guava-libraries.googlecode.com">Guava libraries</a>. For more information on
* Sources and Sinks as well as other features of this package, see
* <a href="https://code.google.com/p/guava-libraries/wiki/IOExplained">I/O Explained</a> on the
* Guava wiki.
*
* @author Chris Nokleberg
*/
@ParametersAreNonnullByDefault
package com.google.common.io;
import javax.annotation.ParametersAreNonnullByDefault;
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/package-info.java | Java | asf20 | 1,649 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import com.google.common.annotations.Beta;
import com.google.common.base.Charsets;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Provides utility methods for working with character streams.
*
* <p>All method parameters must be non-null unless documented otherwise.
*
* <p>Some of the methods in this class take arguments with a generic type of
* {@code Readable & Closeable}. A {@link java.io.Reader} implements both of
* those interfaces. Similarly for {@code Appendable & Closeable} and
* {@link java.io.Writer}.
*
* @author Chris Nokleberg
* @author Bin Zhu
* @author Colin Decker
* @since 1.0
*/
@Beta
public final class CharStreams {
private static final int BUF_SIZE = 0x800; // 2K chars (4K bytes)
private CharStreams() {}
/**
* Returns a factory that will supply instances of {@link StringReader} that
* read a string value.
*
* @param value the string to read
* @return the factory
* @deprecated Use {@link CharSource#wrap(CharSequence}} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<StringReader> newReaderSupplier(
final String value) {
return asInputSupplier(CharSource.wrap(value));
}
/**
* Returns a factory that will supply instances of {@link InputStreamReader},
* using the given {@link InputStream} factory and character set.
*
* @param in the factory that will be used to open input streams
* @param charset the charset used to decode the input stream; see {@link
* Charsets} for helpful predefined constants
* @return the factory
* @deprecated Use {@link ByteSource#asCharSource(Charset)} instead. This
* method is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<InputStreamReader> newReaderSupplier(
final InputSupplier<? extends InputStream> in, final Charset charset) {
return asInputSupplier(
ByteStreams.asByteSource(in).asCharSource(charset));
}
/**
* Returns a factory that will supply instances of {@link OutputStreamWriter},
* using the given {@link OutputStream} factory and character set.
*
* @param out the factory that will be used to open output streams
* @param charset the charset used to encode the output stream; see {@link
* Charsets} for helpful predefined constants
* @return the factory
* @deprecated Use {@link ByteSink#asCharSink(Charset)} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static OutputSupplier<OutputStreamWriter> newWriterSupplier(
final OutputSupplier<? extends OutputStream> out, final Charset charset) {
return asOutputSupplier(
ByteStreams.asByteSink(out).asCharSink(charset));
}
/**
* Writes a character sequence (such as a string) to an appendable
* object from the given supplier.
*
* @param from the character sequence to write
* @param to the output supplier
* @throws IOException if an I/O error occurs
* @deprecated Use {@link CharSink#write(CharSequence)} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static <W extends Appendable & Closeable> void write(CharSequence from,
OutputSupplier<W> to) throws IOException {
asCharSink(to).write(from);
}
/**
* Opens {@link Readable} and {@link Appendable} objects from the
* given factories, copies all characters between the two, and closes
* them.
*
* @param from the input factory
* @param to the output factory
* @return the number of characters copied
* @throws IOException if an I/O error occurs
* @deprecated Use {@link CharSource#copyTo(CharSink)} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static <R extends Readable & Closeable,
W extends Appendable & Closeable> long copy(InputSupplier<R> from,
OutputSupplier<W> to) throws IOException {
return asCharSource(from).copyTo(asCharSink(to));
}
/**
* Opens a {@link Readable} object from the supplier, copies all characters
* to the {@link Appendable} object, and closes the input. Does not close
* or flush the output.
*
* @param from the input factory
* @param to the object to write to
* @return the number of characters copied
* @throws IOException if an I/O error occurs
* @deprecated Use {@link CharSource#copyTo(Appendable)} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static <R extends Readable & Closeable> long copy(
InputSupplier<R> from, Appendable to) throws IOException {
return asCharSource(from).copyTo(to);
}
/**
* Copies all characters between the {@link Readable} and {@link Appendable}
* objects. Does not close or flush either object.
*
* @param from the object to read from
* @param to the object to write to
* @return the number of characters copied
* @throws IOException if an I/O error occurs
*/
public static long copy(Readable from, Appendable to) throws IOException {
checkNotNull(from);
checkNotNull(to);
CharBuffer buf = CharBuffer.allocate(BUF_SIZE);
long total = 0;
while (from.read(buf) != -1) {
buf.flip();
to.append(buf);
total += buf.remaining();
buf.clear();
}
return total;
}
/**
* Reads all characters from a {@link Readable} object into a {@link String}.
* Does not close the {@code Readable}.
*
* @param r the object to read from
* @return a string containing all the characters
* @throws IOException if an I/O error occurs
*/
public static String toString(Readable r) throws IOException {
return toStringBuilder(r).toString();
}
/**
* Returns the characters from a {@link Readable} & {@link Closeable} object
* supplied by a factory as a {@link String}.
*
* @param supplier the factory to read from
* @return a string containing all the characters
* @throws IOException if an I/O error occurs
* @deprecated Use {@link CharSource#read()} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static <R extends Readable & Closeable> String toString(
InputSupplier<R> supplier) throws IOException {
return asCharSource(supplier).read();
}
/**
* Reads all characters from a {@link Readable} object into a new
* {@link StringBuilder} instance. Does not close the {@code Readable}.
*
* @param r the object to read from
* @return a {@link StringBuilder} containing all the characters
* @throws IOException if an I/O error occurs
*/
private static StringBuilder toStringBuilder(Readable r) throws IOException {
StringBuilder sb = new StringBuilder();
copy(r, sb);
return sb;
}
/**
* Reads the first line from a {@link Readable} & {@link Closeable} object
* supplied by a factory. The line does not include line-termination
* characters, but does include other leading and trailing whitespace.
*
* @param supplier the factory to read from
* @return the first line, or null if the reader is empty
* @throws IOException if an I/O error occurs
* @deprecated Use {@link CharSource#readFirstLine()} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static <R extends Readable & Closeable> String readFirstLine(
InputSupplier<R> supplier) throws IOException {
return asCharSource(supplier).readFirstLine();
}
  /**
   * Reads all of the lines from a {@link Readable} & {@link Closeable} object
   * supplied by a factory. The lines do not include line-termination
   * characters, but do include other leading and trailing whitespace.
   *
   * @param supplier the factory to read from
   * @return a mutable {@link List} containing all the lines
   * @throws IOException if an I/O error occurs
   * @deprecated Use {@link CharSource#readLines()} instead, but note that it
   *     returns an {@code ImmutableList}. This method is scheduled for removal
   *     in Guava 18.0.
   */
  @Deprecated
  public static <R extends Readable & Closeable> List<String> readLines(
      InputSupplier<R> supplier) throws IOException {
    // Standard Closer idiom: register the resource, rethrow any failure
    // through the Closer (so close-time exceptions don't mask it), and close
    // in the finally block.
    Closer closer = Closer.create();
    try {
      R r = closer.register(supplier.getInput());
      return readLines(r);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
/**
 * Reads all of the lines from a {@link Readable} object. Line-termination
 * characters are stripped from each line, but other leading and trailing
 * whitespace is preserved.
 *
 * <p>Does not close the {@code Readable}. If reading files or resources you
 * should use the {@link Files#readLines} and {@link Resources#readLines}
 * methods.
 *
 * @param r the object to read from
 * @return a mutable {@link List} containing all the lines
 * @throws IOException if an I/O error occurs
 */
public static List<String> readLines(Readable r) throws IOException {
  List<String> lines = new ArrayList<String>();
  LineReader lineReader = new LineReader(r);
  for (String line = lineReader.readLine(); line != null;
      line = lineReader.readLine()) {
    lines.add(line);
  }
  return lines;
}
/**
 * Streams lines from a {@link Readable} object, stopping when the processor
 * returns {@code false} or all lines have been read, and returning the
 * result produced by the processor. Does not close {@code readable}. Note
 * that this method may not fully consume the contents of {@code readable} if
 * the processor stops processing early.
 *
 * @throws IOException if an I/O error occurs
 * @since 14.0
 */
public static <T> T readLines(
    Readable readable, LineProcessor<T> processor) throws IOException {
  checkNotNull(readable);
  checkNotNull(processor);
  LineReader lineReader = new LineReader(readable);
  for (String line = lineReader.readLine(); line != null;
      line = lineReader.readLine()) {
    // A false return from the processor requests early termination.
    if (!processor.processLine(line)) {
      break;
    }
  }
  return processor.getResult();
}
/**
 * Streams lines from a {@link Readable} and {@link Closeable} object
 * supplied by a factory, stopping when the callback returns false or all of
 * the lines have been read.
 *
 * @param supplier the factory to read from
 * @param callback the LineProcessor to use to handle the lines
 * @return the output of processing the lines
 * @throws IOException if an I/O error occurs
 * @deprecated Use {@link CharSource#readLines(LineProcessor)} instead. This
 *     method is scheduled for removal in Guava 18.0.
 */
@Deprecated
public static <R extends Readable & Closeable, T> T readLines(
    InputSupplier<R> supplier, LineProcessor<T> callback) throws IOException {
  checkNotNull(supplier);
  checkNotNull(callback);
  Closer closer = Closer.create();
  try {
    R reader = closer.register(supplier.getInput());
    return readLines(reader, callback);
  } catch (Throwable t) {
    throw closer.rethrow(t);
  } finally {
    closer.close();
  }
}
/**
 * Joins multiple {@link Reader} suppliers into a single supplier. The reader
 * returned from the supplier will contain the concatenated data from the
 * readers of the underlying suppliers.
 *
 * <p>Reading from the joined reader will throw a {@link NullPointerException}
 * if any of the suppliers are null or return null.
 *
 * <p>Only one underlying reader will be open at a time. Closing the
 * joined reader will close the open underlying reader.
 *
 * @param suppliers the suppliers to concatenate
 * @return a supplier that will return a reader containing the concatenated
 *     data
 * @deprecated Use {@link CharSource#concat(Iterable)} instead. This method
 *     is scheduled for removal in Guava 18.0.
 */
@Deprecated
public static InputSupplier<Reader> join(
    final Iterable<? extends InputSupplier<? extends Reader>> suppliers) {
  checkNotNull(suppliers);
  // View each legacy supplier as a CharSource, then concatenate the sources.
  Function<InputSupplier<? extends Reader>, CharSource> toSource =
      new Function<InputSupplier<? extends Reader>, CharSource>() {
        @Override
        public CharSource apply(InputSupplier<? extends Reader> supplier) {
          return asCharSource(supplier);
        }
      };
  Iterable<CharSource> sources = Iterables.transform(suppliers, toSource);
  return asInputSupplier(CharSource.concat(sources));
}
/**
 * Varargs form of {@link #join(Iterable)}.
 *
 * @deprecated Use {@link CharSource#concat(CharSource[])} instead. This
 *     method is scheduled for removal in Guava 18.0.
 */
@Deprecated
@SuppressWarnings("unchecked") // suppress "possible heap pollution" warning in JDK7
public static InputSupplier<Reader> join(
    InputSupplier<? extends Reader>... suppliers) {
  List<InputSupplier<? extends Reader>> supplierList = Arrays.asList(suppliers);
  return join(supplierList);
}
/**
 * Discards {@code n} characters of data from the reader. This method
 * will block until the full amount has been skipped. Does not close the
 * reader.
 *
 * @param reader the reader to read from
 * @param n the number of characters to skip
 * @throws EOFException if this stream reaches the end before skipping all
 *     the characters
 * @throws IOException if an I/O error occurs
 */
public static void skipFully(Reader reader, long n) throws IOException {
  checkNotNull(reader);
  for (long remaining = n; remaining > 0;) {
    long skipped = reader.skip(remaining);
    if (skipped > 0) {
      remaining -= skipped;
    } else {
      // skip() made no progress; force a blocking single-character read to
      // distinguish a slow reader from end-of-stream.
      if (reader.read() == -1) {
        throw new EOFException();
      }
      remaining--;
    }
  }
}
/**
 * Returns a {@link Writer} that simply discards written chars.
 *
 * @since 15.0
 */
public static Writer nullWriter() {
// NullWriter is stateless, so a single shared instance is safe to return.
return NullWriter.INSTANCE;
}
/** A {@link Writer} that validates its arguments but discards all output. */
private static final class NullWriter extends Writer {
  private static final NullWriter INSTANCE = new NullWriter();

  @Override
  public void write(int c) {
    // discard
  }

  @Override
  public void write(char[] cbuf) {
    // Arguments are still validated even though the data goes nowhere.
    checkNotNull(cbuf);
  }

  @Override
  public void write(char[] cbuf, int off, int len) {
    checkPositionIndexes(off, off + len, cbuf.length);
  }

  @Override
  public void write(String str) {
    checkNotNull(str);
  }

  @Override
  public void write(String str, int off, int len) {
    checkPositionIndexes(off, off + len, str.length());
  }

  @Override
  public Writer append(CharSequence csq) {
    checkNotNull(csq);
    return this;
  }

  @Override
  public Writer append(CharSequence csq, int start, int end) {
    checkPositionIndexes(start, end, csq.length());
    return this;
  }

  @Override
  public Writer append(char c) {
    return this;
  }

  @Override
  public void flush() {
    // no-op
  }

  @Override
  public void close() {
    // no-op
  }

  @Override
  public String toString() {
    return "CharStreams.nullWriter()";
  }
}
/**
 * Returns a Writer that sends all output to the given {@link Appendable}
 * target. Closing the writer will close the target if it is {@link
 * Closeable}, and flushing the writer will flush the target if it is {@link
 * java.io.Flushable}.
 *
 * @param target the object to which output will be sent
 * @return a new Writer object, unless target is a Writer, in which case the
 *     target is returned
 */
public static Writer asWriter(Appendable target) {
  if (!(target instanceof Writer)) {
    return new AppendableWriter(target);
  }
  // Already a Writer; no adapter needed.
  return (Writer) target;
}
// TODO(user): Remove these once Input/OutputSupplier methods are removed
static Reader asReader(final Readable readable) {
  checkNotNull(readable);
  if (readable instanceof Reader) {
    return (Reader) readable;
  }
  // Adapt an arbitrary Readable to the Reader API, delegating close() only
  // when the underlying object happens to be Closeable.
  return new Reader() {
    @Override
    public int read(CharBuffer target) throws IOException {
      return readable.read(target);
    }

    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
      return read(CharBuffer.wrap(cbuf, off, len));
    }

    @Override
    public void close() throws IOException {
      if (readable instanceof Closeable) {
        ((Closeable) readable).close();
      }
    }
  };
}
/**
 * Returns a view of the given {@code Readable} supplier as a
 * {@code CharSource}.
 *
 * <p>This method is a temporary method provided for easing migration from
 * suppliers to sources and sinks.
 *
 * @since 15.0
 * @deprecated Convert all {@code InputSupplier<? extends Readable>}
 *     implementations to extend {@link CharSource} or provide a method for
 *     viewing the object as a {@code CharSource}. This method is scheduled
 *     for removal in Guava 18.0.
 */
@Deprecated
public static CharSource asCharSource(
    final InputSupplier<? extends Readable> supplier) {
  checkNotNull(supplier);
  // Each call to openStream() asks the supplier for a fresh Readable.
  return new CharSource() {
    @Override
    public Reader openStream() throws IOException {
      return asReader(supplier.getInput());
    }

    @Override
    public String toString() {
      return "CharStreams.asCharSource(" + supplier + ")";
    }
  };
}
/**
 * Returns a view of the given {@code Appendable} supplier as a
 * {@code CharSink}.
 *
 * <p>This method is a temporary method provided for easing migration from
 * suppliers to sources and sinks.
 *
 * @since 15.0
 * @deprecated Convert all {@code OutputSupplier<? extends Appendable>}
 *     implementations to extend {@link CharSink} or provide a method for
 *     viewing the object as a {@code CharSink}. This method is scheduled
 *     for removal in Guava 18.0.
 */
@Deprecated
public static CharSink asCharSink(
    final OutputSupplier<? extends Appendable> supplier) {
  checkNotNull(supplier);
  // Each call to openStream() asks the supplier for a fresh Appendable.
  return new CharSink() {
    @Override
    public Writer openStream() throws IOException {
      return asWriter(supplier.getOutput());
    }

    @Override
    public String toString() {
      return "CharStreams.asCharSink(" + supplier + ")";
    }
  };
}
// The raw cast is safe because CharSource itself implements the legacy
// InputSupplier behavior of producing Readers.
@SuppressWarnings("unchecked") // used internally where known to be safe
static <R extends Reader> InputSupplier<R> asInputSupplier(
    CharSource source) {
  checkNotNull(source);
  return (InputSupplier) source;
}
// The raw cast is safe because CharSink itself implements the legacy
// OutputSupplier behavior of producing Writers.
@SuppressWarnings("unchecked") // used internally where known to be safe
static <W extends Writer> OutputSupplier<W> asOutputSupplier(
    CharSink sink) {
  checkNotNull(sink);
  return (OutputSupplier) sink;
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/CharStreams.java | Java | asf20 | 19,794 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.Charset;
/**
* A destination to which characters can be written, such as a text file. Unlike a {@link Writer}, a
* {@code CharSink} is not an open, stateful stream that can be written to and closed. Instead, it
* is an immutable <i>supplier</i> of {@code Writer} instances.
*
* <p>{@code CharSink} provides two kinds of methods:
* <ul>
* <li><b>Methods that return a writer:</b> These methods should return a <i>new</i>,
* independent instance each time they are called. The caller is responsible for ensuring that the
* returned writer is closed.
* <li><b>Convenience methods:</b> These are implementations of common operations that are
* typically implemented by opening a writer using one of the methods in the first category,
* doing something and finally closing the writer that was opened.
* </ul>
*
* <p>Any {@link ByteSink} may be viewed as a {@code CharSink} with a specific {@linkplain Charset
* character encoding} using {@link ByteSink#asCharSink(Charset)}. Characters written to the
 * resulting {@code CharSink} will be written to the {@code ByteSink} as encoded bytes.
*
* @since 14.0
* @author Colin Decker
*/
public abstract class CharSink implements OutputSupplier<Writer> {

  /** Constructor for use by subclasses. */
  protected CharSink() {}

  /**
   * Opens a new {@link Writer} for writing to this sink. This method should return a new,
   * independent writer each time it is called.
   *
   * <p>The caller is responsible for ensuring that the returned writer is closed.
   *
   * @throws IOException if an I/O error occurs in the process of opening the writer
   */
  public abstract Writer openStream() throws IOException;

  /**
   * This method is a temporary method provided for easing migration from suppliers to sources and
   * sinks.
   *
   * @since 15.0
   * @deprecated This method is only provided for temporary compatibility with the
   *     {@link OutputSupplier} interface and should not be called directly. Use
   *     {@link #openStream} instead. This method is scheduled for removal in Guava 18.0.
   */
  @Override
  @Deprecated
  public final Writer getOutput() throws IOException {
    return openStream();
  }

  /**
   * Opens a new buffered {@link Writer} for writing to this sink. The returned stream is not
   * required to be a {@link BufferedWriter} in order to allow implementations to simply delegate
   * to {@link #openStream()} when the stream returned by that method does not benefit from
   * additional buffering. This method should return a new, independent writer each time it is
   * called.
   *
   * <p>The caller is responsible for ensuring that the returned writer is closed.
   *
   * @throws IOException if an I/O error occurs in the process of opening the writer
   * @since 15.0 (in 14.0 with return type {@link BufferedWriter})
   */
  public Writer openBufferedStream() throws IOException {
    Writer writer = openStream();
    if (writer instanceof BufferedWriter) {
      // Already buffered; don't wrap it a second time.
      return writer;
    }
    return new BufferedWriter(writer);
  }

  /**
   * Writes the given character sequence to this sink.
   *
   * @throws IOException if an I/O error occurs in the process of writing to this sink
   */
  public void write(CharSequence charSequence) throws IOException {
    checkNotNull(charSequence);
    Closer closer = Closer.create();
    try {
      Writer writer = closer.register(openStream());
      writer.append(charSequence);
      writer.flush(); // https://code.google.com/p/guava-libraries/issues/detail?id=1330
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }

  /**
   * Writes the given lines of text to this sink with each line (including the last) terminated
   * with the operating system's default line separator. This method is equivalent to
   * {@code writeLines(lines, System.getProperty("line.separator"))}.
   *
   * @throws IOException if an I/O error occurs in the process of writing to this sink
   */
  public void writeLines(Iterable<? extends CharSequence> lines) throws IOException {
    writeLines(lines, System.getProperty("line.separator"));
  }

  /**
   * Writes the given lines of text to this sink with each line (including the last) terminated
   * with the given line separator.
   *
   * @throws IOException if an I/O error occurs in the process of writing to this sink
   */
  public void writeLines(Iterable<? extends CharSequence> lines, String lineSeparator)
      throws IOException {
    checkNotNull(lines);
    checkNotNull(lineSeparator);
    Closer closer = Closer.create();
    try {
      Writer writer = closer.register(openBufferedStream());
      for (CharSequence line : lines) {
        writer.append(line);
        writer.append(lineSeparator);
      }
      writer.flush(); // https://code.google.com/p/guava-libraries/issues/detail?id=1330
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }

  /**
   * Writes all the text from the given {@link Readable} (such as a {@link Reader}) to this sink.
   * Does not close {@code readable} if it is {@code Closeable}.
   *
   * @throws IOException if an I/O error occurs in the process of reading from {@code readable} or
   *     writing to this sink
   */
  public long writeFrom(Readable readable) throws IOException {
    checkNotNull(readable);
    Closer closer = Closer.create();
    try {
      Writer writer = closer.register(openStream());
      long written = CharStreams.copy(readable, writer);
      writer.flush(); // https://code.google.com/p/guava-libraries/issues/detail?id=1330
      return written;
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/CharSink.java | Java | asf20 | 6,551 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import java.io.IOException;
/**
 * A callback interface to process bytes from a stream.
 *
 * <p>{@link #processBytes} will be called for each chunk of bytes that is
 * read, and should return {@code false} when you want to stop processing.
 *
 * @author Chris Nokleberg
 * @since 1.0
 */
@Beta
public interface ByteProcessor<T> {
/**
 * This method will be called for each chunk of bytes in an
 * input stream. The implementation should process the bytes
 * from {@code buf[off]} through {@code buf[off + len - 1]}
 * (inclusive).
 *
 * @param buf the byte array containing the data to process
 * @param off the initial offset into the array
 * @param len the length of data to be processed
 * @return true to continue processing, false to stop
 * @throws IOException if an I/O error occurs while processing the bytes
 */
boolean processBytes(byte[] buf, int off, int len) throws IOException;
/** Return the result of processing all the bytes. */
T getResult();
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/ByteProcessor.java | Java | asf20 | 1,589 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import javax.annotation.Nullable;
/**
* An OutputStream that counts the number of bytes written.
*
* @author Chris Nokleberg
* @since 1.0
*/
@Beta
public final class CountingOutputStream extends FilterOutputStream {

  /** Number of bytes written to the delegate so far. */
  private long count;

  /**
   * Wraps another output stream, counting the number of bytes written.
   *
   * @param out the output stream to be wrapped
   */
  public CountingOutputStream(@Nullable OutputStream out) {
    super(out);
  }

  /** Returns the number of bytes written. */
  public long getCount() {
    return count;
  }

  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    // Delegate first so the count only reflects successful writes.
    out.write(b, off, len);
    count += len;
  }

  @Override
  public void write(int b) throws IOException {
    out.write(b);
    count++;
  }

  // Overriding close() because FilterOutputStream's close() method pre-JDK8 has bad behavior:
  // it silently ignores any exception thrown by flush(). Instead, just close the delegate
  // stream directly; it should flush itself if necessary.
  @Override
  public void close() throws IOException {
    out.close();
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/CountingOutputStream.java | Java | asf20 | 1,866 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import java.io.DataInput;
import java.io.IOException;
/**
 * An extension of {@code DataInput} for reading from in-memory byte arrays; its
 * methods offer identical functionality but do not throw {@link IOException}.
 *
 * <p><b>Warning:</b> The caller is responsible for not attempting to read past
 * the end of the array. If any method encounters the end of the array
 * prematurely, it throws {@link IllegalStateException} to signify <i>programmer
 * error</i>. This behavior is a technical violation of the supertype's
 * contract, which specifies a checked exception.
 *
 * @author Kevin Bourrillion
 * @since 1.0
 */
public interface ByteArrayDataInput extends DataInput {
  @Override
  void readFully(byte[] b);

  @Override
  void readFully(byte[] b, int off, int len);

  @Override
  int skipBytes(int n);

  @Override
  boolean readBoolean();

  @Override
  byte readByte();

  @Override
  int readUnsignedByte();

  @Override
  short readShort();

  @Override
  int readUnsignedShort();

  @Override
  char readChar();

  @Override
  int readInt();

  @Override
  long readLong();

  @Override
  float readFloat();

  @Override
  double readDouble();

  @Override
  String readLine();

  @Override
  String readUTF();
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/ByteArrayDataInput.java | Java | asf20 | 1,836 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
/**
 * Modes for opening a file for writing. The default mode, when none is
 * specified, is to truncate the file before writing.
 *
 * @author Colin Decker
 */
public enum FileWriteMode {
/** Specifies that writes to the opened file should append to the end of the file. */
APPEND
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/FileWriteMode.java | Java | asf20 | 924 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.annotation.Nullable;
/**
* An {@link InputStream} that counts the number of bytes read.
*
* @author Chris Nokleberg
* @since 1.0
*/
@Beta
public final class CountingInputStream extends FilterInputStream {

  /** Total number of bytes read (or skipped) from the wrapped stream. */
  private long count;

  /** Value of {@code count} at the last mark() call, or -1 if no mark is set. */
  private long mark = -1;

  /**
   * Wraps another input stream, counting the number of bytes read.
   *
   * @param in the input stream to be wrapped
   */
  public CountingInputStream(@Nullable InputStream in) {
    super(in);
  }

  /** Returns the number of bytes read. */
  public long getCount() {
    return count;
  }

  @Override
  public int read() throws IOException {
    int b = in.read();
    if (b != -1) {
      count++;
    }
    return b;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    int bytesRead = in.read(b, off, len);
    if (bytesRead != -1) {
      count += bytesRead;
    }
    return bytesRead;
  }

  @Override
  public long skip(long n) throws IOException {
    long skipped = in.skip(n);
    count += skipped;
    return skipped;
  }

  @Override
  public synchronized void mark(int readlimit) {
    in.mark(readlimit);
    mark = count;
    // it's okay to mark even if mark isn't supported, as reset won't work
  }

  @Override
  public synchronized void reset() throws IOException {
    if (!in.markSupported()) {
      throw new IOException("Mark not supported");
    }
    if (mark == -1) {
      throw new IOException("Mark not set");
    }
    in.reset();
    count = mark;
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/CountingInputStream.java | Java | asf20 | 2,266 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
/**
* An implementation of {@link DataInput} that uses little-endian byte ordering
* for reading {@code short}, {@code int}, {@code float}, {@code double}, and
* {@code long} values.
* <p>
* <b>Note:</b> This class intentionally violates the specification of its
* supertype {@code DataInput}, which explicitly requires big-endian byte order.
*
* @author Chris Nokleberg
* @author Keith Bottner
* @since 8.0
*/
@Beta
public final class LittleEndianDataInputStream extends FilterInputStream
    implements DataInput {

  /**
   * Creates a {@code LittleEndianDataInputStream} that wraps the given stream.
   *
   * @param in the stream to delegate to
   */
  public LittleEndianDataInputStream(InputStream in) {
    super(Preconditions.checkNotNull(in));
  }

  /**
   * This method will throw an {@link UnsupportedOperationException}.
   */
  @Override
  public String readLine() {
    throw new UnsupportedOperationException("readLine is not supported");
  }

  @Override
  public void readFully(byte[] b) throws IOException {
    ByteStreams.readFully(this, b);
  }

  @Override
  public void readFully(byte[] b, int off, int len) throws IOException {
    ByteStreams.readFully(this, b, off, len);
  }

  @Override
  public int skipBytes(int n) throws IOException {
    return (int) in.skip(n);
  }

  /**
   * Reads an unsigned byte as specified by
   * {@link DataInputStream#readUnsignedByte()}. Byte order is irrelevant for
   * a single byte.
   *
   * @return the next byte of the input stream, interpreted as an unsigned
   *     8-bit integer
   * @throws EOFException if the end of the stream has been reached
   * @throws IOException if an I/O error occurs
   */
  @Override
  public int readUnsignedByte() throws IOException {
    // Delegate to the shared EOF-checking helper rather than duplicating its
    // logic; masking restores the unsigned [0, 255] range from the signed byte.
    return readAndCheckByte() & 0xFF;
  }

  /**
   * Reads an unsigned {@code short} as specified by
   * {@link DataInputStream#readUnsignedShort()}, except using little-endian
   * byte order.
   *
   * @return the next two bytes of the input stream, interpreted as an
   *     unsigned 16-bit integer in little-endian byte order
   * @throws IOException if an I/O error occurs
   */
  @Override
  public int readUnsignedShort() throws IOException {
    byte b1 = readAndCheckByte();
    byte b2 = readAndCheckByte();
    // Bytes arrive least-significant first; reverse them for fromBytes,
    // which expects big-endian argument order.
    return Ints.fromBytes((byte) 0, (byte) 0, b2, b1);
  }

  /**
   * Reads an integer as specified by {@link DataInputStream#readInt()}, except
   * using little-endian byte order.
   *
   * @return the next four bytes of the input stream, interpreted as an
   *     {@code int} in little-endian byte order
   * @throws IOException if an I/O error occurs
   */
  @Override
  public int readInt() throws IOException {
    byte b1 = readAndCheckByte();
    byte b2 = readAndCheckByte();
    byte b3 = readAndCheckByte();
    byte b4 = readAndCheckByte();
    return Ints.fromBytes(b4, b3, b2, b1);
  }

  /**
   * Reads a {@code long} as specified by {@link DataInputStream#readLong()},
   * except using little-endian byte order.
   *
   * @return the next eight bytes of the input stream, interpreted as a
   *     {@code long} in little-endian byte order
   * @throws IOException if an I/O error occurs
   */
  @Override
  public long readLong() throws IOException {
    byte b1 = readAndCheckByte();
    byte b2 = readAndCheckByte();
    byte b3 = readAndCheckByte();
    byte b4 = readAndCheckByte();
    byte b5 = readAndCheckByte();
    byte b6 = readAndCheckByte();
    byte b7 = readAndCheckByte();
    byte b8 = readAndCheckByte();
    return Longs.fromBytes(b8, b7, b6, b5, b4, b3, b2, b1);
  }

  /**
   * Reads a {@code float} as specified by {@link DataInputStream#readFloat()},
   * except using little-endian byte order.
   *
   * @return the next four bytes of the input stream, interpreted as a
   *     {@code float} in little-endian byte order
   * @throws IOException if an I/O error occurs
   */
  @Override
  public float readFloat() throws IOException {
    return Float.intBitsToFloat(readInt());
  }

  /**
   * Reads a {@code double} as specified by
   * {@link DataInputStream#readDouble()}, except using little-endian byte
   * order.
   *
   * @return the next eight bytes of the input stream, interpreted as a
   *     {@code double} in little-endian byte order
   * @throws IOException if an I/O error occurs
   */
  @Override
  public double readDouble() throws IOException {
    return Double.longBitsToDouble(readLong());
  }

  @Override
  public String readUTF() throws IOException {
    // Modified UTF-8 is byte-order independent, so delegating to
    // DataInputStream is correct here.
    return new DataInputStream(in).readUTF();
  }

  /**
   * Reads a {@code short} as specified by {@link DataInputStream#readShort()},
   * except using little-endian byte order.
   *
   * @return the next two bytes of the input stream, interpreted as a
   *     {@code short} in little-endian byte order.
   * @throws IOException if an I/O error occurs.
   */
  @Override
  public short readShort() throws IOException {
    return (short) readUnsignedShort();
  }

  /**
   * Reads a char as specified by {@link DataInputStream#readChar()}, except
   * using little-endian byte order.
   *
   * @return the next two bytes of the input stream, interpreted as a
   *     {@code char} in little-endian byte order
   * @throws IOException if an I/O error occurs
   */
  @Override
  public char readChar() throws IOException {
    return (char) readUnsignedShort();
  }

  @Override
  public byte readByte() throws IOException {
    return (byte) readUnsignedByte();
  }

  @Override
  public boolean readBoolean() throws IOException {
    // Any nonzero byte is true, matching DataInputStream#readBoolean().
    return readUnsignedByte() != 0;
  }

  /**
   * Reads a byte from the input stream checking that the end of file (EOF)
   * has not been encountered.
   *
   * @return byte read from input
   * @throws IOException if an error is encountered while reading
   * @throws EOFException if the end of file (EOF) is encountered.
   */
  private byte readAndCheckByte() throws IOException, EOFException {
    int b1 = in.read();
    if (b1 == -1) {
      throw new EOFException();
    }
    return (byte) b1;
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/LittleEndianDataInputStream.java | Java | asf20 | 6,753 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import java.io.IOException;
/**
 * A factory for writable streams of bytes or characters.
 *
 * @author Chris Nokleberg
 * @since 1.0
 * @deprecated For {@code OutputSupplier<? extends OutputStream>}, use
 *     {@link ByteSink} instead. For {@code OutputSupplier<? extends Writer>},
 *     use {@link CharSink}. Implementations of {@code OutputSupplier} that
 *     don't fall into one of those categories do not benefit from any of the
 *     methods in {@code common.io} and should use a different interface. This
 *     interface is scheduled for removal in June 2015.
 */
@Deprecated
public interface OutputSupplier<T> {
/**
 * Returns an object that encapsulates a writable resource.
 *
 * <p>Like {@link Iterable#iterator}, this method may be called repeatedly to
 * get independent channels to the same underlying resource.
 *
 * <p>Where the channel maintains a position within the resource, moving that
 * cursor within one channel should not affect the starting position of
 * channels returned by other calls.
 *
 * @throws IOException if an I/O error occurs while opening the resource
 */
T getOutput() throws IOException;
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/OutputSupplier.java | Java | asf20 | 1,721 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.base.Ascii;
import com.google.common.collect.ImmutableList;
import com.google.common.hash.Funnels;
import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hasher;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Reader;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Iterator;
/**
* A readable source of bytes, such as a file. Unlike an {@link InputStream}, a
* {@code ByteSource} is not an open, stateful stream for input that can be read and closed.
* Instead, it is an immutable <i>supplier</i> of {@code InputStream} instances.
*
* <p>{@code ByteSource} provides two kinds of methods:
* <ul>
* <li><b>Methods that return a stream:</b> These methods should return a <i>new</i>, independent
* instance each time they are called. The caller is responsible for ensuring that the returned
* stream is closed.
* <li><b>Convenience methods:</b> These are implementations of common operations that are
* typically implemented by opening a stream using one of the methods in the first category, doing
* something and finally closing the stream that was opened.
* </ul>
*
* @since 14.0
* @author Colin Decker
*/
public abstract class ByteSource implements InputSupplier<InputStream> {

  /** Size of the transfer buffer used by the streaming helpers in this class. */
  private static final int BUF_SIZE = 0x1000; // 4K

  /**
   * Constructor for use by subclasses.
   */
  protected ByteSource() {}

  /**
   * Returns a {@link CharSource} view of this byte source that decodes bytes read from this source
   * as characters using the given {@link Charset}.
   */
  public CharSource asCharSource(Charset charset) {
    return new AsCharSource(charset);
  }

  /**
   * Opens a new {@link InputStream} for reading from this source. This method should return a new,
   * independent stream each time it is called.
   *
   * <p>The caller is responsible for ensuring that the returned stream is closed.
   *
   * @throws IOException if an I/O error occurs in the process of opening the stream
   */
  public abstract InputStream openStream() throws IOException;

  /**
   * This method is a temporary method provided for easing migration from suppliers to sources and
   * sinks.
   *
   * @since 15.0
   * @deprecated This method is only provided for temporary compatibility with the
   *     {@link InputSupplier} interface and should not be called directly. Use {@link #openStream}
   *     instead. This method is scheduled for removal in Guava 18.0.
   */
  @Override
  @Deprecated
  public final InputStream getInput() throws IOException {
    return openStream();
  }

  /**
   * Opens a new buffered {@link InputStream} for reading from this source. The returned stream is
   * not required to be a {@link BufferedInputStream} in order to allow implementations to simply
   * delegate to {@link #openStream()} when the stream returned by that method does not benefit
   * from additional buffering (for example, a {@code ByteArrayInputStream}). This method should
   * return a new, independent stream each time it is called.
   *
   * <p>The caller is responsible for ensuring that the returned stream is closed.
   *
   * @throws IOException if an I/O error occurs in the process of opening the stream
   * @since 15.0 (in 14.0 with return type {@link BufferedInputStream})
   */
  public InputStream openBufferedStream() throws IOException {
    InputStream in = openStream();
    return (in instanceof BufferedInputStream)
        ? (BufferedInputStream) in
        : new BufferedInputStream(in);
  }

  /**
   * Returns a view of a slice of this byte source that is at most {@code length} bytes long
   * starting at the given {@code offset}.
   *
   * @throws IllegalArgumentException if {@code offset} or {@code length} is negative
   */
  public ByteSource slice(long offset, long length) {
    return new SlicedByteSource(offset, length);
  }

  /**
   * Returns whether the source has zero bytes. The default implementation is to open a stream and
   * check for EOF.
   *
   * @throws IOException if an I/O error occurs
   * @since 15.0
   */
  public boolean isEmpty() throws IOException {
    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      return in.read() == -1;
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Returns the size of this source in bytes. For most implementations, this is a heavyweight
   * operation that will open a stream, read (or {@link InputStream#skip(long) skip}, if possible)
   * to the end of the stream and return the total number of bytes that were read.
   *
   * <p>For some sources, such as a file, this method may use a more efficient implementation. Note
   * that in such cases, it is <i>possible</i> that this method will return a different number of
   * bytes than would be returned by reading all of the bytes (for example, some special files may
   * return a size of 0 despite actually having content when read).
   *
   * <p>In either case, if this is a mutable source such as a file, the size it returns may not be
   * the same number of bytes a subsequent read would return.
   *
   * @throws IOException if an I/O error occurs in the process of reading the size of this source
   */
  public long size() throws IOException {
    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      return countBySkipping(in);
    } catch (IOException e) {
      // skip may not be supported... at any rate, try reading
    } finally {
      closer.close();
    }

    // Skipping didn't work; open a fresh stream and count by plain reads.
    closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      return countByReading(in);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Counts the bytes in the given input stream using skip if possible. Throws an
   * {@code IOException} if skipping appears not to work for this stream (the first skip made no
   * progress and the stream reports nothing available), signalling the caller to fall back to
   * {@link #countByReading}.
   */
  private long countBySkipping(InputStream in) throws IOException {
    long count = 0;
    while (true) {
      // don't try to skip more than available()
      // things may work really wrong with FileInputStream otherwise
      long skipped = in.skip(Math.min(in.available(), Integer.MAX_VALUE));
      if (skipped <= 0) {
        if (in.read() == -1) {
          return count;
        } else if (count == 0 && in.available() == 0) {
          // if available is still zero after reading a single byte, it
          // will probably always be zero, so we should countByReading
          throw new IOException();
        }
        count++;
      } else {
        count += skipped;
      }
    }
  }

  /**
   * Counts the bytes in the given input stream by reading it to the end.
   */
  private long countByReading(InputStream in) throws IOException {
    // Allocate a fresh buffer per call rather than sharing a static one: a
    // shared static array is written by every concurrent caller, which is a
    // data race under the Java Memory Model. Only the byte count matters here;
    // the buffer contents are never examined.
    byte[] buf = new byte[BUF_SIZE];
    long count = 0;
    int read;
    while ((read = in.read(buf)) != -1) {
      count += read;
    }
    return count;
  }

  /**
   * Copies the contents of this byte source to the given {@code OutputStream}. Does not close
   * {@code output}.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or
   *     writing to {@code output}
   */
  public long copyTo(OutputStream output) throws IOException {
    checkNotNull(output);

    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      return ByteStreams.copy(in, output);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Copies the contents of this byte source to the given {@code ByteSink}.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or
   *     writing to {@code sink}
   */
  public long copyTo(ByteSink sink) throws IOException {
    checkNotNull(sink);

    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      OutputStream out = closer.register(sink.openStream());
      return ByteStreams.copy(in, out);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Reads the full contents of this byte source as a byte array.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source
   */
  public byte[] read() throws IOException {
    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      return ByteStreams.toByteArray(in);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Reads the contents of this byte source using the given {@code processor} to process bytes as
   * they are read. Stops when all bytes have been read or the consumer returns {@code false}.
   * Returns the result produced by the processor.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or if
   *     {@code processor} throws an {@code IOException}
   * @since 16.0
   */
  @Beta
  public <T> T read(ByteProcessor<T> processor) throws IOException {
    checkNotNull(processor);

    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(openStream());
      return ByteStreams.readBytes(in, processor);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Hashes the contents of this byte source using the given hash function.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source
   */
  public HashCode hash(HashFunction hashFunction) throws IOException {
    Hasher hasher = hashFunction.newHasher();
    copyTo(Funnels.asOutputStream(hasher));
    return hasher.hash();
  }

  /**
   * Checks that the contents of this byte source are equal to the contents of the given byte
   * source.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or
   *     {@code other}
   */
  public boolean contentEquals(ByteSource other) throws IOException {
    checkNotNull(other);

    byte[] buf1 = new byte[BUF_SIZE];
    byte[] buf2 = new byte[BUF_SIZE];

    Closer closer = Closer.create();
    try {
      InputStream in1 = closer.register(openStream());
      InputStream in2 = closer.register(other.openStream());
      while (true) {
        // read() fills the buffer completely unless EOF is reached, so unequal
        // read counts or unequal buffer contents both mean unequal sources.
        int read1 = ByteStreams.read(in1, buf1, 0, BUF_SIZE);
        int read2 = ByteStreams.read(in2, buf2, 0, BUF_SIZE);
        if (read1 != read2 || !Arrays.equals(buf1, buf2)) {
          return false;
        } else if (read1 != BUF_SIZE) {
          return true;
        }
      }
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }

  /**
   * Concatenates multiple {@link ByteSource} instances into a single source. Streams returned from
   * the source will contain the concatenated data from the streams of the underlying sources.
   *
   * <p>Only one underlying stream will be open at a time. Closing the concatenated stream will
   * close the open underlying stream.
   *
   * @param sources the sources to concatenate
   * @return a {@code ByteSource} containing the concatenated data
   * @since 15.0
   */
  public static ByteSource concat(Iterable<? extends ByteSource> sources) {
    return new ConcatenatedByteSource(sources);
  }

  /**
   * Concatenates multiple {@link ByteSource} instances into a single source. Streams returned from
   * the source will contain the concatenated data from the streams of the underlying sources.
   *
   * <p>Only one underlying stream will be open at a time. Closing the concatenated stream will
   * close the open underlying stream.
   *
   * <p>Note: The input {@code Iterator} will be copied to an {@code ImmutableList} when this
   * method is called. This will fail if the iterator is infinite and may cause problems if the
   * iterator eagerly fetches data for each source when iterated (rather than producing sources
   * that only load data through their streams). Prefer using the {@link #concat(Iterable)}
   * overload if possible.
   *
   * @param sources the sources to concatenate
   * @return a {@code ByteSource} containing the concatenated data
   * @throws NullPointerException if any of {@code sources} is {@code null}
   * @since 15.0
   */
  public static ByteSource concat(Iterator<? extends ByteSource> sources) {
    return concat(ImmutableList.copyOf(sources));
  }

  /**
   * Concatenates multiple {@link ByteSource} instances into a single source. Streams returned from
   * the source will contain the concatenated data from the streams of the underlying sources.
   *
   * <p>Only one underlying stream will be open at a time. Closing the concatenated stream will
   * close the open underlying stream.
   *
   * @param sources the sources to concatenate
   * @return a {@code ByteSource} containing the concatenated data
   * @throws NullPointerException if any of {@code sources} is {@code null}
   * @since 15.0
   */
  public static ByteSource concat(ByteSource... sources) {
    return concat(ImmutableList.copyOf(sources));
  }

  /**
   * Returns a view of the given byte array as a {@link ByteSource}. To view only a specific range
   * in the array, use {@code ByteSource.wrap(b).slice(offset, length)}.
   *
   * @since 15.0 (since 14.0 as {@code ByteStreams.asByteSource(byte[])}).
   */
  public static ByteSource wrap(byte[] b) {
    return new ByteArrayByteSource(b);
  }

  /**
   * Returns an immutable {@link ByteSource} that contains no bytes.
   *
   * @since 15.0
   */
  public static ByteSource empty() {
    return EmptyByteSource.INSTANCE;
  }

  /**
   * A char source that reads bytes from this source and decodes them as characters using a
   * charset.
   */
  private final class AsCharSource extends CharSource {

    private final Charset charset;

    private AsCharSource(Charset charset) {
      this.charset = checkNotNull(charset);
    }

    @Override
    public Reader openStream() throws IOException {
      return new InputStreamReader(ByteSource.this.openStream(), charset);
    }

    @Override
    public String toString() {
      return ByteSource.this.toString() + ".asCharSource(" + charset + ")";
    }
  }

  /**
   * A view of a subsection of the containing byte source.
   */
  private final class SlicedByteSource extends ByteSource {

    private final long offset;
    private final long length;

    private SlicedByteSource(long offset, long length) {
      checkArgument(offset >= 0, "offset (%s) may not be negative", offset);
      checkArgument(length >= 0, "length (%s) may not be negative", length);
      this.offset = offset;
      this.length = length;
    }

    @Override
    public InputStream openStream() throws IOException {
      return sliceStream(ByteSource.this.openStream());
    }

    @Override
    public InputStream openBufferedStream() throws IOException {
      return sliceStream(ByteSource.this.openBufferedStream());
    }

    private InputStream sliceStream(InputStream in) throws IOException {
      if (offset > 0) {
        try {
          ByteStreams.skipFully(in, offset);
        } catch (Throwable e) {
          // The skip failed; close the stream before propagating, preserving
          // the original exception even if close() throws too.
          Closer closer = Closer.create();
          closer.register(in);
          try {
            throw closer.rethrow(e);
          } finally {
            closer.close();
          }
        }
      }
      return ByteStreams.limit(in, length);
    }

    @Override
    public ByteSource slice(long offset, long length) {
      checkArgument(offset >= 0, "offset (%s) may not be negative", offset);
      checkArgument(length >= 0, "length (%s) may not be negative", length);
      // Clamp to the bytes actually remaining in this view. Without the
      // Math.max, an offset past the end of this slice would produce a
      // negative length and an IllegalArgumentException from the constructor
      // instead of an empty source.
      long maxLength = Math.max(0, this.length - offset);
      return ByteSource.this.slice(this.offset + offset, Math.min(length, maxLength));
    }

    @Override
    public boolean isEmpty() throws IOException {
      return length == 0 || super.isEmpty();
    }

    @Override
    public String toString() {
      return ByteSource.this.toString() + ".slice(" + offset + ", " + length + ")";
    }
  }

  private static class ByteArrayByteSource extends ByteSource {

    protected final byte[] bytes;

    protected ByteArrayByteSource(byte[] bytes) {
      this.bytes = checkNotNull(bytes);
    }

    @Override
    public InputStream openStream() {
      return new ByteArrayInputStream(bytes);
    }

    @Override
    public InputStream openBufferedStream() throws IOException {
      // A ByteArrayInputStream gains nothing from extra buffering.
      return openStream();
    }

    @Override
    public boolean isEmpty() {
      return bytes.length == 0;
    }

    @Override
    public long size() {
      return bytes.length;
    }

    @Override
    public byte[] read() {
      return bytes.clone();
    }

    @Override
    public long copyTo(OutputStream output) throws IOException {
      output.write(bytes);
      return bytes.length;
    }

    @Override
    public <T> T read(ByteProcessor<T> processor) throws IOException {
      processor.processBytes(bytes, 0, bytes.length);
      return processor.getResult();
    }

    @Override
    public HashCode hash(HashFunction hashFunction) throws IOException {
      return hashFunction.hashBytes(bytes);
    }

    // TODO(user): Possibly override slice()

    @Override
    public String toString() {
      return "ByteSource.wrap("
          + Ascii.truncate(BaseEncoding.base16().encode(bytes), 30, "...") + ")";
    }
  }

  private static final class EmptyByteSource extends ByteArrayByteSource {

    private static final EmptyByteSource INSTANCE = new EmptyByteSource();

    private EmptyByteSource() {
      super(new byte[0]);
    }

    @Override
    public CharSource asCharSource(Charset charset) {
      checkNotNull(charset);
      return CharSource.empty();
    }

    @Override
    public byte[] read() {
      return bytes; // length is 0, no need to clone
    }

    @Override
    public String toString() {
      return "ByteSource.empty()";
    }
  }

  private static final class ConcatenatedByteSource extends ByteSource {

    private final Iterable<? extends ByteSource> sources;

    ConcatenatedByteSource(Iterable<? extends ByteSource> sources) {
      this.sources = checkNotNull(sources);
    }

    @Override
    public InputStream openStream() throws IOException {
      return new MultiInputStream(sources.iterator());
    }

    @Override
    public boolean isEmpty() throws IOException {
      for (ByteSource source : sources) {
        if (!source.isEmpty()) {
          return false;
        }
      }
      return true;
    }

    @Override
    public long size() throws IOException {
      long result = 0L;
      for (ByteSource source : sources) {
        result += source.size();
      }
      return result;
    }

    @Override
    public String toString() {
      return "ByteSource.concat(" + sources + ")";
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/ByteSource.java | Java | asf20 | 19,946 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.Closeable;
import java.io.Flushable;
import java.io.IOException;
import java.io.Writer;
import javax.annotation.Nullable;
/**
* Writer that places all output on an {@link Appendable} target. If the target
* is {@link Flushable} or {@link Closeable}, flush()es and close()s will also
* be delegated to the target.
*
* @author Alan Green
* @author Sebastian Kanthak
* @since 1.0
*/
class AppendableWriter extends Writer {

  /** Destination that receives every character written through this writer. */
  private final Appendable target;

  /** Set by {@link #close()}; once true, all further operations fail. */
  private boolean closed;

  /**
   * Creates a new writer that appends everything it writes to {@code target}.
   *
   * @param target target to which to append output
   */
  AppendableWriter(Appendable target) {
    this.target = checkNotNull(target);
  }

  /*
   * Abstract methods from Writer
   */

  @Override
  public void write(char cbuf[], int off, int len) throws IOException {
    checkNotClosed();
    // Creating a String is usually as fast as, or faster than, wrapping cbuf
    // in a lightweight CharSequence.
    target.append(new String(cbuf, off, len));
  }

  @Override
  public void flush() throws IOException {
    checkNotClosed();
    if (target instanceof Flushable) {
      ((Flushable) target).flush();
    }
  }

  @Override
  public void close() throws IOException {
    this.closed = true;
    if (target instanceof Closeable) {
      ((Closeable) target).close();
    }
  }

  /*
   * Overridden for performance: these forms avoid building intermediate
   * Strings for single characters and pre-existing CharSequences.
   */

  @Override
  public void write(int c) throws IOException {
    checkNotClosed();
    target.append((char) c);
  }

  @Override
  public void write(@Nullable String str) throws IOException {
    checkNotClosed();
    target.append(str);
  }

  @Override
  public void write(@Nullable String str, int off, int len) throws IOException {
    checkNotClosed();
    // tricky: Appendable.append takes a (start, end) pair, not (offset, length)
    target.append(str, off, off + len);
  }

  @Override
  public Writer append(char c) throws IOException {
    checkNotClosed();
    target.append(c);
    return this;
  }

  @Override
  public Writer append(@Nullable CharSequence charSeq) throws IOException {
    checkNotClosed();
    target.append(charSeq);
    return this;
  }

  @Override
  public Writer append(@Nullable CharSequence charSeq, int start, int end)
      throws IOException {
    checkNotClosed();
    target.append(charSeq, start, end);
    return this;
  }

  /** Rejects any operation after {@link #close()} has been called. */
  private void checkNotClosed() throws IOException {
    if (closed) {
      throw new IOException("Cannot write to a closed writer.");
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/AppendableWriter.java | Java | asf20 | 3,289 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import java.io.DataOutput;
import java.io.IOException;
/**
 * An extension of {@code DataOutput} for writing to in-memory byte arrays; its
 * methods offer identical functionality but do not throw {@link IOException}.
 *
 * @author Jayaprabhakar Kadarkarai
 * @since 1.0
 */
public interface ByteArrayDataOutput extends DataOutput {
  // Each method below re-declares its DataOutput counterpart without the
  // "throws IOException" clause: writes to an in-memory buffer cannot fail
  // with an I/O error.
  @Override void write(int b);
  @Override void write(byte b[]);
  @Override void write(byte b[], int off, int len);
  @Override void writeBoolean(boolean v);
  @Override void writeByte(int v);
  @Override void writeShort(int v);
  @Override void writeChar(int v);
  @Override void writeInt(int v);
  @Override void writeLong(long v);
  @Override void writeFloat(float v);
  @Override void writeDouble(double v);
  @Override void writeChars(String s);
  @Override void writeUTF(String s);
  /**
   * @deprecated This method is dangerous as it discards the high byte of
   * every character. For UTF-8, use {@code write(s.getBytes(Charsets.UTF_8))}.
   */
  @Deprecated @Override void writeBytes(String s);
  /**
   * Returns the contents that have been written to this instance,
   * as a byte array.
   */
  byte[] toByteArray();
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/ByteArrayDataOutput.java | Java | asf20 | 1,802 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndex;
import com.google.common.annotations.Beta;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.Arrays;
/**
* Provides utility methods for working with byte arrays and I/O streams.
*
* @author Chris Nokleberg
* @author Colin Decker
* @since 1.0
*/
@Beta
public final class ByteStreams {
  // Size of the transfer buffer used by the copy/read helpers below.
  private static final int BUF_SIZE = 0x1000; // 4K

  private ByteStreams() {} // static utility class; no instances
/**
* Returns a factory that will supply instances of
* {@link ByteArrayInputStream} that read from the given byte array.
*
* @param b the input buffer
* @return the factory
* @deprecated Use {@link ByteSource#wrap(byte[])} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<ByteArrayInputStream> newInputStreamSupplier(
byte[] b) {
return asInputSupplier(ByteSource.wrap(b));
}
/**
* Returns a factory that will supply instances of
* {@link ByteArrayInputStream} that read from the given byte array.
*
* @param b the input buffer
* @param off the offset in the buffer of the first byte to read
* @param len the maximum number of bytes to read from the buffer
* @return the factory
* @deprecated Use {@code ByteSource.wrap(b).slice(off, len)} instead. This
* method is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<ByteArrayInputStream> newInputStreamSupplier(
final byte[] b, final int off, final int len) {
return asInputSupplier(ByteSource.wrap(b).slice(off, len));
}
/**
* Writes a byte array to an output stream from the given supplier.
*
* @param from the bytes to write
* @param to the output supplier
* @throws IOException if an I/O error occurs
* @deprecated Use {@link ByteSink#write(byte[])} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static void write(byte[] from,
OutputSupplier<? extends OutputStream> to) throws IOException {
asByteSink(to).write(from);
}
/**
* Opens input and output streams from the given suppliers, copies all
* bytes from the input to the output, and closes the streams.
*
* @param from the input factory
* @param to the output factory
* @return the number of bytes copied
* @throws IOException if an I/O error occurs
* @deprecated Use {@link ByteSource#copyTo(ByteSink)} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static long copy(InputSupplier<? extends InputStream> from,
OutputSupplier<? extends OutputStream> to) throws IOException {
return asByteSource(from).copyTo(asByteSink(to));
}
/**
* Opens an input stream from the supplier, copies all bytes from the
* input to the output, and closes the input stream. Does not close
* or flush the output stream.
*
* @param from the input factory
* @param to the output stream to write to
* @return the number of bytes copied
* @throws IOException if an I/O error occurs
* @deprecated Use {@link ByteSource#copyTo(OutputStream)} instead. This
* method is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static long copy(InputSupplier<? extends InputStream> from,
OutputStream to) throws IOException {
return asByteSource(from).copyTo(to);
}
/**
* Opens an output stream from the supplier, copies all bytes from the input
* to the output, and closes the output stream. Does not close or flush the
* input stream.
*
* @param from the input stream to read from
* @param to the output factory
* @return the number of bytes copied
* @throws IOException if an I/O error occurs
* @since 10.0
* @deprecated Use {@link ByteSink#writeFrom(InputStream)} instead. This
* method is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static long copy(InputStream from,
OutputSupplier<? extends OutputStream> to) throws IOException {
return asByteSink(to).writeFrom(from);
}
/**
* Copies all bytes from the input stream to the output stream.
* Does not close or flush either stream.
*
* @param from the input stream to read from
* @param to the output stream to write to
* @return the number of bytes copied
* @throws IOException if an I/O error occurs
*/
public static long copy(InputStream from, OutputStream to)
throws IOException {
checkNotNull(from);
checkNotNull(to);
byte[] buf = new byte[BUF_SIZE];
long total = 0;
while (true) {
int r = from.read(buf);
if (r == -1) {
break;
}
to.write(buf, 0, r);
total += r;
}
return total;
}
/**
* Copies all bytes from the readable channel to the writable channel.
* Does not close or flush either channel.
*
* @param from the readable channel to read from
* @param to the writable channel to write to
* @return the number of bytes copied
* @throws IOException if an I/O error occurs
*/
public static long copy(ReadableByteChannel from,
WritableByteChannel to) throws IOException {
checkNotNull(from);
checkNotNull(to);
ByteBuffer buf = ByteBuffer.allocate(BUF_SIZE);
long total = 0;
while (from.read(buf) != -1) {
buf.flip();
while (buf.hasRemaining()) {
total += to.write(buf);
}
buf.clear();
}
return total;
}
/**
* Reads all bytes from an input stream into a byte array.
* Does not close the stream.
*
* @param in the input stream to read from
* @return a byte array containing all the bytes from the stream
* @throws IOException if an I/O error occurs
*/
public static byte[] toByteArray(InputStream in) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
copy(in, out);
return out.toByteArray();
}
  /**
   * Reads all bytes from an input stream into a byte array. The given
   * expected size is used to create an initial byte array, but if the actual
   * number of bytes read from the stream differs, the correct result will be
   * returned anyway.
   */
  static byte[] toByteArray(
      InputStream in, int expectedSize) throws IOException {
    byte[] bytes = new byte[expectedSize];
    int remaining = expectedSize;
    // Fill the pre-sized array; read() may return short counts, so loop.
    while (remaining > 0) {
      int off = expectedSize - remaining;
      int read = in.read(bytes, off, remaining);
      if (read == -1) {
        // end of stream before reading expectedSize bytes
        // just return the bytes read so far
        return Arrays.copyOf(bytes, off);
      }
      remaining -= read;
    }
    // bytes is now full
    // Probe one more byte to learn whether the stream ended exactly at
    // expectedSize or whether the hint was too small.
    int b = in.read();
    if (b == -1) {
      return bytes;
    }
    // the stream was longer, so read the rest normally
    FastByteArrayOutputStream out = new FastByteArrayOutputStream();
    out.write(b); // write the byte we read when testing for end of stream
    copy(in, out);
    // Stitch the pre-sized prefix and the overflow buffer into one array.
    byte[] result = new byte[bytes.length + out.size()];
    System.arraycopy(bytes, 0, result, 0, bytes.length);
    out.writeTo(result, bytes.length);
    return result;
  }
  /**
   * BAOS that provides limited access to its internal byte array.
   */
  private static final class FastByteArrayOutputStream
      extends ByteArrayOutputStream {
    /**
     * Writes the contents of the internal buffer to the given array starting
     * at the given offset. Assumes the array has space to hold count bytes.
     */
    void writeTo(byte[] b, int off) {
      // Copies straight out of the protected ByteArrayOutputStream buffer,
      // avoiding the defensive copy that toByteArray() would make.
      System.arraycopy(buf, 0, b, off, count);
    }
  }
/**
* Returns the data from a {@link InputStream} factory as a byte array.
*
* @param supplier the factory
* @throws IOException if an I/O error occurs
* @deprecated Use {@link ByteSource#read()} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static byte[] toByteArray(
InputSupplier<? extends InputStream> supplier) throws IOException {
return asByteSource(supplier).read();
}
/**
* Returns a new {@link ByteArrayDataInput} instance to read from the {@code
* bytes} array from the beginning.
*/
public static ByteArrayDataInput newDataInput(byte[] bytes) {
return newDataInput(new ByteArrayInputStream(bytes));
}
/**
* Returns a new {@link ByteArrayDataInput} instance to read from the {@code
* bytes} array, starting at the given position.
*
* @throws IndexOutOfBoundsException if {@code start} is negative or greater
* than the length of the array
*/
public static ByteArrayDataInput newDataInput(byte[] bytes, int start) {
checkPositionIndex(start, bytes.length);
return newDataInput(
new ByteArrayInputStream(bytes, start, bytes.length - start));
}
/**
* Returns a new {@link ByteArrayDataInput} instance to read from the given
* {@code ByteArrayInputStream}. The given input stream is not reset before
* being read from by the returned {@code ByteArrayDataInput}.
*
* @since 17.0
*/
public static ByteArrayDataInput newDataInput(
ByteArrayInputStream byteArrayInputStream) {
return new ByteArrayDataInputStream(checkNotNull(byteArrayInputStream));
}
  /**
   * Implementation of {@link ByteArrayDataInput} that wraps a
   * {@link DataInputStream} reading from an in-memory
   * {@link ByteArrayInputStream}, converting the {@link DataInput} interface's
   * checked {@code IOException}s into unchecked exceptions.
   */
  private static class ByteArrayDataInputStream implements ByteArrayDataInput {
    final DataInput input;
    ByteArrayDataInputStream(ByteArrayInputStream byteArrayInputStream) {
      this.input = new DataInputStream(byteArrayInputStream);
    }
    // Each method below delegates to the wrapped DataInput, rethrowing any
    // IOException (for an in-memory stream, effectively only EOFException) as
    // an unchecked IllegalStateException.
    @Override public void readFully(byte b[]) {
      try {
        input.readFully(b);
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public void readFully(byte b[], int off, int len) {
      try {
        input.readFully(b, off, len);
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public int skipBytes(int n) {
      try {
        return input.skipBytes(n);
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public boolean readBoolean() {
      try {
        return input.readBoolean();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    // NOTE: unlike its siblings, readByte treats only EOFException as a
    // caller-visible state error; any other IOException is considered
    // impossible for an in-memory stream and surfaces as an AssertionError.
    @Override public byte readByte() {
      try {
        return input.readByte();
      } catch (EOFException e) {
        throw new IllegalStateException(e);
      } catch (IOException impossible) {
        throw new AssertionError(impossible);
      }
    }
    @Override public int readUnsignedByte() {
      try {
        return input.readUnsignedByte();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public short readShort() {
      try {
        return input.readShort();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public int readUnsignedShort() {
      try {
        return input.readUnsignedShort();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public char readChar() {
      try {
        return input.readChar();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public int readInt() {
      try {
        return input.readInt();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public long readLong() {
      try {
        return input.readLong();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public float readFloat() {
      try {
        return input.readFloat();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public double readDouble() {
      try {
        return input.readDouble();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public String readLine() {
      try {
        return input.readLine();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    @Override public String readUTF() {
      try {
        return input.readUTF();
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
  }
/**
* Returns a new {@link ByteArrayDataOutput} instance with a default size.
*/
public static ByteArrayDataOutput newDataOutput() {
return newDataOutput(new ByteArrayOutputStream());
}
/**
* Returns a new {@link ByteArrayDataOutput} instance sized to hold
* {@code size} bytes before resizing.
*
* @throws IllegalArgumentException if {@code size} is negative
*/
public static ByteArrayDataOutput newDataOutput(int size) {
checkArgument(size >= 0, "Invalid size: %s", size);
return newDataOutput(new ByteArrayOutputStream(size));
}
/**
* Returns a new {@link ByteArrayDataOutput} instance which writes to the
* given {@code ByteArrayOutputStream}. The given output stream is not reset
* before being written to by the returned {@code ByteArrayDataOutput} and
* new data will be appended to any existing content.
*
* <p>Note that if the given output stream was not empty or is modified after
* the {@code ByteArrayDataOutput} is created, the contract for
* {@link ByteArrayDataOutput#toByteArray} will not be honored (the bytes
* returned in the byte array may not be exactly what was written via calls to
* {@code ByteArrayDataOutput}).
*
* @since 17.0
*/
public static ByteArrayDataOutput newDataOutput(
ByteArrayOutputStream byteArrayOutputSteam) {
return new ByteArrayDataOutputStream(checkNotNull(byteArrayOutputSteam));
}
@SuppressWarnings("deprecation") // for writeBytes
private static class ByteArrayDataOutputStream
implements ByteArrayDataOutput {
final DataOutput output;
final ByteArrayOutputStream byteArrayOutputSteam;
ByteArrayDataOutputStream(ByteArrayOutputStream byteArrayOutputSteam) {
this.byteArrayOutputSteam = byteArrayOutputSteam;
output = new DataOutputStream(byteArrayOutputSteam);
}
@Override public void write(int b) {
try {
output.write(b);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void write(byte[] b) {
try {
output.write(b);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void write(byte[] b, int off, int len) {
try {
output.write(b, off, len);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeBoolean(boolean v) {
try {
output.writeBoolean(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeByte(int v) {
try {
output.writeByte(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeBytes(String s) {
try {
output.writeBytes(s);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeChar(int v) {
try {
output.writeChar(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeChars(String s) {
try {
output.writeChars(s);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeDouble(double v) {
try {
output.writeDouble(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeFloat(float v) {
try {
output.writeFloat(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeInt(int v) {
try {
output.writeInt(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeLong(long v) {
try {
output.writeLong(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeShort(int v) {
try {
output.writeShort(v);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public void writeUTF(String s) {
try {
output.writeUTF(s);
} catch (IOException impossible) {
throw new AssertionError(impossible);
}
}
@Override public byte[] toByteArray() {
return byteArrayOutputSteam.toByteArray();
}
}
  /** Shared singleton sink that discards everything written to it. */
  private static final OutputStream NULL_OUTPUT_STREAM =
      new OutputStream() {
        /** Discards the specified byte. */
        @Override public void write(int b) {
        }
        /** Discards the specified byte array. */
        @Override public void write(byte[] b) {
          // Null-check so the OutputStream contract (NPE for a null array)
          // still holds even though the bytes are thrown away.
          checkNotNull(b);
        }
        /** Discards the specified byte array. */
        @Override public void write(byte[] b, int off, int len) {
          checkNotNull(b);
        }
        @Override
        public String toString() {
          return "ByteStreams.nullOutputStream()";
        }
      };
  /**
   * Returns an {@link OutputStream} that simply discards written bytes.
   *
   * <p>Always returns the same shared instance; the stream is stateless.
   *
   * @since 14.0 (since 1.0 as com.google.common.io.NullOutputStream)
   */
  public static OutputStream nullOutputStream() {
    return NULL_OUTPUT_STREAM;
  }
  /**
   * Wraps a {@link InputStream}, limiting the number of bytes which can be
   * read.
   *
   * @param in the input stream to be wrapped
   * @param limit the maximum number of bytes to be read
   * @return a length-limited {@link InputStream}
   * @throws NullPointerException if {@code in} is null (checked by the
   *     {@code LimitedInputStream} constructor)
   * @throws IllegalArgumentException if {@code limit} is negative (likewise)
   * @since 14.0 (since 1.0 as com.google.common.io.LimitInputStream)
   */
  public static InputStream limit(InputStream in, long limit) {
    return new LimitedInputStream(in, limit);
  }
  /**
   * An input stream decorator that stops returning data after a fixed number
   * of bytes have been read from the underlying stream.
   */
  private static final class LimitedInputStream extends FilterInputStream {
    // Number of bytes this wrapper is still allowed to return.
    private long left;
    // Value of `left` at the time of the last mark() call, or -1 if unset.
    private long mark = -1;
    LimitedInputStream(InputStream in, long limit) {
      super(in);
      checkNotNull(in);
      checkArgument(limit >= 0, "limit must be non-negative");
      left = limit;
    }
    @Override public int available() throws IOException {
      // Cast is safe: the min is bounded by in.available(), an int.
      return (int) Math.min(in.available(), left);
    }
    // it's okay to mark even if mark isn't supported, as reset won't work
    @Override public synchronized void mark(int readLimit) {
      in.mark(readLimit);
      // Remember the remaining budget so reset() can restore it.
      mark = left;
    }
    @Override public int read() throws IOException {
      if (left == 0) {
        return -1; // budget exhausted: report EOF without touching the stream
      }
      int result = in.read();
      if (result != -1) {
        --left;
      }
      return result;
    }
    @Override public int read(byte[] b, int off, int len) throws IOException {
      if (left == 0) {
        return -1;
      }
      // Never request more than the remaining budget.
      len = (int) Math.min(len, left);
      int result = in.read(b, off, len);
      if (result != -1) {
        left -= result;
      }
      return result;
    }
    @Override public synchronized void reset() throws IOException {
      if (!in.markSupported()) {
        throw new IOException("Mark not supported");
      }
      if (mark == -1) {
        throw new IOException("Mark not set");
      }
      in.reset();
      // Restore the budget recorded by mark().
      left = mark;
    }
    @Override public long skip(long n) throws IOException {
      n = Math.min(n, left);
      long skipped = in.skip(n);
      left -= skipped;
      return skipped;
    }
  }
/**
* Returns the length of a supplied input stream, in bytes.
*
* @deprecated Use {@link ByteSource#size()} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static long length(
InputSupplier<? extends InputStream> supplier) throws IOException {
return asByteSource(supplier).size();
}
/**
* Returns true if the supplied input streams contain the same bytes.
*
* @throws IOException if an I/O error occurs
* @deprecated Use {@link ByteSource#contentEquals(ByteSource)} instead. This
* method is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static boolean equal(InputSupplier<? extends InputStream> supplier1,
InputSupplier<? extends InputStream> supplier2) throws IOException {
return asByteSource(supplier1).contentEquals(asByteSource(supplier2));
}
  /**
   * Attempts to read enough bytes from the stream to fill the given byte array,
   * with the same behavior as {@link DataInput#readFully(byte[])}.
   * Does not close the stream.
   *
   * @param in the input stream to read from.
   * @param b the buffer into which the data is read.
   * @throws EOFException if this stream reaches the end before reading all
   *     the bytes.
   * @throws IOException if an I/O error occurs.
   */
  public static void readFully(InputStream in, byte[] b) throws IOException {
    // Delegates to the four-argument overload over the whole array.
    readFully(in, b, 0, b.length);
  }
/**
* Attempts to read {@code len} bytes from the stream into the given array
* starting at {@code off}, with the same behavior as
* {@link DataInput#readFully(byte[], int, int)}. Does not close the
* stream.
*
* @param in the input stream to read from.
* @param b the buffer into which the data is read.
* @param off an int specifying the offset into the data.
* @param len an int specifying the number of bytes to read.
* @throws EOFException if this stream reaches the end before reading all
* the bytes.
* @throws IOException if an I/O error occurs.
*/
public static void readFully(
InputStream in, byte[] b, int off, int len) throws IOException {
int read = read(in, b, off, len);
if (read != len) {
throw new EOFException("reached end of stream after reading "
+ read + " bytes; " + len + " bytes expected");
}
}
/**
* Discards {@code n} bytes of data from the input stream. This method
* will block until the full amount has been skipped. Does not close the
* stream.
*
* @param in the input stream to read from
* @param n the number of bytes to skip
* @throws EOFException if this stream reaches the end before skipping all
* the bytes
* @throws IOException if an I/O error occurs, or the stream does not
* support skipping
*/
public static void skipFully(InputStream in, long n) throws IOException {
long toSkip = n;
while (n > 0) {
long amt = in.skip(n);
if (amt == 0) {
// Force a blocking read to avoid infinite loop
if (in.read() == -1) {
long skipped = toSkip - n;
throw new EOFException("reached end of stream after skipping "
+ skipped + " bytes; " + toSkip + " bytes expected");
}
n--;
} else {
n -= amt;
}
}
}
  /**
   * Process the bytes of a supplied stream
   *
   * @param supplier the input stream factory
   * @param processor the object to which to pass the bytes of the stream
   * @return the result of the byte processor
   * @throws IOException if an I/O error occurs
   * @deprecated Use {@link ByteSource#read(ByteProcessor)} instead. This
   *     method is scheduled for removal in Guava 18.0.
   */
  @Deprecated
  public static <T> T readBytes(
      InputSupplier<? extends InputStream> supplier,
      ByteProcessor<T> processor) throws IOException {
    checkNotNull(supplier);
    checkNotNull(processor);
    // Standard Closer idiom: register the stream so it is closed on every
    // path, and funnel any Throwable through rethrow() so close() can
    // suppress secondary exceptions correctly.
    Closer closer = Closer.create();
    try {
      InputStream in = closer.register(supplier.getInput());
      return readBytes(in, processor);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
/**
* Process the bytes of the given input stream using the given processor.
*
* @param input the input stream to process
* @param processor the object to which to pass the bytes of the stream
* @return the result of the byte processor
* @throws IOException if an I/O error occurs
* @since 14.0
*/
public static <T> T readBytes(
InputStream input, ByteProcessor<T> processor) throws IOException {
checkNotNull(input);
checkNotNull(processor);
byte[] buf = new byte[BUF_SIZE];
int read;
do {
read = input.read(buf);
} while (read != -1 && processor.processBytes(buf, 0, read));
return processor.getResult();
}
/**
* Computes the hash code of the data supplied by {@code supplier} using {@code
* hashFunction}.
*
* @param supplier the input stream factory
* @param hashFunction the hash function to use to hash the data
* @return the {@link HashCode} of all of the bytes in the input stream
* @throws IOException if an I/O error occurs
* @since 12.0
* @deprecated Use {@link ByteSource#hash(HashFunction)} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static HashCode hash(
InputSupplier<? extends InputStream> supplier, HashFunction hashFunction)
throws IOException {
return asByteSource(supplier).hash(hashFunction);
}
/**
* Reads some bytes from an input stream and stores them into the buffer array
* {@code b}. This method blocks until {@code len} bytes of input data have
* been read into the array, or end of file is detected. The number of bytes
* read is returned, possibly zero. Does not close the stream.
*
* <p>A caller can detect EOF if the number of bytes read is less than
* {@code len}. All subsequent calls on the same stream will return zero.
*
* <p>If {@code b} is null, a {@code NullPointerException} is thrown. If
* {@code off} is negative, or {@code len} is negative, or {@code off+len} is
* greater than the length of the array {@code b}, then an
* {@code IndexOutOfBoundsException} is thrown. If {@code len} is zero, then
* no bytes are read. Otherwise, the first byte read is stored into element
* {@code b[off]}, the next one into {@code b[off+1]}, and so on. The number
* of bytes read is, at most, equal to {@code len}.
*
* @param in the input stream to read from
* @param b the buffer into which the data is read
* @param off an int specifying the offset into the data
* @param len an int specifying the number of bytes to read
* @return the number of bytes read
* @throws IOException if an I/O error occurs
*/
public static int read(InputStream in, byte[] b, int off, int len)
throws IOException {
checkNotNull(in);
checkNotNull(b);
if (len < 0) {
throw new IndexOutOfBoundsException("len is negative");
}
int total = 0;
while (total < len) {
int result = in.read(b, off + total, len - total);
if (result == -1) {
break;
}
total += result;
}
return total;
}
/**
* Returns an {@link InputSupplier} that returns input streams from the
* an underlying supplier, where each stream starts at the given
* offset and is limited to the specified number of bytes.
*
* @param supplier the supplier from which to get the raw streams
* @param offset the offset in bytes into the underlying stream where
* the returned streams will start
* @param length the maximum length of the returned streams
* @throws IllegalArgumentException if offset or length are negative
* @deprecated Use {@link ByteSource#slice(int, int)} instead. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<InputStream> slice(
final InputSupplier<? extends InputStream> supplier,
final long offset,
final long length) {
return asInputSupplier(asByteSource(supplier).slice(offset, length));
}
/**
* Joins multiple {@link InputStream} suppliers into a single supplier.
* Streams returned from the supplier will contain the concatenated data from
* the streams of the underlying suppliers.
*
* <p>Only one underlying input stream will be open at a time. Closing the
* joined stream will close the open underlying stream.
*
* <p>Reading from the joined stream will throw a {@link NullPointerException}
* if any of the suppliers are null or return null.
*
* @param suppliers the suppliers to concatenate
* @return a supplier that will return a stream containing the concatenated
* stream data
* @deprecated Use {@link ByteSource#concat(Iterable)} instead. This method
* is scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<InputStream> join(
final Iterable<? extends InputSupplier<? extends InputStream>> suppliers) {
checkNotNull(suppliers);
Iterable<ByteSource> sources = Iterables.transform(suppliers,
new Function<InputSupplier<? extends InputStream>, ByteSource>() {
@Override
public ByteSource apply(InputSupplier<? extends InputStream> input) {
return asByteSource(input);
}
});
return asInputSupplier(ByteSource.concat(sources));
}
  /**
   * Varargs form of {@link #join(Iterable)}.
   *
   * @deprecated Use {@link ByteSource#concat(ByteSource[])} instead. This
   *     method is scheduled for removal in Guava 18.0.
   */
  @Deprecated
  @SuppressWarnings("unchecked") // suppress "possible heap pollution" warning in JDK7
  public static InputSupplier<InputStream> join(
      InputSupplier<? extends InputStream>... suppliers) {
    // Wraps the varargs array in a fixed-size List view and delegates.
    return join(Arrays.asList(suppliers));
  }
  // TODO(user): Remove these once Input/OutputSupplier methods are removed
  /**
   * Returns a view of the given {@code InputStream} supplier as a
   * {@code ByteSource}.
   *
   * <p>This method is a temporary method provided for easing migration from
   * suppliers to sources and sinks.
   *
   * @since 15.0
   * @deprecated Convert all {@code InputSupplier<? extends InputStream>}
   *     implementations to extend {@link ByteSource} or provide a method for
   *     viewing the object as a {@code ByteSource}. This method is scheduled
   *     for removal in Guava 18.0.
   */
  @Deprecated
  public static ByteSource asByteSource(
      final InputSupplier<? extends InputStream> supplier) {
    checkNotNull(supplier);
    // The view is live: each openStream() call asks the supplier for a fresh
    // stream rather than caching one.
    return new ByteSource() {
      @Override
      public InputStream openStream() throws IOException {
        return supplier.getInput();
      }
      @Override
      public String toString() {
        return "ByteStreams.asByteSource(" + supplier + ")";
      }
    };
  }
  /**
   * Returns a view of the given {@code OutputStream} supplier as a
   * {@code ByteSink}.
   *
   * <p>This method is a temporary method provided for easing migration from
   * suppliers to sources and sinks.
   *
   * @since 15.0
   * @deprecated Convert all {@code OutputSupplier<? extends OutputStream>}
   *     implementations to extend {@link ByteSink} or provide a method for
   *     viewing the object as a {@code ByteSink}. This method is scheduled
   *     for removal in Guava 18.0.
   */
  @Deprecated
  public static ByteSink asByteSink(
      final OutputSupplier<? extends OutputStream> supplier) {
    checkNotNull(supplier);
    // The view is live: each openStream() call asks the supplier for a fresh
    // stream rather than caching one.
    return new ByteSink() {
      @Override
      public OutputStream openStream() throws IOException {
        return supplier.getOutput();
      }
      @Override
      public String toString() {
        return "ByteStreams.asByteSink(" + supplier + ")";
      }
    };
  }
  // Presents a ByteSource as an InputSupplier via an unchecked cast; this
  // presumably relies on ByteSource implementing InputSupplier in this
  // version of the library — confirm against the ByteSource declaration.
  @SuppressWarnings("unchecked") // used internally where known to be safe
  static <S extends InputStream> InputSupplier<S> asInputSupplier(
      final ByteSource source) {
    return (InputSupplier) checkNotNull(source);
  }
  // Presents a ByteSink as an OutputSupplier via an unchecked cast; this
  // presumably relies on ByteSink implementing OutputSupplier in this
  // version of the library — confirm against the ByteSink declaration.
  @SuppressWarnings("unchecked") // used internally where known to be safe
  static <S extends OutputStream> OutputSupplier<S> asOutputSupplier(
      final ByteSink sink) {
    return (OutputSupplier) checkNotNull(sink);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/ByteStreams.java | Java | asf20 | 33,563 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import java.io.IOException;
/**
 * A callback to be used with the streaming {@code readLines} methods.
 *
 * <p>{@link #processLine} will be called for each line that is read, and
 * should return {@code false} when you want to stop processing.
 *
 * @author Miles Barr
 * @since 1.0
 */
@Beta
public interface LineProcessor<T> {
  /**
   * This method will be called once for each line.
   *
   * @param line the line read from the input, without delimiter
   * @return true to continue processing, false to stop
   * @throws IOException if an error occurs while handling the line
   */
  boolean processLine(String line) throws IOException;
  /**
   * Return the result of processing all the lines.
   *
   * <p>Typically invoked once reading finishes, whether it consumed all input
   * or stopped early because {@link #processLine} returned {@code false}.
   */
  T getResult();
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/LineProcessor.java | Java | asf20 | 1,330 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.annotations.Beta;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
 * An implementation of {@link DataOutput} that uses little-endian byte ordering
 * for writing {@code char}, {@code short}, {@code int}, {@code float}, {@code
 * double}, and {@code long} values.
 * <p>
 * <b>Note:</b> This class intentionally violates the specification of its
 * supertype {@code DataOutput}, which explicitly requires big-endian byte
 * order.
 *
 * @author Chris Nokleberg
 * @author Keith Bottner
 * @since 8.0
 */
@Beta
public class LittleEndianDataOutputStream extends FilterOutputStream
    implements DataOutput {
  /**
   * Creates a {@code LittleEndianDataOutputStream} that wraps the given stream.
   *
   * @param out the stream to delegate to
   */
  public LittleEndianDataOutputStream(OutputStream out) {
    super(new DataOutputStream(Preconditions.checkNotNull(out)));
  }
  @Override public void write(byte[] b, int off, int len) throws IOException {
    // Bypass FilterOutputStream's slow byte-at-a-time default implementation.
    out.write(b, off, len);
  }
  /** Booleans occupy a single byte, so byte order does not apply. */
  @Override public void writeBoolean(boolean v) throws IOException {
    ((DataOutputStream) out).writeBoolean(v);
  }
  /** Single bytes have no byte order; delegate directly. */
  @Override public void writeByte(int v) throws IOException {
    ((DataOutputStream) out).writeByte(v);
  }
  /**
   * @deprecated The semantics of {@code writeBytes(String s)} are considered
   *     dangerous. Please use {@link #writeUTF(String s)},
   *     {@link #writeChars(String s)} or another write method instead.
   */
  @Deprecated
  @Override public void writeBytes(String s) throws IOException {
    ((DataOutputStream) out).writeBytes(s);
  }
  /**
   * Writes a char as specified by {@link DataOutputStream#writeChar(int)},
   * except using little-endian byte order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeChar(int v) throws IOException {
    // A char is two bytes; reuse the little-endian short encoding.
    writeShort(v);
  }
  /**
   * Writes a {@code String} as specified by
   * {@link DataOutputStream#writeChars(String)}, except each character is
   * written using little-endian byte order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeChars(String s) throws IOException {
    for (int i = 0, n = s.length(); i < n; i++) {
      writeChar(s.charAt(i));
    }
  }
  /**
   * Writes a {@code double} as specified by
   * {@link DataOutputStream#writeDouble(double)}, except using little-endian
   * byte order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeDouble(double v) throws IOException {
    writeLong(Double.doubleToLongBits(v));
  }
  /**
   * Writes a {@code float} as specified by
   * {@link DataOutputStream#writeFloat(float)}, except using little-endian byte
   * order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeFloat(float v) throws IOException {
    writeInt(Float.floatToIntBits(v));
  }
  /**
   * Writes an {@code int} as specified by
   * {@link DataOutputStream#writeInt(int)}, except using little-endian byte
   * order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeInt(int v) throws IOException {
    // Emit the least significant byte first.
    out.write(v & 0xFF);
    out.write((v >>> 8) & 0xFF);
    out.write((v >>> 16) & 0xFF);
    out.write((v >>> 24) & 0xFF);
  }
  /**
   * Writes a {@code long} as specified by
   * {@link DataOutputStream#writeLong(long)}, except using little-endian byte
   * order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeLong(long v) throws IOException {
    // Reversing the bytes first lets Longs.toByteArray (big-endian) produce
    // the little-endian encoding.
    byte[] littleEndian = Longs.toByteArray(Long.reverseBytes(v));
    write(littleEndian, 0, littleEndian.length);
  }
  /**
   * Writes a {@code short} as specified by
   * {@link DataOutputStream#writeShort(int)}, except using little-endian byte
   * order.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override public void writeShort(int v) throws IOException {
    out.write(v & 0xFF);
    out.write((v >>> 8) & 0xFF);
  }
  @Override public void writeUTF(String str) throws IOException {
    ((DataOutputStream) out).writeUTF(str);
  }
  // Overriding close() because FilterOutputStream's close() method pre-JDK8
  // silently ignores any exception thrown by flush(). Instead, just close the
  // delegate stream, which should flush itself if necessary.
  @Override public void close() throws IOException {
    out.close();
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/LittleEndianDataOutputStream.java | Java | asf20 | 5,268 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.base.Charsets;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.List;
/**
 * Provides utility methods for working with resources in the classpath.
 * Note that even though these methods use {@link URL} parameters, they
 * are usually not appropriate for HTTP or other non-classpath resources.
 *
 * <p>All method parameters must be non-null unless documented otherwise.
 *
 * @author Chris Nokleberg
 * @author Ben Yu
 * @author Colin Decker
 * @since 1.0
 */
@Beta
public final class Resources {
  private Resources() {}
  /**
   * Returns a factory that will supply instances of {@link InputStream} that
   * read from the given URL.
   *
   * @param url the URL to read from
   * @return the factory
   * @deprecated Use {@link #asByteSource(URL)} instead. This method is
   *     scheduled for removal in Guava 18.0.
   */
  @Deprecated
  public static InputSupplier<InputStream> newInputStreamSupplier(URL url) {
    ByteSource source = asByteSource(url);
    return ByteStreams.asInputSupplier(source);
  }
  /**
   * Returns a {@link ByteSource} that reads from the given URL.
   *
   * @since 14.0
   */
  public static ByteSource asByteSource(URL url) {
    return new UrlByteSource(url);
  }
  /**
   * A byte source that reads from a URL using {@link URL#openStream()}.
   */
  private static final class UrlByteSource extends ByteSource {
    private final URL resourceUrl;
    private UrlByteSource(URL url) {
      this.resourceUrl = checkNotNull(url);
    }
    @Override
    public InputStream openStream() throws IOException {
      // Opens a fresh stream on every call; nothing is cached.
      return resourceUrl.openStream();
    }
    @Override
    public String toString() {
      return "Resources.asByteSource(" + resourceUrl + ")";
    }
  }
  /**
   * Returns a factory that will supply instances of
   * {@link InputStreamReader} that read a URL using the given character set.
   *
   * @param url the URL to read from
   * @param charset the charset used to decode the input stream; see {@link
   *     Charsets} for helpful predefined constants
   * @return the factory
   * @deprecated Use {@link #asCharSource(URL, Charset)} instead. This method
   *     is scheduled for removal in Guava 18.0.
   */
  @Deprecated
  public static InputSupplier<InputStreamReader> newReaderSupplier(
      URL url, Charset charset) {
    CharSource source = asCharSource(url, charset);
    return CharStreams.asInputSupplier(source);
  }
  /**
   * Returns a {@link CharSource} that reads from the given URL using the given
   * character set.
   *
   * @since 14.0
   */
  public static CharSource asCharSource(URL url, Charset charset) {
    return asByteSource(url).asCharSource(charset);
  }
  /**
   * Reads all bytes from a URL into a byte array.
   *
   * @param url the URL to read from
   * @return a byte array containing all the bytes from the URL
   * @throws IOException if an I/O error occurs
   */
  public static byte[] toByteArray(URL url) throws IOException {
    ByteSource source = asByteSource(url);
    return source.read();
  }
  /**
   * Reads all characters from a URL into a {@link String}, using the given
   * character set.
   *
   * @param url the URL to read from
   * @param charset the charset used to decode the input stream; see {@link
   *     Charsets} for helpful predefined constants
   * @return a string containing all the characters from the URL
   * @throws IOException if an I/O error occurs.
   */
  public static String toString(URL url, Charset charset) throws IOException {
    CharSource source = asCharSource(url, charset);
    return source.read();
  }
  /**
   * Streams lines from a URL, stopping when our callback returns false, or we
   * have read all of the lines.
   *
   * @param url the URL to read from
   * @param charset the charset used to decode the input stream; see {@link
   *     Charsets} for helpful predefined constants
   * @param callback the LineProcessor to use to handle the lines
   * @return the output of processing the lines
   * @throws IOException if an I/O error occurs
   */
  public static <T> T readLines(URL url, Charset charset,
      LineProcessor<T> callback) throws IOException {
    InputSupplier<InputStreamReader> readerSupplier =
        newReaderSupplier(url, charset);
    return CharStreams.readLines(readerSupplier, callback);
  }
  /**
   * Reads all of the lines from a URL. The lines do not include
   * line-termination characters, but do include other leading and trailing
   * whitespace.
   *
   * <p>This method returns a mutable {@code List}. For an
   * {@code ImmutableList}, use
   * {@code Resources.asCharSource(url, charset).readLines()}.
   *
   * @param url the URL to read from
   * @param charset the charset used to decode the input stream; see {@link
   *     Charsets} for helpful predefined constants
   * @return a mutable {@link List} containing all the lines
   * @throws IOException if an I/O error occurs
   */
  public static List<String> readLines(URL url, Charset charset)
      throws IOException {
    // don't use asCharSource(url, charset).readLines() because that returns
    // an immutable list, which would change the behavior of this method
    return readLines(url, charset, new LineProcessor<List<String>>() {
      final List<String> lines = Lists.newArrayList();
      @Override
      public boolean processLine(String line) {
        lines.add(line);
        return true;
      }
      @Override
      public List<String> getResult() {
        return lines;
      }
    });
  }
  /**
   * Copies all bytes from a URL to an output stream.
   *
   * @param from the URL to read from
   * @param to the output stream
   * @throws IOException if an I/O error occurs
   */
  public static void copy(URL from, OutputStream to) throws IOException {
    ByteSource source = asByteSource(from);
    source.copyTo(to);
  }
  /**
   * Returns a {@code URL} pointing to {@code resourceName} if the resource is
   * found using the {@linkplain Thread#getContextClassLoader() context class
   * loader}. In simple environments, the context class loader will find
   * resources from the class path. In environments where different threads can
   * have different class loaders, for example app servers, the context class
   * loader will typically have been set to an appropriate loader for the
   * current thread.
   *
   * <p>In the unusual case where the context class loader is null, the class
   * loader that loaded this class ({@code Resources}) will be used instead.
   *
   * @throws IllegalArgumentException if the resource is not found
   */
  public static URL getResource(String resourceName) {
    ClassLoader contextLoader = Thread.currentThread().getContextClassLoader();
    ClassLoader loader =
        Objects.firstNonNull(contextLoader, Resources.class.getClassLoader());
    URL url = loader.getResource(resourceName);
    checkArgument(url != null, "resource %s not found.", resourceName);
    return url;
  }
  /**
   * Given a {@code resourceName} that is relative to {@code contextClass},
   * returns a {@code URL} pointing to the named resource.
   *
   * @throws IllegalArgumentException if the resource is not found
   */
  public static URL getResource(Class<?> contextClass, String resourceName) {
    URL resourceUrl = contextClass.getResource(resourceName);
    checkArgument(resourceUrl != null, "resource %s relative to %s not found.",
        resourceName, contextClass.getName());
    return resourceUrl;
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/Resources.java | Java | asf20 | 8,179 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import javax.annotation.Nullable;
/**
* An {@link InputStream} that concatenates multiple substreams. At most
* one stream will be open at a time.
*
* @author Chris Nokleberg
* @since 1.0
*/
final class MultiInputStream extends InputStream {
  /** Remaining sources whose streams have not yet been opened. */
  private Iterator<? extends ByteSource> it;
  /** The currently open substream, or null when all sources are exhausted. */
  private InputStream in;
  /**
   * Creates a new instance.
   *
   * @param it an iterator of I/O suppliers that will provide each substream
   * @throws IOException if opening the first substream fails
   */
  public MultiInputStream(
      Iterator<? extends ByteSource> it) throws IOException {
    this.it = checkNotNull(it);
    advance();
  }
  @Override public void close() throws IOException {
    if (in != null) {
      try {
        in.close();
      } finally {
        // Null out even if close() throws so we never reuse a failed stream.
        in = null;
      }
    }
  }
  /**
   * Closes the current input stream and opens the next one, if any.
   */
  private void advance() throws IOException {
    close();
    if (it.hasNext()) {
      in = it.next().openStream();
    }
  }
  @Override public int available() throws IOException {
    // Only reports what the current substream has; later substreams are not
    // opened yet, so this is a lower bound as the InputStream contract allows.
    if (in == null) {
      return 0;
    }
    return in.available();
  }
  @Override public boolean markSupported() {
    return false;
  }
  @Override public int read() throws IOException {
    // Iterate instead of recursing: the previous implementation recursed once
    // per exhausted substream, so concatenating many empty sources could
    // overflow the stack.
    while (in != null) {
      int result = in.read();
      if (result != -1) {
        return result;
      }
      advance();
    }
    return -1;
  }
  @Override public int read(@Nullable byte[] b, int off, int len) throws IOException {
    // Same loop-not-recursion rationale as read().
    while (in != null) {
      int result = in.read(b, off, len);
      if (result != -1) {
        return result;
      }
      advance();
    }
    return -1;
  }
  @Override public long skip(long n) throws IOException {
    if (in == null || n <= 0) {
      return 0;
    }
    long result = in.skip(n);
    if (result != 0) {
      return result;
    }
    // The current substream could not skip; pull one byte through read()
    // (which advances past exhausted substreams), then let the now-current
    // substream skip the rest. read() != -1 guarantees in != null here.
    if (read() == -1) {
      return 0;
    }
    return 1 + in.skip(n - 1);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/MultiInputStream.java | Java | asf20 | 2,707 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.Charset;
/**
 * A destination to which bytes can be written, such as a file. Unlike an {@link OutputStream}, a
 * {@code ByteSink} is not an open, stateful stream; it is an immutable <i>supplier</i> of new
 * {@code OutputStream} instances.
 *
 * <p>{@code ByteSink} offers two kinds of methods:
 * <ul>
 * <li><b>Stream-returning methods:</b> each call produces a new, independent stream that the
 * caller must close.
 * <li><b>Convenience methods:</b> these open a stream, perform a common operation and close the
 * stream themselves.
 * </ul>
 *
 * @since 14.0
 * @author Colin Decker
 */
public abstract class ByteSink implements OutputSupplier<OutputStream> {
  /** Constructor for use by subclasses. */
  protected ByteSink() {}
  /**
   * Returns a {@link CharSink} view of this sink that encodes characters written to it with the
   * given {@link Charset charset} and writes the resulting bytes to this sink.
   */
  public CharSink asCharSink(Charset charset) {
    return new AsCharSink(charset);
  }
  /**
   * Opens a new {@link OutputStream} for writing to this sink. Each call returns a new,
   * independent stream, which the caller is responsible for closing.
   *
   * @throws IOException if an I/O error occurs in the process of opening the stream
   */
  public abstract OutputStream openStream() throws IOException;
  /**
   * Temporary bridge method easing the migration from suppliers to sources and sinks.
   *
   * @since 15.0
   * @deprecated Only provided for temporary compatibility with the {@link OutputSupplier}
   *     interface; do not call directly — use {@link #openStream} instead. Scheduled for removal
   *     in Guava 18.0.
   */
  @Override
  @Deprecated
  public final OutputStream getOutput() throws IOException {
    return openStream();
  }
  /**
   * Opens a new buffered {@link OutputStream} for writing to this sink. The result is not
   * required to be a {@link BufferedOutputStream}: when {@link #openStream()} already returns a
   * stream that would not benefit from extra buffering (e.g. a {@code ByteArrayOutputStream}) it
   * is returned as-is. Each call returns a new, independent stream, which the caller is
   * responsible for closing.
   *
   * @throws IOException if an I/O error occurs in the process of opening the stream
   * @since 15.0 (in 14.0 with return type {@link BufferedOutputStream})
   */
  public OutputStream openBufferedStream() throws IOException {
    OutputStream stream = openStream();
    if (stream instanceof BufferedOutputStream) {
      return stream;
    }
    return new BufferedOutputStream(stream);
  }
  /**
   * Writes all the given bytes to this sink.
   *
   * @throws IOException if an I/O error occurs while writing to this sink
   */
  public void write(byte[] bytes) throws IOException {
    checkNotNull(bytes);
    Closer closer = Closer.create();
    try {
      OutputStream stream = closer.register(openStream());
      stream.write(bytes);
      // Flush explicitly before close:
      // https://code.google.com/p/guava-libraries/issues/detail?id=1330
      stream.flush();
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Writes all the bytes from the given {@code InputStream} to this sink. Does not close
   * {@code input}.
   *
   * @throws IOException if an I/O error occurs while reading from {@code input} or writing to
   *     this sink
   */
  public long writeFrom(InputStream input) throws IOException {
    checkNotNull(input);
    Closer closer = Closer.create();
    try {
      OutputStream stream = closer.register(openStream());
      long copied = ByteStreams.copy(input, stream);
      // Flush explicitly before close:
      // https://code.google.com/p/guava-libraries/issues/detail?id=1330
      stream.flush();
      return copied;
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * A char sink that encodes written characters with a charset and writes the resulting bytes to
   * this byte sink.
   */
  private final class AsCharSink extends CharSink {
    private final Charset charset;
    private AsCharSink(Charset charset) {
      this.charset = checkNotNull(charset);
    }
    @Override
    public Writer openStream() throws IOException {
      return new OutputStreamWriter(ByteSink.this.openStream(), charset);
    }
    @Override
    public String toString() {
      return ByteSink.this.toString() + ".asCharSink(" + charset + ")";
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/ByteSink.java | Java | asf20 | 5,908 |
/*
* Copyright (C) 2013 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import java.io.IOException;
import java.io.Reader;
import java.nio.CharBuffer;
/**
* A {@link Reader} that reads the characters in a {@link CharSequence}. Like {@code StringReader},
* but works with any {@link CharSequence}.
*
* @author Colin Decker
*/
// TODO(user): make this public? as a type, or a method in CharStreams?
final class CharSequenceReader extends Reader {
  private CharSequence seq; // null once the reader has been closed
  private int pos;          // index of the next character to read
  private int mark;         // position saved by mark(), restored by reset()
  /**
   * Creates a new reader wrapping the given character sequence.
   */
  public CharSequenceReader(CharSequence seq) {
    this.seq = checkNotNull(seq);
  }
  /** Throws if this reader has been closed. */
  private void checkOpen() throws IOException {
    if (seq == null) {
      throw new IOException("reader closed");
    }
  }
  /** Returns the number of characters not yet read. */
  private int remaining() {
    return seq.length() - pos;
  }
  /** Returns whether any unread characters remain. */
  private boolean hasRemaining() {
    return remaining() > 0;
  }
  @Override
  public synchronized int read(CharBuffer target) throws IOException {
    checkNotNull(target);
    checkOpen();
    if (!hasRemaining()) {
      return -1;
    }
    int count = Math.min(target.remaining(), remaining());
    int copied = 0;
    while (copied < count) {
      target.put(seq.charAt(pos++));
      copied++;
    }
    return count;
  }
  @Override
  public synchronized int read() throws IOException {
    checkOpen();
    if (!hasRemaining()) {
      return -1;
    }
    return seq.charAt(pos++);
  }
  @Override
  public synchronized int read(char[] cbuf, int off, int len) throws IOException {
    checkPositionIndexes(off, off + len, cbuf.length);
    checkOpen();
    if (!hasRemaining()) {
      return -1;
    }
    int limit = off + Math.min(len, remaining());
    int dst = off;
    while (dst < limit) {
      cbuf[dst++] = seq.charAt(pos++);
    }
    return dst - off;
  }
  @Override
  public synchronized long skip(long n) throws IOException {
    checkArgument(n >= 0, "n (%s) may not be negative", n);
    checkOpen();
    // remaining() is an int, so the min always fits in an int.
    int skipped = (int) Math.min(remaining(), n);
    pos += skipped;
    return skipped;
  }
  @Override
  public synchronized boolean ready() throws IOException {
    // In-memory data is always ready; we only need to verify we're open.
    checkOpen();
    return true;
  }
  @Override
  public boolean markSupported() {
    return true;
  }
  @Override
  public synchronized void mark(int readAheadLimit) throws IOException {
    checkArgument(readAheadLimit >= 0, "readAheadLimit (%s) may not be negative", readAheadLimit);
    checkOpen();
    // The limit is irrelevant here: the whole sequence stays in memory.
    mark = pos;
  }
  @Override
  public synchronized void reset() throws IOException {
    checkOpen();
    pos = mark;
  }
  @Override
  public synchronized void close() throws IOException {
    // Drop the sequence; all subsequent operations will fail checkOpen().
    seq = null;
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/CharSequenceReader.java | Java | asf20 | 3,484 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.base.Ascii;
import com.google.common.base.Splitter;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.Charset;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
/**
* A readable source of characters, such as a text file. Unlike a {@link Reader}, a
* {@code CharSource} is not an open, stateful stream of characters that can be read and closed.
* Instead, it is an immutable <i>supplier</i> of {@code Reader} instances.
*
* <p>{@code CharSource} provides two kinds of methods:
* <ul>
* <li><b>Methods that return a reader:</b> These methods should return a <i>new</i>, independent
* instance each time they are called. The caller is responsible for ensuring that the returned
* reader is closed.
* <li><b>Convenience methods:</b> These are implementations of common operations that are
* typically implemented by opening a reader using one of the methods in the first category,
* doing something and finally closing the reader that was opened.
* </ul>
*
* <p>Several methods in this class, such as {@link #readLines()}, break the contents of the
* source into lines. Like {@link BufferedReader}, these methods break lines on any of {@code \n},
* {@code \r} or {@code \r\n}, do not include the line separator in each line and do not consider
* there to be an empty line at the end if the contents are terminated with a line separator.
*
* <p>Any {@link ByteSource} containing text encoded with a specific {@linkplain Charset character
* encoding} may be viewed as a {@code CharSource} using {@link ByteSource#asCharSource(Charset)}.
*
* @since 14.0
* @author Colin Decker
*/
public abstract class CharSource implements InputSupplier<Reader> {
  /**
   * Constructor for use by subclasses.
   */
  protected CharSource() {}
  /**
   * Opens a new {@link Reader} for reading from this source. This method should return a new,
   * independent reader each time it is called.
   *
   * <p>The caller is responsible for ensuring that the returned reader is closed.
   *
   * @throws IOException if an I/O error occurs in the process of opening the reader
   */
  public abstract Reader openStream() throws IOException;
  /**
   * This method is a temporary method provided for easing migration from suppliers to sources and
   * sinks.
   *
   * @since 15.0
   * @deprecated This method is only provided for temporary compatibility with the
   *     {@link InputSupplier} interface and should not be called directly. Use {@link #openStream}
   *     instead. This method is scheduled for removal in Guava 18.0.
   */
  @Override
  @Deprecated
  public final Reader getInput() throws IOException {
    return openStream();
  }
  /**
   * Opens a new {@link BufferedReader} for reading from this source. This method should return a
   * new, independent reader each time it is called.
   *
   * <p>The caller is responsible for ensuring that the returned reader is closed.
   *
   * @throws IOException if an I/O error occurs in the process of opening the reader
   */
  public BufferedReader openBufferedStream() throws IOException {
    Reader reader = openStream();
    // Avoid double-buffering if openStream() already returned a BufferedReader.
    return (reader instanceof BufferedReader)
        ? (BufferedReader) reader
        : new BufferedReader(reader);
  }
  /**
   * Appends the contents of this source to the given {@link Appendable} (such as a {@link Writer}).
   * Does not close {@code appendable} if it is {@code Closeable}.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or
   *     writing to {@code appendable}
   */
  public long copyTo(Appendable appendable) throws IOException {
    checkNotNull(appendable);
    // Closer guarantees the reader is closed and preserves the primary
    // exception if both the copy and the close fail.
    Closer closer = Closer.create();
    try {
      Reader reader = closer.register(openStream());
      return CharStreams.copy(reader, appendable);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Copies the contents of this source to the given sink.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or
   *     writing to {@code sink}
   */
  public long copyTo(CharSink sink) throws IOException {
    checkNotNull(sink);
    Closer closer = Closer.create();
    try {
      Reader reader = closer.register(openStream());
      Writer writer = closer.register(sink.openStream());
      return CharStreams.copy(reader, writer);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Reads the contents of this source as a string.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source
   */
  public String read() throws IOException {
    Closer closer = Closer.create();
    try {
      Reader reader = closer.register(openStream());
      return CharStreams.toString(reader);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Reads the first link of this source as a string. Returns {@code null} if this source is empty.
   *
   * <p>Like {@link BufferedReader}, this method breaks lines on any of {@code \n}, {@code \r} or
   * {@code \r\n}, does not include the line separator in the returned line and does not consider
   * there to be an extra empty line at the end if the content is terminated with a line separator.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source
   */
  public @Nullable String readFirstLine() throws IOException {
    Closer closer = Closer.create();
    try {
      BufferedReader reader = closer.register(openBufferedStream());
      // BufferedReader.readLine() returns null at end of stream, which is
      // exactly the contract this method documents for an empty source.
      return reader.readLine();
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Reads all the lines of this source as a list of strings. The returned list will be empty if
   * this source is empty.
   *
   * <p>Like {@link BufferedReader}, this method breaks lines on any of {@code \n}, {@code \r} or
   * {@code \r\n}, does not include the line separator in the returned lines and does not consider
   * there to be an extra empty line at the end if the content is terminated with a line separator.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source
   */
  public ImmutableList<String> readLines() throws IOException {
    Closer closer = Closer.create();
    try {
      BufferedReader reader = closer.register(openBufferedStream());
      List<String> result = Lists.newArrayList();
      String line;
      while ((line = reader.readLine()) != null) {
        result.add(line);
      }
      return ImmutableList.copyOf(result);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Reads lines of text from this source, processing each line as it is read using the given
   * {@link LineProcessor processor}. Stops when all lines have been processed or the processor
   * returns {@code false} and returns the result produced by the processor.
   *
   * <p>Like {@link BufferedReader}, this method breaks lines on any of {@code \n}, {@code \r} or
   * {@code \r\n}, does not include the line separator in the lines passed to the {@code processor}
   * and does not consider there to be an extra empty line at the end if the content is terminated
   * with a line separator.
   *
   * @throws IOException if an I/O error occurs in the process of reading from this source or if
   *     {@code processor} throws an {@code IOException}
   * @since 16.0
   */
  @Beta
  public <T> T readLines(LineProcessor<T> processor) throws IOException {
    checkNotNull(processor);
    Closer closer = Closer.create();
    try {
      Reader reader = closer.register(openStream());
      return CharStreams.readLines(reader, processor);
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Returns whether the source has zero chars. The default implementation is to open a stream and
   * check for EOF.
   *
   * @throws IOException if an I/O error occurs
   * @since 15.0
   */
  public boolean isEmpty() throws IOException {
    Closer closer = Closer.create();
    try {
      Reader reader = closer.register(openStream());
      // Reading a single char is enough: -1 on the first read means empty.
      return reader.read() == -1;
    } catch (Throwable e) {
      throw closer.rethrow(e);
    } finally {
      closer.close();
    }
  }
  /**
   * Concatenates multiple {@link CharSource} instances into a single source. Streams returned from
   * the source will contain the concatenated data from the streams of the underlying sources.
   *
   * <p>Only one underlying stream will be open at a time. Closing the concatenated stream will
   * close the open underlying stream.
   *
   * @param sources the sources to concatenate
   * @return a {@code CharSource} containing the concatenated data
   * @since 15.0
   */
  public static CharSource concat(Iterable<? extends CharSource> sources) {
    return new ConcatenatedCharSource(sources);
  }
  /**
   * Concatenates multiple {@link CharSource} instances into a single source. Streams returned from
   * the source will contain the concatenated data from the streams of the underlying sources.
   *
   * <p>Only one underlying stream will be open at a time. Closing the concatenated stream will
   * close the open underlying stream.
   *
   * <p>Note: The input {@code Iterator} will be copied to an {@code ImmutableList} when this
   * method is called. This will fail if the iterator is infinite and may cause problems if the
   * iterator eagerly fetches data for each source when iterated (rather than producing sources
   * that only load data through their streams). Prefer using the {@link #concat(Iterable)}
   * overload if possible.
   *
   * @param sources the sources to concatenate
   * @return a {@code CharSource} containing the concatenated data
   * @throws NullPointerException if any of {@code sources} is {@code null}
   * @since 15.0
   */
  public static CharSource concat(Iterator<? extends CharSource> sources) {
    return concat(ImmutableList.copyOf(sources));
  }
  /**
   * Concatenates multiple {@link CharSource} instances into a single source. Streams returned from
   * the source will contain the concatenated data from the streams of the underlying sources.
   *
   * <p>Only one underlying stream will be open at a time. Closing the concatenated stream will
   * close the open underlying stream.
   *
   * @param sources the sources to concatenate
   * @return a {@code CharSource} containing the concatenated data
   * @throws NullPointerException if any of {@code sources} is {@code null}
   * @since 15.0
   */
  public static CharSource concat(CharSource... sources) {
    return concat(ImmutableList.copyOf(sources));
  }
  /**
   * Returns a view of the given character sequence as a {@link CharSource}. The behavior of the
   * returned {@code CharSource} and any {@code Reader} instances created by it is unspecified if
   * the {@code charSequence} is mutated while it is being read, so don't do that.
   *
   * @since 15.0 (since 14.0 as {@code CharStreams.asCharSource(String)})
   */
  public static CharSource wrap(CharSequence charSequence) {
    return new CharSequenceCharSource(charSequence);
  }
  /**
   * Returns an immutable {@link CharSource} that contains no characters.
   *
   * @since 15.0
   */
  public static CharSource empty() {
    return EmptyCharSource.INSTANCE;
  }
  /** A CharSource backed by an in-memory {@link CharSequence}. */
  private static class CharSequenceCharSource extends CharSource {
    // Matches the same line terminators that BufferedReader.readLine()
    // recognizes: \r\n, \n or \r.
    private static final Splitter LINE_SPLITTER
        = Splitter.on(Pattern.compile("\r\n|\n|\r"));
    private final CharSequence seq;
    protected CharSequenceCharSource(CharSequence seq) {
      this.seq = checkNotNull(seq);
    }
    @Override
    public Reader openStream() {
      return new CharSequenceReader(seq);
    }
    @Override
    public String read() {
      // The whole content is already in memory; no stream needed.
      return seq.toString();
    }
    @Override
    public boolean isEmpty() {
      return seq.length() == 0;
    }
    /**
     * Returns an iterable over the lines in the string. If the string ends in
     * a newline, a final empty string is not included to match the behavior of
     * BufferedReader/LineReader.readLine().
     */
    private Iterable<String> lines() {
      return new Iterable<String>() {
        @Override
        public Iterator<String> iterator() {
          return new AbstractIterator<String>() {
            Iterator<String> lines = LINE_SPLITTER.split(seq).iterator();
            @Override
            protected String computeNext() {
              if (lines.hasNext()) {
                String next = lines.next();
                // skip last line if it's empty
                if (lines.hasNext() || !next.isEmpty()) {
                  return next;
                }
              }
              return endOfData();
            }
          };
        }
      };
    }
    @Override
    public String readFirstLine() {
      Iterator<String> lines = lines().iterator();
      return lines.hasNext() ? lines.next() : null;
    }
    @Override
    public ImmutableList<String> readLines() {
      return ImmutableList.copyOf(lines());
    }
    @Override
    public <T> T readLines(LineProcessor<T> processor) throws IOException {
      // Feed each line to the processor until it asks to stop.
      for (String line : lines()) {
        if (!processor.processLine(line)) {
          break;
        }
      }
      return processor.getResult();
    }
    @Override
    public String toString() {
      return "CharSource.wrap(" + Ascii.truncate(seq, 30, "...") + ")";
    }
  }
  // Singleton returned by empty(); inherits all behavior from
  // CharSequenceCharSource applied to the empty string.
  private static final class EmptyCharSource extends CharSequenceCharSource {
    private static final EmptyCharSource INSTANCE = new EmptyCharSource();
    private EmptyCharSource() {
      super("");
    }
    @Override
    public String toString() {
      return "CharSource.empty()";
    }
  }
  /** Lazily concatenates several sources; see {@link CharSource#concat(Iterable)}. */
  private static final class ConcatenatedCharSource extends CharSource {
    private final Iterable<? extends CharSource> sources;
    ConcatenatedCharSource(Iterable<? extends CharSource> sources) {
      this.sources = checkNotNull(sources);
    }
    @Override
    public Reader openStream() throws IOException {
      // MultiReader opens only one underlying reader at a time.
      return new MultiReader(sources.iterator());
    }
    @Override
    public boolean isEmpty() throws IOException {
      // Empty only if every component source is empty.
      for (CharSource source : sources) {
        if (!source.isEmpty()) {
          return false;
        }
      }
      return true;
    }
    @Override
    public String toString() {
      return "CharSource.concat(" + sources + ")";
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/CharSource.java | Java | asf20 | 15,660 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
/**
* Provides simple GWT-compatible substitutes for {@code InputStream}, {@code OutputStream},
* {@code Reader}, and {@code Writer} so that {@code BaseEncoding} can use streaming implementations
* while remaining GWT-compatible.
*
* @author Louis Wasserman
*/
@GwtCompatible(emulated = true)
final class GwtWorkarounds {
  private GwtWorkarounds() {}
  /**
   * A GWT-compatible substitute for a {@code Reader}.
   */
  interface CharInput {
    int read() throws IOException;
    void close() throws IOException;
  }
  /**
   * Views a {@code Reader} as a {@code CharInput}.
   */
  @GwtIncompatible("Reader")
  static CharInput asCharInput(final Reader in) {
    checkNotNull(in);
    return new CharInput() {
      @Override
      public int read() throws IOException {
        return in.read();
      }
      @Override
      public void close() throws IOException {
        in.close();
      }
    };
  }
  /**
   * Views a {@code CharSequence} as a {@code CharInput}.
   */
  static CharInput asCharInput(final CharSequence text) {
    checkNotNull(text);
    return new CharInput() {
      int next = 0;
      @Override
      public int read() {
        return (next < text.length()) ? text.charAt(next++) : -1;
      }
      @Override
      public void close() {
        // Exhaust the input so any subsequent read() reports end-of-input.
        next = text.length();
      }
    };
  }
  /**
   * A GWT-compatible substitute for an {@code InputStream}.
   */
  interface ByteInput {
    int read() throws IOException;
    void close() throws IOException;
  }
  /**
   * Views a {@code ByteInput} as an {@code InputStream}.
   */
  @GwtIncompatible("InputStream")
  static InputStream asInputStream(final ByteInput bytes) {
    checkNotNull(bytes);
    return new InputStream() {
      @Override
      public int read() throws IOException {
        return bytes.read();
      }
      @Override
      public int read(byte[] b, int off, int len) throws IOException {
        checkNotNull(b);
        checkPositionIndexes(off, off + len, b.length);
        // Pull bytes one at a time; ByteInput has no bulk-read operation.
        int count = 0;
        while (count < len) {
          int next = read();
          if (next == -1) {
            // Per the InputStream contract: -1 only when nothing was read
            // at all; otherwise report the short count.
            return (count == 0) ? -1 : count;
          }
          b[off + count] = (byte) next;
          count++;
        }
        return count;
      }
      @Override
      public void close() throws IOException {
        bytes.close();
      }
    };
  }
  /**
   * A GWT-compatible substitute for an {@code OutputStream}.
   */
  interface ByteOutput {
    void write(byte b) throws IOException;
    void flush() throws IOException;
    void close() throws IOException;
  }
  /**
   * Views a {@code ByteOutput} as an {@code OutputStream}.
   */
  @GwtIncompatible("OutputStream")
  static OutputStream asOutputStream(final ByteOutput target) {
    checkNotNull(target);
    return new OutputStream() {
      @Override
      public void write(int b) throws IOException {
        target.write((byte) b);
      }
      @Override
      public void flush() throws IOException {
        target.flush();
      }
      @Override
      public void close() throws IOException {
        target.close();
      }
    };
  }
  /**
   * A GWT-compatible substitute for a {@code Writer}.
   */
  interface CharOutput {
    void write(char c) throws IOException;
    void flush() throws IOException;
    void close() throws IOException;
  }
  /**
   * Views a {@code Writer} as a {@code CharOutput}.
   */
  @GwtIncompatible("Writer")
  static CharOutput asCharOutput(final Writer out) {
    checkNotNull(out);
    return new CharOutput() {
      @Override
      public void write(char c) throws IOException {
        out.append(c);
      }
      @Override
      public void flush() throws IOException {
        out.flush();
      }
      @Override
      public void close() throws IOException {
        out.close();
      }
    };
  }
  /**
   * Returns a {@code CharOutput} whose {@code toString()} method can be used
   * to get the combined output.
   */
  static CharOutput stringBuilderOutput(int initialSize) {
    final StringBuilder buf = new StringBuilder(initialSize);
    return new CharOutput() {
      @Override
      public void write(char c) {
        buf.append(c);
      }
      @Override
      public void flush() {}
      @Override
      public void close() {}
      @Override
      public String toString() {
        return buf.toString();
      }
    };
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/GwtWorkarounds.java | Java | asf20 | 5,648 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import javax.annotation.Nullable;
/**
* A {@link Reader} that concatenates multiple readers.
*
* @author Bin Zhu
* @since 1.0
*/
class MultiReader extends Reader {

  /** Remaining sources, opened lazily one at a time. */
  private final Iterator<? extends CharSource> it;

  /** The reader currently being drained, or null once all sources are exhausted. */
  private Reader current;

  MultiReader(Iterator<? extends CharSource> readers) throws IOException {
    this.it = readers;
    advance();
  }

  /**
   * Closes the current reader and opens the next one, if any.
   */
  private void advance() throws IOException {
    close();
    if (it.hasNext()) {
      current = it.next().openStream();
    }
  }

  @Override public int read(@Nullable char cbuf[], int off, int len) throws IOException {
    // Loop rather than recurse (as this method previously did) so that an
    // arbitrarily long run of empty sources cannot overflow the stack.
    while (current != null) {
      int result = current.read(cbuf, off, len);
      if (result != -1) {
        return result;
      }
      // Current source is exhausted; move on and retry.
      advance();
    }
    return -1;
  }

  @Override public long skip(long n) throws IOException {
    Preconditions.checkArgument(n >= 0, "n is negative");
    if (n > 0) {
      while (current != null) {
        long result = current.skip(n);
        if (result > 0) {
          return result;
        }
        // Current source could not skip anything; try the next one.
        advance();
      }
    }
    return 0;
  }

  @Override public boolean ready() throws IOException {
    return (current != null) && current.ready();
  }

  @Override public void close() throws IOException {
    if (current != null) {
      try {
        current.close();
      } finally {
        // Null out even if close() throws, so we never close twice.
        current = null;
      }
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/MultiReader.java | Java | asf20 | 2,232 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import java.io.IOException;
/**
* Package-protected abstract class that implements the line reading
* algorithm used by {@link LineReader}. Line separators are per {@link
* java.io.BufferedReader}: line feed, carriage return, or carriage
* return followed immediately by a linefeed.
*
* <p>Subclasses must implement {@link #handleLine}, call {@link #add}
* to pass character data, and call {@link #finish} at the end of stream.
*
* @author Chris Nokleberg
* @since 1.0
*/
abstract class LineBuffer {
  /** Holds partial line contents. */
  private StringBuilder line = new StringBuilder();
  /** Whether a line ending with a CR is pending processing. */
  private boolean sawReturn;

  /**
   * Process additional characters from the stream. When a line separator
   * is found the contents of the line and the line separator itself
   * are passed to the abstract {@link #handleLine} method.
   *
   * @param cbuf the character buffer to process
   * @param off the offset into the buffer
   * @param len the number of characters to process
   * @throws IOException if an I/O error occurs
   * @see #finish
   */
  protected void add(char[] cbuf, int off, int len) throws IOException {
    int pos = off;
    if (sawReturn && len > 0) {
      // Last call to add ended with a CR; we can handle the line now.
      // Peek at the first new char to decide whether that CR was actually
      // the start of a "\r\n" separator.
      if (finishLine(cbuf[pos] == '\n')) {
        pos++;
      }
    }

    int start = pos;
    for (int end = off + len; pos < end; pos++) {
      switch (cbuf[pos]) {
        case '\r':
          line.append(cbuf, start, pos - start);
          sawReturn = true;
          if (pos + 1 < end) {
            // The char after the CR is already available, so we can decide
            // immediately whether the separator is "\r" or "\r\n".
            if (finishLine(cbuf[pos + 1] == '\n')) {
              pos++;
            }
          }
          start = pos + 1;
          break;

        case '\n':
          // Plain LF separator (a preceding CR was handled in the case above).
          line.append(cbuf, start, pos - start);
          finishLine(true);
          start = pos + 1;
          break;

        default:
          // do nothing
      }
    }
    // Buffer trailing characters that are not yet followed by a separator.
    line.append(cbuf, start, off + len - start);
  }

  /**
   * Called when a line is complete. Returns {@code sawNewline} so callers
   * know whether an LF was consumed as part of the separator.
   */
  private boolean finishLine(boolean sawNewline) throws IOException {
    handleLine(line.toString(), sawReturn
        ? (sawNewline ? "\r\n" : "\r")
        : (sawNewline ? "\n" : ""));
    line = new StringBuilder();
    sawReturn = false;
    return sawNewline;
  }

  /**
   * Subclasses must call this method after finishing character processing,
   * in order to ensure that any unterminated line in the buffer is
   * passed to {@link #handleLine}.
   *
   * @throws IOException if an I/O error occurs
   */
  protected void finish() throws IOException {
    if (sawReturn || line.length() > 0) {
      finishLine(false);
    }
  }

  /**
   * Called for each line found in the character data passed to
   * {@link #add}.
   *
   * @param line a line of text (possibly empty), without any line separators
   * @param end the line separator; one of {@code "\r"}, {@code "\n"},
   *     {@code "\r\n"}, or {@code ""}
   * @throws IOException if an I/O error occurs
   */
  protected abstract void handleLine(String line, String end)
      throws IOException;
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/LineBuffer.java | Java | asf20 | 3,755 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import java.io.IOException;
import java.io.Reader;
import java.nio.CharBuffer;
import java.util.LinkedList;
import java.util.Queue;
/**
* A class for reading lines of text. Provides the same functionality
* as {@link java.io.BufferedReader#readLine()} but for all {@link Readable}
* objects, not just instances of {@link Reader}.
*
* @author Chris Nokleberg
* @since 1.0
*/
@Beta
public final class LineReader {
  private final Readable readable;
  private final Reader reader;
  private final char[] chars = new char[0x1000]; // 4K
  private final CharBuffer charBuf = CharBuffer.wrap(chars);

  /** Lines that have been split out but not yet returned by readLine(). */
  private final Queue<String> completedLines = new LinkedList<String>();
  private final LineBuffer lineBuf = new LineBuffer() {
    @Override protected void handleLine(String line, String end) {
      completedLines.add(line);
    }
  };

  /**
   * Creates a new instance that will read lines from the given
   * {@code Readable} object.
   */
  public LineReader(Readable readable) {
    this.readable = checkNotNull(readable);
    // Remember whether we have a full Reader so we can use the faster overload.
    this.reader = (readable instanceof Reader) ? (Reader) readable : null;
  }

  /**
   * Reads a line of text. A line is considered to be terminated by any
   * one of a line feed ({@code '\n'}), a carriage return
   * ({@code '\r'}), or a carriage return followed immediately by a linefeed
   * ({@code "\r\n"}).
   *
   * @return a {@code String} containing the contents of the line, not
   *     including any line-termination characters, or {@code null} if the
   *     end of the stream has been reached.
   * @throws IOException if an I/O error occurs
   */
  public String readLine() throws IOException {
    while (completedLines.isEmpty()) {
      charBuf.clear();
      // The default implementation of Reader#read(CharBuffer) allocates a
      // temporary char[], so we call Reader#read(char[], int, int) instead.
      int read;
      if (reader != null) {
        read = reader.read(chars, 0, chars.length);
      } else {
        read = readable.read(charBuf);
      }
      if (read == -1) {
        lineBuf.finish();
        break;
      }
      lineBuf.add(chars, 0, read);
    }
    return completedLines.poll();
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/LineReader.java | Java | asf20 | 2,827 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.io.FileWriteMode.APPEND;
import com.google.common.annotations.Beta;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Predicate;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.TreeTraverser;
import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* Provides utility methods for working with files.
*
* <p>All method parameters must be non-null unless documented otherwise.
*
* @author Chris Nokleberg
* @author Colin Decker
* @since 1.0
*/
@Beta
public final class Files {
/** Maximum loop count when creating temp directories. */
private static final int TEMP_DIR_ATTEMPTS = 10000;
private Files() {}
/**
 * Returns a buffered reader that reads from a file using the given
 * character set.
 *
 * @param file the file to read from
 * @param charset the charset used to decode the input stream; see {@link
 *     Charsets} for helpful predefined constants
 * @return the buffered reader
 * @throws FileNotFoundException if the file does not exist or cannot be opened
 */
public static BufferedReader newReader(File file, Charset charset)
    throws FileNotFoundException {
  checkNotNull(file);
  checkNotNull(charset);
  InputStreamReader decoder =
      new InputStreamReader(new FileInputStream(file), charset);
  return new BufferedReader(decoder);
}
/**
 * Returns a buffered writer that writes to a file using the given
 * character set.
 *
 * @param file the file to write to
 * @param charset the charset used to encode the output stream; see {@link
 *     Charsets} for helpful predefined constants
 * @return the buffered writer
 * @throws FileNotFoundException if the file cannot be opened for writing
 */
public static BufferedWriter newWriter(File file, Charset charset)
    throws FileNotFoundException {
  checkNotNull(file);
  checkNotNull(charset);
  OutputStreamWriter encoder =
      new OutputStreamWriter(new FileOutputStream(file), charset);
  return new BufferedWriter(encoder);
}
/**
* Returns a new {@link ByteSource} for reading bytes from the given file.
*
* @since 14.0
*/
public static ByteSource asByteSource(File file) {
  // Lazy view: no I/O happens here; the file is opened when the source is read.
  return new FileByteSource(file);
}
/** ByteSource view of a file; each openStream() call opens the file anew. */
private static final class FileByteSource extends ByteSource {

  private final File file;

  private FileByteSource(File file) {
    this.file = checkNotNull(file);
  }

  @Override
  public FileInputStream openStream() throws IOException {
    return new FileInputStream(file);
  }

  @Override
  public long size() throws IOException {
    // File.length() returns 0 for nonexistent files, so check explicitly.
    if (!file.isFile()) {
      throw new FileNotFoundException(file.toString());
    }
    return file.length();
  }

  @Override
  public byte[] read() throws IOException {
    Closer closer = Closer.create();
    try {
      FileInputStream stream = closer.register(openStream());
      // Use the channel's size (the length at open time) to presize the
      // result; readFile copes with the file changing size afterwards.
      return readFile(stream, stream.getChannel().size());
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }

  @Override
  public String toString() {
    return "Files.asByteSource(" + file + ")";
  }
}
/**
 * Reads a file of the given expected size from the given input stream, if
 * it will fit into a byte array. This method handles the case where the file
 * size changes between when the size is read and when the contents are read
 * from the stream.
 */
static byte[] readFile(
    InputStream in, long expectedSize) throws IOException {
  if (expectedSize > Integer.MAX_VALUE) {
    // Deliberately an Error, matching historical behavior of this class.
    throw new OutOfMemoryError("file is too large to fit in a byte array: "
        + expectedSize + " bytes");
  }

  // Some special files may report size 0 but still have content, so fall
  // back to the unsized, growing-buffer read path in that case.
  if (expectedSize == 0) {
    return ByteStreams.toByteArray(in);
  }
  return ByteStreams.toByteArray(in, (int) expectedSize);
}
/**
* Returns a new {@link ByteSink} for writing bytes to the given file. The
* given {@code modes} control how the file is opened for writing. When no
* mode is provided, the file will be truncated before writing. When the
* {@link FileWriteMode#APPEND APPEND} mode is provided, writes will
* append to the end of the file without truncating it.
*
* @since 14.0
*/
public static ByteSink asByteSink(File file, FileWriteMode... modes) {
  // Lazy view: the file is opened (and possibly truncated) only when written.
  return new FileByteSink(file, modes);
}
/** ByteSink view of a file, honoring the requested write modes. */
private static final class FileByteSink extends ByteSink {

  private final File file;
  private final ImmutableSet<FileWriteMode> modes;

  private FileByteSink(File file, FileWriteMode... modes) {
    this.file = checkNotNull(file);
    this.modes = ImmutableSet.copyOf(modes);
  }

  @Override
  public FileOutputStream openStream() throws IOException {
    // APPEND opens the file without truncating; otherwise it is truncated.
    boolean append = modes.contains(APPEND);
    return new FileOutputStream(file, append);
  }

  @Override
  public String toString() {
    return "Files.asByteSink(" + file + ", " + modes + ")";
  }
}
/**
 * Returns a new {@link CharSource} for reading character data from the given
 * file using the given character set.
 *
 * @since 14.0
 */
public static CharSource asCharSource(File file, Charset charset) {
  ByteSource bytes = asByteSource(file);
  return bytes.asCharSource(charset);
}
/**
 * Returns a new {@link CharSink} for writing character data to the given
 * file using the given character set. The given {@code modes} control how
 * the file is opened for writing. When no mode is provided, the file
 * will be truncated before writing. When the
 * {@link FileWriteMode#APPEND APPEND} mode is provided, writes will
 * append to the end of the file without truncating it.
 *
 * @since 14.0
 */
public static CharSink asCharSink(File file, Charset charset,
    FileWriteMode... modes) {
  ByteSink bytes = asByteSink(file, modes);
  return bytes.asCharSink(charset);
}
/**
* Returns a factory that will supply instances of {@link FileInputStream}
* that read from a file.
*
* @param file the file to read from
* @return the factory
* @deprecated Use {@link #asByteSource(File)}. This method is scheduled for
* removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<FileInputStream> newInputStreamSupplier(
    final File file) {
  // Adapts the ByteSource view of the file to the legacy InputSupplier API.
  return ByteStreams.asInputSupplier(asByteSource(file));
}
/**
* Returns a factory that will supply instances of {@link FileOutputStream}
* that write to a file.
*
* @param file the file to write to
* @return the factory
* @deprecated Use {@link #asByteSink(File)}. This method is scheduled for
* removal in Guava 18.0.
*/
@Deprecated
public static OutputSupplier<FileOutputStream> newOutputStreamSupplier(
    File file) {
  // Defaults to truncating (non-append) mode.
  return newOutputStreamSupplier(file, false);
}
/**
* Returns a factory that will supply instances of {@link FileOutputStream}
* that write to or append to a file.
*
* @param file the file to write to
* @param append if true, the encoded characters will be appended to the file;
* otherwise the file is overwritten
* @return the factory
* @deprecated Use {@link #asByteSink(File, FileWriteMode...)}, passing
* {@link FileWriteMode#APPEND} for append. This method is scheduled for
* removal in Guava 18.0.
*/
@Deprecated
public static OutputSupplier<FileOutputStream> newOutputStreamSupplier(
    final File file, final boolean append) {
  // Adapts the ByteSink view of the file to the legacy OutputSupplier API.
  return ByteStreams.asOutputSupplier(asByteSink(file, modes(append)));
}
/** Translates an append flag into the corresponding {@link FileWriteMode} array. */
private static FileWriteMode[] modes(boolean append) {
  if (append) {
    return new FileWriteMode[] { FileWriteMode.APPEND };
  }
  return new FileWriteMode[0];
}
/**
* Returns a factory that will supply instances of
* {@link InputStreamReader} that read a file using the given character set.
*
* @param file the file to read from
* @param charset the charset used to decode the input stream; see {@link
* Charsets} for helpful predefined constants
* @return the factory
* @deprecated Use {@link #asCharSource(File, Charset)}. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static InputSupplier<InputStreamReader> newReaderSupplier(File file,
    Charset charset) {
  // Adapts the CharSource view of the file to the legacy InputSupplier API.
  return CharStreams.asInputSupplier(asCharSource(file, charset));
}
/**
* Returns a factory that will supply instances of {@link OutputStreamWriter}
* that write to a file using the given character set.
*
* @param file the file to write to
* @param charset the charset used to encode the output stream; see {@link
* Charsets} for helpful predefined constants
* @return the factory
* @deprecated Use {@link #asCharSink(File, Charset)}. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static OutputSupplier<OutputStreamWriter> newWriterSupplier(File file,
    Charset charset) {
  // Defaults to truncating (non-append) mode.
  return newWriterSupplier(file, charset, false);
}
/**
* Returns a factory that will supply instances of {@link OutputStreamWriter}
* that write to or append to a file using the given character set.
*
* @param file the file to write to
* @param charset the charset used to encode the output stream; see {@link
* Charsets} for helpful predefined constants
* @param append if true, the encoded characters will be appended to the file;
* otherwise the file is overwritten
* @return the factory
* @deprecated Use {@link #asCharSink(File, Charset, FileWriteMode...)},
* passing {@link FileWriteMode#APPEND} for append. This method is
* scheduled for removal in Guava 18.0.
*/
@Deprecated
public static OutputSupplier<OutputStreamWriter> newWriterSupplier(File file,
    Charset charset, boolean append) {
  // Adapts the CharSink view of the file to the legacy OutputSupplier API.
  return CharStreams.asOutputSupplier(asCharSink(file, charset, modes(append)));
}
/**
 * Reads all bytes from a file into a byte array.
 *
 * @param file the file to read from
 * @return a byte array containing all the bytes from file
 * @throws IllegalArgumentException if the file is bigger than the largest
 *     possible byte array (2^31 - 1)
 * @throws IOException if an I/O error occurs
 */
public static byte[] toByteArray(File file) throws IOException {
  ByteSource source = asByteSource(file);
  return source.read();
}
/**
 * Reads all characters from a file into a {@link String}, using the given
 * character set.
 *
 * @param file the file to read from
 * @param charset the charset used to decode the input stream; see {@link
 *     Charsets} for helpful predefined constants
 * @return a string containing all the characters from the file
 * @throws IOException if an I/O error occurs
 */
public static String toString(File file, Charset charset) throws IOException {
  CharSource source = asCharSource(file, charset);
  return source.read();
}
/**
* Copies to a file all bytes from an {@link InputStream} supplied by a
* factory.
*
* @param from the input factory
* @param to the destination file
* @throws IOException if an I/O error occurs
* @deprecated Use {@code from.copyTo(Files.asByteSink(to))} after changing
* {@code from} to a {@code ByteSource} if necessary. This method is
* scheduled to be removed in Guava 18.0.
*/
@Deprecated
public static void copy(InputSupplier<? extends InputStream> from, File to)
    throws IOException {
  // Bridges the legacy supplier to the ByteSource/ByteSink copy path.
  ByteStreams.asByteSource(from).copyTo(asByteSink(to));
}
/**
 * Overwrites a file with the contents of a byte array.
 *
 * @param from the bytes to write
 * @param to the destination file
 * @throws IOException if an I/O error occurs
 */
public static void write(byte[] from, File to) throws IOException {
  ByteSink sink = asByteSink(to);
  sink.write(from);
}
/**
* Copies all bytes from a file to an {@link OutputStream} supplied by
* a factory.
*
* @param from the source file
* @param to the output factory
* @throws IOException if an I/O error occurs
* @deprecated Use {@code Files.asByteSource(from).copyTo(to)} after changing
* {@code to} to a {@code ByteSink} if necessary. This method is
* scheduled to be removed in Guava 18.0.
*/
@Deprecated
public static void copy(File from, OutputSupplier<? extends OutputStream> to)
    throws IOException {
  // Bridges the legacy supplier to the ByteSource/ByteSink copy path.
  asByteSource(from).copyTo(ByteStreams.asByteSink(to));
}
/**
 * Copies all bytes from a file to an output stream.
 *
 * @param from the source file
 * @param to the output stream
 * @throws IOException if an I/O error occurs
 */
public static void copy(File from, OutputStream to) throws IOException {
  ByteSource source = asByteSource(from);
  source.copyTo(to);
}
/**
 * Copies all the bytes from one file to another.
 *
 * <p><b>Warning:</b> If {@code to} represents an existing file, that file
 * will be overwritten with the contents of {@code from}. If {@code to} and
 * {@code from} refer to the <i>same</i> file, the contents of that file
 * will be deleted.
 *
 * @param from the source file
 * @param to the destination file
 * @throws IOException if an I/O error occurs
 * @throws IllegalArgumentException if {@code from.equals(to)}
 */
public static void copy(File from, File to) throws IOException {
  // Guard against deleting the source: same path means truncate-then-copy.
  checkArgument(!from.equals(to),
      "Source %s and destination %s must be different", from, to);
  ByteSource source = asByteSource(from);
  source.copyTo(asByteSink(to));
}
/**
* Copies to a file all characters from a {@link Readable} and
* {@link Closeable} object supplied by a factory, using the given
* character set.
*
* @param from the readable supplier
* @param to the destination file
* @param charset the charset used to encode the output stream; see {@link
* Charsets} for helpful predefined constants
* @throws IOException if an I/O error occurs
* @deprecated Use {@code from.copyTo(Files.asCharSink(to, charset))} after
* changing {@code from} to a {@code CharSource} if necessary. This
* method is scheduled to be removed in Guava 18.0.
*/
@Deprecated
public static <R extends Readable & Closeable> void copy(
    InputSupplier<R> from, File to, Charset charset) throws IOException {
  // Bridges the legacy supplier to the CharSource/CharSink copy path.
  CharStreams.asCharSource(from).copyTo(asCharSink(to, charset));
}
/**
 * Writes a character sequence (such as a string) to a file using the given
 * character set, overwriting any existing content.
 *
 * @param from the character sequence to write
 * @param to the destination file
 * @param charset the charset used to encode the output stream; see {@link
 *     Charsets} for helpful predefined constants
 * @throws IOException if an I/O error occurs
 */
public static void write(CharSequence from, File to, Charset charset)
    throws IOException {
  CharSink sink = asCharSink(to, charset);
  sink.write(from);
}
/**
* Appends a character sequence (such as a string) to a file using the given
* character set.
*
* @param from the character sequence to append
* @param to the destination file
* @param charset the charset used to encode the output stream; see {@link
* Charsets} for helpful predefined constants
* @throws IOException if an I/O error occurs
*/
public static void append(CharSequence from, File to, Charset charset)
    throws IOException {
  // Delegates to the private write helper with append = true.
  write(from, to, charset, true);
}
/**
 * Private helper method. Writes a character sequence to a file,
 * optionally appending.
 *
 * @param from the character sequence to write
 * @param to the destination file
 * @param charset the charset used to encode the output stream; see {@link
 *     Charsets} for helpful predefined constants
 * @param append true to append, false to overwrite
 * @throws IOException if an I/O error occurs
 */
private static void write(CharSequence from, File to, Charset charset,
    boolean append) throws IOException {
  CharSink sink = asCharSink(to, charset, modes(append));
  sink.write(from);
}
/**
 * Copies all characters from a file to an {@link Appendable} and
* {@link Closeable} object supplied by a factory, using the given
* character set.
*
* @param from the source file
* @param charset the charset used to decode the input stream; see {@link
* Charsets} for helpful predefined constants
* @param to the appendable supplier
* @throws IOException if an I/O error occurs
* @deprecated Use {@code Files.asCharSource(from, charset).copyTo(to)} after
* changing {@code to} to a {@code CharSink} if necessary. This method is
* scheduled to be removed in Guava 18.0.
*/
@Deprecated
public static <W extends Appendable & Closeable> void copy(File from,
    Charset charset, OutputSupplier<W> to) throws IOException {
  // Bridges the legacy supplier to the CharSource/CharSink copy path.
  asCharSource(from, charset).copyTo(CharStreams.asCharSink(to));
}
/**
 * Copies all characters from a file to an appendable object,
 * using the given character set.
 *
 * @param from the source file
 * @param charset the charset used to decode the input stream; see {@link
 *     Charsets} for helpful predefined constants
 * @param to the appendable object
 * @throws IOException if an I/O error occurs
 */
public static void copy(File from, Charset charset, Appendable to)
    throws IOException {
  CharSource source = asCharSource(from, charset);
  source.copyTo(to);
}
/**
 * Returns true if the two files contain the same bytes.
 *
 * @throws IOException if an I/O error occurs
 */
public static boolean equal(File file1, File file2) throws IOException {
  checkNotNull(file1);
  checkNotNull(file2);
  if (file1 == file2 || file1.equals(file2)) {
    return true;
  }

  /*
   * Some operating systems may return zero as the length for files
   * denoting system-dependent entities such as devices or pipes, in
   * which case we must fall back on comparing the bytes directly.
   */
  long length1 = file1.length();
  long length2 = file2.length();
  boolean lengthsKnownAndDiffer =
      length1 != 0 && length2 != 0 && length1 != length2;
  if (lengthsKnownAndDiffer) {
    return false;
  }
  return asByteSource(file1).contentEquals(asByteSource(file2));
}
/**
 * Atomically creates a new directory somewhere beneath the system's
 * temporary directory (as defined by the {@code java.io.tmpdir} system
 * property), and returns its name.
 *
 * <p>Use this method instead of {@link File#createTempFile(String, String)}
 * when you wish to create a directory, not a regular file. A common pitfall
 * is to call {@code createTempFile}, delete the file and create a
 * directory in its place, but this leads to a race condition which can be
 * exploited to create security vulnerabilities, especially when executable
 * files are to be written into the directory.
 *
 * <p>This method assumes that the temporary volume is writable, has free
 * inodes and free blocks, and that it will not be called thousands of times
 * per second.
 *
 * @return the newly-created directory
 * @throws IllegalStateException if the directory could not be created
 */
public static File createTempDir() {
  File baseDir = new File(System.getProperty("java.io.tmpdir"));
  // Timestamp prefix keeps names from different invocations distinct.
  String baseName = System.currentTimeMillis() + "-";

  for (int attempt = 0; attempt < TEMP_DIR_ATTEMPTS; attempt++) {
    File candidate = new File(baseDir, baseName + attempt);
    // mkdir() is atomic: it fails rather than reusing an existing directory.
    if (candidate.mkdir()) {
      return candidate;
    }
  }
  throw new IllegalStateException("Failed to create directory within "
      + TEMP_DIR_ATTEMPTS + " attempts (tried "
      + baseName + "0 to " + baseName + (TEMP_DIR_ATTEMPTS - 1) + ')');
}
/**
 * Creates an empty file or updates the last-modified timestamp of an
 * existing one, in the same manner as the unix command of the same name.
 *
 * @param file the file to create or update
 * @throws IOException if an I/O error occurs
 */
public static void touch(File file) throws IOException {
  checkNotNull(file);
  // createNewFile() returns false when the file already exists; in that
  // case fall back to bumping its modification time.
  boolean created = file.createNewFile();
  if (!created && !file.setLastModified(System.currentTimeMillis())) {
    throw new IOException("Unable to update modification time of " + file);
  }
}
/**
 * Creates any necessary but nonexistent parent directories of the specified
 * file. Note that if this operation fails it may have succeeded in creating
 * some (but not all) of the necessary parent directories.
 *
 * @throws IOException if an I/O error occurs, or if any necessary but
 *     nonexistent parent directories of the specified file could not be
 *     created.
 * @since 4.0
 */
public static void createParentDirs(File file) throws IOException {
  checkNotNull(file);
  File parentDir = file.getCanonicalFile().getParentFile();
  if (parentDir == null) {
    /*
     * The given directory is a filesystem root. All zero of its ancestors
     * exist. This doesn't mean that the root itself exists -- consider x:\ on
     * a Windows machine without such a drive -- or even that the caller can
     * create it, but this method makes no such guarantees even for non-root
     * files.
     */
    return;
  }
  // mkdirs() may fail because the directories already exist, which is fine;
  // verify the end state instead of the return value.
  parentDir.mkdirs();
  if (!parentDir.isDirectory()) {
    throw new IOException("Unable to create parent directories of " + file);
  }
}
/**
 * Moves a file from one path to another. This method can rename a file
 * and/or move it to a different directory. In either case {@code to} must
 * be the target path for the file itself; not just the new name for the
 * file or the path to the new parent directory.
 *
 * @param from the source file
 * @param to the destination file
 * @throws IOException if an I/O error occurs
 * @throws IllegalArgumentException if {@code from.equals(to)}
 */
public static void move(File from, File to) throws IOException {
  checkNotNull(from);
  checkNotNull(to);
  checkArgument(!from.equals(to),
      "Source %s and destination %s must be different", from, to);

  // Fast path: an atomic rename within the same filesystem.
  if (from.renameTo(to)) {
    return;
  }

  // Fallback (e.g. across filesystems): copy, then delete the source.
  copy(from, to);
  if (from.delete()) {
    return;
  }
  // Source couldn't be removed; undo the copy so we don't leave duplicates.
  if (!to.delete()) {
    throw new IOException("Unable to delete " + to);
  }
  throw new IOException("Unable to delete " + from);
}
/**
 * Reads the first line from a file. The line does not include
 * line-termination characters, but does include other leading and
 * trailing whitespace.
 *
 * @param file the file to read from
 * @param charset the charset used to decode the input stream; see {@link
 *     Charsets} for helpful predefined constants
 * @return the first line, or null if the file is empty
 * @throws IOException if an I/O error occurs
 */
public static String readFirstLine(File file, Charset charset)
    throws IOException {
  CharSource source = asCharSource(file, charset);
  return source.readFirstLine();
}
/**
 * Reads all of the lines from a file. The lines do not include
 * line-termination characters, but do include other leading and
 * trailing whitespace.
 *
 * <p>This method returns a mutable {@code List}. For an
 * {@code ImmutableList}, use
 * {@code Files.asCharSource(file, charset).readLines()}.
 *
 * @param file the file to read from
 * @param charset the charset used to decode the input stream; see {@link
 *     Charsets} for helpful predefined constants
 * @return a mutable {@link List} containing all the lines
 * @throws IOException if an I/O error occurs
 */
public static List<String> readLines(File file, Charset charset)
    throws IOException {
  // Not implemented as asCharSource(file, charset).readLines() because that
  // returns an ImmutableList, and this method promises a mutable List.
  return readLines(file, charset, new LineProcessor<List<String>>() {
    final List<String> lines = Lists.newArrayList();

    @Override
    public boolean processLine(String line) {
      lines.add(line);
      return true; // never stop early; collect every line
    }

    @Override
    public List<String> getResult() {
      return lines;
    }
  });
}
/**
* Streams lines from a {@link File}, stopping when our callback returns
* false, or we have read all of the lines.
*
* @param file the file to read from
* @param charset the charset used to decode the input stream; see {@link
* Charsets} for helpful predefined constants
* @param callback the {@link LineProcessor} to use to handle the lines
* @return the output of processing the lines
* @throws IOException if an I/O error occurs
*/
public static <T> T readLines(File file, Charset charset,
    LineProcessor<T> callback) throws IOException {
  // Streams the file line by line; stops early if the callback returns false.
  return CharStreams.readLines(newReaderSupplier(file, charset), callback);
}
  /**
   * Process the bytes of a file.
   *
   * <p>(If this seems too complicated, maybe you're looking for
   * {@link #toByteArray}.)
   *
   * @param file the file to read
   * @param processor the object to which the bytes of the file are passed.
   * @return the result of the byte processor
   * @throws IOException if an I/O error occurs
   */
  public static <T> T readBytes(File file, ByteProcessor<T> processor)
      throws IOException {
    // Wraps the file in an input-stream supplier and delegates the actual
    // read loop to ByteStreams.readBytes.
    return ByteStreams.readBytes(newInputStreamSupplier(file), processor);
  }
  /**
   * Computes the hash code of the {@code file} using {@code hashFunction}.
   *
   * @param file the file to read
   * @param hashFunction the hash function to use to hash the data
   * @return the {@link HashCode} of all of the bytes in the file
   * @throws IOException if an I/O error occurs
   * @since 12.0
   */
  public static HashCode hash(File file, HashFunction hashFunction)
      throws IOException {
    // Delegates to ByteSource.hash, which hashes the full file contents.
    return asByteSource(file).hash(hashFunction);
  }
  /**
   * Fully maps a file read-only in to memory as per
   * {@link FileChannel#map(java.nio.channels.FileChannel.MapMode, long, long)}.
   *
   * <p>Files are mapped from offset 0 to its length.
   *
   * <p>This only works for files <= {@link Integer#MAX_VALUE} bytes.
   *
   * @param file the file to map
   * @return a read-only buffer reflecting {@code file}
   * @throws FileNotFoundException if the {@code file} does not exist
   * @throws IOException if an I/O error occurs
   *
   * @see FileChannel#map(MapMode, long, long)
   * @since 2.0
   */
  public static MappedByteBuffer map(File file) throws IOException {
    checkNotNull(file);
    // Delegates to the MapMode overload, which verifies that the file exists.
    return map(file, MapMode.READ_ONLY);
  }
/**
* Fully maps a file in to memory as per
* {@link FileChannel#map(java.nio.channels.FileChannel.MapMode, long, long)}
* using the requested {@link MapMode}.
*
* <p>Files are mapped from offset 0 to its length.
*
* <p>This only works for files <= {@link Integer#MAX_VALUE} bytes.
*
* @param file the file to map
* @param mode the mode to use when mapping {@code file}
* @return a buffer reflecting {@code file}
* @throws FileNotFoundException if the {@code file} does not exist
* @throws IOException if an I/O error occurs
*
* @see FileChannel#map(MapMode, long, long)
* @since 2.0
*/
public static MappedByteBuffer map(File file, MapMode mode)
throws IOException {
checkNotNull(file);
checkNotNull(mode);
if (!file.exists()) {
throw new FileNotFoundException(file.toString());
}
return map(file, mode, file.length());
}
/**
* Maps a file in to memory as per
* {@link FileChannel#map(java.nio.channels.FileChannel.MapMode, long, long)}
* using the requested {@link MapMode}.
*
* <p>Files are mapped from offset 0 to {@code size}.
*
* <p>If the mode is {@link MapMode#READ_WRITE} and the file does not exist,
* it will be created with the requested {@code size}. Thus this method is
* useful for creating memory mapped files which do not yet exist.
*
* <p>This only works for files <= {@link Integer#MAX_VALUE} bytes.
*
* @param file the file to map
* @param mode the mode to use when mapping {@code file}
* @return a buffer reflecting {@code file}
* @throws IOException if an I/O error occurs
*
* @see FileChannel#map(MapMode, long, long)
* @since 2.0
*/
public static MappedByteBuffer map(File file, MapMode mode, long size)
throws FileNotFoundException, IOException {
checkNotNull(file);
checkNotNull(mode);
Closer closer = Closer.create();
try {
RandomAccessFile raf = closer.register(
new RandomAccessFile(file, mode == MapMode.READ_ONLY ? "r" : "rw"));
return map(raf, mode, size);
} catch (Throwable e) {
throw closer.rethrow(e);
} finally {
closer.close();
}
}
private static MappedByteBuffer map(RandomAccessFile raf, MapMode mode,
long size) throws IOException {
Closer closer = Closer.create();
try {
FileChannel channel = closer.register(raf.getChannel());
return channel.map(mode, 0, size);
} catch (Throwable e) {
throw closer.rethrow(e);
} finally {
closer.close();
}
}
/**
* Returns the lexically cleaned form of the path name, <i>usually</i> (but
* not always) equivalent to the original. The following heuristics are used:
*
* <ul>
* <li>empty string becomes .
* <li>. stays as .
* <li>fold out ./
* <li>fold out ../ when possible
* <li>collapse multiple slashes
* <li>delete trailing slashes (unless the path is just "/")
* </ul>
*
* <p>These heuristics do not always match the behavior of the filesystem. In
* particular, consider the path {@code a/../b}, which {@code simplifyPath}
* will change to {@code b}. If {@code a} is a symlink to {@code x}, {@code
* a/../b} may refer to a sibling of {@code x}, rather than the sibling of
* {@code a} referred to by {@code b}.
*
* @since 11.0
*/
public static String simplifyPath(String pathname) {
checkNotNull(pathname);
if (pathname.length() == 0) {
return ".";
}
// split the path apart
Iterable<String> components =
Splitter.on('/').omitEmptyStrings().split(pathname);
List<String> path = new ArrayList<String>();
// resolve ., .., and //
for (String component : components) {
if (component.equals(".")) {
continue;
} else if (component.equals("..")) {
if (path.size() > 0 && !path.get(path.size() - 1).equals("..")) {
path.remove(path.size() - 1);
} else {
path.add("..");
}
} else {
path.add(component);
}
}
// put it back together
String result = Joiner.on('/').join(path);
if (pathname.charAt(0) == '/') {
result = "/" + result;
}
while (result.startsWith("/../")) {
result = result.substring(3);
}
if (result.equals("/..")) {
result = "/";
} else if ("".equals(result)) {
result = ".";
}
return result;
}
/**
* Returns the <a href="http://en.wikipedia.org/wiki/Filename_extension">file
* extension</a> for the given file name, or the empty string if the file has
* no extension. The result does not include the '{@code .}'.
*
* @since 11.0
*/
public static String getFileExtension(String fullName) {
checkNotNull(fullName);
String fileName = new File(fullName).getName();
int dotIndex = fileName.lastIndexOf('.');
return (dotIndex == -1) ? "" : fileName.substring(dotIndex + 1);
}
/**
* Returns the file name without its
* <a href="http://en.wikipedia.org/wiki/Filename_extension">file extension</a> or path. This is
* similar to the {@code basename} unix command. The result does not include the '{@code .}'.
*
* @param file The name of the file to trim the extension from. This can be either a fully
* qualified file name (including a path) or just a file name.
* @return The file name without its path or extension.
* @since 14.0
*/
public static String getNameWithoutExtension(String file) {
checkNotNull(file);
String fileName = new File(file).getName();
int dotIndex = fileName.lastIndexOf('.');
return (dotIndex == -1) ? fileName : fileName.substring(0, dotIndex);
}
  /**
   * Returns a {@link TreeTraverser} instance for {@link File} trees.
   *
   * <p><b>Warning:</b> {@code File} provides no support for symbolic links, and as such there is no
   * way to ensure that a symbolic link to a directory is not followed when traversing the tree.
   * In this case, iterables created by this traverser could contain files that are outside of the
   * given directory or even be infinite if there is a symbolic link loop.
   *
   * @since 15.0
   */
  public static TreeTraverser<File> fileTreeTraverser() {
    // Shared singleton (defined below); the traverser holds no state.
    return FILE_TREE_TRAVERSER;
  }
private static final TreeTraverser<File> FILE_TREE_TRAVERSER = new TreeTraverser<File>() {
@Override
public Iterable<File> children(File file) {
// check isDirectory() just because it may be faster than listFiles() on a non-directory
if (file.isDirectory()) {
File[] files = file.listFiles();
if (files != null) {
return Collections.unmodifiableList(Arrays.asList(files));
}
}
return Collections.emptyList();
}
@Override
public String toString() {
return "Files.fileTreeTraverser()";
}
};
  /**
   * Returns a predicate that returns the result of {@link File#isDirectory} on input files.
   *
   * @since 15.0
   */
  public static Predicate<File> isDirectory() {
    // Shared stateless enum singleton; see FilePredicate below.
    return FilePredicate.IS_DIRECTORY;
  }
  /**
   * Returns a predicate that returns the result of {@link File#isFile} on input files.
   *
   * @since 15.0
   */
  public static Predicate<File> isFile() {
    // Shared stateless enum singleton; see FilePredicate below.
    return FilePredicate.IS_FILE;
  }
  // Predicate implementations backing isDirectory() and isFile(). Each enum
  // constant overrides toString() so failed Predicate assertions read well.
  private enum FilePredicate implements Predicate<File> {
    IS_DIRECTORY {
      @Override
      public boolean apply(File file) {
        return file.isDirectory();
      }
      @Override
      public String toString() {
        return "Files.isDirectory()";
      }
    },
    IS_FILE {
      @Override
      public boolean apply(File file) {
        return file.isFile();
      }
      @Override
      public String toString() {
        return "Files.isFile()";
      }
    };
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/Files.java | Java | asf20 | 35,419 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Throwables;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.logging.Level;
import javax.annotation.Nullable;
/**
* A {@link Closeable} that collects {@code Closeable} resources and closes them all when it is
* {@linkplain #close closed}. This is intended to approximately emulate the behavior of Java 7's
* <a href="http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html">
* try-with-resources</a> statement in JDK6-compatible code. Running on Java 7, code using this
* should be approximately equivalent in behavior to the same code written with try-with-resources.
* Running on Java 6, exceptions that cannot be thrown must be logged rather than being added to the
* thrown exception as a suppressed exception.
*
* <p>This class is intended to be used in the following pattern:
*
* <pre> {@code
* Closer closer = Closer.create();
* try {
* InputStream in = closer.register(openInputStream());
* OutputStream out = closer.register(openOutputStream());
* // do stuff
* } catch (Throwable e) {
* // ensure that any checked exception types other than IOException that could be thrown are
* // provided here, e.g. throw closer.rethrow(e, CheckedException.class);
* throw closer.rethrow(e);
* } finally {
* closer.close();
* }}</pre>
*
* <p>Note that this try-catch-finally block is not equivalent to a try-catch-finally block using
* try-with-resources. To get the equivalent of that, you must wrap the above code in <i>another</i>
* try block in order to catch any exception that may be thrown (including from the call to
* {@code close()}).
*
* <p>This pattern ensures the following:
*
* <ul>
* <li>Each {@code Closeable} resource that is successfully registered will be closed later.</li>
* <li>If a {@code Throwable} is thrown in the try block, no exceptions that occur when attempting
* to close resources will be thrown from the finally block. The throwable from the try block will
* be thrown.</li>
* <li>If no exceptions or errors were thrown in the try block, the <i>first</i> exception thrown
* by an attempt to close a resource will be thrown.</li>
* <li>Any exception caught when attempting to close a resource that is <i>not</i> thrown
* (because another exception is already being thrown) is <i>suppressed</i>.</li>
* </ul>
*
* <p>An exception that is suppressed is not thrown. The method of suppression used depends on the
* version of Java the code is running on:
*
* <ul>
* <li><b>Java 7+:</b> Exceptions are suppressed by adding them to the exception that <i>will</i>
* be thrown using {@code Throwable.addSuppressed(Throwable)}.</li>
* <li><b>Java 6:</b> Exceptions are suppressed by logging them instead.</li>
* </ul>
*
* @author Colin Decker
* @since 14.0
*/
// Coffee's for {@link Closer closers} only.
@Beta
public final class Closer implements Closeable {
  /**
   * The suppressor implementation to use for the current Java version.
   */
  private static final Suppressor SUPPRESSOR = SuppressingSuppressor.isAvailable()
      ? SuppressingSuppressor.INSTANCE
      : LoggingSuppressor.INSTANCE;
  /**
   * Creates a new {@link Closer}.
   */
  public static Closer create() {
    return new Closer(SUPPRESSOR);
  }
  @VisibleForTesting final Suppressor suppressor;
  // only need space for 2 elements in most cases, so try to use the smallest array possible
  private final Deque<Closeable> stack = new ArrayDeque<Closeable>(4);
  // The throwable the caller passed to rethrow(), if any. When set, close()
  // suppresses exceptions thrown while closing instead of propagating them.
  private Throwable thrown;
  @VisibleForTesting Closer(Suppressor suppressor) {
    this.suppressor = checkNotNull(suppressor); // checkNotNull to satisfy null tests
  }
  /**
   * Registers the given {@code closeable} to be closed when this {@code Closer} is
   * {@linkplain #close closed}.
   *
   * @return the given {@code closeable}
   */
  // close. this word no longer has any meaning to me.
  public <C extends Closeable> C register(@Nullable C closeable) {
    // A null closeable is silently ignored, so callers may register the result
    // of a method that can return null.
    if (closeable != null) {
      stack.addFirst(closeable);
    }
    return closeable;
  }
  /**
   * Stores the given throwable and rethrows it. It will be rethrown as is if it is an
   * {@code IOException}, {@code RuntimeException} or {@code Error}. Otherwise, it will be rethrown
   * wrapped in a {@code RuntimeException}. <b>Note:</b> Be sure to declare all of the checked
   * exception types your try block can throw when calling an overload of this method so as to avoid
   * losing the original exception type.
   *
   * <p>This method always throws, and as such should be called as
   * {@code throw closer.rethrow(e);} to ensure the compiler knows that it will throw.
   *
   * @return this method does not return; it always throws
   * @throws IOException when the given throwable is an IOException
   */
  public RuntimeException rethrow(Throwable e) throws IOException {
    checkNotNull(e);
    thrown = e;
    Throwables.propagateIfPossible(e, IOException.class);
    throw new RuntimeException(e);
  }
  /**
   * Stores the given throwable and rethrows it. It will be rethrown as is if it is an
   * {@code IOException}, {@code RuntimeException}, {@code Error} or a checked exception of the
   * given type. Otherwise, it will be rethrown wrapped in a {@code RuntimeException}. <b>Note:</b>
   * Be sure to declare all of the checked exception types your try block can throw when calling an
   * overload of this method so as to avoid losing the original exception type.
   *
   * <p>This method always throws, and as such should be called as
   * {@code throw closer.rethrow(e, ...);} to ensure the compiler knows that it will throw.
   *
   * @return this method does not return; it always throws
   * @throws IOException when the given throwable is an IOException
   * @throws X when the given throwable is of the declared type X
   */
  public <X extends Exception> RuntimeException rethrow(Throwable e,
      Class<X> declaredType) throws IOException, X {
    checkNotNull(e);
    thrown = e;
    Throwables.propagateIfPossible(e, IOException.class);
    Throwables.propagateIfPossible(e, declaredType);
    throw new RuntimeException(e);
  }
  /**
   * Stores the given throwable and rethrows it. It will be rethrown as is if it is an
   * {@code IOException}, {@code RuntimeException}, {@code Error} or a checked exception of either
   * of the given types. Otherwise, it will be rethrown wrapped in a {@code RuntimeException}.
   * <b>Note:</b> Be sure to declare all of the checked exception types your try block can throw
   * when calling an overload of this method so as to avoid losing the original exception type.
   *
   * <p>This method always throws, and as such should be called as
   * {@code throw closer.rethrow(e, ...);} to ensure the compiler knows that it will throw.
   *
   * @return this method does not return; it always throws
   * @throws IOException when the given throwable is an IOException
   * @throws X1 when the given throwable is of the declared type X1
   * @throws X2 when the given throwable is of the declared type X2
   */
  public <X1 extends Exception, X2 extends Exception> RuntimeException rethrow(
      Throwable e, Class<X1> declaredType1, Class<X2> declaredType2) throws IOException, X1, X2 {
    checkNotNull(e);
    thrown = e;
    Throwables.propagateIfPossible(e, IOException.class);
    Throwables.propagateIfPossible(e, declaredType1, declaredType2);
    throw new RuntimeException(e);
  }
  /**
   * Closes all {@code Closeable} instances that have been added to this {@code Closer}. If an
   * exception was thrown in the try block and passed to one of the {@code exceptionThrown} methods,
   * any exceptions thrown when attempting to close a closeable will be suppressed. Otherwise, the
   * <i>first</i> exception to be thrown from an attempt to close a closeable will be thrown and any
   * additional exceptions that are thrown after that will be suppressed.
   */
  @Override
  public void close() throws IOException {
    // Non-null iff the caller's try block already has an exception in flight
    // (it was passed to rethrow()); that exception takes priority.
    Throwable throwable = thrown;
    // close closeables in LIFO order
    while (!stack.isEmpty()) {
      Closeable closeable = stack.removeFirst();
      try {
        closeable.close();
      } catch (Throwable e) {
        if (throwable == null) {
          throwable = e;
        } else {
          suppressor.suppress(closeable, throwable, e);
        }
      }
    }
    // Only propagate a close() failure when no exception came from the try
    // block; otherwise that original exception is already propagating.
    if (thrown == null && throwable != null) {
      Throwables.propagateIfPossible(throwable, IOException.class);
      throw new AssertionError(throwable); // not possible
    }
  }
  /**
   * Suppression strategy interface.
   */
  @VisibleForTesting interface Suppressor {
    /**
     * Suppresses the given exception ({@code suppressed}) which was thrown when attempting to close
     * the given closeable. {@code thrown} is the exception that is actually being thrown from the
     * method. Implementations of this method should not throw under any circumstances.
     */
    void suppress(Closeable closeable, Throwable thrown, Throwable suppressed);
  }
  /**
   * Suppresses exceptions by logging them.
   */
  @VisibleForTesting static final class LoggingSuppressor implements Suppressor {
    static final LoggingSuppressor INSTANCE = new LoggingSuppressor();
    @Override
    public void suppress(Closeable closeable, Throwable thrown, Throwable suppressed) {
      // log to the same place as Closeables
      Closeables.logger.log(Level.WARNING,
          "Suppressing exception thrown when closing " + closeable, suppressed);
    }
  }
  /**
   * Suppresses exceptions by adding them to the exception that will be thrown using JDK7's
   * addSuppressed(Throwable) mechanism.
   */
  @VisibleForTesting static final class SuppressingSuppressor implements Suppressor {
    static final SuppressingSuppressor INSTANCE = new SuppressingSuppressor();
    static boolean isAvailable() {
      return addSuppressed != null;
    }
    // Looked up reflectively so this class still loads on Java 6.
    static final Method addSuppressed = getAddSuppressed();
    // Returns null when Throwable.addSuppressed does not exist (pre-Java 7).
    private static Method getAddSuppressed() {
      try {
        return Throwable.class.getMethod("addSuppressed", Throwable.class);
      } catch (Throwable e) {
        return null;
      }
    }
    @Override
    public void suppress(Closeable closeable, Throwable thrown, Throwable suppressed) {
      // ensure no exceptions from addSuppressed
      if (thrown == suppressed) {
        return;
      }
      try {
        addSuppressed.invoke(thrown, suppressed);
      } catch (Throwable e) {
        // if, somehow, IllegalAccessException or another exception is thrown, fall back to logging
        LoggingSuppressor.INSTANCE.suppress(closeable, thrown, suppressed);
      }
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/io/Closer.java | Java | asf20 | 11,607 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Escapers for XML.
 *
* <p>This package is a part of the open-source
* <a href="http://guava-libraries.googlecode.com">Guava libraries</a>.
*/
@ParametersAreNonnullByDefault
package com.google.common.xml;
import javax.annotation.ParametersAreNonnullByDefault;
| zzhhhhh-aw4rwer | guava/src/com/google/common/xml/package-info.java | Java | asf20 | 878 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.xml;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.escape.Escaper;
import com.google.common.escape.Escapers;
/**
* {@code Escaper} instances suitable for strings to be included in XML
* attribute values and elements' text contents. When possible, avoid manual
* escaping by using templating systems and high-level APIs that provide
* autoescaping. For example, consider <a href="http://www.xom.nu/">XOM</a> or
* <a href="http://www.jdom.org/">JDOM</a>.
*
* <p><b>Note</b>: Currently the escapers provided by this class do not escape
* any characters outside the ASCII character range. Unlike HTML escaping the
* XML escapers will not escape non-ASCII characters to their numeric entity
* replacements. These XML escapers provide the minimal level of escaping to
* ensure that the output can be safely included in a Unicode XML document.
*
*
* <p>For details on the behavior of the escapers in this class, see sections
* <a href="http://www.w3.org/TR/2008/REC-xml-20081126/#charsets">2.2</a> and
* <a href="http://www.w3.org/TR/2008/REC-xml-20081126/#syntax">2.4</a> of the
* XML specification.
*
* @author Alex Matevossian
* @author David Beaumont
* @since 15.0
*/
@Beta
@GwtCompatible
public class XmlEscapers {
private XmlEscapers() {}
private static final char MIN_ASCII_CONTROL_CHAR = 0x00;
private static final char MAX_ASCII_CONTROL_CHAR = 0x1F;
// For each xxxEscaper() method, please add links to external reference pages
// that are considered authoritative for the behavior of that escaper.
// TODO(user): When this escaper strips \uFFFE & \uFFFF, add this doc.
// <p>This escaper also silently removes non-whitespace control characters and
// the character values {@code 0xFFFE} and {@code 0xFFFF} which are not
// permitted in XML. For more detail see section
// <a href="http://www.w3.org/TR/2008/REC-xml-20081126/#charsets">2.2</a> of
// the XML specification.
/**
* Returns an {@link Escaper} instance that escapes special characters in a
* string so it can safely be included in an XML document as element content.
* See section
* <a href="http://www.w3.org/TR/2008/REC-xml-20081126/#syntax">2.4</a> of the
* XML specification.
*
* <p><b>Note</b>: Double and single quotes are not escaped, so it is <b>not
* safe</b> to use this escaper to escape attribute values. Use
* {@link #xmlContentEscaper} if the output can appear in element content or
* {@link #xmlAttributeEscaper} in attribute values.
*
* <p>This escaper does not escape non-ASCII characters to their numeric
* character references (NCR). Any non-ASCII characters appearing in the input
* will be preserved in the output. Specifically "\r" (carriage return) is
* preserved in the output, which may result in it being silently converted to
* "\n" when the XML is parsed.
*
* <p>This escaper does not treat surrogate pairs specially and does not
* perform Unicode validation on its input.
*/
public static Escaper xmlContentEscaper() {
return XML_CONTENT_ESCAPER;
}
/**
* Returns an {@link Escaper} instance that escapes special characters in a
* string so it can safely be included in XML document as an attribute value.
* See section
* <a href="http://www.w3.org/TR/2008/REC-xml-20081126/#AVNormalize">3.3.3</a>
* of the XML specification.
*
* <p>This escaper does not escape non-ASCII characters to their numeric
* character references (NCR). However, horizontal tab {@code '\t'}, line feed
* {@code '\n'} and carriage return {@code '\r'} are escaped to a
* corresponding NCR {@code "	"}, {@code "
"}, and {@code "
"}
* respectively. Any other non-ASCII characters appearing in the input will
* be preserved in the output.
*
* <p>This escaper does not treat surrogate pairs specially and does not
* perform Unicode validation on its input.
*/
public static Escaper xmlAttributeEscaper() {
return XML_ATTRIBUTE_ESCAPER;
}
private static final Escaper XML_ESCAPER;
private static final Escaper XML_CONTENT_ESCAPER;
private static final Escaper XML_ATTRIBUTE_ESCAPER;
static {
Escapers.Builder builder = Escapers.builder();
// The char values \uFFFE and \uFFFF are explicitly not allowed in XML
// (Unicode code points above \uFFFF are represented via surrogate pairs
// which means they are treated as pairs of safe characters).
// TODO(user): When refactoring done change the \uFFFF below to \uFFFD
builder.setSafeRange(Character.MIN_VALUE, '\uFFFF');
// Unsafe characters are removed.
builder.setUnsafeReplacement("");
// Except for '\n', '\t' and '\r' we remove all ASCII control characters.
// An alternative to this would be to make a map that simply replaces the
// allowed ASCII whitespace characters with themselves and set the minimum
// safe character to 0x20. However this would slow down the escaping of
// simple strings that contain '\t','\n' or '\r'.
for (char c = MIN_ASCII_CONTROL_CHAR; c <= MAX_ASCII_CONTROL_CHAR; c++) {
if (c != '\t' && c != '\n' && c != '\r') {
builder.addEscape(c, "");
}
}
// Build the content escaper first and then add quote escaping for the
// general escaper.
builder.addEscape('&', "&");
builder.addEscape('<', "<");
builder.addEscape('>', ">");
XML_CONTENT_ESCAPER = builder.build();
builder.addEscape('\'', "'");
builder.addEscape('"', """);
XML_ESCAPER = builder.build();
builder.addEscape('\t', "	");
builder.addEscape('\n', "
");
builder.addEscape('\r', "
");
XML_ATTRIBUTE_ESCAPER = builder.build();
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/xml/XmlEscapers.java | Java | asf20 | 6,402 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import java.util.Map;
import javax.annotation.Nullable;
/**
* A {@link UnicodeEscaper} that uses an array to quickly look up replacement
* characters for a given code point. An additional safe range is provided that
* determines whether code points without specific replacements are to be
* considered safe and left unescaped or should be escaped in a general way.
*
* <p>A good example of usage of this class is for HTML escaping where the
* replacement array contains information about the named HTML entities
 * such as {@code &amp;} and {@code &quot;} while {@link #escapeUnsafe} is
* overridden to handle general escaping of the form {@code &#NNNNN;}.
*
* <p>The size of the data structure used by {@link ArrayBasedUnicodeEscaper} is
* proportional to the highest valued code point that requires escaping.
* For example a replacement map containing the single character
* '{@code \}{@code u1000}' will require approximately 16K of memory. If you
* need to create multiple escaper instances that have the same character
* replacement mapping consider using {@link ArrayBasedEscaperMap}.
*
* @author David Beaumont
* @since 15.0
*/
@Beta
@GwtCompatible
public abstract class ArrayBasedUnicodeEscaper extends UnicodeEscaper {
// The replacement array (see ArrayBasedEscaperMap).
private final char[][] replacements;
// The number of elements in the replacement array.
private final int replacementsLength;
// The first code point in the safe range.
private final int safeMin;
// The last code point in the safe range.
private final int safeMax;
// Cropped values used in the fast path range checks.
private final char safeMinChar;
private final char safeMaxChar;
  /**
   * Creates a new ArrayBasedUnicodeEscaper instance with the given replacement
   * map and specified safe range. If {@code safeMax < safeMin} then no code
   * points are considered safe.
   *
   * <p>If a code point has no mapped replacement then it is checked against the
   * safe range. If it lies outside that, then {@link #escapeUnsafe} is
   * called, otherwise no escaping is performed.
   *
   * @param replacementMap a map of characters to their escaped representations
   * @param safeMin the lowest character value in the safe range
   * @param safeMax the highest character value in the safe range
   * @param unsafeReplacement the default replacement for unsafe characters or
   *     null if no default replacement is required
   */
  protected ArrayBasedUnicodeEscaper(Map<Character, String> replacementMap,
      int safeMin, int safeMax, @Nullable String unsafeReplacement) {
    // Convert the map to the lookup-array form once, then delegate; use the
    // ArrayBasedEscaperMap constructor directly to share a mapping instead.
    this(ArrayBasedEscaperMap.create(replacementMap), safeMin, safeMax,
        unsafeReplacement);
  }
  /**
   * Creates a new ArrayBasedUnicodeEscaper instance with the given replacement
   * map and specified safe range. If {@code safeMax < safeMin} then no code
   * points are considered safe. This initializer is useful when explicit
   * instances of ArrayBasedEscaperMap are used to allow the sharing of large
   * replacement mappings.
   *
   * <p>If a code point has no mapped replacement then it is checked against the
   * safe range. If it lies outside that, then {@link #escapeUnsafe} is
   * called, otherwise no escaping is performed.
   *
   * @param escaperMap the map of replacements
   * @param safeMin the lowest character value in the safe range
   * @param safeMax the highest character value in the safe range
   * @param unsafeReplacement the default replacement for unsafe characters or
   *     null if no default replacement is required
   */
  protected ArrayBasedUnicodeEscaper(ArrayBasedEscaperMap escaperMap,
      int safeMin, int safeMax, @Nullable String unsafeReplacement) {
    checkNotNull(escaperMap); // GWT specific check (do not optimize)
    this.replacements = escaperMap.getReplacementArray();
    this.replacementsLength = replacements.length;
    if (safeMax < safeMin) {
      // If the safe range is empty, set the range limits to opposite extremes
      // to ensure the first test of either value will fail.
      safeMax = -1;
      safeMin = Integer.MAX_VALUE;
    }
    this.safeMin = safeMin;
    this.safeMax = safeMax;
    // This is a bit of a hack but lets us do quicker per-character checks in
    // the fast path code. The safe min/max values are very unlikely to extend
    // into the range of surrogate characters, but if they do we must not test
    // any values in that range. To see why, consider the case where:
    //   safeMin <= {hi,lo} <= safeMax
    // where {hi,lo} are characters forming a surrogate pair such that:
    //   codePointOf(hi, lo) > safeMax
    // which would result in the surrogate pair being (wrongly) considered safe.
    // If we clip the safe range used during the per-character tests so it is
    // below the values of characters in surrogate pairs, this cannot occur.
    // This approach does mean that we break out of the fast path code in cases
    // where we don't strictly need to, but this situation will almost never
    // occur in practice.
    if (safeMin >= Character.MIN_HIGH_SURROGATE) {
      // The safe range is empty or the all safe code points lie in or above the
      // surrogate range. Either way the character range is empty.
      this.safeMinChar = Character.MAX_VALUE;
      this.safeMaxChar = 0;
    } else {
      // The safe range is non empty and contains values below the surrogate
      // range but may extend above it. We may need to clip the maximum value.
      // (MIN_HIGH_SURROGATE - 1 is the last char below the surrogate range.)
      this.safeMinChar = (char) safeMin;
      this.safeMaxChar = (char) Math.min(safeMax,
          Character.MIN_HIGH_SURROGATE - 1);
    }
  }
/*
* This is overridden to improve performance. Rough benchmarking shows that
* this almost doubles the speed when processing strings that do not require
* any escaping.
*/
@Override
public final String escape(String s) {
checkNotNull(s); // GWT specific check (do not optimize)
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
if ((c < replacementsLength && replacements[c] != null) ||
c > safeMaxChar || c < safeMinChar) {
return escapeSlow(s, i);
}
}
return s;
}
/* Overridden for performance. */
@Override
protected final int nextEscapeIndex(CharSequence csq, int index, int end) {
while (index < end) {
char c = csq.charAt(index);
if ((c < replacementsLength && replacements[c] != null) ||
c > safeMaxChar || c < safeMinChar) {
break;
}
index++;
}
return index;
}
/**
* Escapes a single Unicode code point using the replacement array and safe
* range values. If the given character does not have an explicit replacement
* and lies outside the safe range then {@link #escapeUnsafe} is called.
*/
@Override
protected final char[] escape(int cp) {
if (cp < replacementsLength) {
char[] chars = replacements[cp];
if (chars != null) {
return chars;
}
}
if (cp >= safeMin && cp <= safeMax) {
return null;
}
return escapeUnsafe(cp);
}
  /**
   * Escapes a code point that has no direct explicit value in the replacement
   * array and lies outside the stated safe range. Subclasses should override
   * this method to provide generalized escaping for code points if required.
   * This method is invoked from {@link #escape(int)} as the fallback case.
   *
   * <p>Note that arrays returned by this method must not be modified once they
   * have been returned. However it is acceptable to return the same array
   * multiple times (even for different input characters).
   *
   * @param cp the Unicode code point to escape
   * @return the replacement characters, or {@code null} if no escaping was
   *     required
   */
  protected abstract char[] escapeUnsafe(int cp);
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/ArrayBasedUnicodeEscaper.java | Java | asf20 | 8,613 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Interfaces, utilities, and simple implementations of escapers and encoders. The primary type is
* {@link com.google.common.escape.Escaper}.
*
 * <p>Additional escaper implementations can be found in the applicable packages: {@link
* com.google.common.html.HtmlEscapers} in {@code com.google.common.html}, {@link
* com.google.common.xml.XmlEscapers} in {@code com.google.common.xml}, and {@link
* com.google.common.net.UrlEscapers} in {@code com.google.common.net}.
*
 * <p>This package is a part of the open-source
 * <a href="https://github.com/google/guava">Guava libraries</a>.
*/
@ParametersAreNonnullByDefault
package com.google.common.escape;
import javax.annotation.ParametersAreNonnullByDefault;
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/package-info.java | Java | asf20 | 1,327 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.base.Function;
/**
 * An object that converts literal text into a format safe for inclusion in a particular context
 * (such as an XML document). Typically (but not always), the inverse process of "unescaping" the
 * text is performed automatically by the relevant parser.
 *
 * <p>For example, an XML escaper would convert the literal string {@code "Foo<Bar>"} into {@code
 * "Foo&lt;Bar&gt;"} to prevent {@code "<Bar>"} from being confused with an XML tag. When the
 * resulting XML document is parsed, the parser API will return this text as the original literal
 * string {@code "Foo<Bar>"}.
 *
 * <p>An {@code Escaper} instance is required to be stateless, and safe when used concurrently by
 * multiple threads.
 *
 * <p>Because, in general, escaping operates on the code points of a string and not on its
 * individual {@code char} values, it is not safe to assume that {@code escape(s)} is equivalent to
 * {@code escape(s.substring(0, n)) + escape(s.substring(n))} for arbitrary {@code n}. This is
 * because of the possibility of splitting a surrogate pair. The only case in which it is safe to
 * escape strings and concatenate the results is if you can rule out this possibility, either by
 * splitting an existing long string into short strings adaptively around {@linkplain
 * Character#isHighSurrogate surrogate} {@linkplain Character#isLowSurrogate pairs}, or by starting
 * with short strings already known to be free of unpaired surrogates.
 *
 * <p>The two primary implementations of this interface are {@link CharEscaper} and {@link
 * UnicodeEscaper}. They are heavily optimized for performance and greatly simplify the task of
 * implementing new escapers. It is strongly recommended that when implementing a new escaper you
 * extend one of these classes. If you find that you are unable to achieve the desired behavior
 * using either of these classes, please contact the Java libraries team for advice.
 *
 * <p>Several popular escapers are defined as constants in classes like {@link
 * com.google.common.html.HtmlEscapers}, {@link com.google.common.xml.XmlEscapers}, and {@link
 * SourceCodeEscapers}. To create your own escapers, use {@link CharEscaperBuilder}, or extend
 * {@code CharEscaper} or {@code UnicodeEscaper}.
 *
 * @author David Beaumont
 * @since 15.0
 */
@Beta
@GwtCompatible
public abstract class Escaper {
  // TODO(user): evaluate custom implementations, considering package private constructor.
  /** Constructor for use by subclasses. */
  protected Escaper() {}
  /**
   * Returns the escaped form of a given literal string.
   *
   * <p>Note that this method may treat input characters differently depending on the specific
   * escaper implementation.
   *
   * <ul>
   * <li>{@link UnicodeEscaper} handles <a href="http://en.wikipedia.org/wiki/UTF-16">UTF-16</a>
   * correctly, including surrogate character pairs. If the input is badly formed the escaper
   * should throw {@link IllegalArgumentException}.
   * <li>{@link CharEscaper} handles Java characters independently and does not verify the input for
   * well formed characters. A {@code CharEscaper} should not be used in situations where input
   * is not guaranteed to be restricted to the Basic Multilingual Plane (BMP).
   * </ul>
   *
   * @param string the literal string to be escaped
   * @return the escaped form of {@code string}
   * @throws NullPointerException if {@code string} is null
   * @throws IllegalArgumentException if {@code string} contains badly formed UTF-16 or cannot be
   *     escaped for any other reason
   */
  public abstract String escape(String string);
  // Eagerly-created adapter so asFunction() can return the same (stateless)
  // instance on every call.
  private final Function<String, String> asFunction =
      new Function<String, String>() {
        @Override
        public String apply(String from) {
          return escape(from);
        }
      };
  /**
   * Returns a {@link Function} that invokes {@link #escape(String)} on this escaper.
   */
  public final Function<String, String> asFunction() {
    return asFunction;
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/Escaper.java | Java | asf20 | 4,769 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;
/**
 * Static utility methods pertaining to {@link Escaper} instances.
 *
 * @author Sven Mawson
 * @author David Beaumont
 * @since 15.0
 */
@Beta
@GwtCompatible
public final class Escapers {
  private Escapers() {}
  /**
   * Returns an {@link Escaper} that does no escaping, passing all character
   * data through unchanged.
   */
  public static Escaper nullEscaper() {
    return NULL_ESCAPER;
  }
  // An Escaper that efficiently performs no escaping.
  // Extending CharEscaper (instead of Escaper) makes Escapers.compose() easier.
  private static final Escaper NULL_ESCAPER = new CharEscaper() {
    @Override public String escape(String string) {
      return checkNotNull(string);
    }
    @Override protected char[] escape(char c) {
      // TODO: Fix tests not to call this directly and make it throw an error.
      return null;
    }
  };
  /**
   * Returns a builder for creating simple, fast escapers. A builder instance
   * can be reused and each escaper that is created will be a snapshot of the
   * current builder state. Builders are not thread safe.
   *
   * <p>The initial state of the builder is such that:
   * <ul>
   * <li>There are no replacement mappings<li>
   * <li>{@code safeMin == Character.MIN_VALUE}</li>
   * <li>{@code safeMax == Character.MAX_VALUE}</li>
   * <li>{@code unsafeReplacement == null}</li>
   * </ul>
   * <p>For performance reasons escapers created by this builder are not
   * Unicode aware and will not validate the well-formedness of their input.
   */
  public static Builder builder() {
    return new Builder();
  }
  /**
   * A builder for simple, fast escapers.
   *
   * <p>Typically an escaper needs to deal with the escaping of high valued
   * characters or code points. In these cases it is necessary to extend either
   * {@link ArrayBasedCharEscaper} or {@link ArrayBasedUnicodeEscaper} to
   * provide the desired behavior. However this builder is suitable for creating
   * escapers that replace a relatively small set of characters.
   *
   * @author David Beaumont
   * @since 15.0
   */
  @Beta
  public static final class Builder {
    private final Map<Character, String> replacementMap =
        new HashMap<Character, String>();
    private char safeMin = Character.MIN_VALUE;
    private char safeMax = Character.MAX_VALUE;
    private String unsafeReplacement = null;
    // The constructor is exposed via the builder() method above.
    private Builder() {}
    /**
     * Sets the safe range of characters for the escaper. Characters in this
     * range that have no explicit replacement are considered 'safe' and remain
     * unescaped in the output. If {@code safeMax < safeMin} then the safe range
     * is empty.
     *
     * @param safeMin the lowest 'safe' character
     * @param safeMax the highest 'safe' character
     * @return the builder instance
     */
    public Builder setSafeRange(char safeMin, char safeMax) {
      this.safeMin = safeMin;
      this.safeMax = safeMax;
      return this;
    }
    /**
     * Sets the replacement string for any characters outside the 'safe' range
     * that have no explicit replacement. If {@code unsafeReplacement} is
     * {@code null} then no replacement will occur, if it is {@code ""} then
     * the unsafe characters are removed from the output.
     *
     * @param unsafeReplacement the string to replace unsafe characters
     * @return the builder instance
     */
    public Builder setUnsafeReplacement(@Nullable String unsafeReplacement) {
      this.unsafeReplacement = unsafeReplacement;
      return this;
    }
    /**
     * Adds a replacement string for the given input character. The specified
     * character will be replaced by the given string whenever it occurs in the
     * input, irrespective of whether it lies inside or outside the 'safe'
     * range.
     *
     * @param c the character to be replaced
     * @param replacement the string to replace the given character
     * @return the builder instance
     * @throws NullPointerException if {@code replacement} is null
     */
    public Builder addEscape(char c, String replacement) {
      checkNotNull(replacement);
      // This can replace an existing character (the builder is re-usable).
      replacementMap.put(c, replacement);
      return this;
    }
    /**
     * Returns a new escaper based on the current state of the builder.
     */
    public Escaper build() {
      return new ArrayBasedCharEscaper(replacementMap, safeMin, safeMax) {
        // Snapshot the unsafe replacement at build() time, so later mutation
        // of the builder does not affect this escaper.
        private final char[] replacementChars =
            unsafeReplacement != null ? unsafeReplacement.toCharArray() : null;
        @Override protected char[] escapeUnsafe(char c) {
          return replacementChars;
        }
      };
    }
  }
  /**
   * Returns a {@link UnicodeEscaper} equivalent to the given escaper instance.
   * If the escaper is already a UnicodeEscaper then it is simply returned,
   * otherwise it is wrapped in a UnicodeEscaper.
   *
   * <p>When a {@link CharEscaper} escaper is wrapped by this method it acquires
   * extra behavior with respect to the well-formedness of Unicode character
   * sequences and will throw {@link IllegalArgumentException} when given bad
   * input.
   *
   * @param escaper the instance to be wrapped
   * @return a UnicodeEscaper with the same behavior as the given instance
   * @throws NullPointerException if escaper is null
   * @throws IllegalArgumentException if escaper is not a UnicodeEscaper or a
   *     CharEscaper
   */
  static UnicodeEscaper asUnicodeEscaper(Escaper escaper) {
    checkNotNull(escaper);
    if (escaper instanceof UnicodeEscaper) {
      return (UnicodeEscaper) escaper;
    } else if (escaper instanceof CharEscaper) {
      return wrap((CharEscaper) escaper);
    }
    // In practice this shouldn't happen because it would be very odd not to
    // extend either CharEscaper or UnicodeEscaper for non trivial cases.
    throw new IllegalArgumentException("Cannot create a UnicodeEscaper from: " +
        escaper.getClass().getName());
  }
  /**
   * Returns a string that would replace the given character in the specified
   * escaper, or {@code null} if no replacement should be made. This method is
   * intended for use in tests through the {@code EscaperAsserts} class;
   * production users of {@link CharEscaper} should limit themselves to its
   * public interface.
   *
   * @param c the character to escape if necessary
   * @return the replacement string, or {@code null} if no escaping was needed
   */
  public static String computeReplacement(CharEscaper escaper, char c) {
    return stringOrNull(escaper.escape(c));
  }
  /**
   * Returns a string that would replace the given character in the specified
   * escaper, or {@code null} if no replacement should be made. This method is
   * intended for use in tests through the {@code EscaperAsserts} class;
   * production users of {@link UnicodeEscaper} should limit themselves to its
   * public interface.
   *
   * @param cp the Unicode code point to escape if necessary
   * @return the replacement string, or {@code null} if no escaping was needed
   */
  public static String computeReplacement(UnicodeEscaper escaper, int cp) {
    return stringOrNull(escaper.escape(cp));
  }
  // Converts an escaper's char[] result into a String, preserving null.
  private static String stringOrNull(char[] in) {
    return (in == null) ? null : new String(in);
  }
  /** Private helper to wrap a CharEscaper as a UnicodeEscaper. */
  private static UnicodeEscaper wrap(final CharEscaper escaper) {
    return new UnicodeEscaper() {
      @Override protected char[] escape(int cp) {
        // If a code point maps to a single character, just escape that.
        if (cp < Character.MIN_SUPPLEMENTARY_CODE_POINT) {
          return escaper.escape((char) cp);
        }
        // Convert the code point to a surrogate pair and escape them both.
        // Note: This code path is horribly slow and typically allocates 4 new
        // char[] each time it is invoked. However this avoids any
        // synchronization issues and makes the escaper thread safe.
        char[] surrogateChars = new char[2];
        Character.toChars(cp, surrogateChars, 0);
        char[] hiChars = escaper.escape(surrogateChars[0]);
        char[] loChars = escaper.escape(surrogateChars[1]);
        // If either hiChars or loChars are non-null, the CharEscaper is trying
        // to escape the characters of a surrogate pair separately. This is
        // uncommon and applies only to escapers that assume UCS-2 rather than
        // UTF-16. See: http://en.wikipedia.org/wiki/UTF-16/UCS-2
        if (hiChars == null && loChars == null) {
          // We expect this to be the common code path for most escapers.
          return null;
        }
        // Combine the characters and/or escaped sequences into a single array.
        // Each half contributes either its replacement or the original char.
        int hiCount = hiChars != null ? hiChars.length : 1;
        int loCount = loChars != null ? loChars.length : 1;
        char[] output = new char[hiCount + loCount];
        if (hiChars != null) {
          System.arraycopy(hiChars, 0, output, 0, hiCount);
        } else {
          output[0] = surrogateChars[0];
        }
        if (loChars != null) {
          System.arraycopy(loChars, 0, output, hiCount, loCount);
        } else {
          output[hiCount] = surrogateChars[1];
        }
        return output;
      }
    };
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/Escapers.java | Java | asf20 | 10,404 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.VisibleForTesting;
import java.util.Collections;
import java.util.Map;
/**
 * An implementation-specific parameter class suitable for initializing
 * {@link ArrayBasedCharEscaper} or {@link ArrayBasedUnicodeEscaper} instances.
 * This class should be used when more than one escaper is created using the
 * same character replacement mapping to allow the underlying (implementation
 * specific) data structures to be shared.
 *
 * <p>The size of the data structure used by ArrayBasedCharEscaper and
 * ArrayBasedUnicodeEscaper is proportional to the highest valued character that
 * has a replacement. For example a replacement map containing the single
 * character '{@literal \}u1000' will require approximately 16K of memory.
 * As such sharing this data structure between escaper instances is the primary
 * goal of this class.
 *
 * @author David Beaumont
 * @since 15.0
 */
@Beta
@GwtCompatible
public final class ArrayBasedEscaperMap {
  /**
   * Returns a new ArrayBasedEscaperMap for creating ArrayBasedCharEscaper or
   * ArrayBasedUnicodeEscaper instances.
   *
   * @param replacements a map of characters to their escaped representations
   */
  public static ArrayBasedEscaperMap create(
      Map<Character, String> replacements) {
    return new ArrayBasedEscaperMap(createReplacementArray(replacements));
  }
  // The underlying replacement array we can share between multiple escaper
  // instances.
  private final char[][] replacementArray;
  private ArrayBasedEscaperMap(char[][] replacementArray) {
    this.replacementArray = replacementArray;
  }
  // Returns the non-null array of replacements for fast lookup.
  char[][] getReplacementArray() {
    return replacementArray;
  }
  // Creates a replacement array from the given map. The returned array is a
  // linear lookup table of replacement character sequences indexed by the
  // original character value.
  @VisibleForTesting
  static char[][] createReplacementArray(Map<Character, String> map) {
    checkNotNull(map); // GWT specific check (do not optimize)
    if (map.isEmpty()) {
      return EMPTY_REPLACEMENT_ARRAY;
    }
    char max = Collections.max(map.keySet());
    char[][] replacements = new char[max + 1][];
    // Iterate over entries (rather than keySet() + get()) to avoid a second
    // map lookup per mapping.
    for (Map.Entry<Character, String> entry : map.entrySet()) {
      replacements[entry.getKey()] = entry.getValue().toCharArray();
    }
    return replacements;
  }
  // Immutable empty array for when there are no replacements.
  private static final char[][] EMPTY_REPLACEMENT_ARRAY = new char[0][0];
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/ArrayBasedEscaperMap.java | Java | asf20 | 3,307 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
/**
* An object that converts literal text into a format safe for inclusion in a particular context
* (such as an XML document). Typically (but not always), the inverse process of "unescaping" the
* text is performed automatically by the relevant parser.
*
* <p>For example, an XML escaper would convert the literal string {@code "Foo<Bar>"} into {@code
* "Foo<Bar>"} to prevent {@code "<Bar>"} from being confused with an XML tag. When the
* resulting XML document is parsed, the parser API will return this text as the original literal
* string {@code "Foo<Bar>"}.
*
* <p>A {@code CharEscaper} instance is required to be stateless, and safe when used concurrently by
* multiple threads.
*
* <p>Several popular escapers are defined as constants in classes like {@link
* com.google.common.html.HtmlEscapers}, {@link com.google.common.xml.XmlEscapers}, and {@link
* SourceCodeEscapers}. To create your own escapers extend this class and implement the {@link
* #escape(char)} method.
*
* @author Sven Mawson
* @since 15.0
*/
@Beta
@GwtCompatible
public abstract class CharEscaper extends Escaper {
  /** Constructor for use by subclasses. */
  protected CharEscaper() {}
  /**
   * Returns the escaped form of a given literal string.
   *
   * @param string the literal string to be escaped
   * @return the escaped form of {@code string}
   * @throws NullPointerException if {@code string} is null
   */
  @Override public String escape(String string) {
    checkNotNull(string); // GWT specific check (do not optimize)
    // Inlineable fast-path loop which hands off to escapeSlow() only if needed
    int length = string.length();
    for (int index = 0; index < length; index++) {
      if (escape(string.charAt(index)) != null) {
        return escapeSlow(string, index);
      }
    }
    return string;
  }
  /**
   * Returns the escaped form of a given literal string, starting at the given index. This method is
   * called by the {@link #escape(String)} method when it discovers that escaping is required. It is
   * protected to allow subclasses to override the fastpath escaping function to inline their
   * escaping test. See {@link CharEscaperBuilder} for an example usage.
   *
   * <p>The destination buffer is obtained from {@link Platform#charBufferFromThreadLocal()}, a
   * per-thread scratch array; grown copies of it are local to this call and are never stored back.
   *
   * @param s the literal string to be escaped
   * @param index the index to start escaping from
   * @return the escaped form of {@code s}
   * @throws NullPointerException if {@code s} is null
   */
  protected final String escapeSlow(String s, int index) {
    int slen = s.length();
    // Get a destination buffer and setup some loop variables.
    char[] dest = Platform.charBufferFromThreadLocal();
    int destSize = dest.length;
    int destIndex = 0;
    int lastEscape = 0; // index just past the last character already handled
    // Loop through the rest of the string, replacing when needed into the
    // destination buffer, which gets grown as needed as well.
    for (; index < slen; index++) {
      // Get a replacement for the current character.
      char[] r = escape(s.charAt(index));
      // If no replacement is needed, just continue.
      if (r == null) continue;
      int rlen = r.length;
      int charsSkipped = index - lastEscape;
      // This is the size needed to add the replacement, not the full size
      // needed by the string. We only regrow when we absolutely must, and
      // when we do grow, grow enough to avoid excessive growing. Grow.
      int sizeNeeded = destIndex + charsSkipped + rlen;
      if (destSize < sizeNeeded) {
        destSize = sizeNeeded + DEST_PAD_MULTIPLIER * (slen - index);
        dest = growBuffer(dest, destIndex, destSize);
      }
      // If we have skipped any characters, we need to copy them now.
      if (charsSkipped > 0) {
        s.getChars(lastEscape, index, dest, destIndex);
        destIndex += charsSkipped;
      }
      // Copy the replacement string into the dest buffer as needed.
      if (rlen > 0) {
        System.arraycopy(r, 0, dest, destIndex, rlen);
        destIndex += rlen;
      }
      lastEscape = index + 1;
    }
    // Copy leftover characters if there are any.
    int charsLeft = slen - lastEscape;
    if (charsLeft > 0) {
      int sizeNeeded = destIndex + charsLeft;
      if (destSize < sizeNeeded) {
        // Regrow and copy, expensive! No padding as this is the final copy.
        dest = growBuffer(dest, destIndex, sizeNeeded);
      }
      s.getChars(lastEscape, slen, dest, destIndex);
      destIndex = sizeNeeded;
    }
    return new String(dest, 0, destIndex);
  }
  /**
   * Returns the escaped form of the given character, or {@code null} if this character does not
   * need to be escaped. If an empty array is returned, this effectively strips the input character
   * from the resulting text.
   *
   * <p>If the character does not need to be escaped, this method should return {@code null}, rather
   * than a one-character array containing the character itself. This enables the escaping algorithm
   * to perform more efficiently.
   *
   * <p>An escaper is expected to be able to deal with any {@code char} value, so this method should
   * not throw any exceptions.
   *
   * @param c the character to escape if necessary
   * @return the replacement characters, or {@code null} if no escaping was needed
   */
  protected abstract char[] escape(char c);
  /**
   * Helper method to grow the character buffer as needed, this only happens once in a while so it's
   * ok if it's in a method call. If the index passed in is 0 then no copying will be done.
   */
  private static char[] growBuffer(char[] dest, int index, int size) {
    char[] copy = new char[size];
    if (index > 0) {
      System.arraycopy(dest, 0, copy, 0, index);
    }
    return copy;
  }
  /**
   * The multiplier for padding to use when growing the escape buffer. Sizing the regrown buffer
   * as (needed + DEST_PAD_MULTIPLIER * remaining chars) amortizes the cost of repeated growth.
   */
  private static final int DEST_PAD_MULTIPLIER = 2;
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/CharEscaper.java | Java | asf20 | 6,633 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import com.google.common.annotations.GwtCompatible;
/**
* Methods factored out so that they can be emulated differently in GWT.
*
* @author Jesse Wilson
*/
@GwtCompatible(emulated = true)
final class Platform {
  /**
   * A per-thread scratch buffer so escaping does not allocate a fresh array
   * on every call. The starting size is 1024 characters; if a caller needs
   * more it grows its own copy and never stores it back here.
   */
  private static final ThreadLocal<char[]> SCRATCH_BUFFER =
      new ThreadLocal<char[]>() {
        @Override
        protected char[] initialValue() {
          return new char[1024];
        }
      };

  /** Returns the calling thread's 1024-char scratch array. */
  static char[] charBufferFromThreadLocal() {
    return SCRATCH_BUFFER.get();
  }

  private Platform() {}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/Platform.java | Java | asf20 | 1,418 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
/**
* An {@link Escaper} that converts literal text into a format safe for
* inclusion in a particular context (such as an XML document). Typically (but
* not always), the inverse process of "unescaping" the text is performed
* automatically by the relevant parser.
*
* <p>For example, an XML escaper would convert the literal string {@code
* "Foo<Bar>"} into {@code "Foo<Bar>"} to prevent {@code "<Bar>"} from
* being confused with an XML tag. When the resulting XML document is parsed,
* the parser API will return this text as the original literal string {@code
* "Foo<Bar>"}.
*
* <p><b>Note:</b> This class is similar to {@link CharEscaper} but with one
* very important difference. A CharEscaper can only process Java
* <a href="http://en.wikipedia.org/wiki/UTF-16">UTF16</a> characters in
* isolation and may not cope when it encounters surrogate pairs. This class
* facilitates the correct escaping of all Unicode characters.
*
* <p>As there are important reasons, including potential security issues, to
* handle Unicode correctly if you are considering implementing a new escaper
* you should favor using UnicodeEscaper wherever possible.
*
* <p>A {@code UnicodeEscaper} instance is required to be stateless, and safe
* when used concurrently by multiple threads.
*
* <p>Several popular escapers are defined as constants in classes like {@link
* com.google.common.html.HtmlEscapers}, {@link
* com.google.common.xml.XmlEscapers}, and {@link SourceCodeEscapers}. To create
* your own escapers extend this class and implement the {@link #escape(int)}
* method.
*
* @author David Beaumont
* @since 15.0
*/
@Beta
@GwtCompatible
public abstract class UnicodeEscaper extends Escaper {
  /** The amount of padding (in chars) to add whenever the escape buffer must grow. */
  private static final int DEST_PAD = 32;
  /** Constructor for use by subclasses. */
  protected UnicodeEscaper() {}
  /**
   * Returns the escaped form of the given Unicode code point, or {@code null}
   * if this code point does not need to be escaped. When called as part of an
   * escaping operation, the given code point is guaranteed to be in the range
   * {@code 0 <= cp <= Character.MAX_CODE_POINT}.
   *
   * <p>If an empty array is returned, this effectively strips the input
   * character from the resulting text.
   *
   * <p>If the character does not need to be escaped, this method should return
   * {@code null}, rather than an array containing the character representation
   * of the code point. This enables the escaping algorithm to perform more
   * efficiently.
   *
   * <p>If the implementation of this method cannot correctly handle a
   * particular code point then it should either throw an appropriate runtime
   * exception or return a suitable replacement character. It must never
   * silently discard invalid input as this may constitute a security risk.
   *
   * @param cp the Unicode code point to escape if necessary
   * @return the replacement characters, or {@code null} if no escaping was
   *     needed
   */
  protected abstract char[] escape(int cp);
/**
* Scans a sub-sequence of characters from a given {@link CharSequence},
* returning the index of the next character that requires escaping.
*
* <p><b>Note:</b> When implementing an escaper, it is a good idea to override
* this method for efficiency. The base class implementation determines
* successive Unicode code points and invokes {@link #escape(int)} for each of
* them. If the semantics of your escaper are such that code points in the
* supplementary range are either all escaped or all unescaped, this method
* can be implemented more efficiently using {@link CharSequence#charAt(int)}.
*
* <p>Note however that if your escaper does not escape characters in the
* supplementary range, you should either continue to validate the correctness
* of any surrogate characters encountered or provide a clear warning to users
* that your escaper does not validate its input.
*
* <p>See {@link com.google.common.net.PercentEscaper} for an example.
*
* @param csq a sequence of characters
* @param start the index of the first character to be scanned
* @param end the index immediately after the last character to be scanned
* @throws IllegalArgumentException if the scanned sub-sequence of {@code csq}
* contains invalid surrogate pairs
*/
protected int nextEscapeIndex(CharSequence csq, int start, int end) {
int index = start;
while (index < end) {
int cp = codePointAt(csq, index, end);
if (cp < 0 || escape(cp) != null) {
break;
}
index += Character.isSupplementaryCodePoint(cp) ? 2 : 1;
}
return index;
}
/**
* Returns the escaped form of a given literal string.
*
* <p>If you are escaping input in arbitrary successive chunks, then it is not
* generally safe to use this method. If an input string ends with an
* unmatched high surrogate character, then this method will throw
* {@link IllegalArgumentException}. You should ensure your input is valid <a
* href="http://en.wikipedia.org/wiki/UTF-16">UTF-16</a> before calling this
* method.
*
* <p><b>Note:</b> When implementing an escaper it is a good idea to override
* this method for efficiency by inlining the implementation of
* {@link #nextEscapeIndex(CharSequence, int, int)} directly. Doing this for
* {@link com.google.common.net.PercentEscaper} more than doubled the
* performance for unescaped strings (as measured by {@link
* CharEscapersBenchmark}).
*
* @param string the literal string to be escaped
* @return the escaped form of {@code string}
* @throws NullPointerException if {@code string} is null
* @throws IllegalArgumentException if invalid surrogate characters are
* encountered
*/
@Override
public String escape(String string) {
checkNotNull(string);
int end = string.length();
int index = nextEscapeIndex(string, 0, end);
return index == end ? string : escapeSlow(string, index);
}
/**
* Returns the escaped form of a given literal string, starting at the given
* index. This method is called by the {@link #escape(String)} method when it
* discovers that escaping is required. It is protected to allow subclasses
* to override the fastpath escaping function to inline their escaping test.
* See {@link CharEscaperBuilder} for an example usage.
*
* <p>This method is not reentrant and may only be invoked by the top level
* {@link #escape(String)} method.
*
* @param s the literal string to be escaped
* @param index the index to start escaping from
* @return the escaped form of {@code string}
* @throws NullPointerException if {@code string} is null
* @throws IllegalArgumentException if invalid surrogate characters are
* encountered
*/
protected final String escapeSlow(String s, int index) {
int end = s.length();
// Get a destination buffer and setup some loop variables.
char[] dest = Platform.charBufferFromThreadLocal();
int destIndex = 0;
int unescapedChunkStart = 0;
while (index < end) {
int cp = codePointAt(s, index, end);
if (cp < 0) {
throw new IllegalArgumentException(
"Trailing high surrogate at end of input");
}
// It is possible for this to return null because nextEscapeIndex() may
// (for performance reasons) yield some false positives but it must never
// give false negatives.
char[] escaped = escape(cp);
int nextIndex = index + (Character.isSupplementaryCodePoint(cp) ? 2 : 1);
if (escaped != null) {
int charsSkipped = index - unescapedChunkStart;
// This is the size needed to add the replacement, not the full
// size needed by the string. We only regrow when we absolutely must.
int sizeNeeded = destIndex + charsSkipped + escaped.length;
if (dest.length < sizeNeeded) {
int destLength = sizeNeeded + (end - index) + DEST_PAD;
dest = growBuffer(dest, destIndex, destLength);
}
// If we have skipped any characters, we need to copy them now.
if (charsSkipped > 0) {
s.getChars(unescapedChunkStart, index, dest, destIndex);
destIndex += charsSkipped;
}
if (escaped.length > 0) {
System.arraycopy(escaped, 0, dest, destIndex, escaped.length);
destIndex += escaped.length;
}
// If we dealt with an escaped character, reset the unescaped range.
unescapedChunkStart = nextIndex;
}
index = nextEscapeIndex(s, nextIndex, end);
}
// Process trailing unescaped characters - no need to account for escaped
// length or padding the allocation.
int charsSkipped = end - unescapedChunkStart;
if (charsSkipped > 0) {
int endIndex = destIndex + charsSkipped;
if (dest.length < endIndex) {
dest = growBuffer(dest, destIndex, endIndex);
}
s.getChars(unescapedChunkStart, end, dest, destIndex);
destIndex = endIndex;
}
return new String(dest, 0, destIndex);
}
/**
* Returns the Unicode code point of the character at the given index.
*
* <p>Unlike {@link Character#codePointAt(CharSequence, int)} or
* {@link String#codePointAt(int)} this method will never fail silently when
* encountering an invalid surrogate pair.
*
* <p>The behaviour of this method is as follows:
* <ol>
* <li>If {@code index >= end}, {@link IndexOutOfBoundsException} is thrown.
* <li><b>If the character at the specified index is not a surrogate, it is
* returned.</b>
* <li>If the first character was a high surrogate value, then an attempt is
* made to read the next character.
* <ol>
* <li><b>If the end of the sequence was reached, the negated value of
* the trailing high surrogate is returned.</b>
* <li><b>If the next character was a valid low surrogate, the code point
* value of the high/low surrogate pair is returned.</b>
* <li>If the next character was not a low surrogate value, then
* {@link IllegalArgumentException} is thrown.
* </ol>
* <li>If the first character was a low surrogate value,
* {@link IllegalArgumentException} is thrown.
* </ol>
*
* @param seq the sequence of characters from which to decode the code point
* @param index the index of the first character to decode
* @param end the index beyond the last valid character to decode
* @return the Unicode code point for the given index or the negated value of
* the trailing high surrogate character at the end of the sequence
*/
protected static int codePointAt(CharSequence seq, int index, int end) {
checkNotNull(seq);
if (index < end) {
char c1 = seq.charAt(index++);
if (c1 < Character.MIN_HIGH_SURROGATE ||
c1 > Character.MAX_LOW_SURROGATE) {
// Fast path (first test is probably all we need to do)
return c1;
} else if (c1 <= Character.MAX_HIGH_SURROGATE) {
// If the high surrogate was the last character, return its inverse
if (index == end) {
return -c1;
}
// Otherwise look for the low surrogate following it
char c2 = seq.charAt(index);
if (Character.isLowSurrogate(c2)) {
return Character.toCodePoint(c1, c2);
}
throw new IllegalArgumentException(
"Expected low surrogate but got char '" + c2 +
"' with value " + (int) c2 + " at index " + index +
" in '" + seq + "'");
} else {
throw new IllegalArgumentException(
"Unexpected low surrogate character '" + c1 +
"' with value " + (int) c1 + " at index " + (index - 1) +
" in '" + seq + "'");
}
}
throw new IndexOutOfBoundsException("Index exceeds specified range");
}
/**
* Helper method to grow the character buffer as needed, this only happens
* once in a while so it's ok if it's in a method call. If the index passed
* in is 0 then no copying will be done.
*/
private static char[] growBuffer(char[] dest, int index, int size) {
char[] copy = new char[size];
if (index > 0) {
System.arraycopy(dest, 0, copy, 0, index);
}
return copy;
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/UnicodeEscaper.java | Java | asf20 | 13,237 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import java.util.HashMap;
import java.util.Map;
/**
* Simple helper class to build a "sparse" array of objects based on the indexes that were added to
* it. The array will be from 0 to the maximum index given. All non-set indexes will contain null
* (so it's not really a sparse array, just a pseudo sparse array). The builder can also return a
* CharEscaper based on the generated array.
*
* @author Sven Mawson
* @since 15.0
*/
@Beta
@GwtCompatible
public final class CharEscaperBuilder {
  /**
   * Simple decorator that turns an array of replacement char[]s into a
   * CharEscaper; probing the replacement table directly makes both escape
   * methods very fast.
   */
  private static class CharArrayDecorator extends CharEscaper {
    private final char[][] replacements;
    private final int replaceLength;
    CharArrayDecorator(char[][] replacements) {
      this.replacements = replacements;
      this.replaceLength = replacements.length;
    }
    /*
     * Overridden for speed: the replacement table is tested inline here,
     * which saves one escape(char) call per character on the common
     * (nothing-to-escape) path.
     */
    @Override public String escape(String s) {
      int length = s.length();
      int i = 0;
      while (i < length) {
        char ch = s.charAt(i);
        if (ch < replacements.length && replacements[ch] != null) {
          // Found the first character needing a replacement; fall back to
          // the general escaping loop from this position.
          return escapeSlow(s, i);
        }
        i++;
      }
      return s;
    }
    @Override protected char[] escape(char c) {
      if (c < replaceLength) {
        return replacements[c];
      }
      return null;
    }
  }
  // Accumulated replacement mappings, keyed by the character to replace.
  private final Map<Character, String> map;
  // Highest character value added so far; -1 while the builder is empty.
  private int max = -1;
  /**
   * Construct a new sparse array builder.
   */
  public CharEscaperBuilder() {
    this.map = new HashMap<Character, String>();
  }
  /**
   * Add a new mapping from an index to an object to the escaping.
   */
  public CharEscaperBuilder addEscape(char c, String r) {
    map.put(c, checkNotNull(r));
    // Track the highest character seen; it determines the table size.
    max = Math.max(max, c);
    return this;
  }
  /**
   * Add multiple mappings at once for a particular index.
   */
  public CharEscaperBuilder addEscapes(char[] cs, String r) {
    checkNotNull(r);
    for (char ch : cs) {
      addEscape(ch, r);
    }
    return this;
  }
  /**
   * Convert this builder into an array of char[]s where the maximum index is
   * the value of the highest character that has been seen. The array will be
   * sparse in the sense that any unseen index will default to null.
   *
   * @return a "sparse" array that holds the replacement mappings.
   */
  public char[][] toArray() {
    char[][] table = new char[max + 1][];
    for (Map.Entry<Character, String> mapping : map.entrySet()) {
      table[mapping.getKey()] = mapping.getValue().toCharArray();
    }
    return table;
  }
  /**
   * Convert this builder into a char escaper which is just a decorator around
   * the underlying array of replacement char[]s.
   *
   * @return an escaper that escapes based on the underlying array.
   */
  public Escaper toEscaper() {
    return new CharArrayDecorator(toArray());
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/CharEscaperBuilder.java | Java | asf20 | 3,889 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.escape;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import java.util.Map;
/**
* A {@link CharEscaper} that uses an array to quickly look up replacement
* characters for a given {@code char} value. An additional safe range is
* provided that determines whether {@code char} values without specific
* replacements are to be considered safe and left unescaped or should be
* escaped in a general way.
*
* <p>A good example of usage of this class is for Java source code escaping
* where the replacement array contains information about special ASCII
* characters such as {@code \\t} and {@code \\n} while {@link #escapeUnsafe}
* is overridden to handle general escaping of the form {@code \\uxxxx}.
*
* <p>The size of the data structure used by {@link ArrayBasedCharEscaper} is
* proportional to the highest valued character that requires escaping.
* For example a replacement map containing the single character
* '{@code \}{@code u1000}' will require approximately 16K of memory. If you
* need to create multiple escaper instances that have the same character
* replacement mapping consider using {@link ArrayBasedEscaperMap}.
*
* @author Sven Mawson
* @author David Beaumont
* @since 15.0
*/
@Beta
@GwtCompatible
public abstract class ArrayBasedCharEscaper extends CharEscaper {
  // The replacement array (see ArrayBasedEscaperMap).
  private final char[][] replacements;
  // The number of elements in the replacement array.
  private final int replacementsLength;
  // The first character in the safe range.
  private final char safeMin;
  // The last character in the safe range.
  private final char safeMax;
  /**
   * Creates a new ArrayBasedCharEscaper instance with the given replacement map
   * and specified safe range. If {@code safeMax < safeMin} then no characters
   * are considered safe.
   *
   * <p>If a character has no mapped replacement then it is checked against the
   * safe range. If it lies outside that, then {@link #escapeUnsafe} is
   * called, otherwise no escaping is performed.
   *
   * @param replacementMap a map of characters to their escaped representations
   * @param safeMin the lowest character value in the safe range
   * @param safeMax the highest character value in the safe range
   */
  protected ArrayBasedCharEscaper(Map<Character, String> replacementMap,
      char safeMin, char safeMax) {
    // Delegate to the other constructor via a freshly built escaper map.
    this(ArrayBasedEscaperMap.create(replacementMap), safeMin, safeMax);
  }
  /**
   * Creates a new ArrayBasedCharEscaper instance with the given replacement map
   * and specified safe range. If {@code safeMax < safeMin} then no characters
   * are considered safe. This initializer is useful when explicit instances of
   * ArrayBasedEscaperMap are used to allow the sharing of large replacement
   * mappings.
   *
   * <p>If a character has no mapped replacement then it is checked against the
   * safe range. If it lies outside that, then {@link #escapeUnsafe} is
   * called, otherwise no escaping is performed.
   *
   * @param escaperMap the mapping of characters to be escaped
   * @param safeMin the lowest character value in the safe range
   * @param safeMax the highest character value in the safe range
   */
  protected ArrayBasedCharEscaper(ArrayBasedEscaperMap escaperMap,
      char safeMin, char safeMax) {
    checkNotNull(escaperMap); // GWT specific check (do not optimize)
    this.replacements = escaperMap.getReplacementArray();
    this.replacementsLength = replacements.length;
    if (safeMax < safeMin) {
      // An empty safe range: flip the bounds to opposite extremes so that
      // the membership test (almost certainly) fails on its first comparison.
      safeMin = Character.MAX_VALUE;
      safeMax = Character.MIN_VALUE;
    }
    this.safeMin = safeMin;
    this.safeMax = safeMax;
  }
  /*
   * This is overridden to improve performance. Rough benchmarking shows that
   * this almost doubles the speed when processing strings that do not require
   * any escaping.
   */
  @Override
  public final String escape(String s) {
    checkNotNull(s); // GWT specific check (do not optimize).
    int length = s.length();
    for (int i = 0; i < length; i++) {
      char c = s.charAt(i);
      boolean hasReplacement = c < replacementsLength && replacements[c] != null;
      if (hasReplacement || c > safeMax || c < safeMin) {
        // First character needing work found; switch to the slow path.
        return escapeSlow(s, i);
      }
    }
    return s;
  }
  /**
   * Escapes a single character using the replacement array and safe range
   * values. If the given character does not have an explicit replacement and
   * lies outside the safe range then {@link #escapeUnsafe} is called.
   */
  @Override protected final char[] escape(char c) {
    char[] replacement = null;
    if (c < replacementsLength) {
      replacement = replacements[c];
    }
    if (replacement != null) {
      return replacement;
    }
    // No explicit mapping: safe characters pass through, everything else is
    // handed to the subclass for generalized escaping.
    return (c >= safeMin && c <= safeMax) ? null : escapeUnsafe(c);
  }
  /**
   * Escapes a {@code char} value that has no direct explicit value in the
   * replacement array and lies outside the stated safe range. Subclasses should
   * override this method to provide generalized escaping for characters.
   *
   * <p>Note that arrays returned by this method must not be modified once they
   * have been returned. However it is acceptable to return the same array
   * multiple times (even for different input characters).
   *
   * @param c the character to escape
   * @return the replacement characters, or {@code null} if no escaping was
   *     required
   */
  // TODO(user,cpovirk): Rename this something better once refactoring done
  protected abstract char[] escapeUnsafe(char c);
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/escape/ArrayBasedCharEscaper.java | Java | asf20 | 6,351 |
/*
* Copyright (C) 2010 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Common annotation types. This package is a part of the open-source
* <a href="http://guava-libraries.googlecode.com">Guava libraries</a>.
*/
package com.google.common.annotations;
| zzhhhhh-aw4rwer | guava/src/com/google/common/annotations/package-info.java | Java | asf20 | 791 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* The presence of this annotation on a type indicates that the type may be
* used with the
* <a href="http://code.google.com/webtoolkit/">Google Web Toolkit</a> (GWT).
* When applied to a method, the return type of the method is GWT compatible.
* It's useful to indicate that an instance created by factory methods has a GWT
* serializable type. In the following example,
*
* <pre style="code">
* {@literal @}GwtCompatible
* class Lists {
* ...
* {@literal @}GwtCompatible(serializable = true)
* static <E> List<E> newArrayList(E... elements) {
* ...
* }
* }
* </pre>
* <p>The return value of {@code Lists.newArrayList(E[])} has GWT
* serializable type. It is also useful in specifying contracts of interface
* methods. In the following example,
*
* <pre style="code">
* {@literal @}GwtCompatible
* interface ListFactory {
* ...
* {@literal @}GwtCompatible(serializable = true)
* <E> List<E> newArrayList(E... elements);
* }
* </pre>
* <p>The {@code newArrayList(E[])} method of all implementations of {@code
* ListFactory} is expected to return a value with a GWT serializable type.
*
* <p>Note that a {@code GwtCompatible} type may have some {@link
* GwtIncompatible} methods.
*
* @author Charles Fry
* @author Hayward Chan
*/
// Retained in class files but not at runtime (RetentionPolicy.CLASS), and
// applicable to types and methods only.
@Retention(RetentionPolicy.CLASS)
@Target({ ElementType.TYPE, ElementType.METHOD })
@Documented
@GwtCompatible
public @interface GwtCompatible {
  /**
   * When {@code true}, the annotated type or the type of the method return
   * value is GWT serializable.
   *
   * @see <a href="http://code.google.com/webtoolkit/doc/latest/DevGuideServerCommunication.html#DevGuideSerializableTypes">
   *     Documentation about GWT serialization</a>
   */
  boolean serializable() default false;
  /**
   * When {@code true}, the annotated type is emulated in GWT. The emulated
   * source (also known as super-source) is different from the implementation
   * used by the JVM.
   *
   * @see <a href="http://code.google.com/webtoolkit/doc/latest/DevGuideOrganizingProjects.html#DevGuideModules">
   *     Documentation about GWT emulated source</a>
   */
  boolean emulated() default false;
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/annotations/GwtCompatible.java | Java | asf20 | 3,034 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* The presence of this annotation on a method indicates that the method may
* <em>not</em> be used with the
* <a href="http://code.google.com/webtoolkit/">Google Web Toolkit</a> (GWT),
* even though its type is annotated as {@link GwtCompatible} and accessible in
* GWT. They can cause GWT compilation errors or simply unexpected exceptions
* when used in GWT.
*
* <p>Note that this annotation should only be applied to methods, fields, or
* inner classes of types which are annotated as {@link GwtCompatible}.
*
* @author Charles Fry
*/
// Retained in class files but not at runtime (RetentionPolicy.CLASS). Unlike
// GwtCompatible, this may also annotate constructors and fields.
@Retention(RetentionPolicy.CLASS)
@Target({
    ElementType.TYPE, ElementType.METHOD,
    ElementType.CONSTRUCTOR, ElementType.FIELD })
@Documented
@GwtCompatible
public @interface GwtIncompatible {
  /**
   * Describes why the annotated element is incompatible with GWT. Since this is
   * generally due to a dependence on a type/method which GWT doesn't support,
   * it is sufficient to simply reference the unsupported type/method. E.g.
   * "Class.isInstance".
   */
  String value();
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/annotations/GwtIncompatible.java | Java | asf20 | 1,891 |
/*
* Copyright (C) 2010 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Signifies that a public API (public class, method or field) is subject to
* incompatible changes, or even removal, in a future release. An API bearing
* this annotation is exempt from any compatibility guarantees made by its
* containing library. Note that the presence of this annotation implies nothing
* about the quality or performance of the API in question, only the fact that
* it is not "API-frozen."
*
* <p>It is generally safe for <i>applications</i> to depend on beta APIs, at
* the cost of some extra work during upgrades. However it is generally
* inadvisable for <i>libraries</i> (which get included on users' CLASSPATHs,
* outside the library developers' control) to do so.
*
*
* @author Kevin Bourrillion
*/
// Retained in class files but not at runtime (RetentionPolicy.CLASS), so
// reflection cannot observe it; it is a compile-time / tooling signal only.
@Retention(RetentionPolicy.CLASS)
@Target({
    ElementType.ANNOTATION_TYPE,
    ElementType.CONSTRUCTOR,
    ElementType.FIELD,
    ElementType.METHOD,
    ElementType.TYPE})
@Documented
@GwtCompatible
public @interface Beta {}
| zzhhhhh-aw4rwer | guava/src/com/google/common/annotations/Beta.java | Java | asf20 | 1,818 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.annotations;
/**
* Annotates a program element that exists, or is more widely visible than
* otherwise necessary, only for use in test code.
*
* @author Johannes Henkel
*/
// No @Retention or @Target is declared, so the Java defaults apply:
// RetentionPolicy.CLASS retention and applicability to all declaration sites.
@GwtCompatible
public @interface VisibleForTesting {
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/annotations/VisibleForTesting.java | Java | asf20 | 861 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkNotNull;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import javax.annotation.Nullable;
/**
* An abstract implementation of the {@link ListenableFuture} interface. This
* class is preferable to {@link java.util.concurrent.FutureTask} for two
* reasons: It implements {@code ListenableFuture}, and it does not implement
* {@code Runnable}. (If you want a {@code Runnable} implementation of {@code
* ListenableFuture}, create a {@link ListenableFutureTask}, or submit your
* tasks to a {@link ListeningExecutorService}.)
*
* <p>This class implements all methods in {@code ListenableFuture}.
* Subclasses should provide a way to set the result of the computation through
* the protected methods {@link #set(Object)} and
* {@link #setException(Throwable)}. Subclasses may also override {@link
* #interruptTask()}, which will be invoked automatically if a call to {@link
* #cancel(boolean) cancel(true)} succeeds in canceling the future.
*
* <p>{@code AbstractFuture} uses an {@link AbstractQueuedSynchronizer} to deal
* with concurrency issues and guarantee thread safety.
*
* <p>The state changing methods all return a boolean indicating success or
* failure in changing the future's state. Valid states are running,
* completed, failed, or cancelled.
*
* <p>This class uses an {@link ExecutionList} to guarantee that all registered
* listeners will be executed, either when the future finishes or, for listeners
* that are added after the future completes, immediately.
* {@code Runnable}-{@code Executor} pairs are stored in the execution list but
* are not necessarily executed in the order in which they were added. (If a
* listener is added after the Future is complete, it will be executed
* immediately, even if earlier listeners have not been executed. Additionally,
* executors need not guarantee FIFO execution, or different listeners may run
* in different executors.)
*
* @author Sven Mawson
* @since 1.0
*/
public abstract class AbstractFuture<V> implements ListenableFuture<V> {
  /** Synchronization control for AbstractFutures. */
  private final Sync<V> sync = new Sync<V>();
  // The execution list to hold our executors.
  // Listener/executor pairs queued here are run when the future completes
  // (see cancel/set/setException below, which each call execute()).
  private final ExecutionList executionList = new ExecutionList();
  /**
   * Constructor for use by subclasses.
   */
  protected AbstractFuture() {}
/*
* Improve the documentation of when InterruptedException is thrown. Our
* behavior matches the JDK's, but the JDK's documentation is misleading.
*/
/**
* {@inheritDoc}
*
* <p>The default {@link AbstractFuture} implementation throws {@code
* InterruptedException} if the current thread is interrupted before or during
* the call, even if the value is already available.
*
* @throws InterruptedException if the current thread was interrupted before
* or during the call (optional but recommended).
* @throws CancellationException {@inheritDoc}
*/
@Override
public V get(long timeout, TimeUnit unit) throws InterruptedException,
TimeoutException, ExecutionException {
return sync.get(unit.toNanos(timeout));
}
/*
* Improve the documentation of when InterruptedException is thrown. Our
* behavior matches the JDK's, but the JDK's documentation is misleading.
*/
  /**
   * {@inheritDoc}
   *
   * <p>The default {@link AbstractFuture} implementation throws {@code
   * InterruptedException} if the current thread is interrupted before or during
   * the call, even if the value is already available.
   *
   * @throws InterruptedException if the current thread was interrupted before
   *     or during the call (optional but recommended).
   * @throws CancellationException {@inheritDoc}
   */
  @Override
  public V get() throws InterruptedException, ExecutionException {
    // Blocks on the internal sync object until the future is complete.
    return sync.get();
  }
  @Override
  public boolean isDone() {
    // Delegates to the sync object, which tracks the future's state.
    return sync.isDone();
  }
  @Override
  public boolean isCancelled() {
    // Delegates to the sync object; presumably covers both the CANCELLED and
    // INTERRUPTED states mentioned in the Sync docs — confirm in Sync itself.
    return sync.isCancelled();
  }
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
if (!sync.cancel(mayInterruptIfRunning)) {
return false;
}
executionList.execute();
if (mayInterruptIfRunning) {
interruptTask();
}
return true;
}
  /**
   * Subclasses can override this method to implement interruption of the
   * future's computation. The method is invoked automatically by a successful
   * call to {@link #cancel(boolean) cancel(true)}.
   *
   * <p>The default implementation does nothing.
   *
   * @since 10.0
   */
  protected void interruptTask() {
    // Intentionally empty: the base class has no computation to interrupt.
  }
  /**
   * Returns true if this future was cancelled with {@code
   * mayInterruptIfRunning} set to {@code true}.
   *
   * @since 14.0
   */
  protected final boolean wasInterrupted() {
    // Delegates to the sync object, which records how cancellation occurred.
    return sync.wasInterrupted();
  }
/**
 * {@inheritDoc}
 *
 * @since 10.0
 */
@Override
public void addListener(Runnable listener, Executor exec) {
  // The execution list runs the listener immediately if the future is
  // already complete, otherwise it is queued for later execution.
  executionList.add(listener, exec);
}
/**
 * Subclasses should invoke this method to set the result of the computation
 * to {@code value}. This will set the state of the future to
 * {@link AbstractFuture.Sync#COMPLETED} and invoke the listeners if the
 * state was successfully changed.
 *
 * @param value the value that was the result of the task.
 * @return true if the state was successfully changed.
 */
protected boolean set(@Nullable V value) {
  if (!sync.set(value)) {
    // Some other thread already completed or cancelled this future.
    return false;
  }
  executionList.execute();
  return true;
}
/**
 * Subclasses should invoke this method to set the result of the computation
 * to an error, {@code throwable}. This will set the state of the future to
 * {@link AbstractFuture.Sync#COMPLETED} and invoke the listeners if the
 * state was successfully changed.
 *
 * @param throwable the exception that the task failed with.
 * @return true if the state was successfully changed.
 */
protected boolean setException(Throwable throwable) {
  // Reject null eagerly; a null "failure" would otherwise surface much later
  // as a confusing NPE from get().
  if (!sync.setException(checkNotNull(throwable))) {
    return false;
  }
  executionList.execute();
  return true;
}
/**
 * <p>Following the contract of {@link AbstractQueuedSynchronizer} we create a
 * private subclass to hold the synchronizer. This synchronizer is used to
 * implement the blocking and waiting calls as well as to handle state changes
 * in a thread-safe manner. The current state of the future is held in the
 * Sync state, and the lock is released whenever the state changes to
 * {@link #COMPLETED}, {@link #CANCELLED}, or {@link #INTERRUPTED}.
 *
 * <p>To avoid races between threads doing release and acquire, we transition
 * to the final state in two steps. One thread will successfully CAS from
 * RUNNING to COMPLETING, that thread will then set the result of the
 * computation, and only then transition to COMPLETED, CANCELLED, or
 * INTERRUPTED.
 *
 * <p>We don't use the integer argument passed between acquire methods so we
 * pass around a -1 everywhere.
 */
static final class Sync<V> extends AbstractQueuedSynchronizer {
  private static final long serialVersionUID = 0L;
  /* Valid states. Distinct bits so isDone()/isCancelled() can test with masks. */
  static final int RUNNING = 0;
  static final int COMPLETING = 1;
  static final int COMPLETED = 2;
  static final int CANCELLED = 4;
  static final int INTERRUPTED = 8;
  // Written only by the single thread that wins the RUNNING -> COMPLETING CAS
  // in complete(); read only after acquiring the shared lock, so the AQS
  // release/acquire pair provides the necessary happens-before edge.
  private V value;
  private Throwable exception;
  /*
   * Acquisition succeeds if the future is done, otherwise it fails.
   */
  @Override
  protected int tryAcquireShared(int ignored) {
    if (isDone()) {
      return 1;
    }
    return -1;
  }
  /*
   * We always allow a release to go through, this means the state has been
   * successfully changed and the result is available.
   */
  @Override
  protected boolean tryReleaseShared(int finalState) {
    setState(finalState);
    return true;
  }
  /**
   * Blocks until the task is complete or the timeout expires. Throws a
   * {@link TimeoutException} if the timer expires, otherwise behaves like
   * {@link #get()}.
   */
  V get(long nanos) throws TimeoutException, CancellationException,
      ExecutionException, InterruptedException {
    // Attempt to acquire the shared lock with a timeout.
    if (!tryAcquireSharedNanos(-1, nanos)) {
      throw new TimeoutException("Timeout waiting for task.");
    }
    return getValue();
  }
  /**
   * Blocks until {@link #complete(Object, Throwable, int)} has been
   * successfully called. Throws a {@link CancellationException} if the task
   * was cancelled, or a {@link ExecutionException} if the task completed with
   * an error.
   */
  V get() throws CancellationException, ExecutionException,
      InterruptedException {
    // Acquire the shared lock allowing interruption.
    acquireSharedInterruptibly(-1);
    return getValue();
  }
  /**
   * Implementation of the actual value retrieval. Will return the value
   * on success, an exception on failure, a cancellation on cancellation, or
   * an illegal state if the synchronizer is in an invalid state.
   *
   * <p>Must only be called after a successful acquire, which guarantees the
   * state is final and value/exception are visible to this thread.
   */
  private V getValue() throws CancellationException, ExecutionException {
    int state = getState();
    switch (state) {
      case COMPLETED:
        if (exception != null) {
          throw new ExecutionException(exception);
        } else {
          return value;
        }
      case CANCELLED:
      case INTERRUPTED:
        throw cancellationExceptionWithCause(
            "Task was cancelled.", exception);
      default:
        // RUNNING or COMPLETING here means the caller skipped the acquire.
        throw new IllegalStateException(
            "Error, synchronizer in invalid state: " + state);
    }
  }
  /**
   * Checks if the state is {@link #COMPLETED}, {@link #CANCELLED}, or
   * {@link #INTERRUPTED}.
   */
  boolean isDone() {
    return (getState() & (COMPLETED | CANCELLED | INTERRUPTED)) != 0;
  }
  /**
   * Checks if the state is {@link #CANCELLED} or {@link #INTERRUPTED}.
   */
  boolean isCancelled() {
    return (getState() & (CANCELLED | INTERRUPTED)) != 0;
  }
  /**
   * Checks if the state is {@link #INTERRUPTED}.
   */
  boolean wasInterrupted() {
    return getState() == INTERRUPTED;
  }
  /**
   * Transition to the COMPLETED state and set the value.
   */
  boolean set(@Nullable V v) {
    return complete(v, null, COMPLETED);
  }
  /**
   * Transition to the COMPLETED state and set the exception.
   */
  boolean setException(Throwable t) {
    return complete(null, t, COMPLETED);
  }
  /**
   * Transition to the CANCELLED or INTERRUPTED state.
   */
  boolean cancel(boolean interrupt) {
    return complete(null, null, interrupt ? INTERRUPTED : CANCELLED);
  }
  /**
   * Implementation of completing a task. Either {@code v} or {@code t} will
   * be set but not both. The {@code finalState} is the state to change to
   * from {@link #RUNNING}. If the state is not in the RUNNING state we
   * return {@code false} after waiting for the state to be set to a valid
   * final state ({@link #COMPLETED}, {@link #CANCELLED}, or {@link
   * #INTERRUPTED}).
   *
   * @param v the value to set as the result of the computation.
   * @param t the exception to set as the result of the computation.
   * @param finalState the state to transition to.
   */
  private boolean complete(@Nullable V v, @Nullable Throwable t,
      int finalState) {
    boolean doCompletion = compareAndSetState(RUNNING, COMPLETING);
    if (doCompletion) {
      // If this thread successfully transitioned to COMPLETING, set the value
      // and exception and then release to the final state.
      this.value = v;
      // Don't actually construct a CancellationException until necessary.
      this.exception = ((finalState & (CANCELLED | INTERRUPTED)) != 0)
          ? new CancellationException("Future.cancel() was called.") : t;
      releaseShared(finalState);
    } else if (getState() == COMPLETING) {
      // If some other thread is currently completing the future, block until
      // they are done so we can guarantee completion.
      acquireShared(-1);
    }
    return doCompletion;
  }
}
/**
 * Builds a {@link CancellationException} carrying the given cause.
 * {@code CancellationException} has no (message, cause) constructor, so the
 * cause is attached after construction via {@link Throwable#initCause}.
 */
static final CancellationException cancellationExceptionWithCause(
    @Nullable String message, @Nullable Throwable cause) {
  CancellationException result = new CancellationException(message);
  result.initCause(cause);
  return result;
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/AbstractFuture.java | Java | asf20 | 13,496 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Ticker;
import java.util.concurrent.TimeUnit;
import javax.annotation.concurrent.ThreadSafe;
/**
* A rate limiter. Conceptually, a rate limiter distributes permits at a
* configurable rate. Each {@link #acquire()} blocks if necessary until a permit is
* available, and then takes it. Once acquired, permits need not be released.
*
* <p>Rate limiters are often used to restrict the rate at which some
* physical or logical resource is accessed. This is in contrast to {@link
* java.util.concurrent.Semaphore} which restricts the number of concurrent
* accesses instead of the rate (note though that concurrency and rate are closely related,
* e.g. see <a href="http://en.wikipedia.org/wiki/Little's_law">Little's Law</a>).
*
* <p>A {@code RateLimiter} is defined primarily by the rate at which permits
* are issued. Absent additional configuration, permits will be distributed at a
* fixed rate, defined in terms of permits per second. Permits will be distributed
* smoothly, with the delay between individual permits being adjusted to ensure
* that the configured rate is maintained.
*
* <p>It is possible to configure a {@code RateLimiter} to have a warmup
* period during which time the permits issued each second steadily increases until
* it hits the stable rate.
*
* <p>As an example, imagine that we have a list of tasks to execute, but we don't want to
* submit more than 2 per second:
*<pre> {@code
* final RateLimiter rateLimiter = RateLimiter.create(2.0); // rate is "2 permits per second"
* void submitTasks(List<Runnable> tasks, Executor executor) {
* for (Runnable task : tasks) {
* rateLimiter.acquire(); // may wait
* executor.execute(task);
* }
* }
*}</pre>
*
* <p>As another example, imagine that we produce a stream of data, and we want to cap it
* at 5kb per second. This could be accomplished by requiring a permit per byte, and specifying
* a rate of 5000 permits per second:
*<pre> {@code
* final RateLimiter rateLimiter = RateLimiter.create(5000.0); // rate = 5000 permits per second
* void submitPacket(byte[] packet) {
* rateLimiter.acquire(packet.length);
* networkService.send(packet);
* }
*}</pre>
*
 * <p>It is important to note that the number of permits requested <i>never</i>
 * affects the throttling of the request itself (an invocation to {@code acquire(1)}
* and an invocation to {@code acquire(1000)} will result in exactly the same throttling, if any),
* but it affects the throttling of the <i>next</i> request. I.e., if an expensive task
* arrives at an idle RateLimiter, it will be granted immediately, but it is the <i>next</i>
* request that will experience extra throttling, thus paying for the cost of the expensive
* task.
*
* <p>Note: {@code RateLimiter} does not provide fairness guarantees.
*
* @author Dimitris Andreou
* @since 13.0
*/
// TODO(user): switch to nano precision. A natural unit of cost is "bytes", and a micro precision
// would mean a maximum rate of "1MB/s", which might be small in some cases.
@ThreadSafe
@Beta
public abstract class RateLimiter {
/*
* How is the RateLimiter designed, and why?
*
   * The primary feature of a RateLimiter is its "stable rate", the maximum rate that
   * it should allow at normal conditions. This is enforced by "throttling" incoming
* requests as needed, i.e. compute, for an incoming request, the appropriate throttle time,
* and make the calling thread wait as much.
*
* The simplest way to maintain a rate of QPS is to keep the timestamp of the last
* granted request, and ensure that (1/QPS) seconds have elapsed since then. For example,
* for a rate of QPS=5 (5 tokens per second), if we ensure that a request isn't granted
* earlier than 200ms after the last one, then we achieve the intended rate.
* If a request comes and the last request was granted only 100ms ago, then we wait for
* another 100ms. At this rate, serving 15 fresh permits (i.e. for an acquire(15) request)
* naturally takes 3 seconds.
*
* It is important to realize that such a RateLimiter has a very superficial memory
* of the past: it only remembers the last request. What if the RateLimiter was unused for
* a long period of time, then a request arrived and was immediately granted?
* This RateLimiter would immediately forget about that past underutilization. This may
* result in either underutilization or overflow, depending on the real world consequences
* of not using the expected rate.
*
* Past underutilization could mean that excess resources are available. Then, the RateLimiter
* should speed up for a while, to take advantage of these resources. This is important
* when the rate is applied to networking (limiting bandwidth), where past underutilization
* typically translates to "almost empty buffers", which can be filled immediately.
*
* On the other hand, past underutilization could mean that "the server responsible for
* handling the request has become less ready for future requests", i.e. its caches become
* stale, and requests become more likely to trigger expensive operations (a more extreme
* case of this example is when a server has just booted, and it is mostly busy with getting
* itself up to speed).
*
* To deal with such scenarios, we add an extra dimension, that of "past underutilization",
* modeled by "storedPermits" variable. This variable is zero when there is no
* underutilization, and it can grow up to maxStoredPermits, for sufficiently large
* underutilization. So, the requested permits, by an invocation acquire(permits),
* are served from:
* - stored permits (if available)
* - fresh permits (for any remaining permits)
*
* How this works is best explained with an example:
*
* For a RateLimiter that produces 1 token per second, every second
* that goes by with the RateLimiter being unused, we increase storedPermits by 1.
* Say we leave the RateLimiter unused for 10 seconds (i.e., we expected a request at time
* X, but we are at time X + 10 seconds before a request actually arrives; this is
* also related to the point made in the last paragraph), thus storedPermits
* becomes 10.0 (assuming maxStoredPermits >= 10.0). At that point, a request of acquire(3)
* arrives. We serve this request out of storedPermits, and reduce that to 7.0 (how this is
* translated to throttling time is discussed later). Immediately after, assume that an
   * acquire(10) request arrives. We serve the request partly from storedPermits,
* using all the remaining 7.0 permits, and the remaining 3.0, we serve them by fresh permits
* produced by the rate limiter.
*
* We already know how much time it takes to serve 3 fresh permits: if the rate is
* "1 token per second", then this will take 3 seconds. But what does it mean to serve 7
* stored permits? As explained above, there is no unique answer. If we are primarily
* interested to deal with underutilization, then we want stored permits to be given out
* /faster/ than fresh ones, because underutilization = free resources for the taking.
* If we are primarily interested to deal with overflow, then stored permits could
* be given out /slower/ than fresh ones. Thus, we require a (different in each case)
   * function that translates storedPermits to throttling time.
*
* This role is played by storedPermitsToWaitTime(double storedPermits, double permitsToTake).
* The underlying model is a continuous function mapping storedPermits
* (from 0.0 to maxStoredPermits) onto the 1/rate (i.e. intervals) that is effective at the given
* storedPermits. "storedPermits" essentially measure unused time; we spend unused time
* buying/storing permits. Rate is "permits / time", thus "1 / rate = time / permits".
* Thus, "1/rate" (time / permits) times "permits" gives time, i.e., integrals on this
* function (which is what storedPermitsToWaitTime() computes) correspond to minimum intervals
* between subsequent requests, for the specified number of requested permits.
*
* Here is an example of storedPermitsToWaitTime:
* If storedPermits == 10.0, and we want 3 permits, we take them from storedPermits,
* reducing them to 7.0, and compute the throttling for these as a call to
* storedPermitsToWaitTime(storedPermits = 10.0, permitsToTake = 3.0), which will
* evaluate the integral of the function from 7.0 to 10.0.
*
* Using integrals guarantees that the effect of a single acquire(3) is equivalent
* to { acquire(1); acquire(1); acquire(1); }, or { acquire(2); acquire(1); }, etc,
* since the integral of the function in [7.0, 10.0] is equivalent to the sum of the
* integrals of [7.0, 8.0], [8.0, 9.0], [9.0, 10.0] (and so on), no matter
* what the function is. This guarantees that we handle correctly requests of varying weight
* (permits), /no matter/ what the actual function is - so we can tweak the latter freely.
* (The only requirement, obviously, is that we can compute its integrals).
*
* Note well that if, for this function, we chose a horizontal line, at height of exactly
* (1/QPS), then the effect of the function is non-existent: we serve storedPermits at
* exactly the same cost as fresh ones (1/QPS is the cost for each). We use this trick later.
*
* If we pick a function that goes /below/ that horizontal line, it means that we reduce
* the area of the function, thus time. Thus, the RateLimiter becomes /faster/ after a
* period of underutilization. If, on the other hand, we pick a function that
* goes /above/ that horizontal line, then it means that the area (time) is increased,
* thus storedPermits are more costly than fresh permits, thus the RateLimiter becomes
* /slower/ after a period of underutilization.
*
* Last, but not least: consider a RateLimiter with rate of 1 permit per second, currently
* completely unused, and an expensive acquire(100) request comes. It would be nonsensical
* to just wait for 100 seconds, and /then/ start the actual task. Why wait without doing
* anything? A much better approach is to /allow/ the request right away (as if it was an
* acquire(1) request instead), and postpone /subsequent/ requests as needed. In this version,
* we allow starting the task immediately, and postpone by 100 seconds future requests,
* thus we allow for work to get done in the meantime instead of waiting idly.
*
* This has important consequences: it means that the RateLimiter doesn't remember the time
* of the _last_ request, but it remembers the (expected) time of the _next_ request. This
* also enables us to tell immediately (see tryAcquire(timeout)) whether a particular
* timeout is enough to get us to the point of the next scheduling time, since we always
* maintain that. And what we mean by "an unused RateLimiter" is also defined by that
* notion: when we observe that the "expected arrival time of the next request" is actually
* in the past, then the difference (now - past) is the amount of time that the RateLimiter
* was formally unused, and it is that amount of time which we translate to storedPermits.
* (We increase storedPermits with the amount of permits that would have been produced
* in that idle time). So, if rate == 1 permit per second, and arrivals come exactly
* one second after the previous, then storedPermits is _never_ increased -- we would only
* increase it for arrivals _later_ than the expected one second.
*/
/**
 * Creates a {@code RateLimiter} with the specified stable throughput, given as
 * "permits per second" (commonly referred to as <i>QPS</i>, queries per second).
 *
 * <p>The returned {@code RateLimiter} ensures that on average no more than {@code
 * permitsPerSecond} are issued during any given second, with sustained requests
 * being smoothly spread over each second. When the incoming request rate exceeds
 * {@code permitsPerSecond} the rate limiter will release one permit every {@code
 * (1.0 / permitsPerSecond)} seconds. When the rate limiter is unused,
 * bursts of up to {@code permitsPerSecond} permits will be allowed, with subsequent
 * requests being smoothly limited at the stable rate of {@code permitsPerSecond}.
 *
 * @param permitsPerSecond the rate of the returned {@code RateLimiter}, measured in
 *        how many permits become available per second. Must be positive
 */
// TODO(user): "This is equivalent to
// {@code createWithCapacity(permitsPerSecond, 1, TimeUnit.SECONDS)}".
public static RateLimiter create(double permitsPerSecond) {
  /*
   * The default configuration banks at most one second's worth of unused
   * permits. This avoids needless stalls when callers arrive slightly late:
   * with a 1 qps limiter and four threads calling acquire() at t=0, t=1.05,
   * t=2, and t=3 seconds, the 0.05s slip of the second caller would otherwise
   * cascade, forcing the third and fourth callers to sleep until 2.05s and
   * 3.05s respectively.
   */
  return create(SleepingTicker.SYSTEM_TICKER, permitsPerSecond);
}
@VisibleForTesting
static RateLimiter create(SleepingTicker ticker, double permitsPerSecond) {
  // A Bursty limiter that can bank at most one second's worth of permits.
  Bursty limiter = new Bursty(ticker, 1.0 /* maxBurstSeconds */);
  limiter.setRate(permitsPerSecond);
  return limiter;
}
/**
 * Creates a {@code RateLimiter} with the specified stable throughput, given as
 * "permits per second" (commonly referred to as <i>QPS</i>, queries per second), and a
 * <i>warmup period</i>, during which the {@code RateLimiter} smoothly ramps up its rate,
 * until it reaches its maximum rate at the end of the period (as long as there are enough
 * requests to saturate it). Similarly, if the {@code RateLimiter} is left <i>unused</i> for
 * a duration of {@code warmupPeriod}, it will gradually return to its "cold" state,
 * i.e. it will go through the same warming up process as when it was first created.
 *
 * <p>The returned {@code RateLimiter} is intended for cases where the resource that actually
 * fulfills the requests (e.g., a remote server) needs "warmup" time, rather than
 * being immediately accessed at the stable (maximum) rate.
 *
 * <p>The returned {@code RateLimiter} starts in a "cold" state (i.e. the warmup period
 * will follow), and if it is left unused for long enough, it will return to that state.
 *
 * @param permitsPerSecond the rate of the returned {@code RateLimiter}, measured in
 *        how many permits become available per second. Must be positive
 * @param warmupPeriod the duration of the period where the {@code RateLimiter} ramps up its
 *        rate, before reaching its stable (maximum) rate
 * @param unit the time unit of the warmupPeriod argument
 */
public static RateLimiter create(double permitsPerSecond, long warmupPeriod, TimeUnit unit) {
  // Delegate to the testing overload with the real (system) ticker.
  return create(SleepingTicker.SYSTEM_TICKER, permitsPerSecond, warmupPeriod, unit);
}
@VisibleForTesting
static RateLimiter create(
    SleepingTicker ticker, double permitsPerSecond, long warmupPeriod, TimeUnit unit) {
  // WarmingUp implements the trapezoid-shaped throttling function described
  // in the class notes; the rate must be set after construction.
  WarmingUp limiter = new WarmingUp(ticker, warmupPeriod, unit);
  limiter.setRate(permitsPerSecond);
  return limiter;
}
@VisibleForTesting
static RateLimiter createWithCapacity(
    SleepingTicker ticker, double permitsPerSecond, long maxBurstBuildup, TimeUnit unit) {
  // Express the burst-buildup window in (fractional) seconds.
  double maxBurstSeconds = unit.toNanos(maxBurstBuildup) / 1E+9;
  Bursty limiter = new Bursty(ticker, maxBurstSeconds);
  limiter.setRate(permitsPerSecond);
  return limiter;
}
/**
 * The underlying timer; used both to measure elapsed time and sleep as necessary. A separate
 * object to facilitate testing.
 */
private final SleepingTicker ticker;
/**
 * The timestamp when the RateLimiter was created; used to avoid possible overflow/time-wrapping
 * errors.
 */
private final long offsetNanos;
/**
 * The currently stored permits.
 */
double storedPermits;
/**
 * The maximum number of stored permits.
 */
double maxPermits;
/**
 * The interval between two unit requests, at our stable rate. E.g., a stable rate of 5 permits
 * per second has a stable interval of 200ms.
 */
volatile double stableIntervalMicros;
// Guards storedPermits, maxPermits and nextFreeTicketMicros; all mutations
// happen inside synchronized (mutex) blocks (see setRate/reserve/tryAcquire).
private final Object mutex = new Object();
/**
 * The time when the next request (no matter its size) will be granted. After granting a request,
 * this is pushed further in the future. Large requests push this further than small requests.
 */
private long nextFreeTicketMicros = 0L; // could be either in the past or future
// Private: instances are obtained via the static create* factory methods,
// which also invoke setRate() to finish initialization.
private RateLimiter(SleepingTicker ticker) {
  this.ticker = ticker;
  this.offsetNanos = ticker.read();
}
/**
 * Updates the stable rate of this {@code RateLimiter}, that is, the
 * {@code permitsPerSecond} argument provided in the factory method that
 * constructed the {@code RateLimiter}. Currently throttled threads will <b>not</b>
 * be awakened as a result of this invocation, thus they do not observe the new rate;
 * only subsequent requests will.
 *
 * <p>Note though that, since each request repays (by waiting, if necessary) the cost
 * of the <i>previous</i> request, this means that the very next request
 * after an invocation to {@code setRate} will not be affected by the new rate;
 * it will pay the cost of the previous request, which is in terms of the previous rate.
 *
 * <p>The behavior of the {@code RateLimiter} is not modified in any other way,
 * e.g. if the {@code RateLimiter} was configured with a warmup period of 20 seconds,
 * it still has a warmup period of 20 seconds after this method invocation.
 *
 * @param permitsPerSecond the new stable rate of this {@code RateLimiter}. Must be positive
 */
public final void setRate(double permitsPerSecond) {
  Preconditions.checkArgument(permitsPerSecond > 0.0
      && !Double.isNaN(permitsPerSecond), "rate must be positive");
  synchronized (mutex) {
    // Credit any accumulated idle time at the *old* rate before switching.
    resync(readSafeMicros());
    double newStableIntervalMicros = TimeUnit.SECONDS.toMicros(1L) / permitsPerSecond;
    this.stableIntervalMicros = newStableIntervalMicros;
    doSetRate(permitsPerSecond, newStableIntervalMicros);
  }
}
/**
 * Hook for subclasses to recompute {@code maxPermits} (and any subclass state)
 * when the stable rate changes; invoked under {@code mutex} from {@link #setRate}.
 */
abstract void doSetRate(double permitsPerSecond, double stableIntervalMicros);
/**
 * Returns the stable rate (as {@code permits per seconds}) with which this
 * {@code RateLimiter} is configured with. The initial value of this is the same as
 * the {@code permitsPerSecond} argument passed in the factory method that produced
 * this {@code RateLimiter}, and it is only updated after invocations
 * to {@linkplain #setRate}.
 */
public final double getRate() {
  // Invert the stable interval: rate = (micros per second) / (micros per permit).
  double microsPerSecond = TimeUnit.SECONDS.toMicros(1L);
  return microsPerSecond / stableIntervalMicros;
}
/**
 * Acquires a single permit from this {@code RateLimiter}, blocking until the
 * request can be granted. Tells the amount of time slept, if any.
 *
 * <p>This method is equivalent to {@code acquire(1)}.
 *
 * @return time spent sleeping to enforce rate, in seconds; 0.0 if not rate-limited
 * @since 16.0 (present in 13.0 with {@code void} return type)
 */
public double acquire() {
  // Single-permit convenience overload.
  return acquire(1);
}
/**
 * Acquires the given number of permits from this {@code RateLimiter}, blocking until the
 * request can be granted. Tells the amount of time slept, if any.
 *
 * @param permits the number of permits to acquire
 * @return time spent sleeping to enforce rate, in seconds; 0.0 if not rate-limited
 * @since 16.0 (present in 13.0 with {@code void} return type)
 */
public double acquire(int permits) {
  // Reserve first (validates permits and updates state under the lock),
  // then sleep outside the lock for however long the reservation demands.
  long microsToWait = reserve(permits);
  ticker.sleepMicrosUninterruptibly(microsToWait);
  // Report the wait in (fractional) seconds.
  return 1.0 * microsToWait / TimeUnit.SECONDS.toMicros(1L);
}
/**
 * Reserves a single permit from this {@code RateLimiter} for future use, returning the number of
 * microseconds until the reservation.
 *
 * <p>This method is equivalent to {@code reserve(1)}.
 *
 * @return time in microseconds to wait until the resource can be acquired.
 */
long reserve() {
  // Single-permit convenience overload.
  return reserve(1);
}
/**
 * Reserves the given number of permits from this {@code RateLimiter} for future use, returning
 * the number of microseconds until the reservation can be consumed.
 *
 * @return time in microseconds to wait until the resource can be acquired.
 */
long reserve(int permits) {
  checkPermits(permits);
  // All bookkeeping happens under the mutex; the caller sleeps outside it.
  synchronized (mutex) {
    long nowMicros = readSafeMicros();
    return reserveNextTicket(permits, nowMicros);
  }
}
/**
 * Acquires a permit from this {@code RateLimiter} if it can be obtained
 * without exceeding the specified {@code timeout}, or returns {@code false}
 * immediately (without waiting) if the permit would not have been granted
 * before the timeout expired.
 *
 * <p>This method is equivalent to {@code tryAcquire(1, timeout, unit)}.
 *
 * @param timeout the maximum time to wait for the permit
 * @param unit the time unit of the timeout argument
 * @return {@code true} if the permit was acquired, {@code false} otherwise
 */
public boolean tryAcquire(long timeout, TimeUnit unit) {
  // Single-permit convenience overload.
  return tryAcquire(1, timeout, unit);
}
/**
 * Acquires permits from this {@link RateLimiter} if it can be acquired immediately without delay.
 *
 * <p>
 * This method is equivalent to {@code tryAcquire(permits, 0, anyUnit)}.
 *
 * @param permits the number of permits to acquire
 * @return {@code true} if the permits were acquired, {@code false} otherwise
 * @since 14.0
 */
public boolean tryAcquire(int permits) {
  // Zero timeout: succeed only if the permits are available right now.
  return tryAcquire(permits, 0, TimeUnit.MICROSECONDS);
}
/**
 * Acquires a permit from this {@link RateLimiter} if it can be acquired immediately without
 * delay.
 *
 * <p>
 * This method is equivalent to {@code tryAcquire(1)}.
 *
 * @return {@code true} if the permit was acquired, {@code false} otherwise
 * @since 14.0
 */
public boolean tryAcquire() {
  // One permit, zero timeout: a pure "is a permit free right now?" probe.
  return tryAcquire(1, 0, TimeUnit.MICROSECONDS);
}
/**
 * Acquires the given number of permits from this {@code RateLimiter} if it can be obtained
 * without exceeding the specified {@code timeout}, or returns {@code false}
 * immediately (without waiting) if the permits would not have been granted
 * before the timeout expired.
 *
 * <p>A negative {@code timeout} is treated as zero: the permits are acquired
 * only if they are available immediately.
 *
 * @param permits the number of permits to acquire
 * @param timeout the maximum time to wait for the permits
 * @param unit the time unit of the timeout argument
 * @return {@code true} if the permits were acquired, {@code false} otherwise
 */
public boolean tryAcquire(int permits, long timeout, TimeUnit unit) {
  // Clamp negative timeouts to zero. Without this, a negative timeoutMicros
  // would make the availability check below spuriously fail even when the
  // permits are free right now (nextFreeTicketMicros <= nowMicros).
  long timeoutMicros = Math.max(unit.toMicros(timeout), 0);
  checkPermits(permits);
  long microsToWait;
  synchronized (mutex) {
    long nowMicros = readSafeMicros();
    if (nextFreeTicketMicros > nowMicros + timeoutMicros) {
      // The backlog alone exceeds the allowed wait; give up without
      // mutating any state.
      return false;
    } else {
      microsToWait = reserveNextTicket(permits, nowMicros);
    }
  }
  // Sleep outside the lock so other callers are not blocked while we wait.
  ticker.sleepMicrosUninterruptibly(microsToWait);
  return true;
}
/** Rejects non-positive permit counts with an {@code IllegalArgumentException}. */
private static void checkPermits(int permits) {
  Preconditions.checkArgument(permits > 0, "Requested permits must be positive");
}
/**
 * Reserves next ticket and returns the wait time that the caller must wait for.
 *
 * <p>The return value is guaranteed to be non-negative.
 *
 * <p>Note: the returned wait is only the pre-existing backlog
 * ({@code nextFreeTicketMicros - nowMicros}); the cost of <i>this</i> request
 * is added to {@code nextFreeTicketMicros} and thus paid by the <i>next</i>
 * caller, per the "pay it forward" design described in the class notes.
 * Must be called while holding {@code mutex}.
 */
private long reserveNextTicket(double requiredPermits, long nowMicros) {
  // Credit any idle time since the last grant as stored permits.
  resync(nowMicros);
  long microsToNextFreeTicket = Math.max(0, nextFreeTicketMicros - nowMicros);
  // Serve from stored permits first, then from fresh ones.
  double storedPermitsToSpend = Math.min(requiredPermits, this.storedPermits);
  double freshPermits = requiredPermits - storedPermitsToSpend;
  // Cost of stored permits comes from the subclass's throttling function;
  // fresh permits always cost stableIntervalMicros each.
  long waitMicros = storedPermitsToWaitTime(this.storedPermits, storedPermitsToSpend)
      + (long) (freshPermits * stableIntervalMicros);
  this.nextFreeTicketMicros = nextFreeTicketMicros + waitMicros;
  this.storedPermits -= storedPermitsToSpend;
  return microsToNextFreeTicket;
}
/**
 * Translates a specified portion of our currently stored permits which we want to
 * spend/acquire, into a throttling time. Conceptually, this evaluates the integral
 * of the underlying function we use, for the range of
 * [(storedPermits - permitsToTake), storedPermits].
 *
 * <p>This always holds: {@code 0 <= permitsToTake <= storedPermits}
 *
 * @return the throttling time, in microseconds
 */
abstract long storedPermitsToWaitTime(double storedPermits, double permitsToTake);
/**
 * Brings {@code storedPermits} and {@code nextFreeTicketMicros} up to date:
 * idle time since the last expected request is converted into stored permits
 * (capped at {@code maxPermits}). Must be called while holding {@code mutex}.
 */
private void resync(long nowMicros) {
  // if nextFreeTicket is in the past, resync to now
  if (nowMicros > nextFreeTicketMicros) {
    storedPermits = Math.min(maxPermits,
        storedPermits + (nowMicros - nextFreeTicketMicros) / stableIntervalMicros);
    nextFreeTicketMicros = nowMicros;
  }
}
/**
 * Microseconds elapsed since this limiter was constructed. Subtracting
 * {@code offsetNanos} keeps the value small, guarding against overflow or
 * wrap-around of the underlying nanosecond ticker.
 */
private long readSafeMicros() {
  return TimeUnit.NANOSECONDS.toMicros(ticker.read() - offsetNanos);
}
@Override
public String toString() {
  // Report the stable rate in queries per second, derived from the interval.
  double qps = 1000000.0 / stableIntervalMicros;
  return String.format("RateLimiter[stableRate=%3.1fqps]", qps);
}
/**
* This implements the following function:
*
* ^ throttling
* |
* 3*stable + /
* interval | /.
* (cold) | / .
* | / . <-- "warmup period" is the area of the trapezoid between
* 2*stable + / . halfPermits and maxPermits
* interval | / .
* | / .
* | / .
* stable +----------/ WARM . }
* interval | . UP . } <-- this rectangle (from 0 to maxPermits, and
* | . PERIOD. } height == stableInterval) defines the cooldown period,
* | . . } and we want cooldownPeriod == warmupPeriod
* |---------------------------------> storedPermits
* (halfPermits) (maxPermits)
*
* Before going into the details of this particular function, let's keep in mind the basics:
* 1) The state of the RateLimiter (storedPermits) is a vertical line in this figure.
* 2) When the RateLimiter is not used, this goes right (up to maxPermits)
* 3) When the RateLimiter is used, this goes left (down to zero), since if we have storedPermits,
* we serve from those first
* 4) When _unused_, we go right at the same speed (rate)! I.e., if our rate is
* 2 permits per second, and 3 unused seconds pass, we will always save 6 permits
* (no matter what our initial position was), up to maxPermits.
* If we invert the rate, we get the "stableInterval" (interval between two requests
* in a perfectly spaced out sequence of requests of the given rate). Thus, if you
* want to see "how much time it will take to go from X storedPermits to X+K storedPermits?",
* the answer is always stableInterval * K. In the same example, for 2 permits per second,
* stableInterval is 500ms. Thus to go from X storedPermits to X+6 storedPermits, we
* require 6 * 500ms = 3 seconds.
*
* In short, the time it takes to move to the right (save K permits) is equal to the
* rectangle of width == K and height == stableInterval.
 * 5) When _used_, the time it takes, as explained in the introductory class note, is
* equal to the integral of our function, between X permits and X-K permits, assuming
* we want to spend K saved permits.
*
* In summary, the time it takes to move to the left (spend K permits), is equal to the
* area of the function of width == K.
*
* Let's dive into this function now:
*
* When we have storedPermits <= halfPermits (the left portion of the function), then
* we spend them at the exact same rate that
* fresh permits would be generated anyway (that rate is 1/stableInterval). We size
* this area to be equal to _half_ the specified warmup period. Why we need this?
* And why half? We'll explain shortly below (after explaining the second part).
*
* Stored permits that are beyond halfPermits, are mapped to an ascending line, that goes
* from stableInterval to 3 * stableInterval. The average height for that part is
* 2 * stableInterval, and is sized appropriately to have an area _equal_ to the
* specified warmup period. Thus, by point (4) above, it takes "warmupPeriod" amount of time
* to go from maxPermits to halfPermits.
*
* BUT, by point (3) above, it only takes "warmupPeriod / 2" amount of time to return back
* to maxPermits, from halfPermits! (Because the trapezoid has double the area of the rectangle
* of height stableInterval and equivalent width). We decided that the "cooldown period"
* time should be equivalent to "warmup period", thus a fully saturated RateLimiter
* (with zero stored permits, serving only fresh ones) can go to a fully unsaturated
* (with storedPermits == maxPermits) in the same amount of time it takes for a fully
* unsaturated RateLimiter to return to the stableInterval -- which happens in halfPermits,
* since beyond that point, we use a horizontal line of "stableInterval" height, simulating
* the regular rate.
*
* Thus, we have figured all dimensions of this shape, to give all the desired
* properties:
* - the width is warmupPeriod / stableInterval, to make cooldownPeriod == warmupPeriod
* - the slope starts at the middle, and goes from stableInterval to 3*stableInterval so
* to have halfPermits being spend in double the usual time (half the rate), while their
* respective rate is steadily ramping up
*/
  private static class WarmingUp extends RateLimiter {
    /** Length of the warmup period, in microseconds. */
    final long warmupPeriodMicros;
    /**
     * The slope of the line from the stable interval (when permits == 0), to the cold interval
     * (when permits == maxPermits)
     */
    private double slope;
    /**
     * Threshold above which stored permits cost more than the stable interval to spend
     * (the left edge of the climbing line in the diagram above).
     */
    private double halfPermits;
    /**
     * @param warmupPeriod time for the limiter to go from cold (maxPermits stored) back to
     *     the stable rate, expressed in {@code timeUnit}
     */
    WarmingUp(SleepingTicker ticker, long warmupPeriod, TimeUnit timeUnit) {
      super(ticker);
      this.warmupPeriodMicros = timeUnit.toMicros(warmupPeriod);
    }
    @Override
    void doSetRate(double permitsPerSecond, double stableIntervalMicros) {
      double oldMaxPermits = maxPermits;
      // Width of the cooldown rectangle: sized so cooldownPeriod == warmupPeriod (see diagram).
      maxPermits = warmupPeriodMicros / stableIntervalMicros;
      halfPermits = maxPermits / 2.0;
      // Stable interval is x, cold is 3x, so on average it's 2x. Double the time -> halve the rate
      double coldIntervalMicros = stableIntervalMicros * 3.0;
      slope = (coldIntervalMicros - stableIntervalMicros) / halfPermits;
      if (oldMaxPermits == Double.POSITIVE_INFINITY) {
        // if we don't special-case this, we would get storedPermits == NaN, below
        storedPermits = 0.0;
      } else {
        storedPermits = (oldMaxPermits == 0.0)
            ? maxPermits // initial state is cold
            : storedPermits * maxPermits / oldMaxPermits; // keep the saturation ratio constant
      }
    }
    @Override
    long storedPermitsToWaitTime(double storedPermits, double permitsToTake) {
      double availablePermitsAboveHalf = storedPermits - halfPermits;
      long micros = 0;
      // measuring the integral on the right part of the function (the climbing line)
      if (availablePermitsAboveHalf > 0.0) {
        double permitsAboveHalfToTake = Math.min(availablePermitsAboveHalf, permitsToTake);
        // Trapezoid area: width times the average of the heights at the two endpoints.
        micros = (long) (permitsAboveHalfToTake * (permitsToTime(availablePermitsAboveHalf)
            + permitsToTime(availablePermitsAboveHalf - permitsAboveHalfToTake)) / 2.0);
        permitsToTake -= permitsAboveHalfToTake;
      }
      // measuring the integral on the left part of the function (the horizontal line)
      micros += (stableIntervalMicros * permitsToTake);
      return micros;
    }
    /** Height of the throttling function (an interval, in micros) at the given position. */
    private double permitsToTime(double permits) {
      return stableIntervalMicros + permits * slope;
    }
  }
/**
* This implements a "bursty" RateLimiter, where storedPermits are translated to
* zero throttling. The maximum number of permits that can be saved (when the RateLimiter is
* unused) is defined in terms of time, in this sense: if a RateLimiter is 2qps, and this
* time is specified as 10 seconds, we can save up to 2 * 10 = 20 permits.
*/
private static class Bursty extends RateLimiter {
/** The work (permits) of how many seconds can be saved up if this RateLimiter is unused? */
final double maxBurstSeconds;
Bursty(SleepingTicker ticker, double maxBurstSeconds) {
super(ticker);
this.maxBurstSeconds = maxBurstSeconds;
}
@Override
void doSetRate(double permitsPerSecond, double stableIntervalMicros) {
double oldMaxPermits = this.maxPermits;
maxPermits = maxBurstSeconds * permitsPerSecond;
storedPermits = (oldMaxPermits == 0.0)
? 0.0 // initial state
: storedPermits * maxPermits / oldMaxPermits;
}
@Override
long storedPermitsToWaitTime(double storedPermits, double permitsToTake) {
return 0L;
}
}
  /**
   * A Ticker that can also sleep. Abstracted (and {@code @VisibleForTesting}) so tests can
   * substitute a controllable time source instead of really sleeping.
   */
  @VisibleForTesting
  static abstract class SleepingTicker extends Ticker {
    /** Sleeps for the given number of microseconds without responding to interruption. */
    abstract void sleepMicrosUninterruptibly(long micros);
    /** Production implementation: delegates to {@code systemTicker()} and really sleeps. */
    static final SleepingTicker SYSTEM_TICKER = new SleepingTicker() {
      @Override
      public long read() {
        return systemTicker().read();
      }
      @Override
      public void sleepMicrosUninterruptibly(long micros) {
        // Zero or negative waits are a no-op rather than a call into the sleep machinery.
        if (micros > 0) {
          Uninterruptibles.sleepUninterruptibly(micros, TimeUnit.MICROSECONDS);
        }
      }
    };
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/RateLimiter.java | Java | asf20 | 34,588 |
/*
* Written by Doug Lea and Martin Buchholz with assistance from
* members of JCP JSR-166 Expert Group and released to the public
* domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
/*
* Source:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/extra/AtomicDouble.java?revision=1.13
* (Modified to adapt to guava coding conventions and
* to use AtomicLongFieldUpdater instead of sun.misc.Unsafe)
*/
package com.google.common.util.concurrent;
import static java.lang.Double.doubleToRawLongBits;
import static java.lang.Double.longBitsToDouble;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
/**
* A {@code double} value that may be updated atomically. See the
* {@link java.util.concurrent.atomic} package specification for
* description of the properties of atomic variables. An {@code
* AtomicDouble} is used in applications such as atomic accumulation,
* and cannot be used as a replacement for a {@link Double}. However,
* this class does extend {@code Number} to allow uniform access by
* tools and utilities that deal with numerically-based classes.
*
* <p><a name="bitEquals">This class compares primitive {@code double}
* values in methods such as {@link #compareAndSet} by comparing their
* bitwise representation using {@link Double#doubleToRawLongBits},
* which differs from both the primitive double {@code ==} operator
* and from {@link Double#equals}, as if implemented by:
* <pre> {@code
* static boolean bitEquals(double x, double y) {
* long xBits = Double.doubleToRawLongBits(x);
* long yBits = Double.doubleToRawLongBits(y);
* return xBits == yBits;
* }}</pre>
*
* <p>It is possible to write a more scalable updater, at the cost of
* giving up strict atomicity. See for example
* <a href="http://gee.cs.oswego.edu/dl/jsr166/dist/jsr166edocs/jsr166e/DoubleAdder.html">
* DoubleAdder</a>
* and
* <a href="http://gee.cs.oswego.edu/dl/jsr166/dist/jsr166edocs/jsr166e/DoubleMaxUpdater.html">
* DoubleMaxUpdater</a>.
*
* @author Doug Lea
* @author Martin Buchholz
* @since 11.0
*/
public class AtomicDouble extends Number implements java.io.Serializable {
private static final long serialVersionUID = 0L;
private transient volatile long value;
private static final AtomicLongFieldUpdater<AtomicDouble> updater =
AtomicLongFieldUpdater.newUpdater(AtomicDouble.class, "value");
/**
* Creates a new {@code AtomicDouble} with the given initial value.
*
* @param initialValue the initial value
*/
public AtomicDouble(double initialValue) {
value = doubleToRawLongBits(initialValue);
}
/**
* Creates a new {@code AtomicDouble} with initial value {@code 0.0}.
*/
public AtomicDouble() {
// assert doubleToRawLongBits(0.0) == 0L;
}
/**
* Gets the current value.
*
* @return the current value
*/
public final double get() {
return longBitsToDouble(value);
}
/**
* Sets to the given value.
*
* @param newValue the new value
*/
public final void set(double newValue) {
long next = doubleToRawLongBits(newValue);
value = next;
}
/**
* Eventually sets to the given value.
*
* @param newValue the new value
*/
public final void lazySet(double newValue) {
set(newValue);
// TODO(user): replace with code below when jdk5 support is dropped.
// long next = doubleToRawLongBits(newValue);
// updater.lazySet(this, next);
}
/**
* Atomically sets to the given value and returns the old value.
*
* @param newValue the new value
* @return the previous value
*/
public final double getAndSet(double newValue) {
long next = doubleToRawLongBits(newValue);
return longBitsToDouble(updater.getAndSet(this, next));
}
/**
* Atomically sets the value to the given updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful. False return indicates that
* the actual value was not bitwise equal to the expected value.
*/
public final boolean compareAndSet(double expect, double update) {
return updater.compareAndSet(this,
doubleToRawLongBits(expect),
doubleToRawLongBits(update));
}
/**
* Atomically sets the value to the given updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* <p>May <a
* href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
* fail spuriously</a>
* and does not provide ordering guarantees, so is only rarely an
* appropriate alternative to {@code compareAndSet}.
*
* @param expect the expected value
* @param update the new value
* @return {@code true} if successful
*/
public final boolean weakCompareAndSet(double expect, double update) {
return updater.weakCompareAndSet(this,
doubleToRawLongBits(expect),
doubleToRawLongBits(update));
}
/**
* Atomically adds the given value to the current value.
*
* @param delta the value to add
* @return the previous value
*/
public final double getAndAdd(double delta) {
while (true) {
long current = value;
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (updater.compareAndSet(this, current, next)) {
return currentVal;
}
}
}
/**
* Atomically adds the given value to the current value.
*
* @param delta the value to add
* @return the updated value
*/
public final double addAndGet(double delta) {
while (true) {
long current = value;
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (updater.compareAndSet(this, current, next)) {
return nextVal;
}
}
}
/**
* Returns the String representation of the current value.
* @return the String representation of the current value
*/
public String toString() {
return Double.toString(get());
}
/**
* Returns the value of this {@code AtomicDouble} as an {@code int}
* after a narrowing primitive conversion.
*/
public int intValue() {
return (int) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code long}
* after a narrowing primitive conversion.
*/
public long longValue() {
return (long) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code float}
* after a narrowing primitive conversion.
*/
public float floatValue() {
return (float) get();
}
/**
* Returns the value of this {@code AtomicDouble} as a {@code double}.
*/
public double doubleValue() {
return get();
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @serialData The current value is emitted (a {@code double}).
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(get());
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
set(s.readDouble());
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/AtomicDouble.java | Java | asf20 | 7,638 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Predicates.equalTo;
import static com.google.common.base.Predicates.in;
import static com.google.common.base.Predicates.instanceOf;
import static com.google.common.base.Predicates.not;
import static com.google.common.util.concurrent.Service.State.FAILED;
import static com.google.common.util.concurrent.Service.State.NEW;
import static com.google.common.util.concurrent.Service.State.RUNNING;
import static com.google.common.util.concurrent.Service.State.STARTING;
import static com.google.common.util.concurrent.Service.State.STOPPING;
import static com.google.common.util.concurrent.Service.State.TERMINATED;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import com.google.common.annotations.Beta;
import com.google.common.base.Function;
import com.google.common.base.Objects;
import com.google.common.base.Stopwatch;
import com.google.common.base.Supplier;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimaps;
import com.google.common.collect.Multiset;
import com.google.common.collect.Ordering;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Service.State;
import java.lang.ref.WeakReference;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.concurrent.GuardedBy;
import javax.annotation.concurrent.Immutable;
/**
* A manager for monitoring and controlling a set of {@linkplain Service services}. This class
* provides methods for {@linkplain #startAsync() starting}, {@linkplain #stopAsync() stopping} and
* {@linkplain #servicesByState inspecting} a collection of {@linkplain Service services}.
* Additionally, users can monitor state transitions with the {@linkplain Listener listener}
* mechanism.
*
* <p>While it is recommended that service lifecycles be managed via this class, state transitions
* initiated via other mechanisms do not impact the correctness of its methods. For example, if the
* services are started by some mechanism besides {@link #startAsync}, the listeners will be invoked
* when appropriate and {@link #awaitHealthy} will still work as expected.
*
* <p>Here is a simple example of how to use a {@code ServiceManager} to start a server.
* <pre> {@code
* class Server {
* public static void main(String[] args) {
* Set<Service> services = ...;
* ServiceManager manager = new ServiceManager(services);
* manager.addListener(new Listener() {
* public void stopped() {}
* public void healthy() {
* // Services have been initialized and are healthy, start accepting requests...
* }
* public void failure(Service service) {
* // Something failed, at this point we could log it, notify a load balancer, or take
* // some other action. For now we will just exit.
* System.exit(1);
* }
* },
* MoreExecutors.sameThreadExecutor());
*
* Runtime.getRuntime().addShutdownHook(new Thread() {
* public void run() {
* // Give the services 5 seconds to stop to ensure that we are responsive to shutdown
* // requests.
* try {
* manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
* } catch (TimeoutException timeout) {
* // stopping timed out
* }
* }
* });
* manager.startAsync(); // start all the services asynchronously
* }
* }}</pre>
*
* <p>This class uses the ServiceManager's methods to start all of its services, to respond to
* service failure and to ensure that when the JVM is shutting down all the services are stopped.
*
* @author Luke Sandberg
* @since 14.0
*/
@Beta
public final class ServiceManager {
private static final Logger logger = Logger.getLogger(ServiceManager.class.getName());
/**
* A listener for the aggregate state changes of the services that are under management. Users
* that need to listen to more fine-grained events (such as when each particular {@linkplain
* Service service} starts, or terminates), should attach {@linkplain Service.Listener service
* listeners} to each individual service.
*
* @author Luke Sandberg
* @since 15.0 (present as an interface in 14.0)
*/
  @Beta // Should come out of Beta when ServiceManager does
  public abstract static class Listener {
    /**
     * Called when the service initially becomes healthy.
     *
     * <p>This will be called at most once after all the services have entered the
     * {@linkplain State#RUNNING running} state. If any services fail during start up or
     * {@linkplain State#FAILED fail}/{@linkplain State#TERMINATED terminate} before all other
     * services have started {@linkplain State#RUNNING running} then this method will not be called.
     */
    public void healthy() {} // default is a no-op; subclasses override only what they need
    /**
     * Called when the all of the component services have reached a terminal state, either
     * {@linkplain State#TERMINATED terminated} or {@linkplain State#FAILED failed}.
     */
    public void stopped() {} // default is a no-op
    /**
     * Called when a component service has {@linkplain State#FAILED failed}.
     *
     * @param service The service that failed.
     */
    public void failure(Service service) {} // default is a no-op
  }
/**
* An encapsulation of all of the state that is accessed by the {@linkplain ServiceListener
* service listeners}. This is extracted into its own object so that {@link ServiceListener}
* could be made {@code static} and its instances can be safely constructed and added in the
* {@link ServiceManager} constructor without having to close over the partially constructed
* {@link ServiceManager} instance (i.e. avoid leaking a pointer to {@code this}).
*/
private final ServiceManagerState state;
private final ImmutableList<Service> services;
  /**
   * Constructs a new instance for managing the given services.
   *
   * <p>If {@code services} is empty, a warning is logged and a no-op placeholder service is
   * substituted internally so that the manager's listeners still fire.
   *
   * @param services The services to manage
   *
   * @throws IllegalArgumentException if not all services are {@linkplain State#NEW new} or if there
   *     are any duplicate services.
   */
  public ServiceManager(Iterable<? extends Service> services) {
    ImmutableList<Service> copy = ImmutableList.copyOf(services);
    if (copy.isEmpty()) {
      // Having no services causes the manager to behave strangely. Notably, listeners are never
      // fired. To avoid this we substitute a placeholder service.
      logger.log(Level.WARNING,
          "ServiceManager configured with no services. Is your application configured properly?",
          new EmptyServiceManagerWarning());
      copy = ImmutableList.<Service>of(new NoOpService());
    }
    this.state = new ServiceManagerState(copy);
    this.services = copy;
    // NOTE(review): listeners receive only a weak reference to the state -- presumably so a
    // registered listener does not keep the manager's state reachable; confirm against
    // ServiceListener.
    WeakReference<ServiceManagerState> stateReference =
        new WeakReference<ServiceManagerState>(state);
    for (Service service : copy) {
      // We give each listener its own SynchronizedExecutor to ensure that the state transitions
      // are run in the same order that they occur. The Service.Listener api guarantees us only
      // that the transitions are submitted to the executor in the same order that they occur, so by
      // synchronizing the executions of each listeners callbacks we can ensure that the entire
      // execution of the listener occurs in the same order as the transitions themselves.
      //
      // This is necessary to prevent transitions being played back in the wrong order due to thread
      // races to acquire the monitor in ServiceManagerState.
      service.addListener(new ServiceListener(service, stateReference), new SynchronizedExecutor());
      // We check the state after adding the listener as a way to ensure that our listener was added
      // to a NEW service.
      checkArgument(service.state() == NEW, "Can only manage NEW services, %s", service);
    }
    // We have installed all of our listeners and after this point any state transition should be
    // correct.
    this.state.markReady();
  }
  /**
   * Registers a {@link Listener} to be {@linkplain Executor#execute executed} on the given
   * executor. The listener will not have previous state changes replayed, so it is
   * suggested that listeners are added before any of the managed services are
   * {@linkplain Service#startAsync started}.
   *
   * <p>There is no guaranteed ordering of execution of listeners, but any listener added through
   * this method is guaranteed to be called whenever there is a state change.
   *
   * <p>Exceptions thrown by a listener will be propagated up to the executor. Any exception thrown
   * during {@code Executor.execute} (e.g., a {@code RejectedExecutionException} or an exception
   * thrown by {@linkplain MoreExecutors#sameThreadExecutor inline execution}) will be caught and
   * logged.
   *
   * <p> For fast, lightweight listeners that would be safe to execute in any thread, consider
   * calling {@link #addListener(Listener)}.
   *
   * @param listener the listener to run when the manager changes state
   * @param executor the executor in which the listeners callback methods will be run.
   * @throws NullPointerException if {@code listener} or {@code executor} is null
   */
  public void addListener(Listener listener, Executor executor) {
    state.addListener(listener, executor); // all listener bookkeeping lives in ServiceManagerState
  }
  /**
   * Registers a {@link Listener} to be run when this {@link ServiceManager} changes state. The
   * listener will not have previous state changes replayed, so it is suggested that listeners are
   * added before any of the managed services are {@linkplain Service#startAsync started}.
   *
   * <p>Equivalent to {@code addListener(listener, MoreExecutors.sameThreadExecutor())}.
   *
   * <p>There is no guaranteed ordering of execution of listeners, but any listener added through
   * this method is guaranteed to be called whenever there is a state change.
   *
   * <p>Exceptions thrown by a listener will be caught and logged.
   *
   * @param listener the listener to run when the manager changes state
   * @throws NullPointerException if {@code listener} is null
   */
  public void addListener(Listener listener) {
    state.addListener(listener, MoreExecutors.sameThreadExecutor());
  }
/**
* Initiates service {@linkplain Service#startAsync startup} on all the services being managed.
* It is only valid to call this method if all of the services are {@linkplain State#NEW new}.
*
* @return this
* @throws IllegalStateException if any of the Services are not {@link State#NEW new} when the
* method is called.
*/
public ServiceManager startAsync() {
for (Service service : services) {
State state = service.state();
checkState(state == NEW, "Service %s is %s, cannot start it.", service, state);
}
for (Service service : services) {
try {
service.startAsync();
} catch (IllegalStateException e) {
// This can happen if the service has already been started or stopped (e.g. by another
// service or listener). Our contract says it is safe to call this method if
// all services were NEW when it was called, and this has already been verified above, so we
// don't propagate the exception.
logger.log(Level.WARNING, "Unable to start Service " + service, e);
}
}
return this;
}
  /**
   * Waits for the {@link ServiceManager} to become {@linkplain #isHealthy() healthy}. The manager
   * will become healthy after all the component services have reached the {@linkplain State#RUNNING
   * running} state.
   *
   * @throws IllegalStateException if the service manager reaches a state from which it cannot
   *     become {@linkplain #isHealthy() healthy}.
   * @see #awaitHealthy(long, TimeUnit) for a variant that bounds the wait
   */
  public void awaitHealthy() {
    state.awaitHealthy();
  }
  /**
   * Waits for the {@link ServiceManager} to become {@linkplain #isHealthy() healthy} for no more
   * than the given time. The manager will become healthy after all the component services have
   * reached the {@linkplain State#RUNNING running} state.
   *
   * @param timeout the maximum time to wait
   * @param unit the time unit of the timeout argument
   * @throws TimeoutException if not all of the services have finished starting within the deadline
   * @throws IllegalStateException if the service manager reaches a state from which it cannot
   *     become {@linkplain #isHealthy() healthy}.
   * @see #awaitHealthy() for the unbounded variant
   */
  public void awaitHealthy(long timeout, TimeUnit unit) throws TimeoutException {
    state.awaitHealthy(timeout, unit);
  }
/**
* Initiates service {@linkplain Service#stopAsync shutdown} if necessary on all the services
* being managed.
*
* @return this
*/
public ServiceManager stopAsync() {
for (Service service : services) {
service.stopAsync();
}
return this;
}
  /**
   * Waits for the all the services to reach a terminal state. After this method returns all
   * services will either be {@linkplain Service.State#TERMINATED terminated} or {@linkplain
   * Service.State#FAILED failed}.
   *
   * @see #awaitStopped(long, TimeUnit) for a variant with a deadline
   */
  public void awaitStopped() {
    state.awaitStopped();
  }
  /**
   * Waits for the all the services to reach a terminal state for no more than the given time. After
   * this method returns all services will either be {@linkplain Service.State#TERMINATED
   * terminated} or {@linkplain Service.State#FAILED failed}.
   *
   * @param timeout the maximum time to wait
   * @param unit the time unit of the timeout argument
   * @throws TimeoutException if not all of the services have stopped within the deadline
   * @see #awaitStopped() for the unbounded variant
   */
  public void awaitStopped(long timeout, TimeUnit unit) throws TimeoutException {
    state.awaitStopped(timeout, unit);
  }
/**
* Returns true if all services are currently in the {@linkplain State#RUNNING running} state.
*
* <p>Users who want more detailed information should use the {@link #servicesByState} method to
* get detailed information about which services are not running.
*/
public boolean isHealthy() {
for (Service service : services) {
if (!service.isRunning()) {
return false;
}
}
return true;
}
  /**
   * Provides a snapshot of the current state of all the services under management.
   *
   * <p>N.B. This snapshot is guaranteed to be consistent, i.e. the set of states returned will
   * correspond to a point in time view of the services.
   *
   * @return an immutable multimap from lifecycle {@link State} to the services currently in
   *     that state
   */
  public ImmutableMultimap<State, Service> servicesByState() {
    return state.servicesByState();
  }
  /**
   * Returns the service load times. This value will only return startup times for services that
   * have finished starting.
   *
   * <p>Services that have not yet finished starting are omitted from the result.
   *
   * @return Map of services and their corresponding startup time in millis, the map entries will be
   *     ordered by startup time.
   */
  public ImmutableMap<Service, Long> startupTimes() {
    return state.startupTimes();
  }
@Override public String toString() {
return Objects.toStringHelper(ServiceManager.class)
.add("services", Collections2.filter(services, not(instanceOf(NoOpService.class))))
.toString();
}
/**
* An encapsulation of all the mutable state of the {@link ServiceManager} that needs to be
* accessed by instances of {@link ServiceListener}.
*/
  private static final class ServiceManagerState {
    // Single lock guarding all mutable state below; Guard objects derived from it implement
    // the blocking await* methods.
    final Monitor monitor = new Monitor();
    @GuardedBy("monitor")
    final SetMultimap<State, Service> servicesByState =
        Multimaps.newSetMultimap(new EnumMap<State, Collection<Service>>(State.class),
            new Supplier<Set<Service>>() {
              @Override public Set<Service> get() {
                return Sets.newLinkedHashSet();
              }
            });
    // Live view of servicesByState key counts: states.count(s) == number of services in state s.
    @GuardedBy("monitor")
    final Multiset<State> states = servicesByState.keys();
    // Identity map: services are compared by reference, not equals().
    @GuardedBy("monitor")
    final Map<Service, Stopwatch> startupTimers = Maps.newIdentityHashMap();
    /**
     * These two booleans are used to mark the state as ready to start.
     * {@link #ready}: is set by {@link #markReady} to indicate that all listeners have been
     * correctly installed
     * {@link #transitioned}: is set by {@link #transitionService} to indicate that some transition
     * has been performed.
     *
     * <p>Together, they allow us to enforce that all services have their listeners installed prior
     * to any service performing a transition, then we can fail in the ServiceManager constructor
     * rather than in a Service.Listener callback.
     */
    @GuardedBy("monitor")
    boolean ready;
    @GuardedBy("monitor")
    boolean transitioned;
    final int numberOfServices;
    /**
     * Controls how long to wait for all the services to either become healthy or reach a
     * state from which it is guaranteed that it can never become healthy.
     */
    final Monitor.Guard awaitHealthGuard = new Monitor.Guard(monitor) {
      @Override public boolean isSatisfied() {
        // All services have started or some service has terminated/failed.
        return states.count(RUNNING) == numberOfServices
            || states.contains(STOPPING)
            || states.contains(TERMINATED)
            || states.contains(FAILED);
      }
    };
    /**
     * Controls how long to wait for all services to reach a terminal state.
     */
    final Monitor.Guard stoppedGuard = new Monitor.Guard(monitor) {
      @Override public boolean isSatisfied() {
        return states.count(TERMINATED) + states.count(FAILED) == numberOfServices;
      }
    };
    /** The listeners to notify during a state transition. */
    @GuardedBy("monitor")
    final List<ListenerExecutorPair> listeners = Lists.newArrayList();
    /**
     * The queue of listeners that are waiting to be executed.
     *
     * <p>Enqueue operations should be protected by {@link #monitor} while dequeue operations are
     * not protected. Holding {@link #monitor} while enqueuing ensures that listeners in the queue
     * are in the correct order and {@link ExecutionQueue} ensures that they are executed in the
     * correct order.
     */
    @GuardedBy("monitor")
    final ExecutionQueue queuedListeners = new ExecutionQueue();
    /**
     * It is implicitly assumed that all the services are NEW and that they will all remain NEW
     * until all the Listeners are installed and {@link #markReady()} is called. It is our caller's
     * responsibility to only call {@link #markReady()} if all services were new at the time this
     * method was called and when all the listeners were installed.
     */
    ServiceManagerState(ImmutableCollection<Service> services) {
      this.numberOfServices = services.size();
      servicesByState.putAll(NEW, services);
      for (Service service : services) {
        startupTimers.put(service, Stopwatch.createUnstarted());
      }
    }
    /**
     * Marks the {@link State} as ready to receive transitions. Returns true if no transitions have
     * been observed yet.
     */
    void markReady() {
      monitor.enter();
      try {
        if (!transitioned) {
          // nothing has transitioned since construction, good.
          ready = true;
        } else {
          // This should be an extremely rare race condition.
          List<Service> servicesInBadStates = Lists.newArrayList();
          for (Service service : servicesByState().values()) {
            if (service.state() != NEW) {
              servicesInBadStates.add(service);
            }
          }
          throw new IllegalArgumentException("Services started transitioning asynchronously before "
              + "the ServiceManager was constructed: " + servicesInBadStates);
        }
      } finally {
        monitor.leave();
      }
    }
    /**
     * Registers {@code listener} to be run on {@code executor} for future state transitions.
     * Silently drops the listener if all services have already reached a terminal state, since it
     * could never be invoked.
     */
    void addListener(Listener listener, Executor executor) {
      checkNotNull(listener, "listener");
      checkNotNull(executor, "executor");
      monitor.enter();
      try {
        // no point in adding a listener that will never be called
        if (!stoppedGuard.isSatisfied()) {
          listeners.add(new ListenerExecutorPair(listener, executor));
        }
      } finally {
        monitor.leave();
      }
    }
    /**
     * Blocks until every service is RUNNING, then verifies health; throws IllegalStateException
     * (via {@link #checkHealthy}) if any service failed or stopped instead.
     */
    void awaitHealthy() {
      monitor.enterWhenUninterruptibly(awaitHealthGuard);
      try {
        checkHealthy();
      } finally {
        monitor.leave();
      }
    }
    /**
     * Timed variant of {@link #awaitHealthy()}.
     *
     * @throws TimeoutException if the services are not all running within the given time
     */
    void awaitHealthy(long timeout, TimeUnit unit) throws TimeoutException {
      monitor.enter();
      try {
        if (!monitor.waitForUninterruptibly(awaitHealthGuard, timeout, unit)) {
          throw new TimeoutException("Timeout waiting for the services to become healthy. The "
              + "following services have not started: "
              + Multimaps.filterKeys(servicesByState, in(ImmutableSet.of(NEW, STARTING))));
        }
        checkHealthy();
      } finally {
        monitor.leave();
      }
    }
    /** Blocks until every service has reached a terminal state (TERMINATED or FAILED). */
    void awaitStopped() {
      monitor.enterWhenUninterruptibly(stoppedGuard);
      monitor.leave();
    }
    /**
     * Timed variant of {@link #awaitStopped()}.
     *
     * @throws TimeoutException if some service has not stopped within the given time
     */
    void awaitStopped(long timeout, TimeUnit unit) throws TimeoutException {
      monitor.enter();
      try {
        if (!monitor.waitForUninterruptibly(stoppedGuard, timeout, unit)) {
          throw new TimeoutException("Timeout waiting for the services to stop. The following "
              + "services have not stopped: "
              + Multimaps.filterKeys(servicesByState,
                  not(in(ImmutableSet.of(TERMINATED, FAILED)))));
        }
      } finally {
        monitor.leave();
      }
    }
    /**
     * Returns an immutable snapshot of the state map, excluding the internal {@link NoOpService}
     * placeholder.
     */
    ImmutableMultimap<State, Service> servicesByState() {
      ImmutableSetMultimap.Builder<State, Service> builder = ImmutableSetMultimap.builder();
      monitor.enter();
      try {
        for (Entry<State, Service> entry : servicesByState.entries()) {
          if (!(entry.getValue() instanceof NoOpService)) {
            builder.put(entry.getKey(), entry.getValue());
          }
        }
      } finally {
        monitor.leave();
      }
      return builder.build();
    }
    /**
     * Returns startup durations in milliseconds for every service whose timer has completed,
     * ordered from fastest to slowest. Services still NEW or still starting are omitted, as is
     * the {@link NoOpService} placeholder.
     */
    ImmutableMap<Service, Long> startupTimes() {
      List<Entry<Service, Long>> loadTimes;
      monitor.enter();
      try {
        loadTimes = Lists.newArrayListWithCapacity(
            states.size() - states.count(NEW) + states.count(STARTING));
        for (Entry<Service, Stopwatch> entry : startupTimers.entrySet()) {
          Service service = entry.getKey();
          Stopwatch stopWatch = entry.getValue();
          // N.B. we check the service state in the multimap rather than via Service.state() because
          // the multimap is guaranteed to be in sync with our timers while the Service.state() is
          // not. Due to happens-before ness of the monitor this 'weirdness' will not be observable
          // by our caller.
          if (!stopWatch.isRunning() && !servicesByState.containsEntry(NEW, service)
              && !(service instanceof NoOpService)) {
            loadTimes.add(Maps.immutableEntry(service, stopWatch.elapsed(MILLISECONDS)));
          }
        }
      } finally {
        monitor.leave();
      }
      // Sort outside the lock; the entries are already immutable snapshots.
      Collections.sort(loadTimes, Ordering.<Long>natural()
          .onResultOf(new Function<Entry<Service, Long>, Long>() {
            @Override public Long apply(Map.Entry<Service, Long> input) {
              return input.getValue();
            }
          }));
      ImmutableMap.Builder<Service, Long> builder = ImmutableMap.builder();
      for (Entry<Service, Long> entry : loadTimes) {
        builder.put(entry);
      }
      return builder.build();
    }
    /**
     * Updates the state with the given service transition.
     *
     * <p>This method performs the main logic of ServiceManager in the following steps.
     * <ol>
     *      <li>Update the {@link #servicesByState()}
     *      <li>Update the {@link #startupTimers}
     *      <li>Based on the new state queue listeners to run
     *      <li>Run the listeners (outside of the lock)
     * </ol>
     */
    void transitionService(final Service service, State from, State to) {
      checkNotNull(service);
      checkArgument(from != to);
      monitor.enter();
      try {
        transitioned = true;
        if (!ready) {
          return;
        }
        // Update state.
        checkState(servicesByState.remove(from, service),
            "Service %s not at the expected location in the state map %s", service, from);
        checkState(servicesByState.put(to, service),
            "Service %s in the state map unexpectedly at %s", service, to);
        // Update the timer
        Stopwatch stopwatch = startupTimers.get(service);
        if (from == NEW) {
          stopwatch.start();
        }
        if (to.compareTo(RUNNING) >= 0 && stopwatch.isRunning()) {
          // N.B. if we miss the STARTING event then we will never record a startup time.
          stopwatch.stop();
          if (!(service instanceof NoOpService)) {
            logger.log(Level.FINE, "Started {0} in {1}.",
                new Object[] {service, stopwatch});
          }
        }
        // Queue our listeners
        // Did a service fail?
        if (to == FAILED) {
          fireFailedListeners(service);
        }
        if (states.count(RUNNING) == numberOfServices) {
          // This means that the manager is currently healthy. N.B. If other threads call isHealthy
          // they are not guaranteed to get 'true', because any service could fail right now.
          fireHealthyListeners();
        } else if (states.count(TERMINATED) + states.count(FAILED) == numberOfServices) {
          fireStoppedListeners();
          // no more listeners could possibly be called, so clear them out to save some memory.
          listeners.clear();
        }
      } finally {
        monitor.leave();
        // Run our executors outside of the lock
        executeListeners();
      }
    }
    /** Enqueues a {@code stopped()} callback for every registered listener. */
    @GuardedBy("monitor")
    void fireStoppedListeners() {
      for (final ListenerExecutorPair pair : listeners) {
        queuedListeners.add(new Runnable() {
          @Override public void run() {
            pair.listener.stopped();
          }
        }, pair.executor);
      }
    }
    /** Enqueues a {@code healthy()} callback for every registered listener. */
    @GuardedBy("monitor")
    void fireHealthyListeners() {
      for (final ListenerExecutorPair pair : listeners) {
        queuedListeners.add(new Runnable() {
          @Override public void run() {
            pair.listener.healthy();
          }
        }, pair.executor);
      }
    }
    /** Enqueues a {@code failure(service)} callback for every registered listener. */
    @GuardedBy("monitor")
    void fireFailedListeners(final Service service) {
      for (final ListenerExecutorPair pair : listeners) {
        queuedListeners.add(new Runnable() {
          @Override public void run() {
            pair.listener.failure(service);
          }
        }, pair.executor);
      }
    }
    /** Attempts to execute all the listeners in {@link #queuedListeners}. */
    void executeListeners() {
      checkState(!monitor.isOccupiedByCurrentThread(),
          "It is incorrect to execute listeners with the monitor held.");
      queuedListeners.execute();
    }
    /** Throws if any service is not RUNNING; must be called with the monitor held. */
    @GuardedBy("monitor")
    void checkHealthy() {
      if (states.count(RUNNING) != numberOfServices) {
        throw new IllegalStateException("Expected to be healthy after starting. "
            + "The following services are not running: " +
            Multimaps.filterKeys(servicesByState, not(equalTo(RUNNING))));
      }
    }
  }
  /**
   * A {@link Service.Listener} that watches a single service, times how long it takes to start,
   * and calls {@link ServiceManagerState#transitionService(Service, State, State)} to record each
   * state transition.
   */
private static final class ServiceListener extends Service.Listener {
final Service service;
// We store the state in a weak reference to ensure that if something went wrong while
// constructing the ServiceManager we don't pointlessly keep updating the state.
final WeakReference<ServiceManagerState> state;
ServiceListener(Service service, WeakReference<ServiceManagerState> state) {
this.service = service;
this.state = state;
}
@Override public void starting() {
ServiceManagerState state = this.state.get();
if (state != null) {
state.transitionService(service, NEW, STARTING);
if (!(service instanceof NoOpService)) {
logger.log(Level.FINE, "Starting {0}.", service);
}
}
}
@Override public void running() {
ServiceManagerState state = this.state.get();
if (state != null) {
state.transitionService(service, STARTING, RUNNING);
}
}
@Override public void stopping(State from) {
ServiceManagerState state = this.state.get();
if (state != null) {
state.transitionService(service, from, STOPPING);
}
}
@Override public void terminated(State from) {
ServiceManagerState state = this.state.get();
if (state != null) {
if (!(service instanceof NoOpService)) {
logger.log(Level.FINE, "Service {0} has terminated. Previous state was: {1}",
new Object[] {service, from});
}
state.transitionService(service, from, TERMINATED);
}
}
@Override public void failed(State from, Throwable failure) {
ServiceManagerState state = this.state.get();
if (state != null) {
// Log before the transition, so that if the process exits in response to server failure,
// there is a higher likelihood that the cause will be in the logs.
if (!(service instanceof NoOpService)) {
logger.log(Level.SEVERE, "Service " + service + " has failed in the " + from + " state.",
failure);
}
state.transitionService(service, from, FAILED);
}
}
}
  /** Simple value object binding a listener to its executor. */
  @Immutable private static final class ListenerExecutorPair {
    final Listener listener;  // the callback to notify
    final Executor executor;  // the executor the callback will be submitted to
    ListenerExecutorPair(Listener listener, Executor executor) {
      this.listener = listener;
      this.executor = executor;
    }
  }
  /**
   * A {@link Service} instance that does nothing. This is only useful as a placeholder to
   * ensure that the {@link ServiceManager} functions properly even when it is managing no services.
   *
   * <p>The use of this class is considered an implementation detail of ServiceManager and as such
   * it is excluded from {@link #servicesByState}, {@link #startupTimes}, {@link #toString} and all
   * logging statements.
   */
  private static final class NoOpService extends AbstractService {
    // Complete each transition immediately; there is no real work to do.
    @Override protected void doStart() { notifyStarted(); }
    @Override protected void doStop() { notifyStopped(); }
  }
  /**
   * This is never thrown but only used for logging.
   * (Presumably passed to a log call so the record carries a stack trace identifying where the
   * empty ServiceManager was created — confirm at the logging call site outside this class.)
   */
  private static final class EmptyServiceManagerWarning extends Throwable {}
/**
* A same-thread executor that executes all the runnables from within a synchronized block.
*
* <p>This ensures that tasks submitted to the executor run in the same order that they were
* submitted.
*/
private static final class SynchronizedExecutor implements Executor {
@Override public synchronized void execute(Runnable command) {
command.run();
}
}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/ServiceManager.java | Java | asf20 | 32,693 |
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import java.util.concurrent.ScheduledFuture;
/**
 * Helper interface to implement both {@link ListenableFuture} and
 * {@link ScheduledFuture}.
 *
 * <p>Combines the delay/ordering contract of {@code ScheduledFuture} with the completion-listener
 * support of {@code ListenableFuture}; it adds no methods of its own.
 *
 * @author Anthony Zana
 *
 * @since 15.0
 */
@Beta
public interface ListenableScheduledFuture<V>
    extends ScheduledFuture<V>, ListenableFuture<V> {}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/ListenableScheduledFuture.java | Java | asf20 | 991 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.Beta;
import com.google.common.collect.ObjectArrays;
import com.google.common.collect.Sets;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* A TimeLimiter that runs method calls in the background using an
* {@link ExecutorService}. If the time limit expires for a given method call,
* the thread running the call will be interrupted.
*
* @author Kevin Bourrillion
* @since 1.0
*/
@Beta
public final class SimpleTimeLimiter implements TimeLimiter {
  // Executor used to run proxied/limited calls in the background so this thread can time out.
  private final ExecutorService executor;
  /**
   * Constructs a TimeLimiter instance using the given executor service to
   * execute proxied method calls.
   * <p>
   * <b>Warning:</b> using a bounded executor
   * may be counterproductive! If the thread pool fills up, any time callers
   * spend waiting for a thread may count toward their time limit, and in
   * this case the call may even time out before the target method is ever
   * invoked.
   *
   * @param executor the ExecutorService that will execute the method calls on
   *     the target objects; for example, a {@link
   *     Executors#newCachedThreadPool()}.
   */
  public SimpleTimeLimiter(ExecutorService executor) {
    this.executor = checkNotNull(executor);
  }
  /**
   * Constructs a TimeLimiter instance using a {@link
   * Executors#newCachedThreadPool()} to execute proxied method calls.
   *
   * <p><b>Warning:</b> using a bounded executor may be counterproductive! If
   * the thread pool fills up, any time callers spend waiting for a thread may
   * count toward their time limit, and in this case the call may even time out
   * before the target method is ever invoked.
   */
  public SimpleTimeLimiter() {
    this(Executors.newCachedThreadPool());
  }
  @Override
  public <T> T newProxy(final T target, Class<T> interfaceType,
      final long timeoutDuration, final TimeUnit timeoutUnit) {
    checkNotNull(target);
    checkNotNull(interfaceType);
    checkNotNull(timeoutUnit);
    checkArgument(timeoutDuration > 0, "bad timeout: %s", timeoutDuration);
    checkArgument(interfaceType.isInterface(),
        "interfaceType must be an interface type");
    // Methods declaring InterruptedException get their worker thread interrupted on timeout.
    final Set<Method> interruptibleMethods
        = findInterruptibleMethods(interfaceType);
    InvocationHandler handler = new InvocationHandler() {
      @Override
      public Object invoke(Object obj, final Method method, final Object[] args)
          throws Throwable {
        Callable<Object> callable = new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            try {
              return method.invoke(target, args);
            } catch (InvocationTargetException e) {
              // Unwrap the target method's own exception; this runs on the worker thread,
              // so stack traces are not combined (combineStackTraces == false).
              throwCause(e, false);
              throw new AssertionError("can't get here");
            }
          }
        };
        return callWithTimeout(callable, timeoutDuration, timeoutUnit,
            interruptibleMethods.contains(method));
      }
    };
    return newProxy(interfaceType, handler);
  }
  // TODO: should this actually throw only ExecutionException?
  @Override
  public <T> T callWithTimeout(Callable<T> callable, long timeoutDuration,
      TimeUnit timeoutUnit, boolean amInterruptible) throws Exception {
    checkNotNull(callable);
    checkNotNull(timeoutUnit);
    checkArgument(timeoutDuration > 0, "timeout must be positive: %s",
        timeoutDuration);
    Future<T> future = executor.submit(callable);
    try {
      if (amInterruptible) {
        try {
          return future.get(timeoutDuration, timeoutUnit);
        } catch (InterruptedException e) {
          // Caller was interrupted while waiting: cancel the background work and propagate.
          future.cancel(true);
          throw e;
        }
      } else {
        return Uninterruptibles.getUninterruptibly(future,
            timeoutDuration, timeoutUnit);
      }
    } catch (ExecutionException e) {
      // The task threw: rethrow its cause, stitching the worker's stack trace onto ours.
      throw throwCause(e, true);
    } catch (TimeoutException e) {
      future.cancel(true);
      throw new UncheckedTimeoutException(e);
    }
  }
  /**
   * Rethrows the cause of {@code e}, optionally appending the outer exception's stack trace to
   * the cause's so callers see both the worker-side and caller-side frames. Declared to return
   * {@code Exception} only so call sites can write {@code throw throwCause(...)}; it never
   * actually returns.
   */
  private static Exception throwCause(Exception e, boolean combineStackTraces)
      throws Exception {
    Throwable cause = e.getCause();
    if (cause == null) {
      throw e;
    }
    if (combineStackTraces) {
      StackTraceElement[] combined = ObjectArrays.concat(cause.getStackTrace(),
          e.getStackTrace(), StackTraceElement.class);
      cause.setStackTrace(combined);
    }
    if (cause instanceof Exception) {
      throw (Exception) cause;
    }
    if (cause instanceof Error) {
      throw (Error) cause;
    }
    // The cause is a weird kind of Throwable, so throw the outer exception.
    throw e;
  }
  /** Returns the public methods of {@code interfaceType} that declare InterruptedException. */
  private static Set<Method> findInterruptibleMethods(Class<?> interfaceType) {
    Set<Method> set = Sets.newHashSet();
    for (Method m : interfaceType.getMethods()) {
      if (declaresInterruptedEx(m)) {
        set.add(m);
      }
    }
    return set;
  }
  /** Returns true if {@code method} declares {@code throws InterruptedException} exactly. */
  private static boolean declaresInterruptedEx(Method method) {
    for (Class<?> exType : method.getExceptionTypes()) {
      // debate: == or isAssignableFrom?
      if (exType == InterruptedException.class) {
        return true;
      }
    }
    return false;
  }
  // TODO: replace with version in common.reflect if and when it's open-sourced
  private static <T> T newProxy(
      Class<T> interfaceType, InvocationHandler handler) {
    Object object = Proxy.newProxyInstance(interfaceType.getClassLoader(),
        new Class<?>[] { interfaceType }, handler);
    return interfaceType.cast(object);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java | Java | asf20 | 6,674 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.base.Supplier;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
/**
* Static utility methods pertaining to the {@link Callable} interface.
*
* @author Isaac Shum
* @since 1.0
*/
public final class Callables {
  private Callables() {}
  /**
   * Creates a {@code Callable} which immediately returns a preset value each
   * time it is called.
   */
  public static <T> Callable<T> returning(final @Nullable T value) {
    return new Callable<T>() {
      @Override public T call() {
        return value;
      }
    };
  }
  /**
   * Wraps the given callable such that for the duration of {@link Callable#call} the thread that is
   * running will have the given name.
   *
   * @param callable The callable to wrap
   * @param nameSupplier The supplier of thread names, {@link Supplier#get get} will be called once
   *     for each invocation of the wrapped callable.
   */
  static <T> Callable<T> threadRenaming(final Callable<T> callable,
      final Supplier<String> nameSupplier) {
    checkNotNull(nameSupplier);
    checkNotNull(callable);
    return new Callable<T>() {
      @Override public T call() throws Exception {
        Thread thread = Thread.currentThread();
        String previousName = thread.getName();
        boolean renamed = trySetName(nameSupplier.get(), thread);
        try {
          return callable.call();
        } finally {
          // Only restore the name if we managed to change it in the first place.
          if (renamed) {
            trySetName(previousName, thread);
          }
        }
      }
    };
  }
  /**
   * Wraps the given runnable such that for the duration of {@link Runnable#run} the thread that is
   * running will have the given name.
   *
   * @param task The Runnable to wrap
   * @param nameSupplier The supplier of thread names, {@link Supplier#get get} will be called once
   *     for each invocation of the wrapped callable.
   */
  static Runnable threadRenaming(final Runnable task, final Supplier<String> nameSupplier) {
    checkNotNull(nameSupplier);
    checkNotNull(task);
    return new Runnable() {
      @Override public void run() {
        Thread thread = Thread.currentThread();
        String previousName = thread.getName();
        boolean renamed = trySetName(nameSupplier.get(), thread);
        try {
          task.run();
        } finally {
          // Only restore the name if we managed to change it in the first place.
          if (renamed) {
            trySetName(previousName, thread);
          }
        }
      }
    };
  }
  /** Tries to set name of the given {@link Thread}, returns true if successful. */
  private static boolean trySetName(final String newName, Thread thread) {
    // In AppEngine this will always fail, should we test for that explicitly using
    // MoreExecutors.isAppEngine. More generally, is there a way to see if we have the modifyThread
    // permission without catching an exception?
    try {
      thread.setName(newName);
      return true;
    } catch (SecurityException e) {
      return false;
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/Callables.java | Java | asf20 | 3,729 |
/*
* Copyright (C) 2006 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.util.concurrent.MoreExecutors.sameThreadExecutor;
import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
import static java.lang.Thread.currentThread;
import static java.util.Arrays.asList;
import com.google.common.annotations.Beta;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.collect.Queues;
import com.google.common.collect.Sets;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/**
* Static utility methods pertaining to the {@link Future} interface.
*
* <p>Many of these methods use the {@link ListenableFuture} API; consult the
* Guava User Guide article on <a href=
* "http://code.google.com/p/guava-libraries/wiki/ListenableFutureExplained">
* {@code ListenableFuture}</a>.
*
* @author Kevin Bourrillion
* @author Nishant Thakkar
* @author Sven Mawson
* @since 1.0
*/
@Beta
public final class Futures {
  private Futures() {}  // static utility class; not instantiable
  /**
   * Creates a {@link CheckedFuture} out of a normal {@link ListenableFuture}
   * and a {@link Function} that maps from {@link Exception} instances into the
   * appropriate checked type.
   *
   * <p>The given mapping function will be applied to an
   * {@link InterruptedException}, a {@link CancellationException}, or an
   * {@link ExecutionException}.
   * See {@link Future#get()} for details on the exceptions thrown.
   *
   * @since 9.0 (source-compatible since 1.0)
   */
  public static <V, X extends Exception> CheckedFuture<V, X> makeChecked(
      ListenableFuture<V> future, Function<Exception, X> mapper) {
    // The wrapper delegates all Future/ListenableFuture calls to the original future.
    return new MappingCheckedFuture<V, X>(checkNotNull(future), mapper);
  }
  /**
   * Base class for the immediate-result futures below: already done at construction, never
   * cancellable, and the {@code get()} variants return (or throw) without blocking.
   */
  private abstract static class ImmediateFuture<V>
      implements ListenableFuture<V> {
    private static final Logger log =
        Logger.getLogger(ImmediateFuture.class.getName());
    @Override
    public void addListener(Runnable listener, Executor executor) {
      checkNotNull(listener, "Runnable was null.");
      checkNotNull(executor, "Executor was null.");
      try {
        // The future is already complete, so run the listener right away.
        executor.execute(listener);
      } catch (RuntimeException e) {
        // ListenableFuture's contract is that it will not throw unchecked
        // exceptions, so log the bad runnable and/or executor and swallow it.
        log.log(Level.SEVERE, "RuntimeException while executing runnable "
            + listener + " with executor " + executor, e);
      }
    }
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      return false;
    }
    @Override
    public abstract V get() throws ExecutionException;
    @Override
    public V get(long timeout, TimeUnit unit) throws ExecutionException {
      // Timeout is irrelevant: the result is already available.
      checkNotNull(unit);
      return get();
    }
    @Override
    public boolean isCancelled() {
      return false;
    }
    @Override
    public boolean isDone() {
      return true;
    }
  }
  /** An already-successful future holding a fixed (possibly null) value. */
  private static class ImmediateSuccessfulFuture<V> extends ImmediateFuture<V> {
    @Nullable private final V value;
    ImmediateSuccessfulFuture(@Nullable V value) {
      this.value = value;
    }
    @Override
    public V get() {
      return value;
    }
  }
  /** Checked-future variant of {@link ImmediateSuccessfulFuture}; checkedGet also just returns. */
  private static class ImmediateSuccessfulCheckedFuture<V, X extends Exception>
      extends ImmediateFuture<V> implements CheckedFuture<V, X> {
    @Nullable private final V value;
    ImmediateSuccessfulCheckedFuture(@Nullable V value) {
      this.value = value;
    }
    @Override
    public V get() {
      return value;
    }
    @Override
    public V checkedGet() {
      return value;
    }
    @Override
    public V checkedGet(long timeout, TimeUnit unit) {
      checkNotNull(unit);
      return value;
    }
  }
  /** An already-failed future: get() always throws the stored cause wrapped appropriately. */
  private static class ImmediateFailedFuture<V> extends ImmediateFuture<V> {
    private final Throwable thrown;
    ImmediateFailedFuture(Throwable thrown) {
      this.thrown = thrown;
    }
    @Override
    public V get() throws ExecutionException {
      return value;  }  // unreachable placeholder removed below
  }
  /** An already-cancelled future: isCancelled() is true and get() throws CancellationException. */
  private static class ImmediateCancelledFuture<V> extends ImmediateFuture<V> {
    private final CancellationException thrown;
    ImmediateCancelledFuture() {
      this.thrown = new CancellationException("Immediate cancelled future.");
    }
    @Override
    public boolean isCancelled() {
      return true;
    }
    @Override
    public V get() {
      // Wraps the stored exception so callers see this cancellation's origin as the cause.
      throw AbstractFuture.cancellationExceptionWithCause(
          "Task was cancelled.", thrown);
    }
  }
  /**
   * Checked-future variant of an already-failed future: get() wraps the exception in an
   * ExecutionException, while checkedGet() throws the stored exception directly.
   */
  private static class ImmediateFailedCheckedFuture<V, X extends Exception>
      extends ImmediateFuture<V> implements CheckedFuture<V, X> {
    private final X thrown;
    ImmediateFailedCheckedFuture(X thrown) {
      this.thrown = thrown;
    }
    @Override
    public V get() throws ExecutionException {
      throw new ExecutionException(thrown);
    }
    @Override
    public V checkedGet() throws X {
      throw thrown;
    }
    @Override
    public V checkedGet(long timeout, TimeUnit unit) throws X {
      checkNotNull(unit);
      throw thrown;
    }
  }
  /**
   * Creates a {@code ListenableFuture} which has its value set immediately upon
   * construction. The getters just return the value. This {@code Future} can't
   * be canceled or timed out and its {@code isDone()} method always returns
   * {@code true}.
   */
  public static <V> ListenableFuture<V> immediateFuture(@Nullable V value) {
    return new ImmediateSuccessfulFuture<V>(value);
  }
  /**
   * Returns a {@code CheckedFuture} which has its value set immediately upon
   * construction.
   *
   * <p>The returned {@code Future} can't be cancelled, and its {@code isDone()}
   * method always returns {@code true}. Calling {@code get()} or {@code
   * checkedGet()} will immediately return the provided value.
   */
  public static <V, X extends Exception> CheckedFuture<V, X>
      immediateCheckedFuture(@Nullable V value) {
    return new ImmediateSuccessfulCheckedFuture<V, X>(value);
  }
  /**
   * Returns a {@code ListenableFuture} which has an exception set immediately
   * upon construction.
   *
   * <p>The returned {@code Future} can't be cancelled, and its {@code isDone()}
   * method always returns {@code true}. Calling {@code get()} will immediately
   * throw the provided {@code Throwable} wrapped in an {@code
   * ExecutionException}.
   */
  public static <V> ListenableFuture<V> immediateFailedFuture(
      Throwable throwable) {
    checkNotNull(throwable);
    return new ImmediateFailedFuture<V>(throwable);
  }
  /**
   * Creates a {@code ListenableFuture} which is cancelled immediately upon
   * construction, so that {@code isCancelled()} always returns {@code true}.
   *
   * @since 14.0
   */
  public static <V> ListenableFuture<V> immediateCancelledFuture() {
    return new ImmediateCancelledFuture<V>();
  }
  /**
   * Returns a {@code CheckedFuture} which has an exception set immediately upon
   * construction.
   *
   * <p>The returned {@code Future} can't be cancelled, and its {@code isDone()}
   * method always returns {@code true}. Calling {@code get()} will immediately
   * throw the provided {@code Exception} wrapped in an {@code
   * ExecutionException}, and calling {@code checkedGet()} will throw the
   * provided exception itself.
   */
  public static <V, X extends Exception> CheckedFuture<V, X>
      immediateFailedCheckedFuture(X exception) {
    checkNotNull(exception);
    return new ImmediateFailedCheckedFuture<V, X>(exception);
  }
/**
* Returns a {@code Future} whose result is taken from the given primary
* {@code input} or, if the primary input fails, from the {@code Future}
* provided by the {@code fallback}. {@link FutureFallback#create} is not
* invoked until the primary input has failed, so if the primary input
* succeeds, it is never invoked. If, during the invocation of {@code
* fallback}, an exception is thrown, this exception is used as the result of
* the output {@code Future}.
*
* <p>Below is an example of a fallback that returns a default value if an
* exception occurs:
*
* <pre> {@code
* ListenableFuture<Integer> fetchCounterFuture = ...;
*
* // Falling back to a zero counter in case an exception happens when
* // processing the RPC to fetch counters.
* ListenableFuture<Integer> faultTolerantFuture = Futures.withFallback(
* fetchCounterFuture, new FutureFallback<Integer>() {
* public ListenableFuture<Integer> create(Throwable t) {
* // Returning "0" as the default for the counter when the
* // exception happens.
* return immediateFuture(0);
* }
* });}</pre>
*
* <p>The fallback can also choose to propagate the original exception when
* desired:
*
* <pre> {@code
* ListenableFuture<Integer> fetchCounterFuture = ...;
*
* // Falling back to a zero counter only in case the exception was a
* // TimeoutException.
* ListenableFuture<Integer> faultTolerantFuture = Futures.withFallback(
* fetchCounterFuture, new FutureFallback<Integer>() {
* public ListenableFuture<Integer> create(Throwable t) {
* if (t instanceof TimeoutException) {
* return immediateFuture(0);
* }
* return immediateFailedFuture(t);
* }
* });}</pre>
*
* <p>Note: If the derived {@code Future} is slow or heavyweight to create
* (whether the {@code Future} itself is slow or heavyweight to complete is
* irrelevant), consider {@linkplain #withFallback(ListenableFuture,
* FutureFallback, Executor) supplying an executor}. If you do not supply an
* executor, {@code withFallback} will use {@link
* MoreExecutors#sameThreadExecutor sameThreadExecutor}, which carries some
* caveats for heavier operations. For example, the call to {@code
* fallback.create} may run on an unpredictable or undesirable thread:
*
* <ul>
* <li>If the input {@code Future} is done at the time {@code withFallback}
* is called, {@code withFallback} will call {@code fallback.create} inline.
* <li>If the input {@code Future} is not yet done, {@code withFallback} will
* schedule {@code fallback.create} to be run by the thread that completes
* the input {@code Future}, which may be an internal system thread such as
* an RPC network thread.
* </ul>
*
* <p>Also note that, regardless of which thread executes the {@code
* sameThreadExecutor} {@code fallback.create}, all other registered but
* unexecuted listeners are prevented from running during its execution, even
* if those listeners are to run in other executors.
*
* @param input the primary input {@code Future}
* @param fallback the {@link FutureFallback} implementation to be called if
* {@code input} fails
* @since 14.0
*/
public static <V> ListenableFuture<V> withFallback(
ListenableFuture<? extends V> input,
FutureFallback<? extends V> fallback) {
return withFallback(input, fallback, sameThreadExecutor());
}
/**
* Returns a {@code Future} whose result is taken from the given primary
* {@code input} or, if the primary input fails, from the {@code Future}
* provided by the {@code fallback}. {@link FutureFallback#create} is not
* invoked until the primary input has failed, so if the primary input
* succeeds, it is never invoked. If, during the invocation of {@code
* fallback}, an exception is thrown, this exception is used as the result of
* the output {@code Future}.
*
* <p>Below is an example of a fallback that returns a default value if an
* exception occurs:
*
* <pre> {@code
* ListenableFuture<Integer> fetchCounterFuture = ...;
*
* // Falling back to a zero counter in case an exception happens when
* // processing the RPC to fetch counters.
* ListenableFuture<Integer> faultTolerantFuture = Futures.withFallback(
* fetchCounterFuture, new FutureFallback<Integer>() {
* public ListenableFuture<Integer> create(Throwable t) {
* // Returning "0" as the default for the counter when the
* // exception happens.
* return immediateFuture(0);
* }
* }, sameThreadExecutor());}</pre>
*
* <p>The fallback can also choose to propagate the original exception when
* desired:
*
* <pre> {@code
* ListenableFuture<Integer> fetchCounterFuture = ...;
*
* // Falling back to a zero counter only in case the exception was a
* // TimeoutException.
* ListenableFuture<Integer> faultTolerantFuture = Futures.withFallback(
* fetchCounterFuture, new FutureFallback<Integer>() {
* public ListenableFuture<Integer> create(Throwable t) {
* if (t instanceof TimeoutException) {
* return immediateFuture(0);
* }
* return immediateFailedFuture(t);
* }
* }, sameThreadExecutor());}</pre>
*
* <p>When the execution of {@code fallback.create} is fast and lightweight
* (though the {@code Future} it returns need not meet these criteria),
* consider {@linkplain #withFallback(ListenableFuture, FutureFallback)
* omitting the executor} or explicitly specifying {@code
* sameThreadExecutor}. However, be aware of the caveats documented in the
* link above.
*
* @param input the primary input {@code Future}
* @param fallback the {@link FutureFallback} implementation to be called if
* {@code input} fails
* @param executor the executor that runs {@code fallback} if {@code input}
* fails
* @since 14.0
*/
public static <V> ListenableFuture<V> withFallback(
ListenableFuture<? extends V> input,
FutureFallback<? extends V> fallback, Executor executor) {
checkNotNull(fallback);
return new FallbackFuture<V>(input, fallback, executor);
}
/**
* A future that falls back on a second, generated future, in case its
* original future fails.
*/
  private static class FallbackFuture<V> extends AbstractFuture<V> {
    // The future that currently determines this future's outcome: initially
    // the primary input, later (on failure) the future produced by the
    // fallback. Volatile: written by whichever thread runs the failure
    // callback and read by cancel(), which may run on a different thread.
    private volatile ListenableFuture<? extends V> running;
    FallbackFuture(ListenableFuture<? extends V> input,
        final FutureFallback<? extends V> fallback,
        final Executor executor) {
      running = input;
      addCallback(running, new FutureCallback<V>() {
        @Override
        public void onSuccess(V value) {
          // Primary input succeeded: its value becomes our value.
          set(value);
        }
        @Override
        public void onFailure(Throwable t) {
          // If we were cancelled while the input was failing, there is
          // nothing left to do; do not invoke the fallback at all.
          if (isCancelled()) {
            return;
          }
          try {
            running = fallback.create(t);
            if (isCancelled()) { // in case cancel called in the meantime
              running.cancel(wasInterrupted());
              return;
            }
            // Chain a second callback so the fallback future's outcome
            // (success, failure, or cancellation) is mirrored into this one.
            addCallback(running, new FutureCallback<V>() {
              @Override
              public void onSuccess(V value) {
                set(value);
              }
              @Override
              public void onFailure(Throwable t) {
                if (running.isCancelled()) {
                  // The fallback future was cancelled externally; reflect
                  // that as cancellation of this future too.
                  cancel(false);
                } else {
                  setException(t);
                }
              }
            }, sameThreadExecutor());
          } catch (Throwable e) {
            // Per the withFallback contract: an exception thrown by
            // fallback.create becomes this future's failure.
            setException(e);
          }
        }
      }, executor);
    }
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      if (super.cancel(mayInterruptIfRunning)) {
        // Propagate cancellation to whichever future is currently feeding us.
        running.cancel(mayInterruptIfRunning);
        return true;
      }
      return false;
    }
  }
/**
* Returns a new {@code ListenableFuture} whose result is asynchronously
* derived from the result of the given {@code Future}. More precisely, the
* returned {@code Future} takes its result from a {@code Future} produced by
* applying the given {@code AsyncFunction} to the result of the original
* {@code Future}. Example:
*
* <pre> {@code
* ListenableFuture<RowKey> rowKeyFuture = indexService.lookUp(query);
* AsyncFunction<RowKey, QueryResult> queryFunction =
* new AsyncFunction<RowKey, QueryResult>() {
* public ListenableFuture<QueryResult> apply(RowKey rowKey) {
* return dataService.read(rowKey);
* }
* };
* ListenableFuture<QueryResult> queryFuture =
* transform(rowKeyFuture, queryFunction);}</pre>
*
* <p>Note: If the derived {@code Future} is slow or heavyweight to create
* (whether the {@code Future} itself is slow or heavyweight to complete is
* irrelevant), consider {@linkplain #transform(ListenableFuture,
* AsyncFunction, Executor) supplying an executor}. If you do not supply an
* executor, {@code transform} will use {@link
* MoreExecutors#sameThreadExecutor sameThreadExecutor}, which carries some
* caveats for heavier operations. For example, the call to {@code
* function.apply} may run on an unpredictable or undesirable thread:
*
* <ul>
* <li>If the input {@code Future} is done at the time {@code transform} is
* called, {@code transform} will call {@code function.apply} inline.
* <li>If the input {@code Future} is not yet done, {@code transform} will
* schedule {@code function.apply} to be run by the thread that completes the
* input {@code Future}, which may be an internal system thread such as an
* RPC network thread.
* </ul>
*
* <p>Also note that, regardless of which thread executes the {@code
* sameThreadExecutor} {@code function.apply}, all other registered but
* unexecuted listeners are prevented from running during its execution, even
* if those listeners are to run in other executors.
*
* <p>The returned {@code Future} attempts to keep its cancellation state in
* sync with that of the input future and that of the future returned by the
* function. That is, if the returned {@code Future} is cancelled, it will
* attempt to cancel the other two, and if either of the other two is
* cancelled, the returned {@code Future} will receive a callback in which it
* will attempt to cancel itself.
*
* @param input The future to transform
* @param function A function to transform the result of the input future
* to the result of the output future
* @return A future that holds result of the function (if the input succeeded)
* or the original input's failure (if not)
* @since 11.0
*/
public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
AsyncFunction<? super I, ? extends O> function) {
return transform(input, function, MoreExecutors.sameThreadExecutor());
}
/**
* Returns a new {@code ListenableFuture} whose result is asynchronously
* derived from the result of the given {@code Future}. More precisely, the
* returned {@code Future} takes its result from a {@code Future} produced by
* applying the given {@code AsyncFunction} to the result of the original
* {@code Future}. Example:
*
* <pre> {@code
* ListenableFuture<RowKey> rowKeyFuture = indexService.lookUp(query);
* AsyncFunction<RowKey, QueryResult> queryFunction =
* new AsyncFunction<RowKey, QueryResult>() {
* public ListenableFuture<QueryResult> apply(RowKey rowKey) {
* return dataService.read(rowKey);
* }
* };
* ListenableFuture<QueryResult> queryFuture =
* transform(rowKeyFuture, queryFunction, executor);}</pre>
*
* <p>The returned {@code Future} attempts to keep its cancellation state in
* sync with that of the input future and that of the future returned by the
* chain function. That is, if the returned {@code Future} is cancelled, it
* will attempt to cancel the other two, and if either of the other two is
* cancelled, the returned {@code Future} will receive a callback in which it
* will attempt to cancel itself.
*
* <p>When the execution of {@code function.apply} is fast and lightweight
* (though the {@code Future} it returns need not meet these criteria),
* consider {@linkplain #transform(ListenableFuture, AsyncFunction) omitting
* the executor} or explicitly specifying {@code sameThreadExecutor}.
* However, be aware of the caveats documented in the link above.
*
* @param input The future to transform
* @param function A function to transform the result of the input future
* to the result of the output future
* @param executor Executor to run the function in.
* @return A future that holds result of the function (if the input succeeded)
* or the original input's failure (if not)
* @since 11.0
*/
public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
AsyncFunction<? super I, ? extends O> function,
Executor executor) {
ChainingListenableFuture<I, O> output =
new ChainingListenableFuture<I, O>(function, input);
input.addListener(output, executor);
return output;
}
/**
* Returns a new {@code ListenableFuture} whose result is the product of
* applying the given {@code Function} to the result of the given {@code
* Future}. Example:
*
* <pre> {@code
* ListenableFuture<QueryResult> queryFuture = ...;
* Function<QueryResult, List<Row>> rowsFunction =
* new Function<QueryResult, List<Row>>() {
* public List<Row> apply(QueryResult queryResult) {
* return queryResult.getRows();
* }
* };
* ListenableFuture<List<Row>> rowsFuture =
* transform(queryFuture, rowsFunction);}</pre>
*
* <p>Note: If the transformation is slow or heavyweight, consider {@linkplain
* #transform(ListenableFuture, Function, Executor) supplying an executor}.
* If you do not supply an executor, {@code transform} will use {@link
* MoreExecutors#sameThreadExecutor sameThreadExecutor}, which carries some
* caveats for heavier operations. For example, the call to {@code
* function.apply} may run on an unpredictable or undesirable thread:
*
* <ul>
* <li>If the input {@code Future} is done at the time {@code transform} is
* called, {@code transform} will call {@code function.apply} inline.
* <li>If the input {@code Future} is not yet done, {@code transform} will
* schedule {@code function.apply} to be run by the thread that completes the
* input {@code Future}, which may be an internal system thread such as an
* RPC network thread.
* </ul>
*
* <p>Also note that, regardless of which thread executes the {@code
* sameThreadExecutor} {@code function.apply}, all other registered but
* unexecuted listeners are prevented from running during its execution, even
* if those listeners are to run in other executors.
*
* <p>The returned {@code Future} attempts to keep its cancellation state in
* sync with that of the input future. That is, if the returned {@code Future}
* is cancelled, it will attempt to cancel the input, and if the input is
* cancelled, the returned {@code Future} will receive a callback in which it
* will attempt to cancel itself.
*
* <p>An example use of this method is to convert a serializable object
* returned from an RPC into a POJO.
*
* @param input The future to transform
* @param function A Function to transform the results of the provided future
* to the results of the returned future. This will be run in the thread
* that notifies input it is complete.
* @return A future that holds result of the transformation.
* @since 9.0 (in 1.0 as {@code compose})
*/
public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
final Function<? super I, ? extends O> function) {
return transform(input, function, MoreExecutors.sameThreadExecutor());
}
/**
* Returns a new {@code ListenableFuture} whose result is the product of
* applying the given {@code Function} to the result of the given {@code
* Future}. Example:
*
* <pre> {@code
* ListenableFuture<QueryResult> queryFuture = ...;
* Function<QueryResult, List<Row>> rowsFunction =
* new Function<QueryResult, List<Row>>() {
* public List<Row> apply(QueryResult queryResult) {
* return queryResult.getRows();
* }
* };
* ListenableFuture<List<Row>> rowsFuture =
* transform(queryFuture, rowsFunction, executor);}</pre>
*
* <p>The returned {@code Future} attempts to keep its cancellation state in
* sync with that of the input future. That is, if the returned {@code Future}
* is cancelled, it will attempt to cancel the input, and if the input is
* cancelled, the returned {@code Future} will receive a callback in which it
* will attempt to cancel itself.
*
* <p>An example use of this method is to convert a serializable object
* returned from an RPC into a POJO.
*
* <p>When the transformation is fast and lightweight, consider {@linkplain
* #transform(ListenableFuture, Function) omitting the executor} or
* explicitly specifying {@code sameThreadExecutor}. However, be aware of the
* caveats documented in the link above.
*
* @param input The future to transform
* @param function A Function to transform the results of the provided future
* to the results of the returned future.
* @param executor Executor to run the function in.
* @return A future that holds result of the transformation.
* @since 9.0 (in 2.0 as {@code compose})
*/
public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
final Function<? super I, ? extends O> function, Executor executor) {
checkNotNull(function);
AsyncFunction<I, O> wrapperFunction
= new AsyncFunction<I, O>() {
@Override public ListenableFuture<O> apply(I input) {
O output = function.apply(input);
return immediateFuture(output);
}
};
return transform(input, wrapperFunction, executor);
}
/**
* Like {@link #transform(ListenableFuture, Function)} except that the
* transformation {@code function} is invoked on each call to
* {@link Future#get() get()} on the returned future.
*
* <p>The returned {@code Future} reflects the input's cancellation
* state directly, and any attempt to cancel the returned Future is likewise
* passed through to the input Future.
*
* <p>Note that calls to {@linkplain Future#get(long, TimeUnit) timed get}
* only apply the timeout to the execution of the underlying {@code Future},
* <em>not</em> to the execution of the transformation function.
*
* <p>The primary audience of this method is callers of {@code transform}
* who don't have a {@code ListenableFuture} available and
* do not mind repeated, lazy function evaluation.
*
* @param input The future to transform
* @param function A Function to transform the results of the provided future
* to the results of the returned future.
* @return A future that returns the result of the transformation.
* @since 10.0
*/
public static <I, O> Future<O> lazyTransform(final Future<I> input,
final Function<? super I, ? extends O> function) {
checkNotNull(input);
checkNotNull(function);
return new Future<O>() {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return input.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return input.isCancelled();
}
@Override
public boolean isDone() {
return input.isDone();
}
@Override
public O get() throws InterruptedException, ExecutionException {
return applyTransformation(input.get());
}
@Override
public O get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return applyTransformation(input.get(timeout, unit));
}
private O applyTransformation(I input) throws ExecutionException {
try {
return function.apply(input);
} catch (Throwable t) {
throw new ExecutionException(t);
}
}
};
}
/**
* An implementation of {@code ListenableFuture} that also implements
* {@code Runnable} so that it can be used to nest ListenableFutures.
* Once the passed-in {@code ListenableFuture} is complete, it calls the
* passed-in {@code Function} to generate the result.
*
* <p>For historical reasons, this class has a special case in its exception
* handling: If the given {@code AsyncFunction} throws an {@code
* UndeclaredThrowableException}, {@code ChainingListenableFuture} unwraps it
* and uses its <i>cause</i> as the output future's exception, rather than
* using the {@code UndeclaredThrowableException} itself as it would for other
* exception types. The reason for this is that {@code Futures.transform} used
* to require a {@code Function}, whose {@code apply} method is not allowed to
* throw checked exceptions. Nowadays, {@code Futures.transform} has an
* overload that accepts an {@code AsyncFunction}, whose {@code apply} method
* <i>is</i> allowed to throw checked exception. Users who wish to throw
* checked exceptions should use that overload instead, and <a
* href="http://code.google.com/p/guava-libraries/issues/detail?id=1548">we
* should remove the {@code UndeclaredThrowableException} special case</a>.
*/
  private static class ChainingListenableFuture<I, O>
      extends AbstractFuture<O> implements Runnable {
    // Both fields are nulled out in run()'s finally block so completed
    // futures don't pin their inputs in memory.
    private AsyncFunction<? super I, ? extends O> function;
    private ListenableFuture<? extends I> inputFuture;
    // The future returned by the function; volatile because it is written by
    // the thread running run() and read by cancel() on another thread.
    private volatile ListenableFuture<? extends O> outputFuture;
    // Counted down once run() has finished creating (or failing to create)
    // outputFuture; lets other routines wait until outputFuture is settled.
    private final CountDownLatch outputCreated = new CountDownLatch(1);
    private ChainingListenableFuture(
        AsyncFunction<? super I, ? extends O> function,
        ListenableFuture<? extends I> inputFuture) {
      this.function = checkNotNull(function);
      this.inputFuture = checkNotNull(inputFuture);
    }
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      /*
       * Our additional cancellation work needs to occur even if
       * !mayInterruptIfRunning, so we can't move it into interruptTask().
       */
      if (super.cancel(mayInterruptIfRunning)) {
        // This should never block since only one thread is allowed to cancel
        // this Future.
        cancel(inputFuture, mayInterruptIfRunning);
        cancel(outputFuture, mayInterruptIfRunning);
        return true;
      }
      return false;
    }
    // Null-tolerant helper: either future may already have been cleared.
    private void cancel(@Nullable Future<?> future,
        boolean mayInterruptIfRunning) {
      if (future != null) {
        future.cancel(mayInterruptIfRunning);
      }
    }
    // Invoked (via addListener) once inputFuture is done: extracts its
    // result, applies the function, and mirrors the derived future's outcome.
    @Override
    public void run() {
      try {
        I sourceResult;
        try {
          sourceResult = getUninterruptibly(inputFuture);
        } catch (CancellationException e) {
          // Cancel this future and return.
          // At this point, inputFuture is cancelled and outputFuture doesn't
          // exist, so the value of mayInterruptIfRunning is irrelevant.
          cancel(false);
          return;
        } catch (ExecutionException e) {
          // Set the cause of the exception as this future's exception
          setException(e.getCause());
          return;
        }
        final ListenableFuture<? extends O> outputFuture = this.outputFuture =
            Preconditions.checkNotNull(function.apply(sourceResult),
                "AsyncFunction may not return null.");
        if (isCancelled()) {
          // We were cancelled while the function was running; propagate to
          // the freshly created output future and drop our reference to it.
          outputFuture.cancel(wasInterrupted());
          this.outputFuture = null;
          return;
        }
        outputFuture.addListener(new Runnable() {
          @Override
          public void run() {
            try {
              set(getUninterruptibly(outputFuture));
            } catch (CancellationException e) {
              // Cancel this future and return.
              // At this point, inputFuture and outputFuture are done, so the
              // value of mayInterruptIfRunning is irrelevant.
              cancel(false);
              return;
            } catch (ExecutionException e) {
              // Set the cause of the exception as this future's exception
              setException(e.getCause());
            } finally {
              // Don't pin inputs beyond completion
              ChainingListenableFuture.this.outputFuture = null;
            }
          }
        }, MoreExecutors.sameThreadExecutor());
      } catch (UndeclaredThrowableException e) {
        // Historical special case (see class javadoc): unwrap and use the
        // cause rather than the UndeclaredThrowableException itself.
        // Set the cause of the exception as this future's exception
        setException(e.getCause());
      } catch (Throwable t) {
        // This exception is irrelevant in this thread, but useful for the
        // client
        setException(t);
      } finally {
        // Don't pin inputs beyond completion
        function = null;
        inputFuture = null;
        // Allow our get routines to examine outputFuture now.
        outputCreated.countDown();
      }
    }
  }
/**
* Returns a new {@code ListenableFuture} whose result is the product of
* calling {@code get()} on the {@code Future} nested within the given {@code
* Future}, effectively chaining the futures one after the other. Example:
*
* <pre> {@code
* SettableFuture<ListenableFuture<String>> nested = SettableFuture.create();
* ListenableFuture<String> dereferenced = dereference(nested);}</pre>
*
* <p>This call has the same cancellation and execution semantics as {@link
* #transform(ListenableFuture, AsyncFunction)}, in that the returned {@code
* Future} attempts to keep its cancellation state in sync with both the
* input {@code Future} and the nested {@code Future}. The transformation
* is very lightweight and therefore takes place in the same thread (either
* the thread that called {@code dereference}, or the thread in which the
* dereferenced future completes).
*
* @param nested The nested future to transform.
* @return A future that holds result of the inner future.
* @since 13.0
*/
@SuppressWarnings({"rawtypes", "unchecked"})
public static <V> ListenableFuture<V> dereference(
ListenableFuture<? extends ListenableFuture<? extends V>> nested) {
return Futures.transform((ListenableFuture) nested, (AsyncFunction) DEREFERENCER);
}
/**
* Helper {@code Function} for {@link #dereference}.
*/
private static final AsyncFunction<ListenableFuture<Object>, Object> DEREFERENCER =
new AsyncFunction<ListenableFuture<Object>, Object>() {
@Override public ListenableFuture<Object> apply(ListenableFuture<Object> input) {
return input;
}
};
/**
* Creates a new {@code ListenableFuture} whose value is a list containing the
* values of all its input futures, if all succeed. If any input fails, the
* returned future fails.
*
* <p>The list of results is in the same order as the input list.
*
* <p>Canceling this future will attempt to cancel all the component futures,
* and if any of the provided futures fails or is canceled, this one is,
* too.
*
* @param futures futures to combine
* @return a future that provides a list of the results of the component
* futures
* @since 10.0
*/
@Beta
public static <V> ListenableFuture<List<V>> allAsList(
ListenableFuture<? extends V>... futures) {
return listFuture(ImmutableList.copyOf(futures), true,
MoreExecutors.sameThreadExecutor());
}
/**
* Creates a new {@code ListenableFuture} whose value is a list containing the
* values of all its input futures, if all succeed. If any input fails, the
* returned future fails.
*
* <p>The list of results is in the same order as the input list.
*
* <p>Canceling this future will attempt to cancel all the component futures,
* and if any of the provided futures fails or is canceled, this one is,
* too.
*
* @param futures futures to combine
* @return a future that provides a list of the results of the component
* futures
* @since 10.0
*/
@Beta
public static <V> ListenableFuture<List<V>> allAsList(
Iterable<? extends ListenableFuture<? extends V>> futures) {
return listFuture(ImmutableList.copyOf(futures), true,
MoreExecutors.sameThreadExecutor());
}
/**
* Creates a new {@code ListenableFuture} whose result is set from the
* supplied future when it completes. Cancelling the supplied future
* will also cancel the returned future, but cancelling the returned
* future will have no effect on the supplied future.
*
* @since 15.0
*/
  public static <V> ListenableFuture<V> nonCancellationPropagating(
      ListenableFuture<V> future) {
    // Cancellation flows only from the supplied future to the wrapper:
    // NonCancellationPropagatingFuture never calls cancel() on its delegate.
    return new NonCancellationPropagatingFuture<V>(future);
  }
/**
* A wrapped future that does not propagate cancellation to its delegate.
*/
  private static class NonCancellationPropagatingFuture<V>
      extends AbstractFuture<V> {
    // Note: cancel() is deliberately NOT overridden, so cancelling this
    // future never reaches the delegate. The delegate's outcome, on the
    // other hand, is always mirrored into this future by the callback below.
    NonCancellationPropagatingFuture(final ListenableFuture<V> delegate) {
      checkNotNull(delegate);
      addCallback(delegate, new FutureCallback<V>() {
        @Override
        public void onSuccess(V result) {
          // Mirror the delegate's successful value.
          set(result);
        }
        @Override
        public void onFailure(Throwable t) {
          if (delegate.isCancelled()) {
            // The delegate was cancelled: reflect that here without
            // interruption (there is no computation of ours to interrupt).
            cancel(false);
          } else {
            setException(t);
          }
        }
      }, sameThreadExecutor());
    }
  }
/**
* Creates a new {@code ListenableFuture} whose value is a list containing the
* values of all its successful input futures. The list of results is in the
* same order as the input list, and if any of the provided futures fails or
* is canceled, its corresponding position will contain {@code null} (which is
* indistinguishable from the future having a successful value of
* {@code null}).
*
* <p>Canceling this future will attempt to cancel all the component futures.
*
* @param futures futures to combine
* @return a future that provides a list of the results of the component
* futures
* @since 10.0
*/
@Beta
public static <V> ListenableFuture<List<V>> successfulAsList(
ListenableFuture<? extends V>... futures) {
return listFuture(ImmutableList.copyOf(futures), false,
MoreExecutors.sameThreadExecutor());
}
/**
* Creates a new {@code ListenableFuture} whose value is a list containing the
* values of all its successful input futures. The list of results is in the
* same order as the input list, and if any of the provided futures fails or
* is canceled, its corresponding position will contain {@code null} (which is
* indistinguishable from the future having a successful value of
* {@code null}).
*
* <p>Canceling this future will attempt to cancel all the component futures.
*
* @param futures futures to combine
* @return a future that provides a list of the results of the component
* futures
* @since 10.0
*/
@Beta
public static <V> ListenableFuture<List<V>> successfulAsList(
Iterable<? extends ListenableFuture<? extends V>> futures) {
return listFuture(ImmutableList.copyOf(futures), false,
MoreExecutors.sameThreadExecutor());
}
/**
* Returns a list of delegate futures that correspond to the futures received in the order
* that they complete. Delegate futures return the same value or throw the same exception
* as the corresponding input future returns/throws.
*
* <p>Cancelling a delegate future has no effect on any input future, since the delegate future
* does not correspond to a specific input future until the appropriate number of input
* futures have completed. At that point, it is too late to cancel the input future.
* The input future's result, which cannot be stored into the cancelled delegate future,
* is ignored.
*
* @since 17.0
*/
@Beta
public static <T> ImmutableList<ListenableFuture<T>> inCompletionOrder(
Iterable<? extends ListenableFuture<? extends T>> futures) {
// A CLQ may be overkill here. We could save some pointers/memory by synchronizing on an
// ArrayDeque
final ConcurrentLinkedQueue<AsyncSettableFuture<T>> delegates =
Queues.newConcurrentLinkedQueue();
ImmutableList.Builder<ListenableFuture<T>> listBuilder = ImmutableList.builder();
// Using SerializingExecutor here will ensure that each CompletionOrderListener executes
// atomically and therefore that each returned future is guaranteed to be in completion order.
// N.B. there are some cases where the use of this executor could have possibly surprising
// effects when input futures finish at approximately the same time _and_ the output futures
// have sameThreadExecutor listeners. In this situation, the listeners may end up running on a
// different thread than if they were attached to the corresponding input future. We believe
// this to be a negligible cost since:
// 1. Using the sameThreadExecutor implies that your callback is safe to run on any thread.
// 2. This would likely only be noticeable if you were doing something expensive or blocking on
// a sameThreadExecutor listener on one of the output futures which is an antipattern anyway.
SerializingExecutor executor = new SerializingExecutor(MoreExecutors.sameThreadExecutor());
for (final ListenableFuture<? extends T> future : futures) {
AsyncSettableFuture<T> delegate = AsyncSettableFuture.create();
// Must make sure to add the delegate to the queue first in case the future is already done
delegates.add(delegate);
future.addListener(new Runnable() {
@Override public void run() {
delegates.remove().setFuture(future);
}
}, executor);
listBuilder.add(delegate);
}
return listBuilder.build();
}
/**
* Registers separate success and failure callbacks to be run when the {@code
* Future}'s computation is {@linkplain java.util.concurrent.Future#isDone()
* complete} or, if the computation is already complete, immediately.
*
* <p>There is no guaranteed ordering of execution of callbacks, but any
* callback added through this method is guaranteed to be called once the
* computation is complete.
*
* Example: <pre> {@code
* ListenableFuture<QueryResult> future = ...;
* addCallback(future,
* new FutureCallback<QueryResult> {
* public void onSuccess(QueryResult result) {
* storeInCache(result);
* }
* public void onFailure(Throwable t) {
* reportError(t);
* }
* });}</pre>
*
* <p>Note: If the callback is slow or heavyweight, consider {@linkplain
* #addCallback(ListenableFuture, FutureCallback, Executor) supplying an
* executor}. If you do not supply an executor, {@code addCallback} will use
* {@link MoreExecutors#sameThreadExecutor sameThreadExecutor}, which carries
* some caveats for heavier operations. For example, the callback may run on
* an unpredictable or undesirable thread:
*
* <ul>
* <li>If the input {@code Future} is done at the time {@code addCallback} is
* called, {@code addCallback} will execute the callback inline.
* <li>If the input {@code Future} is not yet done, {@code addCallback} will
* schedule the callback to be run by the thread that completes the input
* {@code Future}, which may be an internal system thread such as an RPC
* network thread.
* </ul>
*
* <p>Also note that, regardless of which thread executes the {@code
* sameThreadExecutor} callback, all other registered but unexecuted listeners
* are prevented from running during its execution, even if those listeners
* are to run in other executors.
*
* <p>For a more general interface to attach a completion listener to a
* {@code Future}, see {@link ListenableFuture#addListener addListener}.
*
* @param future The future attach the callback to.
* @param callback The callback to invoke when {@code future} is completed.
* @since 10.0
*/
public static <V> void addCallback(ListenableFuture<V> future,
FutureCallback<? super V> callback) {
addCallback(future, callback, MoreExecutors.sameThreadExecutor());
}
  /**
   * Registers separate success and failure callbacks to be run when the {@code
   * Future}'s computation is {@linkplain java.util.concurrent.Future#isDone()
   * complete} or, if the computation is already complete, immediately.
   *
   * <p>The callback is run in {@code executor}.
   * There is no guaranteed ordering of execution of callbacks, but any
   * callback added through this method is guaranteed to be called once the
   * computation is complete.
   *
   * Example: <pre> {@code
   * ListenableFuture<QueryResult> future = ...;
   * Executor e = ...
   * addCallback(future,
   *     new FutureCallback<QueryResult> {
   *       public void onSuccess(QueryResult result) {
   *         storeInCache(result);
   *       }
   *       public void onFailure(Throwable t) {
   *         reportError(t);
   *       }
   *     }, e);}</pre>
   *
   * <p>When the callback is fast and lightweight, consider {@linkplain
   * #addCallback(ListenableFuture, FutureCallback) omitting the executor} or
   * explicitly specifying {@code sameThreadExecutor}. However, be aware of the
   * caveats documented in the link above.
   *
   * <p>For a more general interface to attach a completion listener to a
   * {@code Future}, see {@link ListenableFuture#addListener addListener}.
   *
   * @param future The future to attach the callback to.
   * @param callback The callback to invoke when {@code future} is completed.
   * @param executor The executor to run {@code callback} when the future
   *    completes.
   * @since 10.0
   */
  public static <V> void addCallback(final ListenableFuture<V> future,
      final FutureCallback<? super V> callback, Executor executor) {
    Preconditions.checkNotNull(callback);
    Runnable callbackListener = new Runnable() {
      @Override
      public void run() {
        final V value;
        try {
          // TODO(user): (Before Guava release), validate that this
          // is the thing for IE.
          // The listener only runs once the future is done, so this call
          // does not block.
          value = getUninterruptibly(future);
        } catch (ExecutionException e) {
          // The computation itself failed: report the underlying cause.
          callback.onFailure(e.getCause());
          return;
        } catch (RuntimeException e) {
          // e.g. CancellationException if the future was cancelled.
          callback.onFailure(e);
          return;
        } catch (Error e) {
          callback.onFailure(e);
          return;
        }
        callback.onSuccess(value);
      }
    };
    future.addListener(callbackListener, executor);
  }
  /**
   * Returns the result of {@link Future#get()}, converting most exceptions to a
   * new instance of the given checked exception type. This reduces boilerplate
   * for a common use of {@code Future} in which it is unnecessary to
   * programmatically distinguish between exception types or to extract other
   * information from the exception instance.
   *
   * <p>Exceptions from {@code Future.get} are treated as follows:
   * <ul>
   * <li>Any {@link ExecutionException} has its <i>cause</i> wrapped in an
   *     {@code X} if the cause is a checked exception, an {@link
   *     UncheckedExecutionException} if the cause is a {@code
   *     RuntimeException}, or an {@link ExecutionError} if the cause is an
   *     {@code Error}.
   * <li>Any {@link InterruptedException} is wrapped in an {@code X} (after
   *     restoring the interrupt).
   * <li>Any {@link CancellationException} is propagated untouched, as is any
   *     other {@link RuntimeException} (though {@code get} implementations are
   *     discouraged from throwing such exceptions).
   * </ul>
   *
   * <p>The overall principle is to continue to treat every checked exception as a
   * checked exception, every unchecked exception as an unchecked exception, and
   * every error as an error. In addition, the cause of any {@code
   * ExecutionException} is wrapped in order to ensure that the new stack trace
   * matches that of the current thread.
   *
   * <p>Instances of {@code exceptionClass} are created by choosing an arbitrary
   * public constructor that accepts zero or more arguments, all of type {@code
   * String} or {@code Throwable} (preferring constructors with at least one
   * {@code String}) and calling the constructor via reflection. If the
   * exception did not already have a cause, one is set by calling {@link
   * Throwable#initCause(Throwable)} on it. If no such constructor exists, an
   * {@code IllegalArgumentException} is thrown.
   *
   * @param future the {@code Future} whose result to return
   * @param exceptionClass the checked exception type to wrap failures in
   * @throws X if {@code get} throws any checked exception except for an {@code
   *     ExecutionException} whose cause is not itself a checked exception
   * @throws UncheckedExecutionException if {@code get} throws an {@code
   *     ExecutionException} with a {@code RuntimeException} as its cause
   * @throws ExecutionError if {@code get} throws an {@code ExecutionException}
   *     with an {@code Error} as its cause
   * @throws CancellationException if {@code get} throws a {@code
   *     CancellationException}
   * @throws IllegalArgumentException if {@code exceptionClass} extends {@code
   *     RuntimeException} or does not have a suitable constructor
   * @since 10.0
   */
  public static <V, X extends Exception> V get(
      Future<V> future, Class<X> exceptionClass) throws X {
    checkNotNull(future);
    checkArgument(!RuntimeException.class.isAssignableFrom(exceptionClass),
        "Futures.get exception type (%s) must not be a RuntimeException",
        exceptionClass);
    try {
      return future.get();
    } catch (InterruptedException e) {
      // Restore the interrupt before converting to the checked type.
      currentThread().interrupt();
      throw newWithCause(exceptionClass, e);
    } catch (ExecutionException e) {
      wrapAndThrowExceptionOrError(e.getCause(), exceptionClass);
      // Unreachable: wrapAndThrowExceptionOrError always throws.
      throw new AssertionError();
    }
  }
  /**
   * Returns the result of {@link Future#get(long, TimeUnit)}, converting most
   * exceptions to a new instance of the given checked exception type. This
   * reduces boilerplate for a common use of {@code Future} in which it is
   * unnecessary to programmatically distinguish between exception types or to
   * extract other information from the exception instance.
   *
   * <p>Exceptions from {@code Future.get} are treated as follows:
   * <ul>
   * <li>Any {@link ExecutionException} has its <i>cause</i> wrapped in an
   *     {@code X} if the cause is a checked exception, an {@link
   *     UncheckedExecutionException} if the cause is a {@code
   *     RuntimeException}, or an {@link ExecutionError} if the cause is an
   *     {@code Error}.
   * <li>Any {@link InterruptedException} is wrapped in an {@code X} (after
   *     restoring the interrupt).
   * <li>Any {@link TimeoutException} is wrapped in an {@code X}.
   * <li>Any {@link CancellationException} is propagated untouched, as is any
   *     other {@link RuntimeException} (though {@code get} implementations are
   *     discouraged from throwing such exceptions).
   * </ul>
   *
   * <p>The overall principle is to continue to treat every checked exception as a
   * checked exception, every unchecked exception as an unchecked exception, and
   * every error as an error. In addition, the cause of any {@code
   * ExecutionException} is wrapped in order to ensure that the new stack trace
   * matches that of the current thread.
   *
   * <p>Instances of {@code exceptionClass} are created by choosing an arbitrary
   * public constructor that accepts zero or more arguments, all of type {@code
   * String} or {@code Throwable} (preferring constructors with at least one
   * {@code String}) and calling the constructor via reflection. If the
   * exception did not already have a cause, one is set by calling {@link
   * Throwable#initCause(Throwable)} on it. If no such constructor exists, an
   * {@code IllegalArgumentException} is thrown.
   *
   * @param future the {@code Future} whose result to return
   * @param timeout the maximum time to wait for the computation
   * @param unit the time unit of the timeout argument
   * @param exceptionClass the checked exception type to wrap failures in
   * @throws X if {@code get} throws any checked exception except for an {@code
   *     ExecutionException} whose cause is not itself a checked exception
   * @throws UncheckedExecutionException if {@code get} throws an {@code
   *     ExecutionException} with a {@code RuntimeException} as its cause
   * @throws ExecutionError if {@code get} throws an {@code ExecutionException}
   *     with an {@code Error} as its cause
   * @throws CancellationException if {@code get} throws a {@code
   *     CancellationException}
   * @throws IllegalArgumentException if {@code exceptionClass} extends {@code
   *     RuntimeException} or does not have a suitable constructor
   * @since 10.0
   */
  public static <V, X extends Exception> V get(
      Future<V> future, long timeout, TimeUnit unit, Class<X> exceptionClass)
      throws X {
    checkNotNull(future);
    checkNotNull(unit);
    checkArgument(!RuntimeException.class.isAssignableFrom(exceptionClass),
        "Futures.get exception type (%s) must not be a RuntimeException",
        exceptionClass);
    try {
      return future.get(timeout, unit);
    } catch (InterruptedException e) {
      // Restore the interrupt before converting to the checked type.
      currentThread().interrupt();
      throw newWithCause(exceptionClass, e);
    } catch (TimeoutException e) {
      throw newWithCause(exceptionClass, e);
    } catch (ExecutionException e) {
      wrapAndThrowExceptionOrError(e.getCause(), exceptionClass);
      // Unreachable: wrapAndThrowExceptionOrError always throws.
      throw new AssertionError();
    }
  }
private static <X extends Exception> void wrapAndThrowExceptionOrError(
Throwable cause, Class<X> exceptionClass) throws X {
if (cause instanceof Error) {
throw new ExecutionError((Error) cause);
}
if (cause instanceof RuntimeException) {
throw new UncheckedExecutionException(cause);
}
throw newWithCause(exceptionClass, cause);
}
  /**
   * Returns the result of calling {@link Future#get()} uninterruptibly on a
   * task known not to throw a checked exception. This makes {@code Future} more
   * suitable for lightweight, fast-running tasks that, barring bugs in the
   * code, will not fail. This gives it exception-handling behavior similar to
   * that of {@code ForkJoinTask.join}.
   *
   * <p>Exceptions from {@code Future.get} are treated as follows:
   * <ul>
   * <li>Any {@link ExecutionException} has its <i>cause</i> wrapped in an
   *     {@link UncheckedExecutionException} (if the cause is an {@code
   *     Exception}) or {@link ExecutionError} (if the cause is an {@code
   *     Error}).
   * <li>Any {@link InterruptedException} causes a retry of the {@code get}
   *     call. The interrupt is restored before {@code getUnchecked} returns.
   * <li>Any {@link CancellationException} is propagated untouched. So is any
   *     other {@link RuntimeException} ({@code get} implementations are
   *     discouraged from throwing such exceptions).
   * </ul>
   *
   * <p>The overall principle is to eliminate all checked exceptions: to loop to
   * avoid {@code InterruptedException}, to pass through {@code
   * CancellationException}, and to wrap any exception from the underlying
   * computation in an {@code UncheckedExecutionException} or {@code
   * ExecutionError}.
   *
   * <p>For an uninterruptible {@code get} that preserves other exceptions, see
   * {@link Uninterruptibles#getUninterruptibly(Future)}.
   *
   * @param future the {@code Future} whose result to return
   * @throws UncheckedExecutionException if {@code get} throws an {@code
   *     ExecutionException} with an {@code Exception} as its cause
   * @throws ExecutionError if {@code get} throws an {@code ExecutionException}
   *     with an {@code Error} as its cause
   * @throws CancellationException if {@code get} throws a {@code
   *     CancellationException}
   * @since 10.0
   */
  public static <V> V getUnchecked(Future<V> future) {
    checkNotNull(future);
    try {
      return getUninterruptibly(future);
    } catch (ExecutionException e) {
      wrapAndThrowUnchecked(e.getCause());
      // Unreachable: wrapAndThrowUnchecked always throws.
      throw new AssertionError();
    }
  }
private static void wrapAndThrowUnchecked(Throwable cause) {
if (cause instanceof Error) {
throw new ExecutionError((Error) cause);
}
/*
* It's a non-Error, non-Exception Throwable. From my survey of such
* classes, I believe that most users intended to extend Exception, so we'll
* treat it like an Exception.
*/
throw new UncheckedExecutionException(cause);
}
/*
* TODO(user): FutureChecker interface for these to be static methods on? If
* so, refer to it in the (static-method) Futures.get documentation
*/
/*
* Arguably we don't need a timed getUnchecked because any operation slow
* enough to require a timeout is heavyweight enough to throw a checked
* exception and therefore be inappropriate to use with getUnchecked. Further,
* it's not clear that converting the checked TimeoutException to a
* RuntimeException -- especially to an UncheckedExecutionException, since it
* wasn't thrown by the computation -- makes sense, and if we don't convert
* it, the user still has to write a try-catch block.
*
* If you think you would use this method, let us know.
*/
private static <X extends Exception> X newWithCause(
Class<X> exceptionClass, Throwable cause) {
// getConstructors() guarantees this as long as we don't modify the array.
@SuppressWarnings("unchecked")
List<Constructor<X>> constructors =
(List) Arrays.asList(exceptionClass.getConstructors());
for (Constructor<X> constructor : preferringStrings(constructors)) {
@Nullable X instance = newFromConstructor(constructor, cause);
if (instance != null) {
if (instance.getCause() == null) {
instance.initCause(cause);
}
return instance;
}
}
throw new IllegalArgumentException(
"No appropriate constructor for exception of type " + exceptionClass
+ " in response to chained exception", cause);
}
  // Returns a copy of the constructors sorted so that those taking at least
  // one String parameter (typically a message) come first; see
  // WITH_STRING_PARAM_FIRST.
  private static <X extends Exception> List<Constructor<X>>
      preferringStrings(List<Constructor<X>> constructors) {
    return WITH_STRING_PARAM_FIRST.sortedCopy(constructors);
  }
  // Orders constructors so that those with a String parameter sort first:
  // natural ordering on Boolean puts false before true, so reverse() moves
  // the String-taking constructors (true) to the front.
  private static final Ordering<Constructor<?>> WITH_STRING_PARAM_FIRST =
      Ordering.natural().onResultOf(new Function<Constructor<?>, Boolean>() {
        @Override public Boolean apply(Constructor<?> input) {
          return asList(input.getParameterTypes()).contains(String.class);
        }
      }).reverse();
@Nullable private static <X> X newFromConstructor(
Constructor<X> constructor, Throwable cause) {
Class<?>[] paramTypes = constructor.getParameterTypes();
Object[] params = new Object[paramTypes.length];
for (int i = 0; i < paramTypes.length; i++) {
Class<?> paramType = paramTypes[i];
if (paramType.equals(String.class)) {
params[i] = cause.toString();
} else if (paramType.equals(Throwable.class)) {
params[i] = cause;
} else {
return null;
}
}
try {
return constructor.newInstance(params);
} catch (IllegalArgumentException e) {
return null;
} catch (InstantiationException e) {
return null;
} catch (IllegalAccessException e) {
return null;
} catch (InvocationTargetException e) {
return null;
}
}
  /**
   * Strategy for turning the collected results of a {@link CombinedFuture}'s
   * inputs into its single output value. A null element in {@code values}
   * corresponds to an input that did not complete successfully.
   */
  private interface FutureCombiner<V, C> {
    C combine(List<Optional<V>> values);
  }
  /**
   * A future that completes once all of a collection of input futures have
   * completed, producing its result by applying a {@link FutureCombiner} to
   * the collected values. Supports both fail-fast ({@code allMustSucceed})
   * and best-effort semantics.
   */
  private static class CombinedFuture<V, C> extends AbstractFuture<C> {
    private static final Logger logger =
        Logger.getLogger(CombinedFuture.class.getName());
    // The input futures; nulled out by the cleanup listener once this future
    // is done, to free memory.
    ImmutableCollection<? extends ListenableFuture<? extends V>> futures;
    // If true, the first input failure fails this future immediately.
    final boolean allMustSucceed;
    // Count of input futures that have not yet reported completion.
    final AtomicInteger remaining;
    // Converts the collected values into this future's result; nulled out
    // after completion.
    FutureCombiner<V, C> combiner;
    // One slot per input, filled in as inputs succeed; nulled out after
    // completion.
    List<Optional<V>> values;
    final Object seenExceptionsLock = new Object();
    // Guarded by seenExceptionsLock; tracks distinct input failures so each
    // is logged at most once.
    Set<Throwable> seenExceptions;
    CombinedFuture(
        ImmutableCollection<? extends ListenableFuture<? extends V>> futures,
        boolean allMustSucceed, Executor listenerExecutor,
        FutureCombiner<V, C> combiner) {
      this.futures = futures;
      this.allMustSucceed = allMustSucceed;
      this.remaining = new AtomicInteger(futures.size());
      this.combiner = combiner;
      this.values = Lists.newArrayListWithCapacity(futures.size());
      init(listenerExecutor);
    }
    /**
     * Must be called at the end of the constructor.
     */
    protected void init(final Executor listenerExecutor) {
      // First, schedule cleanup to execute when the Future is done.
      addListener(new Runnable() {
        @Override
        public void run() {
          // Cancel all the component futures.
          if (CombinedFuture.this.isCancelled()) {
            for (ListenableFuture<?> future : CombinedFuture.this.futures) {
              future.cancel(CombinedFuture.this.wasInterrupted());
            }
          }
          // Let go of the memory held by other futures
          CombinedFuture.this.futures = null;
          // By now the values array has either been set as the Future's value,
          // or (in case of failure) is no longer useful.
          CombinedFuture.this.values = null;
          // The combiner may also hold state, so free that as well
          CombinedFuture.this.combiner = null;
        }
      }, MoreExecutors.sameThreadExecutor());
      // Now begin the "real" initialization.
      // Corner case: List is empty.
      if (futures.isEmpty()) {
        set(combiner.combine(ImmutableList.<Optional<V>>of()));
        return;
      }
      // Populate the results list with null initially.
      for (int i = 0; i < futures.size(); ++i) {
        values.add(null);
      }
      // Register a listener on each Future in the list to update
      // the state of this future.
      // Note that if all the futures on the list are done prior to completing
      // this loop, the last call to addListener() will callback to
      // setOneValue(), transitively call our cleanup listener, and set
      // this.futures to null.
      // This is not actually a problem, since the foreach only needs
      // this.futures to be non-null at the beginning of the loop.
      int i = 0;
      for (final ListenableFuture<? extends V> listenable : futures) {
        final int index = i++;
        listenable.addListener(new Runnable() {
          @Override
          public void run() {
            setOneValue(index, listenable);
          }
        }, listenerExecutor);
      }
    }
    /**
     * Fails this future with the given Throwable if {@link #allMustSucceed} is
     * true. Also, logs the throwable if it is an {@link Error} or if
     * {@link #allMustSucceed} is {@code true}, the throwable did not cause
     * this future to fail, and it is the first time we've seen that particular Throwable.
     */
    private void setExceptionAndMaybeLog(Throwable throwable) {
      boolean visibleFromOutputFuture = false;
      boolean firstTimeSeeingThisException = true;
      if (allMustSucceed) {
        // As soon as the first one fails, throw the exception up.
        // The result of all other inputs is then ignored.
        visibleFromOutputFuture = super.setException(throwable);
        synchronized (seenExceptionsLock) {
          if (seenExceptions == null) {
            seenExceptions = Sets.newHashSet();
          }
          firstTimeSeeingThisException = seenExceptions.add(throwable);
        }
      }
      // Log failures the caller would otherwise never see: Errors always,
      // and (in allMustSucceed mode) any distinct failure that lost the race
      // to become this future's exception.
      if (throwable instanceof Error
          || (allMustSucceed && !visibleFromOutputFuture && firstTimeSeeingThisException)) {
        logger.log(Level.SEVERE, "input future failed.", throwable);
      }
    }
    /**
     * Sets the value at the given index to that of the given future.
     */
    private void setOneValue(int index, Future<? extends V> future) {
      List<Optional<V>> localValues = values;
      // TODO(user): This check appears to be redundant since values is
      // assigned null only after the future completes. However, values
      // is not volatile so it may be possible for us to observe the changes
      // to these two values in a different order... which I think is why
      // we need to check both. Clear up this craziness either by making
      // values volatile or proving that it doesn't need to be for some other
      // reason.
      if (isDone() || localValues == null) {
        // Some other future failed or has been cancelled, causing this one to
        // also be cancelled or have an exception set. This should only happen
        // if allMustSucceed is true or if the output itself has been
        // cancelled.
        checkState(allMustSucceed || isCancelled(),
            "Future was done before all dependencies completed");
      }
      try {
        checkState(future.isDone(),
            "Tried to set value from future which is not done");
        V returnValue = getUninterruptibly(future);
        if (localValues != null) {
          localValues.set(index, Optional.fromNullable(returnValue));
        }
      } catch (CancellationException e) {
        if (allMustSucceed) {
          // Set ourselves as cancelled. Let the input futures keep running
          // as some of them may be used elsewhere.
          cancel(false);
        }
      } catch (ExecutionException e) {
        setExceptionAndMaybeLog(e.getCause());
      } catch (Throwable t) {
        setExceptionAndMaybeLog(t);
      } finally {
        int newRemaining = remaining.decrementAndGet();
        checkState(newRemaining >= 0, "Less than 0 remaining futures");
        if (newRemaining == 0) {
          // This was the last input to complete; produce the combined result
          // (unless cleanup already ran because the future finished early).
          FutureCombiner<V, C> localCombiner = combiner;
          if (localCombiner != null && localValues != null) {
            set(localCombiner.combine(localValues));
          } else {
            checkState(isDone());
          }
        }
      }
    }
  }
/** Used for {@link #allAsList} and {@link #successfulAsList}. */
private static <V> ListenableFuture<List<V>> listFuture(
ImmutableList<ListenableFuture<? extends V>> futures,
boolean allMustSucceed, Executor listenerExecutor) {
return new CombinedFuture<V, List<V>>(
futures, allMustSucceed, listenerExecutor,
new FutureCombiner<V, List<V>>() {
@Override
public List<V> combine(List<Optional<V>> values) {
List<V> result = Lists.newArrayList();
for (Optional<V> element : values) {
result.add(element != null ? element.orNull() : null);
}
return Collections.unmodifiableList(result);
}
});
}
  /**
   * A checked future that uses a function to map from exceptions to the
   * appropriate checked type.
   */
  private static class MappingCheckedFuture<V, X extends Exception> extends
      AbstractCheckedFuture<V, X> {
    // Translates a raw execution exception into the caller's checked type X.
    final Function<Exception, X> mapper;
    MappingCheckedFuture(ListenableFuture<V> delegate,
        Function<Exception, X> mapper) {
      super(delegate);
      this.mapper = checkNotNull(mapper);
    }
    @Override
    protected X mapException(Exception e) {
      // Delegated to the user-supplied function; the checked-future base
      // class calls this to convert failures before rethrowing.
      return mapper.apply(e);
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/Futures.java | Java | asf20 | 69,312 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import javax.annotation.Nullable;
/**
* A {@link ListenableFuture} whose result may be set by a {@link #set(Object)}
* or {@link #setException(Throwable)} call. It may also be cancelled.
*
* @author Sven Mawson
* @since 9.0 (in 1.0 as {@code ValueFuture})
*/
public final class SettableFuture<V> extends AbstractFuture<V> {
  /**
   * Creates a new {@code SettableFuture} in the default state.
   */
  public static <V> SettableFuture<V> create() {
    return new SettableFuture<V>();
  }
  /**
   * Explicit private constructor, use the {@link #create} factory method to
   * create instances of {@code SettableFuture}.
   */
  private SettableFuture() {}
  /**
   * Sets the value of this future. This method will return {@code true} if
   * the value was successfully set, or {@code false} if the future has already
   * been set or cancelled.
   *
   * @param value the value the future should hold.
   * @return true if the value was successfully set.
   */
  @Override
  public boolean set(@Nullable V value) {
    // Overridden only to expose AbstractFuture's set publicly, so that
    // arbitrary producers can complete this future.
    return super.set(value);
  }
  /**
   * Sets the future to having failed with the given exception. This exception
   * will be wrapped in an {@code ExecutionException} and thrown from the {@code
   * get} methods. This method will return {@code true} if the exception was
   * successfully set, or {@code false} if the future has already been set or
   * cancelled.
   *
   * @param throwable the exception the future should hold.
   * @return true if the exception was successfully set.
   */
  @Override
  public boolean setException(Throwable throwable) {
    // Overridden only to expose AbstractFuture's setException publicly.
    return super.setException(throwable);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/SettableFuture.java | Java | asf20 | 2,286 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.base.Preconditions;
import com.google.common.collect.ForwardingObject;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* A {@link Future} which forwards all its method calls to another future.
* Subclasses should override one or more methods to modify the behavior of
* the backing future as desired per the <a
* href="http://en.wikipedia.org/wiki/Decorator_pattern">decorator pattern</a>.
*
* <p>Most subclasses can just use {@link SimpleForwardingFuture}.
*
* @author Sven Mawson
* @since 1.0
*/
public abstract class ForwardingFuture<V> extends ForwardingObject
    implements Future<V> {
  /** Constructor for use by subclasses. */
  protected ForwardingFuture() {}
  /** Returns the backing future that all {@code Future} methods forward to. */
  @Override protected abstract Future<V> delegate();
  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    return delegate().cancel(mayInterruptIfRunning);
  }
  @Override
  public boolean isCancelled() {
    return delegate().isCancelled();
  }
  @Override
  public boolean isDone() {
    return delegate().isDone();
  }
  @Override
  public V get() throws InterruptedException, ExecutionException {
    return delegate().get();
  }
  @Override
  public V get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    return delegate().get(timeout, unit);
  }
  /*
   * TODO(cpovirk): Use standard Javadoc form for SimpleForwarding* class and
   * constructor
   */
  /**
   * A simplified version of {@link ForwardingFuture} where subclasses
   * can pass in an already constructed {@link Future} as the delegate.
   *
   * @since 9.0
   */
  public abstract static class SimpleForwardingFuture<V>
      extends ForwardingFuture<V> {
    // The fixed backing future supplied at construction time; never null.
    private final Future<V> delegate;
    protected SimpleForwardingFuture(Future<V> delegate) {
      this.delegate = Preconditions.checkNotNull(delegate);
    }
    @Override
    protected final Future<V> delegate() {
      return delegate;
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/ForwardingFuture.java | Java | asf20 | 2,754 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Concurrency utilities.
*
* <p>Commonly used types include {@link
* com.google.common.util.concurrent.ListenableFuture} and {@link
* com.google.common.util.concurrent.Service}.
*
* <p>Commonly used utilities include {@link
* com.google.common.util.concurrent.Futures}, {@link
* com.google.common.util.concurrent.MoreExecutors}, and {@link
* com.google.common.util.concurrent.ThreadFactoryBuilder}.
*
* <p>This package is a part of the open-source
* <a href="http://guava-libraries.googlecode.com">Guava libraries</a>.
*/
@ParametersAreNonnullByDefault
package com.google.common.util.concurrent;
import javax.annotation.ParametersAreNonnullByDefault;
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/package-info.java | Java | asf20 | 1,273 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* An object with an operational state, plus asynchronous {@link #startAsync()} and
* {@link #stopAsync()} lifecycle methods to transition between states. Example services include
* webservers, RPC servers and timers.
*
* <p>The normal lifecycle of a service is:
* <ul>
* <li>{@linkplain State#NEW NEW} ->
* <li>{@linkplain State#STARTING STARTING} ->
* <li>{@linkplain State#RUNNING RUNNING} ->
* <li>{@linkplain State#STOPPING STOPPING} ->
* <li>{@linkplain State#TERMINATED TERMINATED}
* </ul>
*
* <p>There are deviations from this if there are failures or if {@link Service#stopAsync} is called
* before the {@link Service} reaches the {@linkplain State#RUNNING RUNNING} state. The set of legal
* transitions form a <a href="http://en.wikipedia.org/wiki/Directed_acyclic_graph">DAG</a>,
* therefore every method of the listener will be called at most once. N.B. The {@link State#FAILED}
* and {@link State#TERMINATED} states are terminal states, once a service enters either of these
* states it cannot ever leave them.
*
* <p>Implementors of this interface are strongly encouraged to extend one of the abstract classes
* in this package which implement this interface and make the threading and state management
* easier.
*
* @author Jesse Wilson
* @author Luke Sandberg
* @since 9.0 (in 1.0 as {@code com.google.common.base.Service})
*/
@Beta
public interface Service {
/**
* If the service state is {@link State#NEW}, this initiates service startup and returns
* immediately. A stopped service may not be restarted.
*
* @return this
* @throws IllegalStateException if the service is not {@link State#NEW}
*
* @since 15.0
*/
Service startAsync();
/**
* Returns {@code true} if this service is {@linkplain State#RUNNING running}.
*/
boolean isRunning();
/**
* Returns the lifecycle state of the service.
*/
State state();
/**
* If the service is {@linkplain State#STARTING starting} or {@linkplain State#RUNNING running},
* this initiates service shutdown and returns immediately. If the service is
* {@linkplain State#NEW new}, it is {@linkplain State#TERMINATED terminated} without having been
* started nor stopped. If the service has already been stopped, this method returns immediately
* without taking action.
*
* @return this
* @since 15.0
*/
Service stopAsync();
/**
* Waits for the {@link Service} to reach the {@linkplain State#RUNNING running state}.
*
* @throws IllegalStateException if the service reaches a state from which it is not possible to
* enter the {@link State#RUNNING} state. e.g. if the {@code state} is
* {@code State#TERMINATED} when this method is called then this will throw an
* IllegalStateException.
*
* @since 15.0
*/
void awaitRunning();
/**
* Waits for the {@link Service} to reach the {@linkplain State#RUNNING running state} for no
* more than the given time.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @throws TimeoutException if the service has not reached the given state within the deadline
* @throws IllegalStateException if the service reaches a state from which it is not possible to
* enter the {@link State#RUNNING RUNNING} state. e.g. if the {@code state} is
* {@code State#TERMINATED} when this method is called then this will throw an
* IllegalStateException.
*
* @since 15.0
*/
void awaitRunning(long timeout, TimeUnit unit) throws TimeoutException;
/**
* Waits for the {@link Service} to reach the {@linkplain State#TERMINATED terminated state}.
*
* @throws IllegalStateException if the service {@linkplain State#FAILED fails}.
*
* @since 15.0
*/
void awaitTerminated();
/**
* Waits for the {@link Service} to reach a terminal state (either
* {@link Service.State#TERMINATED terminated} or {@link Service.State#FAILED failed}) for no
* more than the given time.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @throws TimeoutException if the service has not reached the given state within the deadline
* @throws IllegalStateException if the service {@linkplain State#FAILED fails}.
* @since 15.0
*/
void awaitTerminated(long timeout, TimeUnit unit) throws TimeoutException;
/**
* Returns the {@link Throwable} that caused this service to fail.
*
* @throws IllegalStateException if this service's state isn't {@linkplain State#FAILED FAILED}.
*
* @since 14.0
*/
Throwable failureCause();
/**
* Registers a {@link Listener} to be {@linkplain Executor#execute executed} on the given
* executor. The listener will have the corresponding transition method called whenever the
* service changes state. The listener will not have previous state changes replayed, so it is
* suggested that listeners are added before the service starts.
*
* <p>There is no guaranteed ordering of execution of listeners, but any listener added through
* this method is guaranteed to be called whenever there is a state change.
*
* <p>Exceptions thrown by a listener will be propagated up to the executor. Any exception thrown
* during {@code Executor.execute} (e.g., a {@code RejectedExecutionException} or an exception
* thrown by {@linkplain MoreExecutors#sameThreadExecutor inline execution}) will be caught and
* logged.
*
   * @param listener the listener to run when the service changes state
* @param executor the executor in which the listeners callback methods will be run. For fast,
* lightweight listeners that would be safe to execute in any thread, consider
* {@link MoreExecutors#sameThreadExecutor}.
* @since 13.0
*/
void addListener(Listener listener, Executor executor);
/**
* The lifecycle states of a service.
*
* <p>The ordering of the {@link State} enum is defined such that if there is a state transition
   * from {@code A -> B} then {@code A.compareTo(B) < 0}. N.B. The converse is not true, i.e. if
   * {@code A.compareTo(B) < 0} then there is <b>not</b> guaranteed to be a valid state transition
* {@code A -> B}.
*
* @since 9.0 (in 1.0 as {@code com.google.common.base.Service.State})
*/
@Beta // should come out of Beta when Service does
enum State {
/**
* A service in this state is inactive. It does minimal work and consumes
* minimal resources.
*/
NEW {
@Override boolean isTerminal() {
return false;
}
},
/**
* A service in this state is transitioning to {@link #RUNNING}.
*/
STARTING {
@Override boolean isTerminal() {
return false;
}
},
/**
* A service in this state is operational.
*/
RUNNING {
@Override boolean isTerminal() {
return false;
}
},
/**
* A service in this state is transitioning to {@link #TERMINATED}.
*/
STOPPING {
@Override boolean isTerminal() {
return false;
}
},
/**
* A service in this state has completed execution normally. It does minimal work and consumes
* minimal resources.
*/
TERMINATED {
@Override boolean isTerminal() {
return true;
}
},
/**
* A service in this state has encountered a problem and may not be operational. It cannot be
* started nor stopped.
*/
FAILED {
@Override boolean isTerminal() {
return true;
}
};
/** Returns true if this state is terminal. */
abstract boolean isTerminal();
}
  /**
   * A listener for the various state changes that a {@link Service} goes through in its lifecycle.
   *
   * <p>All methods are no-ops by default, implementors should override the ones they care about.
   *
   * <p>Each callback is invoked through the {@code Executor} that was supplied together with this
   * listener to {@link Service#addListener}.
   *
   * @author Luke Sandberg
   * @since 15.0 (present as an interface in 13.0)
   */
  @Beta // should come out of Beta when Service does
  abstract class Listener {
    /**
     * Called when the service transitions from {@linkplain State#NEW NEW} to
     * {@linkplain State#STARTING STARTING}. This occurs when {@link Service#startAsync} is called
     * the first time.
     */
    public void starting() {}
    /**
     * Called when the service transitions from {@linkplain State#STARTING STARTING} to
     * {@linkplain State#RUNNING RUNNING}. This occurs when a service has successfully started.
     */
    public void running() {}
    /**
     * Called when the service transitions to the {@linkplain State#STOPPING STOPPING} state. The
     * only valid values for {@code from} are {@linkplain State#STARTING STARTING} or
     * {@linkplain State#RUNNING RUNNING}. This occurs when {@link Service#stopAsync} is called.
     *
     * @param from The previous state that is being transitioned from.
     */
    public void stopping(State from) {}
    /**
     * Called when the service transitions to the {@linkplain State#TERMINATED TERMINATED} state.
     * The {@linkplain State#TERMINATED TERMINATED} state is a terminal state in the transition
     * diagram. Therefore, if this method is called, no other methods will be called on the
     * {@link Listener}.
     *
     * @param from The previous state that is being transitioned from. The only valid values for
     *     this are {@linkplain State#NEW NEW}, {@linkplain State#RUNNING RUNNING} or
     *     {@linkplain State#STOPPING STOPPING}.
     */
    public void terminated(State from) {}
    /**
     * Called when the service transitions to the {@linkplain State#FAILED FAILED} state. The
     * {@linkplain State#FAILED FAILED} state is a terminal state in the transition diagram.
     * Therefore, if this method is called, no other methods will be called on the {@link Listener}.
     *
     * @param from The previous state that is being transitioned from. Failure can occur in any
     *     state with the exception of {@linkplain State#NEW NEW} or
     *     {@linkplain State#TERMINATED TERMINATED}.
     * @param failure The exception that caused the failure.
     */
    public void failed(State from, Throwable failure) {}
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/Service.java | Java | asf20 | 11,138 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.GwtCompatible;
import javax.annotation.Nullable;
/**
 * {@link Error} counterpart of {@link java.util.concurrent.ExecutionException}:
 * as with {@code ExecutionException}, the {@linkplain #getCause() cause} of
 * this error comes from a failed task, possibly running in another thread. The
 * cause should itself be an {@code Error}; for other causes use
 * {@code ExecutionException} or {@link UncheckedExecutionException} instead.
 * Preserving this distinction lets clients keep telling exceptions and errors
 * apart even when they originate on other threads.
 *
 * @author Chris Povirk
 * @since 10.0
 */
@GwtCompatible
public class ExecutionError extends Error {
  private static final long serialVersionUID = 0;

  /** Creates a new instance with {@code null} as its detail message. */
  protected ExecutionError() {}

  /** Creates a new instance with the given detail message. */
  protected ExecutionError(@Nullable String message) {
    super(message);
  }

  /** Creates a new instance with the given detail message and cause. */
  public ExecutionError(@Nullable String message, @Nullable Error cause) {
    super(message, cause);
  }

  /** Creates a new instance with the given cause. */
  public ExecutionError(@Nullable Error cause) {
    super(cause);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/ExecutionError.java | Java | asf20 | 1,965 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import java.util.concurrent.Callable;
import java.util.concurrent.Executor;
import java.util.concurrent.FutureTask;
import javax.annotation.Nullable;
/**
 * A {@link FutureTask} that is also a {@link ListenableFuture}. In contrast to
 * a plain {@code FutureTask}, this class offers no overrideable
 * {@link FutureTask#done() done()} hook; register completion callbacks via
 * {@link #addListener} instead.
 *
 * @author Sven Mawson
 * @since 1.0
 */
public class ListenableFutureTask<V> extends FutureTask<V>
    implements ListenableFuture<V> {
  // TODO(cpovirk): explore ways of making ListenableFutureTask final. There are
  // some valid reasons such as BoundedQueueExecutorService to allow extends but it
  // would be nice to make it final to avoid unintended usage.

  /** Holds the listeners to notify once this task completes. */
  private final ExecutionList listeners = new ExecutionList();

  /**
   * Creates a {@code ListenableFutureTask} that will upon running, execute the
   * given {@code Callable}.
   *
   * @param callable the callable task
   * @since 10.0
   */
  public static <V> ListenableFutureTask<V> create(Callable<V> callable) {
    return new ListenableFutureTask<V>(callable);
  }

  /**
   * Creates a {@code ListenableFutureTask} that will upon running, execute the
   * given {@code Runnable}, and arrange that {@code get} will return the
   * given result on successful completion.
   *
   * @param runnable the runnable task
   * @param result the result reported by {@code get} on success; if no
   *     particular result is needed, consider the form:
   *     {@code ListenableFuture<?> f = ListenableFutureTask.create(runnable,
   *     null)}
   * @since 10.0
   */
  public static <V> ListenableFutureTask<V> create(
      Runnable runnable, @Nullable V result) {
    return new ListenableFutureTask<V>(runnable, result);
  }

  ListenableFutureTask(Callable<V> callable) {
    super(callable);
  }

  ListenableFutureTask(Runnable runnable, @Nullable V result) {
    super(runnable, result);
  }

  @Override
  public void addListener(Runnable listener, Executor exec) {
    listeners.add(listener, exec);
  }

  /** Invoked by {@code FutureTask} upon completion; notifies all listeners. */
  @Override
  protected void done() {
    listeners.execute();
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/ListenableFutureTask.java | Java | asf20 | 3,012 |
/*
* Copyright (C) 2013 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
/**
 * Static utility methods pertaining to the {@link Runnable} interface.
 *
 * @since 16.0
 */
@Beta
@GwtCompatible
public final class Runnables {
  // Stateless, so a single shared instance is safe.
  private static final Runnable DO_NOTHING = new Runnable() {
    @Override
    public void run() {}
  };

  /**
   * Returns a {@link Runnable} instance that does nothing when run.
   */
  public static Runnable doNothing() {
    return DO_NOTHING;
  }

  private Runnables() {}
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/Runnables.java | Java | asf20 | 1,191 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.base.Preconditions;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.Executor;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.concurrent.GuardedBy;
/**
* Executor ensuring that all Runnables submitted are executed in order,
* using the provided Executor, and serially such that no two will ever
* be running at the same time.
*
* TODO(user): The tasks are given to the underlying executor as a single
* task, which means the semantics of the executor may be changed, e.g. the
* executor may have an afterExecute method that runs after every task
*
* TODO(user): What happens in case of shutdown or shutdownNow? Should
* TaskRunner check for interruption?
*
* TODO(user): It would be nice to provide a handle to individual task
* results using Future. Maybe SerializingExecutorService?
*
* @author JJ Furman
*/
final class SerializingExecutor implements Executor {
  private static final Logger log =
      Logger.getLogger(SerializingExecutor.class.getName());
  /** Underlying executor that all submitted Runnable objects are run on. */
  private final Executor executor;
  /** A list of Runnables to be run in order. */
  @GuardedBy("internalLock")
  private final Queue<Runnable> waitQueue = new ArrayDeque<Runnable>();
  /**
   * We explicitly keep track of if the TaskRunner is currently scheduled to
   * run. If it isn't, we start it. We can't just use
   * waitQueue.isEmpty() as a proxy because we need to ensure that only one
   * Runnable submitted is running at a time so even if waitQueue is empty
   * the isThreadScheduled isn't set to false until after the Runnable is
   * finished.
   */
  @GuardedBy("internalLock")
  private boolean isThreadScheduled = false;
  /** The object that actually runs the Runnables submitted, reused. */
  private final TaskRunner taskRunner = new TaskRunner();
  /**
   * Creates a SerializingExecutor, running tasks using {@code executor}.
   *
   * @param executor Executor in which tasks should be run. Must not be null.
   */
  public SerializingExecutor(Executor executor) {
    Preconditions.checkNotNull(executor, "'executor' must not be null.");
    this.executor = executor;
  }
  // Guards waitQueue and isThreadScheduled; the toString override only aids debugging.
  private final Object internalLock = new Object() {
    @Override public String toString() {
      return "SerializingExecutor lock: " + super.toString();
    }
  };
  /**
   * Runs the given runnable strictly after all Runnables that were submitted
   * before it, and using the {@code executor} passed to the constructor.
   */
  @Override
  public void execute(Runnable r) {
    Preconditions.checkNotNull(r, "'r' must not be null.");
    boolean scheduleTaskRunner = false;
    synchronized (internalLock) {
      waitQueue.add(r);
      // Claim the single TaskRunner slot; at most one may be scheduled at a time.
      if (!isThreadScheduled) {
        isThreadScheduled = true;
        scheduleTaskRunner = true;
      }
    }
    if (scheduleTaskRunner) {
      boolean threw = true;
      try {
        executor.execute(taskRunner);
        threw = false;
      } finally {
        if (threw) {
          synchronized (internalLock) {
            // It is possible that at this point that there are still tasks in
            // the queue, it would be nice to keep trying but the error may not
            // be recoverable. So we update our state and propagate so that if
            // our caller deems it recoverable we won't be stuck.
            isThreadScheduled = false;
          }
        }
      }
    }
  }
  /**
   * Task that actually runs the Runnables. It takes the Runnables off of the
   * queue one by one and runs them. After it is done with all Runnables and
   * there are no more to run, puts the SerializingExecutor in the state where
   * isThreadScheduled = false and returns. This allows the current worker
   * thread to return to the original pool.
   */
  private class TaskRunner implements Runnable {
    @Override
    public void run() {
      boolean stillRunning = true;
      try {
        while (true) {
          Preconditions.checkState(isThreadScheduled);
          Runnable nextToRun;
          synchronized (internalLock) {
            nextToRun = waitQueue.poll();
            if (nextToRun == null) {
              // Queue drained: release the scheduled slot before leaving the lock.
              isThreadScheduled = false;
              stillRunning = false;
              break;
            }
          }
          // Always run while not holding the lock, to avoid deadlocks.
          try {
            nextToRun.run();
          } catch (RuntimeException e) {
            // Log it and keep going.
            log.log(Level.SEVERE, "Exception while executing runnable "
                + nextToRun, e);
          }
        }
      } finally {
        if (stillRunning) {
          // An Error is bubbling up, we should mark ourselves as no longer
          // running, that way if anyone tries to keep using us we won't be
          // corrupted.
          synchronized (internalLock) {
            isThreadScheduled = false;
          }
        }
      }
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/SerializingExecutor.java | Java | asf20 | 5,686 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * A {@code CheckedFuture} is a {@link ListenableFuture} whose {@code get}
 * variants can throw a checked exception, which makes it easier to expose a
 * future for logic that fails with an application-specific exception type.
 *
 * <p>A common implementation is {@link Futures#immediateCheckedFuture}.
 *
 * <p>Implementations of this interface must adapt the exceptions thrown by
 * {@code Future#get()} — {@link CancellationException},
 * {@link ExecutionException} and {@link InterruptedException} — into the type
 * specified by the {@code X} type parameter.
 *
 * <p>Because this interface extends {@code ListenableFuture}, listeners can be
 * added as well, so the future may serve both as a normal {@link Future} and as
 * an asynchronous callback mechanism. Multiple callbacks may be registered for
 * a particular task, and the future will guarantee execution of all listeners
 * when the task completes.
 *
 * <p>For a simpler alternative to CheckedFuture, consider accessing Future
 * values with {@link Futures#get(Future, Class) Futures.get()}.
 *
 * @author Sven Mawson
 * @since 1.0
 */
@Beta
public interface CheckedFuture<V, X extends Exception>
    extends ListenableFuture<V> {

  /**
   * Exception-checking version of {@link Future#get()}: translates
   * {@link InterruptedException}, {@link CancellationException} and
   * {@link ExecutionException} into application-specific exceptions.
   *
   * @return the result of executing the future.
   * @throws X on interruption, cancellation or execution exceptions.
   */
  V checkedGet() throws X;

  /**
   * Exception-checking version of {@link Future#get(long, TimeUnit)}:
   * translates {@link InterruptedException}, {@link CancellationException} and
   * {@link ExecutionException} into application-specific exceptions, while a
   * timeout is still reported with a normal {@link TimeoutException}.
   *
   * @return the result of executing the future.
   * @throws TimeoutException if retrieving the result timed out.
   * @throws X on interruption, cancellation or execution exceptions.
   */
  V checkedGet(long timeout, TimeUnit unit) throws TimeoutException, X;
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/CheckedFuture.java | Java | asf20 | 3,107 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
/**
 * Abstract {@link ListeningExecutorService} implementation that builds a
 * {@link ListenableFutureTask} for every {@link Runnable} and {@link Callable}
 * submitted to it, and runs those tasks with the abstract
 * {@link #execute execute(Runnable)} method.
 *
 * <p>Besides {@link #execute}, subclasses must implement all methods related to
 * shutdown and termination.
 *
 * @author Chris Povirk
 * @since 14.0
 */
@Beta
public abstract class AbstractListeningExecutorService
    extends AbstractExecutorService implements ListeningExecutorService {

  @Override
  protected final <T> ListenableFutureTask<T> newTaskFor(Runnable runnable, T value) {
    return ListenableFutureTask.create(runnable, value);
  }

  @Override
  protected final <T> ListenableFutureTask<T> newTaskFor(Callable<T> callable) {
    return ListenableFutureTask.create(callable);
  }

  /*
   * The superclass submit() methods construct their tasks through newTaskFor(),
   * so the futures they return are always ListenableFutureTask instances; the
   * casts below merely narrow the static type.
   */

  @Override
  public ListenableFuture<?> submit(Runnable task) {
    return (ListenableFuture<?>) super.submit(task);
  }

  @Override
  public <T> ListenableFuture<T> submit(Runnable task, @Nullable T result) {
    return (ListenableFuture<T>) super.submit(task, result);
  }

  @Override
  public <T> ListenableFuture<T> submit(Callable<T> task) {
    return (ListenableFuture<T>) super.submit(task);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/AbstractListeningExecutorService.java | Java | asf20 | 2,094 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* A delegating wrapper around a {@link ListenableFuture} that adds support for
* the {@link #checkedGet()} and {@link #checkedGet(long, TimeUnit)} methods.
*
* @author Sven Mawson
* @since 1.0
*/
@Beta
public abstract class AbstractCheckedFuture<V, X extends Exception>
    extends ForwardingListenableFuture.SimpleForwardingListenableFuture<V>
    implements CheckedFuture<V, X> {
  /**
   * Constructs an {@code AbstractCheckedFuture} that wraps a delegate.
   */
  protected AbstractCheckedFuture(ListenableFuture<V> delegate) {
    super(delegate);
  }
  /**
   * Translates from an {@link InterruptedException},
   * {@link CancellationException} or {@link ExecutionException} thrown by
   * {@code get} to an exception of type {@code X} to be thrown by
   * {@code checkedGet}. Subclasses must implement this method.
   *
   * <p>If {@code e} is an {@code InterruptedException}, the calling
   * {@code checkedGet} method has already restored the interrupt after catching
   * the exception. If an implementation of {@link #mapException(Exception)}
   * wishes to swallow the interrupt, it can do so by calling
   * {@link Thread#interrupted()}.
   *
   * <p>Subclasses may choose to throw, rather than return, a subclass of
   * {@code RuntimeException} to allow creating a CheckedFuture that throws
   * both checked and unchecked exceptions.
   */
  protected abstract X mapException(Exception e);
  /**
   * {@inheritDoc}
   *
   * <p>This implementation calls {@link #get()} and maps that method's standard
   * exceptions to instances of type {@code X} using {@link #mapException}.
   *
   * <p>In addition, if {@code get} throws an {@link InterruptedException}, this
   * implementation will set the current thread's interrupt status before
   * calling {@code mapException}.
   *
   * @throws X if {@link #get()} throws an {@link InterruptedException},
   *     {@link CancellationException}, or {@link ExecutionException}
   */
  @Override
  public V checkedGet() throws X {
    try {
      return get();
    } catch (InterruptedException e) {
      // Restore the interrupt before mapping, as promised by the mapException contract.
      Thread.currentThread().interrupt();
      throw mapException(e);
    } catch (CancellationException e) {
      throw mapException(e);
    } catch (ExecutionException e) {
      throw mapException(e);
    }
  }
  /**
   * {@inheritDoc}
   *
   * <p>This implementation calls {@link #get(long, TimeUnit)} and maps that
   * method's standard exceptions (excluding {@link TimeoutException}, which is
   * propagated) to instances of type {@code X} using {@link #mapException}.
   *
   * <p>In addition, if {@code get} throws an {@link InterruptedException}, this
   * implementation will set the current thread's interrupt status before
   * calling {@code mapException}.
   *
   * @throws X if {@link #get()} throws an {@link InterruptedException},
   *     {@link CancellationException}, or {@link ExecutionException}
   * @throws TimeoutException {@inheritDoc}
   */
  @Override
  public V checkedGet(long timeout, TimeUnit unit) throws TimeoutException, X {
    try {
      return get(timeout, unit);
    } catch (InterruptedException e) {
      // Restore the interrupt before mapping, as promised by the mapException contract.
      Thread.currentThread().interrupt();
      throw mapException(e);
    } catch (CancellationException e) {
      throw mapException(e);
    } catch (ExecutionException e) {
      throw mapException(e);
    }
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/AbstractCheckedFuture.java | Java | asf20 | 4,219 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.GwtCompatible;
import com.google.common.base.Function;
import com.google.common.collect.Maps;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
/**
* A map containing {@code long} values that can be atomically updated. While writes to a
* traditional {@code Map} rely on {@code put(K, V)}, the typical mechanism for writing to this map
* is {@code addAndGet(K, long)}, which adds a {@code long} to the value currently associated with
* {@code K}. If a key has not yet been associated with a value, its implicit value is zero.
*
* <p>Most methods in this class treat absent values and zero values identically, as individually
* documented. Exceptions to this are {@link #containsKey}, {@link #size}, {@link #isEmpty},
* {@link #asMap}, and {@link #toString}.
*
* <p>Instances of this class may be used by multiple threads concurrently. All operations are
* atomic unless otherwise noted.
*
* <p><b>Note:</b> If your values are always positive and less than 2^31, you may wish to use a
* {@link com.google.common.collect.Multiset} such as
* {@link com.google.common.collect.ConcurrentHashMultiset} instead.
*
* <b>Warning:</b> Unlike {@code Multiset}, entries whose values are zero are not automatically
* removed from the map. Instead they must be removed manually with {@link #removeAllZeros}.
*
* @author Charles Fry
* @since 11.0
*/
@GwtCompatible
public final class AtomicLongMap<K> {
private final ConcurrentHashMap<K, AtomicLong> map;
  // All state lives in this map; each mapped AtomicLong is mutated in place.
  private AtomicLongMap(ConcurrentHashMap<K, AtomicLong> map) {
    this.map = checkNotNull(map);
  }
  /**
   * Creates a new, empty {@code AtomicLongMap}.
   */
  public static <K> AtomicLongMap<K> create() {
    return new AtomicLongMap<K>(new ConcurrentHashMap<K, AtomicLong>());
  }
/**
* Creates an {@code AtomicLongMap} with the same mappings as the specified {@code Map}.
*/
public static <K> AtomicLongMap<K> create(Map<? extends K, ? extends Long> m) {
AtomicLongMap<K> result = create();
result.putAll(m);
return result;
}
/**
* Returns the value associated with {@code key}, or zero if there is no value associated with
* {@code key}.
*/
public long get(K key) {
AtomicLong atomic = map.get(key);
return atomic == null ? 0L : atomic.get();
}
/**
* Increments by one the value currently associated with {@code key}, and returns the new value.
*/
public long incrementAndGet(K key) {
return addAndGet(key, 1);
}
/**
* Decrements by one the value currently associated with {@code key}, and returns the new value.
*/
public long decrementAndGet(K key) {
return addAndGet(key, -1);
}
  /**
   * Adds {@code delta} to the value currently associated with {@code key}, and returns the new
   * value.
   */
  public long addAndGet(K key, long delta) {
    // Retry loop: the mapping for key may appear, be replaced, or be removed concurrently.
    outer: for (;;) {
      AtomicLong atomic = map.get(key);
      if (atomic == null) {
        atomic = map.putIfAbsent(key, new AtomicLong(delta));
        if (atomic == null) {
          return delta;
        }
        // atomic is now non-null; fall through
      }
      for (;;) {
        long oldValue = atomic.get();
        if (oldValue == 0L) {
          // don't compareAndSet a zero: remove() marks an entry as removal-in-progress by
          // CASing it to zero first, so swap in a fresh AtomicLong instead.
          if (map.replace(key, atomic, new AtomicLong(delta))) {
            return delta;
          }
          // atomic replaced
          continue outer;
        }
        long newValue = oldValue + delta;
        if (atomic.compareAndSet(oldValue, newValue)) {
          return newValue;
        }
        // value changed
      }
    }
  }
/**
* Increments by one the value currently associated with {@code key}, and returns the old value.
*/
public long getAndIncrement(K key) {
return getAndAdd(key, 1);
}
/**
* Decrements by one the value currently associated with {@code key}, and returns the old value.
*/
public long getAndDecrement(K key) {
return getAndAdd(key, -1);
}
  /**
   * Adds {@code delta} to the value currently associated with {@code key}, and returns the old
   * value.
   */
  public long getAndAdd(K key, long delta) {
    // Retry loop: the mapping for key may appear, be replaced, or be removed concurrently.
    outer: for (;;) {
      AtomicLong atomic = map.get(key);
      if (atomic == null) {
        atomic = map.putIfAbsent(key, new AtomicLong(delta));
        if (atomic == null) {
          return 0L;
        }
        // atomic is now non-null; fall through
      }
      for (;;) {
        long oldValue = atomic.get();
        if (oldValue == 0L) {
          // don't compareAndSet a zero: remove() marks an entry as removal-in-progress by
          // CASing it to zero first, so swap in a fresh AtomicLong instead.
          if (map.replace(key, atomic, new AtomicLong(delta))) {
            return 0L;
          }
          // atomic replaced
          continue outer;
        }
        long newValue = oldValue + delta;
        if (atomic.compareAndSet(oldValue, newValue)) {
          return oldValue;
        }
        // value changed
      }
    }
  }
  /**
   * Associates {@code newValue} with {@code key} in this map, and returns the value previously
   * associated with {@code key}, or zero if there was no such value.
   */
  public long put(K key, long newValue) {
    // Retry loop: the mapping for key may appear, be replaced, or be removed concurrently.
    outer: for (;;) {
      AtomicLong atomic = map.get(key);
      if (atomic == null) {
        atomic = map.putIfAbsent(key, new AtomicLong(newValue));
        if (atomic == null) {
          return 0L;
        }
        // atomic is now non-null; fall through
      }
      for (;;) {
        long oldValue = atomic.get();
        if (oldValue == 0L) {
          // don't compareAndSet a zero: remove() marks an entry as removal-in-progress by
          // CASing it to zero first, so swap in a fresh AtomicLong instead.
          if (map.replace(key, atomic, new AtomicLong(newValue))) {
            return 0L;
          }
          // atomic replaced
          continue outer;
        }
        if (atomic.compareAndSet(oldValue, newValue)) {
          return oldValue;
        }
        // value changed
      }
    }
  }
/**
* Copies all of the mappings from the specified map to this map. The effect of this call is
* equivalent to that of calling {@code put(k, v)} on this map once for each mapping from key
* {@code k} to value {@code v} in the specified map. The behavior of this operation is undefined
* if the specified map is modified while the operation is in progress.
*/
public void putAll(Map<? extends K, ? extends Long> m) {
for (Map.Entry<? extends K, ? extends Long> entry : m.entrySet()) {
put(entry.getKey(), entry.getValue());
}
}
  /**
   * Removes and returns the value associated with {@code key}. If {@code key} is not
   * in the map, this method has no effect and returns zero.
   */
  public long remove(K key) {
    AtomicLong atomic = map.get(key);
    if (atomic == null) {
      return 0L;
    }
    // Two-phase removal: first CAS the value to zero (the removal-in-progress marker that
    // the other update loops check for), then drop the entry.
    for (;;) {
      long oldValue = atomic.get();
      if (oldValue == 0L || atomic.compareAndSet(oldValue, 0L)) {
        // only remove after setting to zero, to avoid concurrent updates
        map.remove(key, atomic);
        // succeed even if the remove fails, since the value was already adjusted
        return oldValue;
      }
    }
  }
/**
* Removes all mappings from this map whose values are zero.
*
* <p>This method is not atomic: the map may be visible in intermediate states, where some
* of the zero values have been removed and others have not.
*/
public void removeAllZeros() {
for (K key : map.keySet()) {
AtomicLong atomic = map.get(key);
if (atomic != null && atomic.get() == 0L) {
map.remove(key, atomic);
}
}
}
/**
* Returns the sum of all values in this map.
*
* <p>This method is not atomic: the sum may or may not include other concurrent operations.
*/
public long sum() {
long sum = 0L;
for (AtomicLong value : map.values()) {
sum = sum + value.get();
}
return sum;
}
private transient Map<K, Long> asMap;
/**
* Returns a live, read-only view of the map backing this {@code AtomicLongMap}.
*/
public Map<K, Long> asMap() {
Map<K, Long> result = asMap;
return (result == null) ? asMap = createAsMap() : result;
}
private Map<K, Long> createAsMap() {
return Collections.unmodifiableMap(
Maps.transformValues(map, new Function<AtomicLong, Long>() {
@Override
public Long apply(AtomicLong atomic) {
return atomic.get();
}
}));
}
  /**
   * Returns true if this map contains a mapping for the specified key.
   *
   * <p>Unlike most operations on this class, this method distinguishes a zero value from an
   * absent key.
   */
  public boolean containsKey(Object key) {
    return map.containsKey(key);
  }
  /**
   * Returns the number of key-value mappings in this map. If the map contains more than
   * {@code Integer.MAX_VALUE} elements, returns {@code Integer.MAX_VALUE}.
   *
   * <p>Mappings whose value is zero are included in the count.
   */
  public int size() {
    return map.size();
  }
  /**
   * Returns {@code true} if this map contains no key-value mappings.
   *
   * <p>A map holding only zero-valued mappings is not considered empty.
   */
  public boolean isEmpty() {
    return map.isEmpty();
  }
  /**
   * Removes all of the mappings from this map. The map will be empty after this call returns.
   *
   * <p>This method is not atomic: the map may not be empty after returning if there were concurrent
   * writes.
   */
  public void clear() {
    map.clear();
  }
  /**
   * Returns a string representation of this map, formatted by the backing map's
   * {@code toString} (each value rendered via the {@code AtomicLong} it wraps).
   */
  @Override
  public String toString() {
    return map.toString();
  }
/*
* ConcurrentMap operations which we may eventually add.
*
* The problem with these is that remove(K, long) has to be done in two phases by definition ---
* first decrementing to zero, and then removing. putIfAbsent or replace could observe the
* intermediate zero-state. Ways we could deal with this are:
*
* - Don't define any of the ConcurrentMap operations. This is the current state of affairs.
*
* - Define putIfAbsent and replace as treating zero and absent identically (as currently
* implemented below). This is a bit surprising with putIfAbsent, which really becomes
* putIfZero.
*
* - Allow putIfAbsent and replace to distinguish between zero and absent, but don't implement
* remove(K, long). Without any two-phase operations it becomes feasible for all remaining
* operations to distinguish between zero and absent. If we do this, then perhaps we should add
* replace(key, long).
*
* - Introduce a special-value private static final AtomicLong that would have the meaning of
* removal-in-progress, and rework all operations to properly distinguish between zero and
* absent.
*/
  /**
   * If {@code key} is not already associated with a value or if {@code key} is associated with
   * zero, associate it with {@code newValue}. Returns the previous value associated with
   * {@code key}, or zero if there was no mapping for {@code key}.
   */
  long putIfAbsent(K key, long newValue) {
    // CAS-style retry loop: restart whenever another thread wins a race.
    for (;;) {
      AtomicLong atomic = map.get(key);
      if (atomic == null) {
        // No mapping yet: try to install a fresh counter holding newValue.
        atomic = map.putIfAbsent(key, new AtomicLong(newValue));
        if (atomic == null) {
          // We won the race; "absent" is reported as zero.
          return 0L;
        }
        // Another thread installed a counter first; atomic is now non-null, so
        // fall through and treat it as the existing mapping.
      }
      long oldValue = atomic.get();
      if (oldValue == 0L) {
        // don't compareAndSet a zero: a zero-valued AtomicLong may be mid
        // two-phase removal (see remove), so swap in a brand-new AtomicLong,
        // keyed on the old object's identity via ConcurrentMap.replace.
        if (map.replace(key, atomic, new AtomicLong(newValue))) {
          return 0L;
        }
        // atomic replaced (or removed) concurrently; retry from the top.
        continue;
      }
      // Existing non-zero value: leave it untouched, per putIfAbsent contract.
      return oldValue;
    }
  }
/**
* If {@code (key, expectedOldValue)} is currently in the map, this method replaces
* {@code expectedOldValue} with {@code newValue} and returns true; otherwise, this method
* returns false.
*
* <p>If {@code expectedOldValue} is zero, this method will succeed if {@code (key, zero)}
* is currently in the map, or if {@code key} is not in the map at all.
*/
boolean replace(K key, long expectedOldValue, long newValue) {
if (expectedOldValue == 0L) {
return putIfAbsent(key, newValue) == 0L;
} else {
AtomicLong atomic = map.get(key);
return (atomic == null) ? false : atomic.compareAndSet(expectedOldValue, newValue);
}
}
  /**
   * If {@code (key, value)} is currently in the map, this method removes it and returns
   * true; otherwise, this method returns false.
   */
  boolean remove(K key, long value) {
    AtomicLong atomic = map.get(key);
    if (atomic == null) {
      // No mapping at all, so (key, value) is certainly absent.
      return false;
    }
    long oldValue = atomic.get();
    if (oldValue != value) {
      // Mapped value differs from the requested one; nothing to remove.
      return false;
    }
    // Two-phase removal: first CAS the counter to zero, then remove it.
    if (oldValue == 0L || atomic.compareAndSet(oldValue, 0L)) {
      // only remove after setting to zero, to avoid concurrent updates
      // (remove(key, atomic) is keyed on the AtomicLong's identity).
      map.remove(key, atomic);
      // succeed even if the remove fails, since the value was already adjusted
      return true;
    }
    // value changed concurrently between get() and compareAndSet.
    return false;
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/AtomicLongMap.java | Java | asf20 | 13,359 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import com.google.common.annotations.Beta;
import java.util.concurrent.Callable;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * A {@link ScheduledExecutorService} that returns {@link ListenableFuture}
 * instances from its {@code ExecutorService} methods. To create an instance
 * from an existing {@link ScheduledExecutorService}, call
 * {@link MoreExecutors#listeningDecorator(ScheduledExecutorService)}.
 *
 * @author Chris Povirk
 * @since 10.0
 */
@Beta
public interface ListeningScheduledExecutorService
    extends ScheduledExecutorService, ListeningExecutorService {
  /**
   * Schedules the given command to run once after the given delay, returning a
   * listenable future for the task.
   *
   * @since 15.0 (previously returned ScheduledFuture)
   */
  @Override
  ListenableScheduledFuture<?> schedule(
      Runnable command, long delay, TimeUnit unit);
  /**
   * Schedules the given callable to run once after the given delay, returning a
   * listenable future whose result is the callable's result.
   *
   * @since 15.0 (previously returned ScheduledFuture)
   */
  @Override
  <V> ListenableScheduledFuture<V> schedule(
      Callable<V> callable, long delay, TimeUnit unit);
  /**
   * Schedules the given command to run repeatedly at a fixed rate after the
   * initial delay, returning a listenable future for the recurring task.
   *
   * @since 15.0 (previously returned ScheduledFuture)
   */
  @Override
  ListenableScheduledFuture<?> scheduleAtFixedRate(
      Runnable command, long initialDelay, long period, TimeUnit unit);
  /**
   * Schedules the given command to run repeatedly with a fixed delay between
   * the end of one execution and the start of the next, returning a listenable
   * future for the recurring task.
   *
   * @since 15.0 (previously returned ScheduledFuture)
   */
  @Override
  ListenableScheduledFuture<?> scheduleWithFixedDelay(
      Runnable command, long initialDelay, long delay, TimeUnit unit);
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/ListeningScheduledExecutorService.java | Java | asf20 | 2,010 |
/*
* Copyright (C) 2010 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;
import javax.annotation.Nullable;
/**
 * Static utility methods pertaining to classes in the
 * {@code java.util.concurrent.atomic} package.
 *
 * @author Kurt Alfred Kluever
 * @since 10.0
 */
public final class Atomics {
  // Non-instantiable utility class.
  private Atomics() {}
  /**
   * Creates an {@code AtomicReference} instance with no initial value.
   *
   * @return a new {@code AtomicReference} with no initial value
   */
  public static <V> AtomicReference<V> newReference() {
    // "No initial value" means the reference starts out holding null, so this
    // is equivalent to the value-accepting factory called with null.
    return Atomics.<V>newReference(null);
  }
  /**
   * Creates an {@code AtomicReference} instance holding {@code initialValue}.
   *
   * @param initialValue the value the reference starts out with
   * @return a new {@code AtomicReference} with the given initial value
   */
  public static <V> AtomicReference<V> newReference(@Nullable V initialValue) {
    return new AtomicReference<V>(initialValue);
  }
  /**
   * Creates an {@code AtomicReferenceArray} instance of the given length, with
   * every element initially null.
   *
   * @param length the length of the array
   * @return a new {@code AtomicReferenceArray} with the given length
   */
  public static <E> AtomicReferenceArray<E> newReferenceArray(int length) {
    return new AtomicReferenceArray<E>(length);
  }
  /**
   * Creates an {@code AtomicReferenceArray} instance with the same length as,
   * and all elements copied from, the given array.
   *
   * @param array the array to copy elements from
   * @return a new {@code AtomicReferenceArray} copied from the given array
   */
  public static <E> AtomicReferenceArray<E> newReferenceArray(E[] array) {
    return new AtomicReferenceArray<E>(array);
  }
}
| zzhhhhh-aw4rwer | guava/src/com/google/common/util/concurrent/Atomics.java | Java | asf20 | 2,333 |