file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
path.rs | //! This module contains code for abstracting object locations that work
//! across different backing implementations and platforms.
use itertools::Itertools;
use percent_encoding::{percent_decode_str, percent_encode, AsciiSet, CONTROLS};
use std::path::PathBuf;
/// Universal interface for handling paths and locations for objects and
/// directories in the object store.
///
/// It allows IOx to be completely decoupled from the underlying object store
/// implementations.
///
/// Deliberately does not implement `Display` or `ToString`! Use one of the
/// converters.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct ObjectStorePath {
parts: Vec<PathPart>,
}
impl ObjectStorePath {
/// For use when receiving a path from an object store API directly, not
/// when building a path. Assumes DELIMITER is the separator.
///
/// TODO: Improve performance by implementing a CoW-type model to delay
/// parsing until needed TODO: This should only be available to cloud
/// storage
pub fn from_cloud_unchecked(path: impl Into<String>) -> Self {
let path = path.into();
Self {
parts: path
.split_terminator(DELIMITER)
.map(|s| PathPart(s.to_string()))
.collect(),
}
}
/// For use when receiving a path from a filesystem directly, not
/// when building a path. Uses the standard library's path splitting
/// implementation to separate into parts.
pub fn from_path_buf_unchecked(path: impl Into<PathBuf>) -> Self {
let path = path.into();
Self {
parts: path
.iter()
.flat_map(|s| s.to_os_string().into_string().map(PathPart))
.collect(),
}
}
/// Add a part to the end of the path, encoding any restricted characters.
pub fn push(&mut self, part: impl Into<String>) {
let part = part.into();
self.parts.push((&*part).into());
}
/// Add a `PathPart` to the end of the path. Infallible because the
/// `PathPart` should already have been checked for restricted
/// characters.
pub fn push_part(&mut self, part: &PathPart) {
self.parts.push(part.to_owned());
}
/// Add the parts of `ObjectStorePath` to the end of the path. Notably does
/// *not* behave as `PathBuf::push` does: no existing part of `self`
/// will be replaced as part of this call.
pub fn push_path(&mut self, path: &Self) {
self.parts.extend_from_slice(&path.parts);
}
/// Push a bunch of parts in one go.
pub fn push_all<'a>(&mut self, parts: impl AsRef<[&'a str]>) {
self.parts.extend(parts.as_ref().iter().map(|&v| v.into()));
}
/// Return the component parts of the path.
pub fn as_parts(&self) -> &[PathPart] {
self.parts.as_ref()
}
/// Pops a part from the path and returns it, or `None` if it's empty.
pub fn pop(&mut self) -> Option<&PathPart> {
unimplemented!()
}
/// Determines whether `prefix` is a prefix of `self`.
pub fn starts_with(&self, prefix: &Self) -> bool {
let diff = itertools::diff_with(self.parts.iter(), prefix.parts.iter(), |a, b| a == b);
match diff {
None => true,
Some(itertools::Diff::Shorter(..)) => true,
Some(itertools::Diff::FirstMismatch(_, mut remaining_self, mut remaining_prefix)) => {
let first_prefix = remaining_prefix.next().expect("must be at least one value");
// there must not be any other remaining parts in the prefix
remaining_prefix.next().is_none()
// and the next item in self must start with the last item in the prefix
&& remaining_self
.next()
.expect("must be at least one value")
.0
.starts_with(&first_prefix.0)
}
_ => false,
}
}
/// Returns delimiter-separated parts contained in `self` after `prefix`.
pub fn parts_after_prefix(&self, _prefix: &Self) -> &[PathPart] {
unimplemented!()
}
}
// TODO: I made these structs rather than functions because I could see
// `convert` being part of a trait, possibly, but that seemed a bit overly
// complex for now.
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in cloud storage.
#[derive(Debug, Clone, Copy)]
pub struct CloudConverter {}
impl CloudConverter {
/// Creates a cloud storage location by joining this `ObjectStorePath`'s
/// parts with `DELIMITER`
pub fn convert(object_store_path: &ObjectStorePath) -> String {
object_store_path.parts.iter().map(|p| &p.0).join(DELIMITER)
}
}
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in filesystem storage.
#[derive(Debug, Clone, Copy)]
pub struct FileConverter {}
impl FileConverter {
/// Creates a filesystem `PathBuf` location by using the standard library's
/// `PathBuf` building implementation appropriate for the current
/// platform.
pub fn convert(object_store_path: &ObjectStorePath) -> PathBuf {
object_store_path.parts.iter().map(|p| &p.0).collect()
}
}
/// The delimiter to separate object namespaces, creating a directory structure.
pub const DELIMITER: &str = "/";
// percent_encode's API needs this as a byte
const DELIMITER_BYTE: u8 = DELIMITER.as_bytes()[0];
/// The PathPart type exists to validate the directory/file names that form part
/// of a path.
///
/// A PathPart instance is guaranteed to contain no `/` characters as it can
/// only be constructed by going through the `try_from` impl.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct PathPart(String);
/// Characters we want to encode.
const INVALID: &AsciiSet = &CONTROLS
// The delimiter we are reserving for internal hierarchy
.add(DELIMITER_BYTE)
// Characters AWS recommends avoiding for object keys
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
.add(b'\\')
.add(b'{')
// TODO: Non-printable ASCII characters (128–255 decimal characters)
.add(b'^')
.add(b'}')
.add(b'%')
.add(b'`')
.add(b']')
.add(b'"') // " <-- my editor is confused about double quotes within single quotes
.add(b'>')
.add(b'[')
.add(b'~')
.add(b'<')
.add(b'#')
.add(b'|')
// Characters Google Cloud Storage recommends avoiding for object names
// https://cloud.google.com/storage/docs/naming-objects
.add(b'\r')
.add(b'\n')
.add(b'*')
.add(b'?');
impl From<&str> for PathPart {
fn from(v: &str) -> Self {
match v {
// We don't want to encode `.` generally, but we do want to disallow parts of paths
// to be equal to `.` or `..` to prevent file system traversal shenanigans.
"." => Self(String::from("%2E")),
".." => Self(String::from("%2E%2E")),
other => Self(percent_encode(other.as_bytes(), INVALID).to_string()),
}
}
}
impl std::fmt::Display for PathPart {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
percent_decode_str(&self.0)
.decode_utf8()
.expect("Valid UTF-8 that came from String")
.fmt(f)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn path_part_delimiter_gets_encoded() {
let part: PathPart = "foo/bar".into();
assert_eq!(part, PathPart(String::from("foo%2Fbar")));
}
#[test]
fn path_part_gets_decoded_for_display() {
let part: PathPart = "foo/bar".into();
assert_eq!(part.to_string(), "foo/bar");
}
#[test]
fn path_part_given_already_encoded_string() {
let part: PathPart = "foo%2Fbar".into();
assert_eq!(part, PathPart(String::from("foo%252Fbar")));
assert_eq!(part.to_string(), "foo%2Fbar");
}
#[test]
fn path_part_cant_be_one_dot() {
let part: PathPart = ".".into();
assert_eq!(part, PathPart(String::from("%2E")));
assert_eq!(part.to_string(), ".");
}
#[test]
fn path_part_cant_be_two_dots() {
let part: PathPart = "..".into();
assert_eq!(part, PathPart(String::from("%2E%2E")));
assert_eq!(part.to_string(), "..");
}
// Invariants to maintain/document/test:
//
// - always ends in DELIMITER if it's a directory. If it's the end object, it
// should have some sort of file extension like .parquet, .json, or .segment
// - does not contain unencoded DELIMITER
// - for file paths: does not escape root dir
// - for object storage: looks like directories
// - Paths that come from object stores directly don't need to be
// parsed/validated
// - Within a process, the same backing store will always be used
//
#[test]
fn cloud_prefix_no_trailing_delimiter_or_filename() {
// Use case: a file named `test_file.json` exists in object storage and it
// should be returned for a search on prefix `test`, so the prefix path
// should not get a trailing delimiter automatically added
let mut prefix = ObjectStorePath::default();
prefix.push("test");
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test");
}
#[test]
fn cloud_prefix_with_trailing_delimiter() {
// Use case: files exist in object storage named `foo/bar.json` and
// `foo_test.json`. A search for the prefix `foo/` should return
// `foo/bar.json` but not `foo_test.json'.
let mut prefix = ObjectStorePath::default();
prefix.push_all(&["test", ""]);
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test/"); | fn push_encodes() {
let mut location = ObjectStorePath::default();
location.push("foo/bar");
location.push("baz%2Ftest");
let converted = CloudConverter::convert(&location);
assert_eq!(converted, "foo%2Fbar/baz%252Ftest");
}
#[test]
fn push_all_encodes() {
let mut location = ObjectStorePath::default();
location.push_all(&["foo/bar", "baz%2Ftest"]);
let converted = CloudConverter::convert(&location);
assert_eq!(converted, "foo%2Fbar/baz%252Ftest");
}
#[test]
fn starts_with_parts() {
let mut haystack = ObjectStorePath::default();
haystack.push_all(&["foo/bar", "baz%2Ftest", "something"]);
assert!(
haystack.starts_with(&haystack),
"{:?} should have started with {:?}",
haystack,
haystack
);
let mut needle = haystack.clone();
needle.push("longer now");
assert!(
!haystack.starts_with(&needle),
"{:?} shouldn't have started with {:?}",
haystack,
needle
);
let mut needle = ObjectStorePath::default();
needle.push("foo/bar");
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
needle.push("baz%2Ftest");
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
let mut needle = ObjectStorePath::default();
needle.push("f");
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
needle.push("oo/bar");
assert!(
!haystack.starts_with(&needle),
"{:?} shouldn't have started with {:?}",
haystack,
needle
);
let mut needle = ObjectStorePath::default();
needle.push_all(&["foo/bar", "baz"]);
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
}
} | }
#[test] | random_line_split |
mod.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The device layer.
pub(crate) mod arp;
pub(crate) mod ethernet;
pub(crate) mod ndp;
use std::fmt::{self, Debug, Display, Formatter};
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, IpAddress, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{LinkLocalAddr, MulticastAddr};
use packet::{BufferMut, Serializer};
use crate::data_structures::{IdMap, IdMapCollectionKey};
use crate::device::ethernet::{EthernetDeviceState, EthernetDeviceStateBuilder};
use crate::{BufferDispatcher, Context, EventDispatcher, StackState};
/// An ID identifying a device.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct DeviceId {
id: usize,
protocol: DeviceProtocol,
}
impl DeviceId {
/// Construct a new `DeviceId` for an Ethernet device.
pub(crate) fn new_ethernet(id: usize) -> DeviceId {
DeviceId { id, protocol: DeviceProtocol::Ethernet }
}
/// Get the protocol-specific ID for this `DeviceId`.
pub fn id(self) -> usize {
self.id
}
/// Get the protocol for this `DeviceId`.
pub fn protocol(self) -> DeviceProtocol {
self.protocol
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.protocol, self.id)
}
}
impl Debug for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
Display::fmt(self, f)
}
}
impl IdMapCollectionKey for DeviceId {
const VARIANT_COUNT: usize = 1;
fn get_variant(&self) -> usize {
match self.protocol {
DeviceProtocol::Ethernet => 0,
}
}
fn get_id(&self) -> usize {
self.id as usize
}
}
/// Type of device protocol.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum DeviceProtocol {
Ethernet,
}
impl Display for DeviceProtocol {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
match self {
DeviceProtocol::Ethernet => "Ethernet",
}
)
}
}
// TODO(joshlf): Does the IP layer ever need to distinguish between broadcast
// and multicast frames?
/// The type of address used as the source address in a device-layer frame:
/// unicast or broadcast.
///
/// `FrameDestination` is used to implement RFC 1122 section 3.2.2 and RFC 4443
/// section 2.4.e, which govern when to avoid sending an ICMP error message for
/// ICMP and ICMPv6 respectively.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum FrameDestination {
/// A unicast address - one which is neither multicast nor broadcast.
Unicast,
/// A multicast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Multicast,
/// A broadcast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add` adds a new `EthernetDeviceState` with the given MAC address and
/// MTU. The MTU will be taken as a limit on the size of Ethernet payloads -
/// the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independant state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independant) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independant) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note, until `device` has been initialized, the netstack promises to not
/// send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialize step is kept separated from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation specific IDs or
/// state, configure the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// RFC 4861 section 6.3.7, it implies only a host sends router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("intialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
/// | /// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including tentative
/// address.
pub fn get_ip_addr_subnet_with_tentative<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`.
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function go get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP Address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note, if the `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into a `Option<T>` in the way that
/// a tentative value corresponds to a `None`.
pub(crate) fn try_into_permanent(self) -> Option<T> {
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
}
/// Borrow the content which is stored inside.
pub(crate) fn inner(&self) -> &T {
&self.0
}
/// Similar to `Option::map`.
pub(crate) fn map<U, F>(self, f: F) -> Tentative<U>
where
F: FnOnce(T) -> U,
{
Tentative(f(self.0), self.1)
}
/// Make the tentative value to be permanent.
pub(crate) fn mark_permanent(&mut self) {
self.1 = false
}
} | random_line_split | |
mod.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The device layer.
pub(crate) mod arp;
pub(crate) mod ethernet;
pub(crate) mod ndp;
use std::fmt::{self, Debug, Display, Formatter};
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, IpAddress, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{LinkLocalAddr, MulticastAddr};
use packet::{BufferMut, Serializer};
use crate::data_structures::{IdMap, IdMapCollectionKey};
use crate::device::ethernet::{EthernetDeviceState, EthernetDeviceStateBuilder};
use crate::{BufferDispatcher, Context, EventDispatcher, StackState};
/// An ID identifying a device.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct DeviceId {
id: usize,
protocol: DeviceProtocol,
}
impl DeviceId {
/// Construct a new `DeviceId` for an Ethernet device.
pub(crate) fn new_ethernet(id: usize) -> DeviceId {
DeviceId { id, protocol: DeviceProtocol::Ethernet }
}
/// Get the protocol-specific ID for this `DeviceId`.
pub fn id(self) -> usize {
self.id
}
/// Get the protocol for this `DeviceId`.
pub fn protocol(self) -> DeviceProtocol {
self.protocol
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.protocol, self.id)
}
}
impl Debug for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
Display::fmt(self, f)
}
}
impl IdMapCollectionKey for DeviceId {
const VARIANT_COUNT: usize = 1;
fn get_variant(&self) -> usize {
match self.protocol {
DeviceProtocol::Ethernet => 0,
}
}
fn get_id(&self) -> usize {
self.id as usize
}
}
/// Type of device protocol.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum DeviceProtocol {
Ethernet,
}
impl Display for DeviceProtocol {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
match self {
DeviceProtocol::Ethernet => "Ethernet",
}
)
}
}
// TODO(joshlf): Does the IP layer ever need to distinguish between broadcast
// and multicast frames?
/// The type of address used as the source address in a device-layer frame:
/// unicast or broadcast.
///
/// `FrameDestination` is used to implement RFC 1122 section 3.2.2 and RFC 4443
/// section 2.4.e, which govern when to avoid sending an ICMP error message for
/// ICMP and ICMPv6 respectively.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum FrameDestination {
/// A unicast address - one which is neither multicast nor broadcast.
Unicast,
/// A multicast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Multicast,
/// A broadcast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add` adds a new `EthernetDeviceState` with the given MAC address and
/// MTU. The MTU will be taken as a limit on the size of Ethernet payloads -
/// the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independant state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independant) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independant) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note, until `device` has been initialized, the netstack promises to not
/// send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialize step is kept separated from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation specific IDs or
/// state, configure the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// RFC 4861 section 6.3.7, it implies only a host sends router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("intialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including tentative
/// address.
pub fn get_ip_addr_subnet_with_tentative<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`.
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function go get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP Address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note, if the `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into a `Option<T>` in the way that
/// a tentative value corresponds to a `None`.
pub(crate) fn try_into_permanent(self) -> Option<T> |
/// Borrow the content which is stored inside.
pub(crate) fn inner(&self) -> &T {
&self.0
}
/// Similar to `Option::map`.
pub(crate) fn map<U, F>(self, f: F) -> Tentative<U>
where
F: FnOnce(T) -> U,
{
Tentative(f(self.0), self.1)
}
/// Make the tentative value to be permanent.
pub(crate) fn mark_permanent(&mut self) {
self.1 = false
}
}
| {
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
} | identifier_body |
mod.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The device layer.
pub(crate) mod arp;
pub(crate) mod ethernet;
pub(crate) mod ndp;
use std::fmt::{self, Debug, Display, Formatter};
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, IpAddress, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{LinkLocalAddr, MulticastAddr};
use packet::{BufferMut, Serializer};
use crate::data_structures::{IdMap, IdMapCollectionKey};
use crate::device::ethernet::{EthernetDeviceState, EthernetDeviceStateBuilder};
use crate::{BufferDispatcher, Context, EventDispatcher, StackState};
/// An ID identifying a device.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct DeviceId {
id: usize,
protocol: DeviceProtocol,
}
impl DeviceId {
/// Construct a new `DeviceId` for an Ethernet device.
pub(crate) fn new_ethernet(id: usize) -> DeviceId {
DeviceId { id, protocol: DeviceProtocol::Ethernet }
}
/// Get the protocol-specific ID for this `DeviceId`.
pub fn id(self) -> usize {
self.id
}
/// Get the protocol for this `DeviceId`.
pub fn protocol(self) -> DeviceProtocol {
self.protocol
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.protocol, self.id)
}
}
impl Debug for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
Display::fmt(self, f)
}
}
impl IdMapCollectionKey for DeviceId {
const VARIANT_COUNT: usize = 1;
fn get_variant(&self) -> usize {
match self.protocol {
DeviceProtocol::Ethernet => 0,
}
}
fn get_id(&self) -> usize {
self.id as usize
}
}
/// Type of device protocol.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum DeviceProtocol {
Ethernet,
}
impl Display for DeviceProtocol {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
match self {
DeviceProtocol::Ethernet => "Ethernet",
}
)
}
}
// TODO(joshlf): Does the IP layer ever need to distinguish between broadcast
// and multicast frames?
/// The type of address used as the source address in a device-layer frame:
/// unicast or broadcast.
///
/// `FrameDestination` is used to implement RFC 1122 section 3.2.2 and RFC 4443
/// section 2.4.e, which govern when to avoid sending an ICMP error message for
/// ICMP and ICMPv6 respectively.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum FrameDestination {
/// A unicast address - one which is neither multicast nor broadcast.
Unicast,
/// A multicast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Multicast,
/// A broadcast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add` adds a new `EthernetDeviceState` with the given MAC address and
/// MTU. The MTU will be taken as a limit on the size of Ethernet payloads -
/// the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independant state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independant) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independant) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note, until `device` has been initialized, the netstack promises to not
/// send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialize step is kept separated from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation specific IDs or
/// state, configure the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// RFC 4861 section 6.3.7, it implies only a host sends router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("intialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including tentative
/// address.
pub fn | <D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`.
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function go get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP Address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note, if the `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into a `Option<T>` in the way that
/// a tentative value corresponds to a `None`.
pub(crate) fn try_into_permanent(self) -> Option<T> {
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
}
/// Borrow the content which is stored inside.
pub(crate) fn inner(&self) -> &T {
&self.0
}
/// Similar to `Option::map`.
pub(crate) fn map<U, F>(self, f: F) -> Tentative<U>
where
F: FnOnce(T) -> U,
{
Tentative(f(self.0), self.1)
}
/// Make the tentative value to be permanent.
pub(crate) fn mark_permanent(&mut self) {
self.1 = false
}
}
| get_ip_addr_subnet_with_tentative | identifier_name |
aula-principal.js | /*Arquivo principal com os testes feitos juntos com a aula iniciante de Javascript
Aula: https://www.youtube.com/watch?v=i6Oi-YtXnAU&feature=youtu.be
Anotações/explicações: https://drive.google.com/file/d/1WXCz5PgXpumV59kG_m1KnmHIsIcCuTd8/view?usp=sharing*/
//Exercício 01 variáveis:
let nome = 'Mariana';
let sobrenome = 'Fernandes';
let idade = 16;
let altura = 175;
let estaAprovado = true;
console.log('Nome: '+nome+'\n Idade: '+idade+'\n Altura: '+altura);
//Constantes
const valorIngressoAdulto = 20;
console.log(valorIngressoAdulto);
//Tipos primitivos
//let nome = 'Mariana'; -> String literal
// let idade = 16; -> number literal
// let estaAprovado = true; -> boolean
// let sobrenome; -> undefined tipo será definido quando for atribuído algum valor
// let corSelecionada = null; -> valor será atribuído depois
//objetos
let pessoa = {
//key: 'value'
nome: 'Mariana',
sobrenome: 'Fernandes',
idade: 16,
altura: 175,
estaAprovado: true
};
//array
let camy = ['Camylly', 17, 'São Paulo', 'DS', true];
console.log(camy.length); //tamanho do array
console.log(camy[0]); //mostra o que está na posição 0 desse array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//incremento e decremento
console.log(idade++); // imprime 16, pois como o ++ tá depois da apresentação, ele não vai mostrar a idade atualizada
console.log(idade); //imprime 17, por causa da operação da linha anterior
console.log(++idade); //imprime 18, porque ele faz a operação antes de apresentar
idade -= 2;
//operadores de igualdade
//igualdade estrita (mais recomendado)
console.log(1 === 1); //imprime true, porque é o mesmo valor e o mesmo tipo
console.log('1' === 1) //imprime a false, porque apesar do valor ser o mesmo, eles são de tipos diferentes
//igualdade solta
console.log(1 == 1); //imprime true porque tem o mesmo valor
console.log('1' == 1); //imprime true porque continua tendo o mesmo valor
//operador ternário
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//ou seja, se a condicao estiver correta, a variavel ex vai receber 'a', se não vai receber 'b'
//operadores lógicos
//operador AND (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//se as duas variáveis comparadas forem true, ele retornará true, mas se alguma delas for false ele será false, nesse caso ele é true
console.log(podeAplicar); //vai imprimir false porque o possuiCarteira é false
//operador OR (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//se pelo menos uma das variáveis comparadas forem true, ele já vai retornar true
console.log('candidato pode aplicar: ', podeAplicar); //vai imprimir true porque o maiorDeIdade é true
//Operador NOT (!)
let candidatoRecusado = !podeAplicar;
//se a variável podeAplicar for true, então candidatoRecusado receberá false, mas se podeAplicar for false o candidatoRecusado será true
console.log('candidato recusado: ', candidatoRecusado);//retorna false porque o candidato pode aplicar
//Condicionais: if... else e switch... case
numero = 263;
//Esse pequeno código verifica se a condição entre parênteses é verdadeira, caso seja ele executa o que tá no if, caso contrário ele executa o que está no "else"
if(numero % 2 == 0){ //nesse caso ele verifica se a variável numero é par
console.log('Par');
}
else if (numero == 0){//Se o laço não cair na condição anterior, ele faz essa verificação
console.log('Zero');
}
else{
console.log('ímpar');
}
//Laços de repetição: for, while, do... while, for... in e for... of
for (let i = 0; i < 5; i++){
//i = índice, executar enquanto a condição for verdadeira (i<5) e realiza o incremento
console.log(i);
}
let i = 0; //indice
while (i < 5){
console.log(i);
i++; //incremento
}
//A única diferença é que ele executa pelo menos uma vez antes de verificar o índice
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: criar um objeto "padrão" mais facilmente através de um método
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
marcaCel,
bateriaCel,
tamanhoT | );
//Constructor function: mesmo objetivo que a factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instanciando o objeto através do construtor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//natureza dinâmica dos objetos
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adiciona uma propriedade ao objeto já existente
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
//deletando propriedades e funções do objeto existente:
delete mouse.trocarDPI;
delete mouse.velocidade;
console.log(mouse);
//clonando objetos
//nesse exemplo o novo objeto receberá o objeto já criado celular com a adição da propriedade temDigital
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> gera um número aleatório de 0 a 1
console.log(Math.random() * (10 - 1) + 1); // -> gera um número aleatório de 1 a 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> gera o maior número entre os listados como parâmetro
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> faz o contrário do que o max, gera o menor número listado
console.log(Math.pow(5,2)); // -> calcula o primeiro parâmetro elevado ao segundo parâmetro
//String: existe o tipo primitivo e o tipo objeto
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> mostra a quantidade de caracteres de uma string
console.log(primitivo[2]); // -> mostra o caracter que está no índice indicado, lembrando de que sempre começa no 0
console.log(primitivo.includes('Tipo')); // -> gera um true, porque na string indicada contém 'Tipo'
console.log(primitivo.includes('azul')); // -> gera um false, porque na string indicada contém 'azul'
console.log(primitivo.startsWith('Tipo')); // -> gera um true, porque a string indicada começa com 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> gera um true, porque a string indicada termina com 'primitivo'
console.log(primitivo.indexOf('v')); // -> gera o index em que 'v' está
console.log(primitivo.replace('Tipo', ''));// -> substitui uma parte da String, nesse caso ela vira só 'primitivo'
primitivo.trim(); //-> remove espaços desnecessários no início ou no final da string
console.log(obj.split(' ')); //-> separa a string a cada espaço que encontra, nesse caso ele mostra 'Tipo' e 'objeto' como duas coisas diferentes
//String com sequência de escape
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String com template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //vai preencher com as informações de data e hora atuais
const data1 = new Date('March 06 2019 09:30'); //data e hora deifinidos na declaração da constante
const data2 = new Date(2019, 02, 06, 09, 30, 05); //data e hora definidos através dos números na ordem: ano, mês, dia, hora, minuto, segundo e ms
data2.getFullYear(); //retorna o ano
data2.setFullYear(2021); //muda o ano dessa data
//Formas de converter para String:
data1.toDateString(); //retorna só a data em String
data2.toTimeString(); //retorna a data e hora em string
dataAtual.toISOString(); //retorna as infos em números, como em SQL
//Constructor Function
//Exercício: criar um objeto postagem (miniprojeto13) através de um construtor
function Post(titulo, mensagem, autor){
this.titulo = titulo;
this.autor = autor;
this.mensagem = mensagem;
this.comentarios = [];
this.views = 0;
this.estaAoVivo = false;
}
let post = new Post('a', 'b', 'c');
//Arrays
let numeros = [1, 2, 3];
//Adicionando números no início:
numeros.unshift(0);
//Adicionando números no meio:
numeros.splice(2, 0, 1.5); // vai adicionar 1.5 no 2, e o parâmetro 0 é porque não vamos substituir nada, apenas adicionar
//Adicionando números no final:
numeros.push(4);
numeros.indexOf(1.5); //retorna o índice de 1.5 que é 2, caso não existisse 1.5 no array ele retornaria -1
numeros.lastIndexOf(1.5); //nesse caso, se tiverem dois 1.5 no array ele retorna o index do último
numeros.includes(5); //retorna true caso tenha 5 em algum índice do array e false caso não tenha
let pessoas = [
{id: 1, nome: 'Rebeca'},
{id: 2, nome: 'Laura'}
];
//Encontrando um objeto dentro de um array utilizando o find
let objetoEncontrado = pessoas.find(function(pessoas){
return pessoas.nome == 'Laura';
});
console.log(objetoEncontrado); //vai retornar o objeto que tem o nome 'Laura'
//Arrow function: usando esse método para encurtar a função anterior
objetoEncontrado = pessoas.find(pessoas => pessoas.nome === 'Laura');
//Removendo valores de um array:
numeros.pop(); //remove o último valor do array
numeros.shift(); //remove o primeiro valor do array
numeros.splice(1, 1); //remove um número no índice 1
console.log(numeros);
//Formas de esvaziar um Array:
numeros = []; //só remove esse array e deixa as referências normais
numeros.length = 0; //esvazia o array e as referências
numeros.splice(0, numeros.length); //esvazia o array e as referências
//Combinando e cortando Arrays
const array1 = [1, 2, 3];
const array2 = [4, 5, 6];
//Combinando
let combinado = array1.concat(array2);
console.log(combinado);
//Cortando
const cortado = combinado.slice(0, 3); //tem como dois parâmetros o índice inicial e o final da parte que será mantida
console.log(cortado);
//Concatenando arrays com Spread
combinado = [...array1, ...array2, 7];
//ForEach
combinado.forEach((combinado, indice) => console.log('Índice ' + indice + ': '+ combinado));
//Combinando arrays com join e split
combinado = combinado.join('-'); //separa os elementos do array pelo traço
let frase = 'esse é um curso de javascript';
frase = frase.split(' '); //separa cada palavra em um elemento diferente, identificando através de espaços
frase = frase.join('-'); //separa as palavras em um elemento só separados por um traço
/*Recebendo dados de um usuário:
const idadeUser = prompt('Qual sua idade?');
if (idadeUser >= 18) console.log('Maior de idade');
alert('Menor de idade');*/
| ela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1 | identifier_body |
aula-principal.js | /*Arquivo principal com os testes feitos juntos com a aula iniciante de Javascript
Aula: https://www.youtube.com/watch?v=i6Oi-YtXnAU&feature=youtu.be
Anotações/explicações: https://drive.google.com/file/d/1WXCz5PgXpumV59kG_m1KnmHIsIcCuTd8/view?usp=sharing*/
//Exercício 01 variáveis:
let nome = 'Mariana';
let sobrenome = 'Fernandes';
let idade = 16;
let altura = 175;
let estaAprovado = true;
console.log('Nome: '+nome+'\n Idade: '+idade+'\n Altura: '+altura);
//Constantes
const valorIngressoAdulto = 20;
console.log(valorIngressoAdulto);
//Tipos primitivos
//let nome = 'Mariana'; -> String literal
// let idade = 16; -> number literal
// let estaAprovado = true; -> boolean
// let sobrenome; -> undefined tipo será definido quando for atribuído algum valor
// let corSelecionada = null; -> valor será atribuído depois
//objetos
let pessoa = {
//key: 'value'
nome: 'Mariana',
sobrenome: 'Fernandes',
idade: 16,
altura: 175,
estaAprovado: true
};
//array
let camy = ['Camylly', 17, 'São Paulo', 'DS', true];
console.log(camy.length); //tamanho do array
console.log(camy[0]); //mostra o que está na posição 0 desse array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//incremento e decremento
console.log(idade++); // imprime 16, pois como o ++ tá depois da apresentação, ele não vai mostrar a idade atualizada
console.log(idade); //imprime 17, por causa da operação da linha anterior
console.log(++idade); //imprime 18, porque ele faz a operação antes de apresentar
idade -= 2;
//operadores de igualdade
//igualdade estrita (mais recomendado)
console.log(1 === 1); //imprime true, porque é o mesmo valor e o mesmo tipo
console.log('1' === 1) //imprime a false, porque apesar do valor ser o mesmo, eles são de tipos diferentes
//igualdade solta
console.log(1 == 1); //imprime true porque tem o mesmo valor
console.log('1' == 1); //imprime true porque continua tendo o mesmo valor
//operador ternário
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//ou seja, se a condicao estiver correta, a variavel ex vai receber 'a', se não vai receber 'b'
//operadores lógicos
//operador AND (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//se as duas variáveis comparadas forem true, ele retornará true, mas se alguma delas for false ele será false, nesse caso ele é true
console.log(podeAplicar); //vai imprimir false porque o possuiCarteira é false
//operador OR (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//se pelo menos uma das variáveis comparadas forem true, ele já vai retornar true
console.log('candidato pode aplicar: ', podeAplicar); //vai imprimir true porque o maiorDeIdade é true
//Operador NOT (!)
let candidatoRecusado = !podeAplicar;
//se a variável podeAplicar for true, então candidatoRecusado receberá false, mas se podeAplicar for false o candidatoRecusado será true
console.log('candidato recusado: ', candidatoRecusado);//retorna false porque o candidato pode aplicar
//Condicionais: if... else e switch... case
numero = 263;
//Esse pequeno código verifica se a condição entre parênteses é verdadeira, caso seja ele executa o que tá no if, caso contrário ele executa o que está no "else"
if(numero % 2 == 0){ //nesse caso ele verifica se a variável numero é par
console.log('Par');
}
else if (numero == 0){//Se o laço não cair na condição anterior, ele faz essa verificação
console.log('Zero');
}
else{
console.log('ímpar');
}
//Laços de repetição: for, while, do... while, for... in e for... of
for (let i = 0; i < 5; i++){
//i = índice, executar enquanto a condição for verdad | i++; //incremento
}
//A única diferença é que ele executa pelo menos uma vez antes de verificar o índice
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: criar um objeto "padrão" mais facilmente através de um método
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
marcaCel,
bateriaCel,
tamanhoTela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1);
//Constructor function: mesmo objetivo que a factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instanciando o objeto através do construtor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//natureza dinâmica dos objetos
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adiciona uma propriedade ao objeto já existente
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
//deletando propriedades e funções do objeto existente:
delete mouse.trocarDPI;
delete mouse.velocidade;
console.log(mouse);
//clonando objetos
//nesse exemplo o novo objeto receberá o objeto já criado celular com a adição da propriedade temDigital
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> gera um número aleatório de 0 a 1
console.log(Math.random() * (10 - 1) + 1); // -> gera um número aleatório de 1 a 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> gera o maior número entre os listados como parâmetro
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> faz o contrário do que o max, gera o menor número listado
console.log(Math.pow(5,2)); // -> calcula o primeiro parâmetro elevado ao segundo parâmetro
//String: existe o tipo primitivo e o tipo objeto
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> mostra a quantidade de caracteres de uma string
console.log(primitivo[2]); // -> mostra o caracter que está no índice indicado, lembrando de que sempre começa no 0
console.log(primitivo.includes('Tipo')); // -> gera um true, porque na string indicada contém 'Tipo'
console.log(primitivo.includes('azul')); // -> gera um false, porque na string indicada contém 'azul'
console.log(primitivo.startsWith('Tipo')); // -> gera um true, porque a string indicada começa com 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> gera um true, porque a string indicada termina com 'primitivo'
console.log(primitivo.indexOf('v')); // -> gera o index em que 'v' está
console.log(primitivo.replace('Tipo', ''));// -> substitui uma parte da String, nesse caso ela vira só 'primitivo'
primitivo.trim(); //-> remove espaços desnecessários no início ou no final da string
console.log(obj.split(' ')); //-> separa a string a cada espaço que encontra, nesse caso ele mostra 'Tipo' e 'objeto' como duas coisas diferentes
//String com sequência de escape
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String com template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //vai preencher com as informações de data e hora atuais
const data1 = new Date('March 06 2019 09:30'); //data e hora deifinidos na declaração da constante
const data2 = new Date(2019, 02, 06, 09, 30, 05); //data e hora definidos através dos números na ordem: ano, mês, dia, hora, minuto, segundo e ms
data2.getFullYear(); //retorna o ano
data2.setFullYear(2021); //muda o ano dessa data
//Formas de converter para String:
data1.toDateString(); //retorna só a data em String
data2.toTimeString(); //retorna a data e hora em string
dataAtual.toISOString(); //retorna as infos em números, como em SQL
//Constructor Function
//Exercício: criar um objeto postagem (miniprojeto13) através de um construtor
function Post(titulo, mensagem, autor){
this.titulo = titulo;
this.autor = autor;
this.mensagem = mensagem;
this.comentarios = [];
this.views = 0;
this.estaAoVivo = false;
}
let post = new Post('a', 'b', 'c');
//Arrays
let numeros = [1, 2, 3];
//Adicionando números no início:
numeros.unshift(0);
//Adicionando números no meio:
numeros.splice(2, 0, 1.5); // vai adicionar 1.5 no 2, e o parâmetro 0 é porque não vamos substituir nada, apenas adicionar
//Adicionando números no final:
numeros.push(4);
numeros.indexOf(1.5); //retorna o índice de 1.5 que é 2, caso não existisse 1.5 no array ele retornaria -1
numeros.lastIndexOf(1.5); //nesse caso, se tiverem dois 1.5 no array ele retorna o index do último
numeros.includes(5); //retorna true caso tenha 5 em algum índice do array e false caso não tenha
let pessoas = [
{id: 1, nome: 'Rebeca'},
{id: 2, nome: 'Laura'}
];
//Encontrando um objeto dentro de um array utilizando o find
let objetoEncontrado = pessoas.find(function(pessoas){
return pessoas.nome == 'Laura';
});
console.log(objetoEncontrado); //vai retornar o objeto que tem o nome 'Laura'
//Arrow function: usando esse método para encurtar a função anterior
objetoEncontrado = pessoas.find(pessoas => pessoas.nome === 'Laura');
//Removendo valores de um array:
numeros.pop(); //remove o último valor do array
numeros.shift(); //remove o primeiro valor do array
numeros.splice(1, 1); //remove um número no índice 1
console.log(numeros);
//Formas de esvaziar um Array:
numeros = []; //só remove esse array e deixa as referências normais
numeros.length = 0; //esvazia o array e as referências
numeros.splice(0, numeros.length); //esvazia o array e as referências
//Combinando e cortando Arrays
const array1 = [1, 2, 3];
const array2 = [4, 5, 6];
//Combinando
let combinado = array1.concat(array2);
console.log(combinado);
//Cortando
const cortado = combinado.slice(0, 3); //tem como dois parâmetros o índice inicial e o final da parte que será mantida
console.log(cortado);
//Concatenando arrays com Spread
combinado = [...array1, ...array2, 7];
//ForEach
combinado.forEach((combinado, indice) => console.log('Índice ' + indice + ': '+ combinado));
//Combinando arrays com join e split
combinado = combinado.join('-'); //separa os elementos do array pelo traço
let frase = 'esse é um curso de javascript';
frase = frase.split(' '); //separa cada palavra em um elemento diferente, identificando através de espaços
frase = frase.join('-'); //separa as palavras em um elemento só separados por um traço
/*Recebendo dados de um usuário:
const idadeUser = prompt('Qual sua idade?');
if (idadeUser >= 18) console.log('Maior de idade');
alert('Menor de idade');*/
| eira (i<5) e realiza o incremento
console.log(i);
}
let i = 0; //indice
while (i < 5){
console.log(i);
| conditional_block |
aula-principal.js | /*Arquivo principal com os testes feitos juntos com a aula iniciante de Javascript
Aula: https://www.youtube.com/watch?v=i6Oi-YtXnAU&feature=youtu.be
Anotações/explicações: https://drive.google.com/file/d/1WXCz5PgXpumV59kG_m1KnmHIsIcCuTd8/view?usp=sharing*/
//Exercício 01 variáveis:
let nome = 'Mariana';
let sobrenome = 'Fernandes';
let idade = 16;
let altura = 175;
let estaAprovado = true;
console.log('Nome: '+nome+'\n Idade: '+idade+'\n Altura: '+altura);
//Constantes
const valorIngressoAdulto = 20;
console.log(valorIngressoAdulto);
//Tipos primitivos
//let nome = 'Mariana'; -> String literal
// let idade = 16; -> number literal
// let estaAprovado = true; -> boolean
// let sobrenome; -> undefined tipo será definido quando for atribuído algum valor
// let corSelecionada = null; -> valor será atribuído depois
//objetos
let pessoa = {
//key: 'value'
nome: 'Mariana',
sobrenome: 'Fernandes',
idade: 16,
altura: 175,
estaAprovado: true
};
//array
let camy = ['Camylly', 17, 'São Paulo', 'DS', true];
console.log(camy.length); //tamanho do array
console.log(camy[0]); //mostra o que está na posição 0 desse array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//incremento e decremento
console.log(idade++); // imprime 16, pois como o ++ tá depois da apresentação, ele não vai mostrar a idade atualizada
console.log(idade); //imprime 17, por causa da operação da linha anterior
console.log(++idade); //imprime 18, porque ele faz a operação antes de apresentar
idade -= 2;
//operadores de igualdade
//igualdade estrita (mais recomendado)
console.log(1 === 1); //imprime true, porque é o mesmo valor e o mesmo tipo
console.log('1' === 1) //imprime a false, porque apesar do valor ser o mesmo, eles são de tipos diferentes
//igualdade solta
console.log(1 == 1); //imprime true porque tem o mesmo valor
console.log('1' == 1); //imprime true porque continua tendo o mesmo valor
//operador ternário
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//ou seja, se a condicao estiver correta, a variavel ex vai receber 'a', se não vai receber 'b'
//operadores lógicos
//operador AND (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//se as duas variáveis comparadas forem true, ele retornará true, mas se alguma delas for false ele será false, nesse caso ele é true
console.log(podeAplicar); //vai imprimir false porque o possuiCarteira é false
//operador OR (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//se pelo menos uma das variáveis comparadas forem true, ele já vai retornar true
console.log('candidato pode aplicar: ', podeAplicar); //vai imprimir true porque o maiorDeIdade é true
//Operador NOT (!)
let candidatoRecusado = !podeAplicar;
//se a variável podeAplicar for true, então candidatoRecusado receberá false, mas se podeAplicar for false o candidatoRecusado será true
console.log('candidato recusado: ', candidatoRecusado);//retorna false porque o candidato pode aplicar
//Condicionais: if... else e switch... case
numero = 263;
//Esse pequeno código verifica se a condição entre parênteses é verdadeira, caso seja ele executa o que tá no if, caso contrário ele executa o que está no "else"
if(numero % 2 == 0){ //nesse caso ele verifica se a variável numero é par
console.log('Par');
}
else if (numero == 0){//Se o laço não cair na condição anterior, ele faz essa verificação
console.log('Zero');
}
else{
console.log('ímpar');
}
//Laços de repetição: for, while, do... while, for... in e for... of
for (let i = 0; i < 5; i++){
//i = índice, executar enquanto a condição for verdadeira (i<5) e realiza o incremento
console.log(i);
}
let i = 0; //indice
while (i < 5){
console.log(i);
i++; //incremento
}
//A única diferença é que ele executa pelo menos uma vez antes de verificar o índice
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: criar um objeto "padrão" mais facilmente através de um método
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
| bateriaCel,
tamanhoTela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1);
//Constructor function: mesmo objetivo que a factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instanciando o objeto através do construtor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//natureza dinâmica dos objetos
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adiciona uma propriedade ao objeto já existente
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
//deletando propriedades e funções do objeto existente:
delete mouse.trocarDPI;
delete mouse.velocidade;
console.log(mouse);
//clonando objetos
//nesse exemplo o novo objeto receberá o objeto já criado celular com a adição da propriedade temDigital
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> gera um número aleatório de 0 a 1
console.log(Math.random() * (10 - 1) + 1); // -> gera um número aleatório de 1 a 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> gera o maior número entre os listados como parâmetro
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> faz o contrário do que o max, gera o menor número listado
console.log(Math.pow(5,2)); // -> calcula o primeiro parâmetro elevado ao segundo parâmetro
//String: existe o tipo primitivo e o tipo objeto
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> mostra a quantidade de caracteres de uma string
console.log(primitivo[2]); // -> mostra o caracter que está no índice indicado, lembrando de que sempre começa no 0
console.log(primitivo.includes('Tipo')); // -> gera um true, porque na string indicada contém 'Tipo'
console.log(primitivo.includes('azul')); // -> gera um false, porque na string indicada contém 'azul'
console.log(primitivo.startsWith('Tipo')); // -> gera um true, porque a string indicada começa com 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> gera um true, porque a string indicada termina com 'primitivo'
console.log(primitivo.indexOf('v')); // -> gera o index em que 'v' está
console.log(primitivo.replace('Tipo', ''));// -> substitui uma parte da String, nesse caso ela vira só 'primitivo'
primitivo.trim(); //-> remove espaços desnecessários no início ou no final da string
console.log(obj.split(' ')); //-> separa a string a cada espaço que encontra, nesse caso ele mostra 'Tipo' e 'objeto' como duas coisas diferentes
//String com sequência de escape
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String com template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //vai preencher com as informações de data e hora atuais
const data1 = new Date('March 06 2019 09:30'); //data e hora deifinidos na declaração da constante
const data2 = new Date(2019, 02, 06, 09, 30, 05); //data e hora definidos através dos números na ordem: ano, mês, dia, hora, minuto, segundo e ms
data2.getFullYear(); //retorna o ano
data2.setFullYear(2021); //muda o ano dessa data
//Formas de converter para String:
data1.toDateString(); //retorna só a data em String
data2.toTimeString(); //retorna a data e hora em string
dataAtual.toISOString(); //retorna as infos em números, como em SQL
//Constructor Function
//Exercício: criar um objeto postagem (miniprojeto13) através de um construtor
function Post(titulo, mensagem, autor){
this.titulo = titulo;
this.autor = autor;
this.mensagem = mensagem;
this.comentarios = [];
this.views = 0;
this.estaAoVivo = false;
}
let post = new Post('a', 'b', 'c');
//Arrays
let numeros = [1, 2, 3];
//Adicionando números no início:
numeros.unshift(0);
//Adicionando números no meio:
numeros.splice(2, 0, 1.5); // vai adicionar 1.5 no 2, e o parâmetro 0 é porque não vamos substituir nada, apenas adicionar
//Adicionando números no final:
numeros.push(4);
numeros.indexOf(1.5); //retorna o índice de 1.5 que é 2, caso não existisse 1.5 no array ele retornaria -1
numeros.lastIndexOf(1.5); //nesse caso, se tiverem dois 1.5 no array ele retorna o index do último
numeros.includes(5); //retorna true caso tenha 5 em algum índice do array e false caso não tenha
let pessoas = [
{id: 1, nome: 'Rebeca'},
{id: 2, nome: 'Laura'}
];
//Encontrando um objeto dentro de um array utilizando o find
let objetoEncontrado = pessoas.find(function(pessoas){
return pessoas.nome == 'Laura';
});
console.log(objetoEncontrado); //vai retornar o objeto que tem o nome 'Laura'
//Arrow function: usando esse método para encurtar a função anterior
objetoEncontrado = pessoas.find(pessoas => pessoas.nome === 'Laura');
//Removendo valores de um array:
numeros.pop(); //remove o último valor do array
numeros.shift(); //remove o primeiro valor do array
numeros.splice(1, 1); //remove um número no índice 1
console.log(numeros);
//Formas de esvaziar um Array:
numeros = []; //só remove esse array e deixa as referências normais
numeros.length = 0; //esvazia o array e as referências
numeros.splice(0, numeros.length); //esvazia o array e as referências
//Combinando e cortando Arrays
const array1 = [1, 2, 3];
const array2 = [4, 5, 6];
//Combinando
let combinado = array1.concat(array2);
console.log(combinado);
//Cortando
const cortado = combinado.slice(0, 3); //tem como dois parâmetros o índice inicial e o final da parte que será mantida
console.log(cortado);
//Concatenando arrays com Spread
combinado = [...array1, ...array2, 7];
//ForEach
combinado.forEach((combinado, indice) => console.log('Índice ' + indice + ': '+ combinado));
//Combinando arrays com join e split
combinado = combinado.join('-'); //separa os elementos do array pelo traço
let frase = 'esse é um curso de javascript';
frase = frase.split(' '); //separa cada palavra em um elemento diferente, identificando através de espaços
frase = frase.join('-'); //separa as palavras em um elemento só separados por um traço
/*Recebendo dados de um usuário:
const idadeUser = prompt('Qual sua idade?');
if (idadeUser >= 18) console.log('Maior de idade');
alert('Menor de idade');*/
| marcaCel,
| identifier_name |
aula-principal.js | /*Arquivo principal com os testes feitos juntos com a aula iniciante de Javascript
Aula: https://www.youtube.com/watch?v=i6Oi-YtXnAU&feature=youtu.be
Anotações/explicações: https://drive.google.com/file/d/1WXCz5PgXpumV59kG_m1KnmHIsIcCuTd8/view?usp=sharing*/
//Exercício 01 variáveis:
let nome = 'Mariana';
let sobrenome = 'Fernandes';
let idade = 16;
let altura = 175;
let estaAprovado = true;
console.log('Nome: '+nome+'\n Idade: '+idade+'\n Altura: '+altura);
//Constantes
const valorIngressoAdulto = 20;
console.log(valorIngressoAdulto);
//Tipos primitivos
//let nome = 'Mariana'; -> String literal
// let idade = 16; -> number literal
// let estaAprovado = true; -> boolean
// let sobrenome; -> undefined tipo será definido quando for atribuído algum valor
// let corSelecionada = null; -> valor será atribuído depois
//objetos
let pessoa = {
//key: 'value'
nome: 'Mariana',
sobrenome: 'Fernandes',
idade: 16,
altura: 175,
estaAprovado: true
};
//array
let camy = ['Camylly', 17, 'São Paulo', 'DS', true];
console.log(camy.length); //tamanho do array
console.log(camy[0]); //mostra o que está na posição 0 desse array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//incremento e decremento
console.log(idade++); // imprime 16, pois como o ++ tá depois da apresentação, ele não vai mostrar a idade atualizada
console.log(idade); //imprime 17, por causa da operação da linha anterior
console.log(++idade); //imprime 18, porque ele faz a operação antes de apresentar
idade -= 2;
//operadores de igualdade
//igualdade estrita (mais recomendado)
console.log(1 === 1); //imprime true, porque é o mesmo valor e o mesmo tipo
console.log('1' === 1) //imprime a false, porque apesar do valor ser o mesmo, eles são de tipos diferentes
//igualdade solta
console.log(1 == 1); //imprime true porque tem o mesmo valor
console.log('1' == 1); //imprime true porque continua tendo o mesmo valor
//operador ternário
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//ou seja, se a condicao estiver correta, a variavel ex vai receber 'a', se não vai receber 'b'
//operadores lógicos
//operador AND (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//se as duas variáveis comparadas forem true, ele retornará true, mas se alguma delas for false ele será false, nesse caso ele é true
console.log(podeAplicar); //vai imprimir false porque o possuiCarteira é false
//operador OR (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//se pelo menos uma das variáveis comparadas forem true, ele já vai retornar true
console.log('candidato pode aplicar: ', podeAplicar); //vai imprimir true porque o maiorDeIdade é true
//Operador NOT (!)
let candidatoRecusado = !podeAplicar;
//se a variável podeAplicar for true, então candidatoRecusado receberá false, mas se podeAplicar for false o candidatoRecusado será true
console.log('candidato recusado: ', candidatoRecusado);//retorna false porque o candidato pode aplicar
//Condicionais: if... else e switch... case
numero = 263;
//Esse pequeno código verifica se a condição entre parênteses é verdadeira, caso seja ele executa o que tá no if, caso contrário ele executa o que está no "else"
if(numero % 2 == 0){ //nesse caso ele verifica se a variável numero é par
console.log('Par');
}
else if (numero == 0){//Se o laço não cair na condição anterior, ele faz essa verificação
console.log('Zero');
}
else{
console.log('ímpar');
}
//Laços de repetição: for, while, do... while, for... in e for... of
for (let i = 0; i < 5; i++){
//i = índice, executar enquanto a condição for verdadeira (i<5) e realiza o incremento
console.log(i);
}
let i = 0; //indice
while (i < 5){
console.log(i);
i++; //incremento
}
//A única diferença é que ele executa pelo menos uma vez antes de verificar o índice
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: criar um objeto "padrão" mais facilmente através de um método
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
marcaCel,
bateriaCel,
tamanhoTela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1);
//Constructor function: mesmo objetivo que a factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instanciando o objeto através do construtor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//natureza dinâmica dos objetos
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adiciona uma propriedade ao objeto já existente
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
| console.log(mouse);
//clonando objetos
//nesse exemplo o novo objeto receberá o objeto já criado celular com a adição da propriedade temDigital
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> gera um número aleatório de 0 a 1
console.log(Math.random() * (10 - 1) + 1); // -> gera um número aleatório de 1 a 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> gera o maior número entre os listados como parâmetro
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> faz o contrário do que o max, gera o menor número listado
console.log(Math.pow(5,2)); // -> calcula o primeiro parâmetro elevado ao segundo parâmetro
//String: existe o tipo primitivo e o tipo objeto
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> mostra a quantidade de caracteres de uma string
console.log(primitivo[2]); // -> mostra o caracter que está no índice indicado, lembrando de que sempre começa no 0
console.log(primitivo.includes('Tipo')); // -> gera um true, porque na string indicada contém 'Tipo'
console.log(primitivo.includes('azul')); // -> gera um false, porque na string indicada contém 'azul'
console.log(primitivo.startsWith('Tipo')); // -> gera um true, porque a string indicada começa com 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> gera um true, porque a string indicada termina com 'primitivo'
console.log(primitivo.indexOf('v')); // -> gera o index em que 'v' está
console.log(primitivo.replace('Tipo', ''));// -> substitui uma parte da String, nesse caso ela vira só 'primitivo'
primitivo.trim(); //-> remove espaços desnecessários no início ou no final da string
console.log(obj.split(' ')); //-> separa a string a cada espaço que encontra, nesse caso ele mostra 'Tipo' e 'objeto' como duas coisas diferentes
//String com sequência de escape
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String com template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //vai preencher com as informações de data e hora atuais
const data1 = new Date('March 06 2019 09:30'); //data e hora deifinidos na declaração da constante
const data2 = new Date(2019, 02, 06, 09, 30, 05); //data e hora definidos através dos números na ordem: ano, mês, dia, hora, minuto, segundo e ms
data2.getFullYear(); //retorna o ano
data2.setFullYear(2021); //muda o ano dessa data
//Formas de converter para String:
data1.toDateString(); //retorna só a data em String
data2.toTimeString(); //retorna a data e hora em string
dataAtual.toISOString(); //retorna as infos em números, como em SQL
//Constructor Function
//Exercício: criar um objeto postagem (miniprojeto13) através de um construtor
function Post(titulo, mensagem, autor){
this.titulo = titulo;
this.autor = autor;
this.mensagem = mensagem;
this.comentarios = [];
this.views = 0;
this.estaAoVivo = false;
}
let post = new Post('a', 'b', 'c');
//Arrays
let numeros = [1, 2, 3];
//Adicionando números no início:
numeros.unshift(0);
//Adicionando números no meio:
numeros.splice(2, 0, 1.5); // vai adicionar 1.5 no 2, e o parâmetro 0 é porque não vamos substituir nada, apenas adicionar
//Adicionando números no final:
numeros.push(4);
numeros.indexOf(1.5); //retorna o índice de 1.5 que é 2, caso não existisse 1.5 no array ele retornaria -1
numeros.lastIndexOf(1.5); //nesse caso, se tiverem dois 1.5 no array ele retorna o index do último
numeros.includes(5); //retorna true caso tenha 5 em algum índice do array e false caso não tenha
let pessoas = [
{id: 1, nome: 'Rebeca'},
{id: 2, nome: 'Laura'}
];
//Encontrando um objeto dentro de um array utilizando o find
let objetoEncontrado = pessoas.find(function(pessoas){
return pessoas.nome == 'Laura';
});
console.log(objetoEncontrado); //vai retornar o objeto que tem o nome 'Laura'
//Arrow function: usando esse método para encurtar a função anterior
objetoEncontrado = pessoas.find(pessoas => pessoas.nome === 'Laura');
//Removendo valores de um array:
numeros.pop(); //remove o último valor do array
numeros.shift(); //remove o primeiro valor do array
numeros.splice(1, 1); //remove um número no índice 1
console.log(numeros);
//Formas de esvaziar um Array:
numeros = []; //só remove esse array e deixa as referências normais
numeros.length = 0; //esvazia o array e as referências
numeros.splice(0, numeros.length); //esvazia o array e as referências
//Combinando e cortando Arrays
const array1 = [1, 2, 3];
const array2 = [4, 5, 6];
//Combinando
let combinado = array1.concat(array2);
console.log(combinado);
//Cortando
const cortado = combinado.slice(0, 3); //tem como dois parâmetros o índice inicial e o final da parte que será mantida
console.log(cortado);
//Concatenando arrays com Spread
combinado = [...array1, ...array2, 7];
//ForEach
combinado.forEach((combinado, indice) => console.log('Índice ' + indice + ': '+ combinado));
//Combinando arrays com join e split
combinado = combinado.join('-'); //separa os elementos do array pelo traço
let frase = 'esse é um curso de javascript';
frase = frase.split(' '); //separa cada palavra em um elemento diferente, identificando através de espaços
frase = frase.join('-'); //separa as palavras em um elemento só separados por um traço
/*Recebendo dados de um usuário:
const idadeUser = prompt('Qual sua idade?');
if (idadeUser >= 18) console.log('Maior de idade');
alert('Menor de idade');*/ | //deletando propriedades e funções do objeto existente:
delete mouse.trocarDPI;
delete mouse.velocidade;
| random_line_split |
tree_estimator.py | import os
import time
import numpy as np
import tensorflow as tf
from tree_model import CNNModel
import logging
flags = tf.app.flags
flags.DEFINE_string("data_dir", "data", "data directory")
flags.DEFINE_string("relation_file", "RE/relation2id.txt", "")
flags.DEFINE_string("out_dir", "preprocess", "")
flags.DEFINE_string("word_embed_file", "word_embed.npy", "")
flags.DEFINE_string("vocab_file", "vocab.txt", "")
flags.DEFINE_string("kb_entity_embed_file", "kb_entity_embed.npy", "")
flags.DEFINE_string("train_records", "train.records","")
flags.DEFINE_string("test_records", "test.records","")
flags.DEFINE_integer("num_threads", 10, "")
flags.DEFINE_integer("batch_size", 100, "")
flags.DEFINE_integer("max_len", 220, "")
flags.DEFINE_integer("epochs", 1, "")
flags.DEFINE_integer("log_freq", 50, "")
flags.DEFINE_integer("max_children", 5, "")
FLAGS = flags.FLAGS
def get_params():
return {
"learning_rate": 0.001,
"pos_dim" : 5,
"num_filters" : 230,
"kernel_size" : 3,
"max_len" : FLAGS.max_len,
"num_rels" : 53,
"batch_size" : FLAGS.batch_size,
"l2_coef" : 1e-4,
}
def load_vocab():
vocab_file = os.path.join(FLAGS.out_dir, FLAGS.vocab_file)
vocab = []
vocab2id = {}
with open(vocab_file) as f:
for id, line in enumerate(f):
token = line.strip()
vocab.append(token)
vocab2id[token] = id
tf.logging.info("load vocab, size: %d" % len(vocab))
return vocab, vocab2id
def load_relation():
path = os.path.join(FLAGS.data_dir, FLAGS.relation_file)
relations = []
relation2id = {}
with open(path) as f:
for line in f:
parts = line.strip().split()
rel, id = parts[0], int(parts[1])
relations.append(rel)
relation2id[rel] = id
tf.logging.info("load relation, relation size %d" % len(relations))
return relations, relation2id
def _parse_example(example_proto):
context_features = {
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'bag_size': tf.FixedLenFeature([], tf.int64),}
sequence_features = {
# "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "seq_len": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"tokens": tf.VarLenFeature(dtype=tf.int64),
"children": tf.VarLenFeature(dtype=tf.int64),
"e1_dist": tf.VarLenFeature(dtype=tf.int64),
"e2_dist": tf.VarLenFeature(dtype=tf.int64),
"seq_len": tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=example_proto,
context_features=context_features,
sequence_features=sequence_features)
# e1 = context_parsed['e1']
# e2 = context_parsed['e2']
label = context_parsed['label']
bag_size = context_parsed['bag_size']
tokens = sequence_parsed['tokens']
children = sequence_parsed['children']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = sequence_parsed['seq_len']
# tokens = tf.sparse_tensor_to_dense(tokens)
# children = tf.sparse_tensor_to_dense(children)
# e1_dist = tf.sparse_tensor_to_dense(e1_dist)
# e2_dist = tf.sparse_tensor_to_dense(e2_dist)
# seq_len = tf.sparse_tensor_to_dense(seq_len)
return label, bag_size, tokens, e1_dist, e2_dist, seq_len, children
def batch_sparse_idx(n_sent, seq_len, n_channel=1):
'''
[ [ 0 0] [ 0 1] [ 0 2] [ 0 3] [ 0 4] [ 0 5]
[ 1 0] [ 1 1] [ 1 2] [ 1 3] [ 1 4] [ 1 5] [ 1 6] [ 1 7]
]
'''
idx0 = tf.constant([], dtype=tf.int64)
idx1 = tf.constant([], dtype=tf.int64)
i = tf.constant(0, dtype=tf.int64)
shape_invariants=[i.get_shape(), tf.TensorShape([None]),tf.TensorShape([None])]
def body(i, a, b):
length = seq_len.values[i]
a = tf.concat([a, i*tf.ones([tf.cast(length*n_channel, tf.int32)], dtype=tf.int64)], axis=0)
b = tf.concat([b, tf.range(length*n_channel, dtype=tf.int64)], axis=0)
return i+1, a, b
_, idx0, idx1 = tf.while_loop(lambda i, a, b: i<n_sent,
body, [i, idx0, idx1], shape_invariants)
idx = tf.stack([idx0,idx1], axis=-1)
return idx
def _parse_batch_sparse(*args):
labels, bag_size, tokens, e1_dist, e2_dist, seq_len, children=args
n_sent = tf.reduce_sum(bag_size)
max_len = tf.reduce_max(seq_len.values)
# reshape 2d tensor: tokens, e1_dist, e2_dist
idx2d = batch_sparse_idx(n_sent, seq_len)
dense_shape_2d = [n_sent, max_len]
tokens = tf.SparseTensor(idx2d, tokens.values, dense_shape_2d)
e1_dist = tf.SparseTensor(idx2d, e1_dist.values, dense_shape_2d)
e2_dist = tf.SparseTensor(idx2d, e2_dist.values, dense_shape_2d)
# map sparse tensor to 2d dense tensor
tokens = tf.sparse_tensor_to_dense(tokens) # [n_sent, len]
e1_dist = tf.sparse_tensor_to_dense(e1_dist) # [n_sent, len]
e2_dist = tf.sparse_tensor_to_dense(e2_dist) # [n_sent, len]
# reshape 3d tensor: children
idx3d = batch_sparse_idx(n_sent, seq_len, n_channel=FLAGS.max_children)
dense_shape_3d = [n_sent, max_len*FLAGS.max_children]
children = tf.SparseTensor(idx3d, children.values, dense_shape_3d)
# map sparse tensor to 3d dense tensor
children = tf.sparse_tensor_to_dense(children) # [n_sent, len*n_channel]
shape2d = tf.shape(tokens)
children = tf.reshape(children, [shape2d[0], shape2d[1], FLAGS.max_children])
# idx to restore bag
bag_idx = tf.scan(lambda a, x: a+x, tf.pad(bag_size, [[1,0]]))
bag_idx = tf.cast(bag_idx, tf.int32)
features = bag_size, bag_idx, seq_len.values, tokens, e1_dist, e2_dist, children
return features, labels
def _input_fn(filenames, epochs, batch_size, shuffle=False):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_example) # Parse the record into tensors.
if shuffle:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(_parse_batch_sparse)
#iterator = dataset.make_initializable_iterator()
#batch_data = iterator.get_next()
return dataset
def train_input_fn():
"""An input function for training"""
# Initialize `iterator` with training data.
train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]
return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)
def test_input_fn():
test_filenames = [os.path.join(FLAGS.out_dir, FLAGS.test_records) ]
return _input_fn(test_filenames, 1, FLAGS.batch_size, shuffle=False)
class PatTopKHook(tf.train.SessionRunHook):
def __init__(self, prob_tensor, labels_tensor):
self.prob_tensor = prob_tensor
self.labels_tensor = labels_tensor
self.all_prob=[]
self.all_labels = []
def before_run(self, run_context):
|
def after_run(self, run_context, run_values):
prob, label = run_values.results
self.all_prob.append(prob)
self.all_labels.append(label)
def end(self, session):
all_prob = np.concatenate(self.all_prob, axis=0)
all_labels = np.concatenate(self.all_labels,axis=0)
np.save('prob.npy', all_prob)
np.save('labels.npy', all_labels)
tf.logging.info('save results to .npy file')
bag_size, num_class = all_prob.shape
mask = np.ones([num_class])
mask[0]=0
mask_prob = np.reshape(all_prob*mask, [-1])
idx_prob = mask_prob.argsort()
one_hot_labels = np.zeros([bag_size, num_class])
one_hot_labels[np.arange(bag_size), all_labels] = 1
one_hot_labels = np.reshape(one_hot_labels, [-1])
idx = idx_prob[-100:][::-1]
p100 = np.mean(one_hot_labels[idx])
idx = idx_prob[-200:][::-1]
p200 = np.mean(one_hot_labels[idx])
idx = idx_prob[-500:][::-1]
p500 = np.mean(one_hot_labels[idx])
tf.logging.info("p@100: %.3f p@200: %.3f p@500: %.3f" % (p100, p200, p500))
tf.logging.info(all_prob[-1][:5])
def my_model(features, labels, mode, params):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
vocab, vocab2id = load_vocab()
relations, relation2id = load_relation()
word_embed = np.load(os.path.join(FLAGS.out_dir, FLAGS.word_embed_file))
training = mode == tf.estimator.ModeKeys.TRAIN
m = CNNModel(params, word_embed, features, labels, training)
# Compute evaluation metrics.
metrics = {'accuracy': m.accuracy, 'mask_accuracy': m.mask_accuracy}
tf.summary.scalar('accuracy', m.accuracy[1])
if mode == tf.estimator.ModeKeys.EVAL:
p_hook = PatTopKHook(m.prob, labels)
return tf.estimator.EstimatorSpec(
mode, loss=m.total_loss, eval_metric_ops=metrics, evaluation_hooks=[p_hook])
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
logging_hook = tf.train.LoggingTensorHook({"loss" : m.total_loss,
"accuracy" : m.accuracy[0], 'mask_accuracy': m.mask_accuracy[0]},
every_n_iter=FLAGS.log_freq)
return tf.estimator.EstimatorSpec(mode, loss=m.total_loss, train_op=m.train_op,
training_hooks = [logging_hook])
def main(_):
start_time = time.time()
params = get_params()
classifier = tf.estimator.Estimator(
model_fn=my_model,
model_dir="saved_models/model-tree/",
params=params)
classifier.train(input_fn=train_input_fn)
eval_result = classifier.evaluate(input_fn=test_input_fn)
tf.logging.info('\nTest set accuracy: {accuracy:0.3f} {mask_accuracy:0.3f}\n'.format(**eval_result))
duration = time.time() - start_time
tf.logging.info("duration: %.2f hours" % (duration/3600))
# test_records = os.path.join(FLAGS.out_dir, FLAGS.test_records)
# dataset = _input_fn(test_records, 1, 3)
# batch_data = dataset.make_one_shot_iterator().get_next()
# with tf.train.MonitoredTrainingSession() as sess:
# features, labels = batch_data
# for t in sess.run(features):
# print(t.shape)
# print(t)
# print()
# while not sess.should_stop():
# s = sess.run(m.bag_score)
if __name__=='__main__':
tf.logging.set_verbosity(tf.logging.INFO)
log = logging.getLogger('tensorflow')
fh = logging.FileHandler('tmp.log')
log.addHandler(fh)
tf.app.run()
| return tf.train.SessionRunArgs([self.prob_tensor, self.labels_tensor]) | identifier_body |
tree_estimator.py | import os
import time
import numpy as np
import tensorflow as tf
from tree_model import CNNModel
import logging
flags = tf.app.flags
flags.DEFINE_string("data_dir", "data", "data directory")
flags.DEFINE_string("relation_file", "RE/relation2id.txt", "")
flags.DEFINE_string("out_dir", "preprocess", "")
flags.DEFINE_string("word_embed_file", "word_embed.npy", "")
flags.DEFINE_string("vocab_file", "vocab.txt", "")
flags.DEFINE_string("kb_entity_embed_file", "kb_entity_embed.npy", "")
flags.DEFINE_string("train_records", "train.records","")
flags.DEFINE_string("test_records", "test.records","")
flags.DEFINE_integer("num_threads", 10, "")
flags.DEFINE_integer("batch_size", 100, "")
flags.DEFINE_integer("max_len", 220, "")
flags.DEFINE_integer("epochs", 1, "")
flags.DEFINE_integer("log_freq", 50, "")
flags.DEFINE_integer("max_children", 5, "")
FLAGS = flags.FLAGS
def get_params():
return {
"learning_rate": 0.001,
"pos_dim" : 5,
"num_filters" : 230,
"kernel_size" : 3,
"max_len" : FLAGS.max_len,
"num_rels" : 53,
"batch_size" : FLAGS.batch_size,
"l2_coef" : 1e-4,
}
def load_vocab():
vocab_file = os.path.join(FLAGS.out_dir, FLAGS.vocab_file)
vocab = []
vocab2id = {}
with open(vocab_file) as f:
for id, line in enumerate(f):
token = line.strip()
vocab.append(token)
vocab2id[token] = id
tf.logging.info("load vocab, size: %d" % len(vocab))
return vocab, vocab2id
def load_relation():
path = os.path.join(FLAGS.data_dir, FLAGS.relation_file)
relations = []
relation2id = {}
with open(path) as f:
for line in f:
parts = line.strip().split()
rel, id = parts[0], int(parts[1])
relations.append(rel)
relation2id[rel] = id
tf.logging.info("load relation, relation size %d" % len(relations))
return relations, relation2id
def _parse_example(example_proto):
context_features = {
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'bag_size': tf.FixedLenFeature([], tf.int64),}
sequence_features = {
# "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "seq_len": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"tokens": tf.VarLenFeature(dtype=tf.int64),
"children": tf.VarLenFeature(dtype=tf.int64),
"e1_dist": tf.VarLenFeature(dtype=tf.int64),
"e2_dist": tf.VarLenFeature(dtype=tf.int64),
"seq_len": tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=example_proto,
context_features=context_features,
sequence_features=sequence_features)
# e1 = context_parsed['e1']
# e2 = context_parsed['e2']
label = context_parsed['label']
bag_size = context_parsed['bag_size']
tokens = sequence_parsed['tokens']
children = sequence_parsed['children']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = sequence_parsed['seq_len']
# tokens = tf.sparse_tensor_to_dense(tokens)
# children = tf.sparse_tensor_to_dense(children)
# e1_dist = tf.sparse_tensor_to_dense(e1_dist)
# e2_dist = tf.sparse_tensor_to_dense(e2_dist)
# seq_len = tf.sparse_tensor_to_dense(seq_len)
return label, bag_size, tokens, e1_dist, e2_dist, seq_len, children
def | (n_sent, seq_len, n_channel=1):
'''
[ [ 0 0] [ 0 1] [ 0 2] [ 0 3] [ 0 4] [ 0 5]
[ 1 0] [ 1 1] [ 1 2] [ 1 3] [ 1 4] [ 1 5] [ 1 6] [ 1 7]
]
'''
idx0 = tf.constant([], dtype=tf.int64)
idx1 = tf.constant([], dtype=tf.int64)
i = tf.constant(0, dtype=tf.int64)
shape_invariants=[i.get_shape(), tf.TensorShape([None]),tf.TensorShape([None])]
def body(i, a, b):
length = seq_len.values[i]
a = tf.concat([a, i*tf.ones([tf.cast(length*n_channel, tf.int32)], dtype=tf.int64)], axis=0)
b = tf.concat([b, tf.range(length*n_channel, dtype=tf.int64)], axis=0)
return i+1, a, b
_, idx0, idx1 = tf.while_loop(lambda i, a, b: i<n_sent,
body, [i, idx0, idx1], shape_invariants)
idx = tf.stack([idx0,idx1], axis=-1)
return idx
def _parse_batch_sparse(*args):
labels, bag_size, tokens, e1_dist, e2_dist, seq_len, children=args
n_sent = tf.reduce_sum(bag_size)
max_len = tf.reduce_max(seq_len.values)
# reshape 2d tensor: tokens, e1_dist, e2_dist
idx2d = batch_sparse_idx(n_sent, seq_len)
dense_shape_2d = [n_sent, max_len]
tokens = tf.SparseTensor(idx2d, tokens.values, dense_shape_2d)
e1_dist = tf.SparseTensor(idx2d, e1_dist.values, dense_shape_2d)
e2_dist = tf.SparseTensor(idx2d, e2_dist.values, dense_shape_2d)
# map sparse tensor to 2d dense tensor
tokens = tf.sparse_tensor_to_dense(tokens) # [n_sent, len]
e1_dist = tf.sparse_tensor_to_dense(e1_dist) # [n_sent, len]
e2_dist = tf.sparse_tensor_to_dense(e2_dist) # [n_sent, len]
# reshape 3d tensor: children
idx3d = batch_sparse_idx(n_sent, seq_len, n_channel=FLAGS.max_children)
dense_shape_3d = [n_sent, max_len*FLAGS.max_children]
children = tf.SparseTensor(idx3d, children.values, dense_shape_3d)
# map sparse tensor to 3d dense tensor
children = tf.sparse_tensor_to_dense(children) # [n_sent, len*n_channel]
shape2d = tf.shape(tokens)
children = tf.reshape(children, [shape2d[0], shape2d[1], FLAGS.max_children])
# idx to restore bag
bag_idx = tf.scan(lambda a, x: a+x, tf.pad(bag_size, [[1,0]]))
bag_idx = tf.cast(bag_idx, tf.int32)
features = bag_size, bag_idx, seq_len.values, tokens, e1_dist, e2_dist, children
return features, labels
def _input_fn(filenames, epochs, batch_size, shuffle=False):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_example) # Parse the record into tensors.
if shuffle:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(_parse_batch_sparse)
#iterator = dataset.make_initializable_iterator()
#batch_data = iterator.get_next()
return dataset
def train_input_fn():
"""An input function for training"""
# Initialize `iterator` with training data.
train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]
return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)
def test_input_fn():
test_filenames = [os.path.join(FLAGS.out_dir, FLAGS.test_records) ]
return _input_fn(test_filenames, 1, FLAGS.batch_size, shuffle=False)
class PatTopKHook(tf.train.SessionRunHook):
def __init__(self, prob_tensor, labels_tensor):
self.prob_tensor = prob_tensor
self.labels_tensor = labels_tensor
self.all_prob=[]
self.all_labels = []
def before_run(self, run_context):
return tf.train.SessionRunArgs([self.prob_tensor, self.labels_tensor])
def after_run(self, run_context, run_values):
prob, label = run_values.results
self.all_prob.append(prob)
self.all_labels.append(label)
def end(self, session):
all_prob = np.concatenate(self.all_prob, axis=0)
all_labels = np.concatenate(self.all_labels,axis=0)
np.save('prob.npy', all_prob)
np.save('labels.npy', all_labels)
tf.logging.info('save results to .npy file')
bag_size, num_class = all_prob.shape
mask = np.ones([num_class])
mask[0]=0
mask_prob = np.reshape(all_prob*mask, [-1])
idx_prob = mask_prob.argsort()
one_hot_labels = np.zeros([bag_size, num_class])
one_hot_labels[np.arange(bag_size), all_labels] = 1
one_hot_labels = np.reshape(one_hot_labels, [-1])
idx = idx_prob[-100:][::-1]
p100 = np.mean(one_hot_labels[idx])
idx = idx_prob[-200:][::-1]
p200 = np.mean(one_hot_labels[idx])
idx = idx_prob[-500:][::-1]
p500 = np.mean(one_hot_labels[idx])
tf.logging.info("p@100: %.3f p@200: %.3f p@500: %.3f" % (p100, p200, p500))
tf.logging.info(all_prob[-1][:5])
def my_model(features, labels, mode, params):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
vocab, vocab2id = load_vocab()
relations, relation2id = load_relation()
word_embed = np.load(os.path.join(FLAGS.out_dir, FLAGS.word_embed_file))
training = mode == tf.estimator.ModeKeys.TRAIN
m = CNNModel(params, word_embed, features, labels, training)
# Compute evaluation metrics.
metrics = {'accuracy': m.accuracy, 'mask_accuracy': m.mask_accuracy}
tf.summary.scalar('accuracy', m.accuracy[1])
if mode == tf.estimator.ModeKeys.EVAL:
p_hook = PatTopKHook(m.prob, labels)
return tf.estimator.EstimatorSpec(
mode, loss=m.total_loss, eval_metric_ops=metrics, evaluation_hooks=[p_hook])
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
logging_hook = tf.train.LoggingTensorHook({"loss" : m.total_loss,
"accuracy" : m.accuracy[0], 'mask_accuracy': m.mask_accuracy[0]},
every_n_iter=FLAGS.log_freq)
return tf.estimator.EstimatorSpec(mode, loss=m.total_loss, train_op=m.train_op,
training_hooks = [logging_hook])
def main(_):
start_time = time.time()
params = get_params()
classifier = tf.estimator.Estimator(
model_fn=my_model,
model_dir="saved_models/model-tree/",
params=params)
classifier.train(input_fn=train_input_fn)
eval_result = classifier.evaluate(input_fn=test_input_fn)
tf.logging.info('\nTest set accuracy: {accuracy:0.3f} {mask_accuracy:0.3f}\n'.format(**eval_result))
duration = time.time() - start_time
tf.logging.info("duration: %.2f hours" % (duration/3600))
# test_records = os.path.join(FLAGS.out_dir, FLAGS.test_records)
# dataset = _input_fn(test_records, 1, 3)
# batch_data = dataset.make_one_shot_iterator().get_next()
# with tf.train.MonitoredTrainingSession() as sess:
# features, labels = batch_data
# for t in sess.run(features):
# print(t.shape)
# print(t)
# print()
# while not sess.should_stop():
# s = sess.run(m.bag_score)
if __name__=='__main__':
tf.logging.set_verbosity(tf.logging.INFO)
log = logging.getLogger('tensorflow')
fh = logging.FileHandler('tmp.log')
log.addHandler(fh)
tf.app.run()
| batch_sparse_idx | identifier_name |
tree_estimator.py | import os
import time
import numpy as np
import tensorflow as tf
from tree_model import CNNModel
import logging
flags = tf.app.flags
flags.DEFINE_string("data_dir", "data", "data directory")
flags.DEFINE_string("relation_file", "RE/relation2id.txt", "")
flags.DEFINE_string("out_dir", "preprocess", "")
flags.DEFINE_string("word_embed_file", "word_embed.npy", "")
flags.DEFINE_string("vocab_file", "vocab.txt", "")
flags.DEFINE_string("kb_entity_embed_file", "kb_entity_embed.npy", "")
flags.DEFINE_string("train_records", "train.records","")
flags.DEFINE_string("test_records", "test.records","")
flags.DEFINE_integer("num_threads", 10, "")
flags.DEFINE_integer("batch_size", 100, "")
flags.DEFINE_integer("max_len", 220, "")
flags.DEFINE_integer("epochs", 1, "")
flags.DEFINE_integer("log_freq", 50, "")
flags.DEFINE_integer("max_children", 5, "")
FLAGS = flags.FLAGS
def get_params():
return {
"learning_rate": 0.001,
"pos_dim" : 5,
"num_filters" : 230,
"kernel_size" : 3, |
def load_vocab():
vocab_file = os.path.join(FLAGS.out_dir, FLAGS.vocab_file)
vocab = []
vocab2id = {}
with open(vocab_file) as f:
for id, line in enumerate(f):
token = line.strip()
vocab.append(token)
vocab2id[token] = id
tf.logging.info("load vocab, size: %d" % len(vocab))
return vocab, vocab2id
def load_relation():
path = os.path.join(FLAGS.data_dir, FLAGS.relation_file)
relations = []
relation2id = {}
with open(path) as f:
for line in f:
parts = line.strip().split()
rel, id = parts[0], int(parts[1])
relations.append(rel)
relation2id[rel] = id
tf.logging.info("load relation, relation size %d" % len(relations))
return relations, relation2id
def _parse_example(example_proto):
context_features = {
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'bag_size': tf.FixedLenFeature([], tf.int64),}
sequence_features = {
# "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "seq_len": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"tokens": tf.VarLenFeature(dtype=tf.int64),
"children": tf.VarLenFeature(dtype=tf.int64),
"e1_dist": tf.VarLenFeature(dtype=tf.int64),
"e2_dist": tf.VarLenFeature(dtype=tf.int64),
"seq_len": tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=example_proto,
context_features=context_features,
sequence_features=sequence_features)
# e1 = context_parsed['e1']
# e2 = context_parsed['e2']
label = context_parsed['label']
bag_size = context_parsed['bag_size']
tokens = sequence_parsed['tokens']
children = sequence_parsed['children']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = sequence_parsed['seq_len']
# tokens = tf.sparse_tensor_to_dense(tokens)
# children = tf.sparse_tensor_to_dense(children)
# e1_dist = tf.sparse_tensor_to_dense(e1_dist)
# e2_dist = tf.sparse_tensor_to_dense(e2_dist)
# seq_len = tf.sparse_tensor_to_dense(seq_len)
return label, bag_size, tokens, e1_dist, e2_dist, seq_len, children
def batch_sparse_idx(n_sent, seq_len, n_channel=1):
'''
[ [ 0 0] [ 0 1] [ 0 2] [ 0 3] [ 0 4] [ 0 5]
[ 1 0] [ 1 1] [ 1 2] [ 1 3] [ 1 4] [ 1 5] [ 1 6] [ 1 7]
]
'''
idx0 = tf.constant([], dtype=tf.int64)
idx1 = tf.constant([], dtype=tf.int64)
i = tf.constant(0, dtype=tf.int64)
shape_invariants=[i.get_shape(), tf.TensorShape([None]),tf.TensorShape([None])]
def body(i, a, b):
length = seq_len.values[i]
a = tf.concat([a, i*tf.ones([tf.cast(length*n_channel, tf.int32)], dtype=tf.int64)], axis=0)
b = tf.concat([b, tf.range(length*n_channel, dtype=tf.int64)], axis=0)
return i+1, a, b
_, idx0, idx1 = tf.while_loop(lambda i, a, b: i<n_sent,
body, [i, idx0, idx1], shape_invariants)
idx = tf.stack([idx0,idx1], axis=-1)
return idx
def _parse_batch_sparse(*args):
labels, bag_size, tokens, e1_dist, e2_dist, seq_len, children=args
n_sent = tf.reduce_sum(bag_size)
max_len = tf.reduce_max(seq_len.values)
# reshape 2d tensor: tokens, e1_dist, e2_dist
idx2d = batch_sparse_idx(n_sent, seq_len)
dense_shape_2d = [n_sent, max_len]
tokens = tf.SparseTensor(idx2d, tokens.values, dense_shape_2d)
e1_dist = tf.SparseTensor(idx2d, e1_dist.values, dense_shape_2d)
e2_dist = tf.SparseTensor(idx2d, e2_dist.values, dense_shape_2d)
# map sparse tensor to 2d dense tensor
tokens = tf.sparse_tensor_to_dense(tokens) # [n_sent, len]
e1_dist = tf.sparse_tensor_to_dense(e1_dist) # [n_sent, len]
e2_dist = tf.sparse_tensor_to_dense(e2_dist) # [n_sent, len]
# reshape 3d tensor: children
idx3d = batch_sparse_idx(n_sent, seq_len, n_channel=FLAGS.max_children)
dense_shape_3d = [n_sent, max_len*FLAGS.max_children]
children = tf.SparseTensor(idx3d, children.values, dense_shape_3d)
# map sparse tensor to 3d dense tensor
children = tf.sparse_tensor_to_dense(children) # [n_sent, len*n_channel]
shape2d = tf.shape(tokens)
children = tf.reshape(children, [shape2d[0], shape2d[1], FLAGS.max_children])
# idx to restore bag
bag_idx = tf.scan(lambda a, x: a+x, tf.pad(bag_size, [[1,0]]))
bag_idx = tf.cast(bag_idx, tf.int32)
features = bag_size, bag_idx, seq_len.values, tokens, e1_dist, e2_dist, children
return features, labels
def _input_fn(filenames, epochs, batch_size, shuffle=False):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_example) # Parse the record into tensors.
if shuffle:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(_parse_batch_sparse)
#iterator = dataset.make_initializable_iterator()
#batch_data = iterator.get_next()
return dataset
def train_input_fn():
"""An input function for training"""
# Initialize `iterator` with training data.
train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]
return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)
def test_input_fn():
test_filenames = [os.path.join(FLAGS.out_dir, FLAGS.test_records) ]
return _input_fn(test_filenames, 1, FLAGS.batch_size, shuffle=False)
class PatTopKHook(tf.train.SessionRunHook):
def __init__(self, prob_tensor, labels_tensor):
self.prob_tensor = prob_tensor
self.labels_tensor = labels_tensor
self.all_prob=[]
self.all_labels = []
def before_run(self, run_context):
return tf.train.SessionRunArgs([self.prob_tensor, self.labels_tensor])
def after_run(self, run_context, run_values):
prob, label = run_values.results
self.all_prob.append(prob)
self.all_labels.append(label)
def end(self, session):
all_prob = np.concatenate(self.all_prob, axis=0)
all_labels = np.concatenate(self.all_labels,axis=0)
np.save('prob.npy', all_prob)
np.save('labels.npy', all_labels)
tf.logging.info('save results to .npy file')
bag_size, num_class = all_prob.shape
mask = np.ones([num_class])
mask[0]=0
mask_prob = np.reshape(all_prob*mask, [-1])
idx_prob = mask_prob.argsort()
one_hot_labels = np.zeros([bag_size, num_class])
one_hot_labels[np.arange(bag_size), all_labels] = 1
one_hot_labels = np.reshape(one_hot_labels, [-1])
idx = idx_prob[-100:][::-1]
p100 = np.mean(one_hot_labels[idx])
idx = idx_prob[-200:][::-1]
p200 = np.mean(one_hot_labels[idx])
idx = idx_prob[-500:][::-1]
p500 = np.mean(one_hot_labels[idx])
tf.logging.info("p@100: %.3f p@200: %.3f p@500: %.3f" % (p100, p200, p500))
tf.logging.info(all_prob[-1][:5])
def my_model(features, labels, mode, params):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
vocab, vocab2id = load_vocab()
relations, relation2id = load_relation()
word_embed = np.load(os.path.join(FLAGS.out_dir, FLAGS.word_embed_file))
training = mode == tf.estimator.ModeKeys.TRAIN
m = CNNModel(params, word_embed, features, labels, training)
# Compute evaluation metrics.
metrics = {'accuracy': m.accuracy, 'mask_accuracy': m.mask_accuracy}
tf.summary.scalar('accuracy', m.accuracy[1])
if mode == tf.estimator.ModeKeys.EVAL:
p_hook = PatTopKHook(m.prob, labels)
return tf.estimator.EstimatorSpec(
mode, loss=m.total_loss, eval_metric_ops=metrics, evaluation_hooks=[p_hook])
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
logging_hook = tf.train.LoggingTensorHook({"loss" : m.total_loss,
"accuracy" : m.accuracy[0], 'mask_accuracy': m.mask_accuracy[0]},
every_n_iter=FLAGS.log_freq)
return tf.estimator.EstimatorSpec(mode, loss=m.total_loss, train_op=m.train_op,
training_hooks = [logging_hook])
def main(_):
start_time = time.time()
params = get_params()
classifier = tf.estimator.Estimator(
model_fn=my_model,
model_dir="saved_models/model-tree/",
params=params)
classifier.train(input_fn=train_input_fn)
eval_result = classifier.evaluate(input_fn=test_input_fn)
tf.logging.info('\nTest set accuracy: {accuracy:0.3f} {mask_accuracy:0.3f}\n'.format(**eval_result))
duration = time.time() - start_time
tf.logging.info("duration: %.2f hours" % (duration/3600))
# test_records = os.path.join(FLAGS.out_dir, FLAGS.test_records)
# dataset = _input_fn(test_records, 1, 3)
# batch_data = dataset.make_one_shot_iterator().get_next()
# with tf.train.MonitoredTrainingSession() as sess:
# features, labels = batch_data
# for t in sess.run(features):
# print(t.shape)
# print(t)
# print()
# while not sess.should_stop():
# s = sess.run(m.bag_score)
if __name__=='__main__':
tf.logging.set_verbosity(tf.logging.INFO)
log = logging.getLogger('tensorflow')
fh = logging.FileHandler('tmp.log')
log.addHandler(fh)
tf.app.run() | "max_len" : FLAGS.max_len,
"num_rels" : 53,
"batch_size" : FLAGS.batch_size,
"l2_coef" : 1e-4,
} | random_line_split |
tree_estimator.py | import os
import time
import numpy as np
import tensorflow as tf
from tree_model import CNNModel
import logging
flags = tf.app.flags
flags.DEFINE_string("data_dir", "data", "data directory")
flags.DEFINE_string("relation_file", "RE/relation2id.txt", "")
flags.DEFINE_string("out_dir", "preprocess", "")
flags.DEFINE_string("word_embed_file", "word_embed.npy", "")
flags.DEFINE_string("vocab_file", "vocab.txt", "")
flags.DEFINE_string("kb_entity_embed_file", "kb_entity_embed.npy", "")
flags.DEFINE_string("train_records", "train.records","")
flags.DEFINE_string("test_records", "test.records","")
flags.DEFINE_integer("num_threads", 10, "")
flags.DEFINE_integer("batch_size", 100, "")
flags.DEFINE_integer("max_len", 220, "")
flags.DEFINE_integer("epochs", 1, "")
flags.DEFINE_integer("log_freq", 50, "")
flags.DEFINE_integer("max_children", 5, "")
FLAGS = flags.FLAGS
def get_params():
return {
"learning_rate": 0.001,
"pos_dim" : 5,
"num_filters" : 230,
"kernel_size" : 3,
"max_len" : FLAGS.max_len,
"num_rels" : 53,
"batch_size" : FLAGS.batch_size,
"l2_coef" : 1e-4,
}
def load_vocab():
vocab_file = os.path.join(FLAGS.out_dir, FLAGS.vocab_file)
vocab = []
vocab2id = {}
with open(vocab_file) as f:
for id, line in enumerate(f):
token = line.strip()
vocab.append(token)
vocab2id[token] = id
tf.logging.info("load vocab, size: %d" % len(vocab))
return vocab, vocab2id
def load_relation():
path = os.path.join(FLAGS.data_dir, FLAGS.relation_file)
relations = []
relation2id = {}
with open(path) as f:
for line in f:
|
tf.logging.info("load relation, relation size %d" % len(relations))
return relations, relation2id
def _parse_example(example_proto):
context_features = {
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'bag_size': tf.FixedLenFeature([], tf.int64),}
sequence_features = {
# "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "seq_len": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"tokens": tf.VarLenFeature(dtype=tf.int64),
"children": tf.VarLenFeature(dtype=tf.int64),
"e1_dist": tf.VarLenFeature(dtype=tf.int64),
"e2_dist": tf.VarLenFeature(dtype=tf.int64),
"seq_len": tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=example_proto,
context_features=context_features,
sequence_features=sequence_features)
# e1 = context_parsed['e1']
# e2 = context_parsed['e2']
label = context_parsed['label']
bag_size = context_parsed['bag_size']
tokens = sequence_parsed['tokens']
children = sequence_parsed['children']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = sequence_parsed['seq_len']
# tokens = tf.sparse_tensor_to_dense(tokens)
# children = tf.sparse_tensor_to_dense(children)
# e1_dist = tf.sparse_tensor_to_dense(e1_dist)
# e2_dist = tf.sparse_tensor_to_dense(e2_dist)
# seq_len = tf.sparse_tensor_to_dense(seq_len)
return label, bag_size, tokens, e1_dist, e2_dist, seq_len, children
def batch_sparse_idx(n_sent, seq_len, n_channel=1):
'''
[ [ 0 0] [ 0 1] [ 0 2] [ 0 3] [ 0 4] [ 0 5]
[ 1 0] [ 1 1] [ 1 2] [ 1 3] [ 1 4] [ 1 5] [ 1 6] [ 1 7]
]
'''
idx0 = tf.constant([], dtype=tf.int64)
idx1 = tf.constant([], dtype=tf.int64)
i = tf.constant(0, dtype=tf.int64)
shape_invariants=[i.get_shape(), tf.TensorShape([None]),tf.TensorShape([None])]
def body(i, a, b):
length = seq_len.values[i]
a = tf.concat([a, i*tf.ones([tf.cast(length*n_channel, tf.int32)], dtype=tf.int64)], axis=0)
b = tf.concat([b, tf.range(length*n_channel, dtype=tf.int64)], axis=0)
return i+1, a, b
_, idx0, idx1 = tf.while_loop(lambda i, a, b: i<n_sent,
body, [i, idx0, idx1], shape_invariants)
idx = tf.stack([idx0,idx1], axis=-1)
return idx
def _parse_batch_sparse(*args):
labels, bag_size, tokens, e1_dist, e2_dist, seq_len, children=args
n_sent = tf.reduce_sum(bag_size)
max_len = tf.reduce_max(seq_len.values)
# reshape 2d tensor: tokens, e1_dist, e2_dist
idx2d = batch_sparse_idx(n_sent, seq_len)
dense_shape_2d = [n_sent, max_len]
tokens = tf.SparseTensor(idx2d, tokens.values, dense_shape_2d)
e1_dist = tf.SparseTensor(idx2d, e1_dist.values, dense_shape_2d)
e2_dist = tf.SparseTensor(idx2d, e2_dist.values, dense_shape_2d)
# map sparse tensor to 2d dense tensor
tokens = tf.sparse_tensor_to_dense(tokens) # [n_sent, len]
e1_dist = tf.sparse_tensor_to_dense(e1_dist) # [n_sent, len]
e2_dist = tf.sparse_tensor_to_dense(e2_dist) # [n_sent, len]
# reshape 3d tensor: children
idx3d = batch_sparse_idx(n_sent, seq_len, n_channel=FLAGS.max_children)
dense_shape_3d = [n_sent, max_len*FLAGS.max_children]
children = tf.SparseTensor(idx3d, children.values, dense_shape_3d)
# map sparse tensor to 3d dense tensor
children = tf.sparse_tensor_to_dense(children) # [n_sent, len*n_channel]
shape2d = tf.shape(tokens)
children = tf.reshape(children, [shape2d[0], shape2d[1], FLAGS.max_children])
# idx to restore bag
bag_idx = tf.scan(lambda a, x: a+x, tf.pad(bag_size, [[1,0]]))
bag_idx = tf.cast(bag_idx, tf.int32)
features = bag_size, bag_idx, seq_len.values, tokens, e1_dist, e2_dist, children
return features, labels
def _input_fn(filenames, epochs, batch_size, shuffle=False):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_example) # Parse the record into tensors.
if shuffle:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(_parse_batch_sparse)
#iterator = dataset.make_initializable_iterator()
#batch_data = iterator.get_next()
return dataset
def train_input_fn():
"""An input function for training"""
# Initialize `iterator` with training data.
train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]
return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)
def test_input_fn():
test_filenames = [os.path.join(FLAGS.out_dir, FLAGS.test_records) ]
return _input_fn(test_filenames, 1, FLAGS.batch_size, shuffle=False)
class PatTopKHook(tf.train.SessionRunHook):
def __init__(self, prob_tensor, labels_tensor):
self.prob_tensor = prob_tensor
self.labels_tensor = labels_tensor
self.all_prob=[]
self.all_labels = []
def before_run(self, run_context):
return tf.train.SessionRunArgs([self.prob_tensor, self.labels_tensor])
def after_run(self, run_context, run_values):
prob, label = run_values.results
self.all_prob.append(prob)
self.all_labels.append(label)
def end(self, session):
all_prob = np.concatenate(self.all_prob, axis=0)
all_labels = np.concatenate(self.all_labels,axis=0)
np.save('prob.npy', all_prob)
np.save('labels.npy', all_labels)
tf.logging.info('save results to .npy file')
bag_size, num_class = all_prob.shape
mask = np.ones([num_class])
mask[0]=0
mask_prob = np.reshape(all_prob*mask, [-1])
idx_prob = mask_prob.argsort()
one_hot_labels = np.zeros([bag_size, num_class])
one_hot_labels[np.arange(bag_size), all_labels] = 1
one_hot_labels = np.reshape(one_hot_labels, [-1])
idx = idx_prob[-100:][::-1]
p100 = np.mean(one_hot_labels[idx])
idx = idx_prob[-200:][::-1]
p200 = np.mean(one_hot_labels[idx])
idx = idx_prob[-500:][::-1]
p500 = np.mean(one_hot_labels[idx])
tf.logging.info("p@100: %.3f p@200: %.3f p@500: %.3f" % (p100, p200, p500))
tf.logging.info(all_prob[-1][:5])
def my_model(features, labels, mode, params):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
vocab, vocab2id = load_vocab()
relations, relation2id = load_relation()
word_embed = np.load(os.path.join(FLAGS.out_dir, FLAGS.word_embed_file))
training = mode == tf.estimator.ModeKeys.TRAIN
m = CNNModel(params, word_embed, features, labels, training)
# Compute evaluation metrics.
metrics = {'accuracy': m.accuracy, 'mask_accuracy': m.mask_accuracy}
tf.summary.scalar('accuracy', m.accuracy[1])
if mode == tf.estimator.ModeKeys.EVAL:
p_hook = PatTopKHook(m.prob, labels)
return tf.estimator.EstimatorSpec(
mode, loss=m.total_loss, eval_metric_ops=metrics, evaluation_hooks=[p_hook])
# Create training op.
assert mode == tf.estimator.ModeKeys.TRAIN
logging_hook = tf.train.LoggingTensorHook({"loss" : m.total_loss,
"accuracy" : m.accuracy[0], 'mask_accuracy': m.mask_accuracy[0]},
every_n_iter=FLAGS.log_freq)
return tf.estimator.EstimatorSpec(mode, loss=m.total_loss, train_op=m.train_op,
training_hooks = [logging_hook])
def main(_):
start_time = time.time()
params = get_params()
classifier = tf.estimator.Estimator(
model_fn=my_model,
model_dir="saved_models/model-tree/",
params=params)
classifier.train(input_fn=train_input_fn)
eval_result = classifier.evaluate(input_fn=test_input_fn)
tf.logging.info('\nTest set accuracy: {accuracy:0.3f} {mask_accuracy:0.3f}\n'.format(**eval_result))
duration = time.time() - start_time
tf.logging.info("duration: %.2f hours" % (duration/3600))
# test_records = os.path.join(FLAGS.out_dir, FLAGS.test_records)
# dataset = _input_fn(test_records, 1, 3)
# batch_data = dataset.make_one_shot_iterator().get_next()
# with tf.train.MonitoredTrainingSession() as sess:
# features, labels = batch_data
# for t in sess.run(features):
# print(t.shape)
# print(t)
# print()
# while not sess.should_stop():
# s = sess.run(m.bag_score)
if __name__=='__main__':
tf.logging.set_verbosity(tf.logging.INFO)
log = logging.getLogger('tensorflow')
fh = logging.FileHandler('tmp.log')
log.addHandler(fh)
tf.app.run()
| parts = line.strip().split()
rel, id = parts[0], int(parts[1])
relations.append(rel)
relation2id[rel] = id | conditional_block |
utils.js | String.prototype.replaceAll = function (oldChar, newChar) {
return this.replace(new RegExp(oldChar, "gm"), newChar);
}
function getCtxPath() {
var path = window.location.pathname;
return path.substring(0, path.indexOf("/", 1));
}
var ctxPath = "";
var ZENG = ZENG || null;
if (ZENG != null) {
ZENG.msgbox.loadingAnimationPath = (ctxPath + "/static/js/msgbox/loading.gif");
}
//对Date的扩展,将 Date 转化为指定格式的String
//月(M)、日(d)、小时(h)、分(m)、秒(s)、季度(q) 可以用 1-2 个占位符,
//年(y)可以用 1-4 个占位符,毫秒(S)只能用 1 个占位符(是 1-3 位的数字)
//例子:
//(new Date()).Format("yyyy-MM-dd hh:mm:ss.S") ==> 2006-07-02 08:09:04.423
//(new Date()).Format("yyyy-M-d h:m:s.S") ==> 2006-7-2 8:9:4.18
Date.prototype.Format = function (fmt) { // author: meizz
var o = {
"M+": this.getMonth() + 1, // 月份
"d+": this.getDate(), // 日
"h+": this.getHours(), // 小时
"m+": this.getMinutes(), // 分
"s+": this.getSeconds(), // 秒
"q+": Math.floor((this.getMonth() + 3) / 3), // 季度
"S": this.getMilliseconds()
// 毫秒
};
if (/(y+)/.test(fmt))
fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "")
.substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt))
fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k])
: (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
}
function createSelect($select, data) {
$select.empty();
for (var i = 0; i < data.length; i++) {
var d = data[i];
var option = $("<option>" + d.TEXT + "</option>").val(d.VALUE);
$select.append(option);
}
}
function getRootParent() {
var _this = window;
var _parent = window.parent;
for (; _this != _parent;) {
_this = _parent;
_parent = _this.parent;
}
return _parent;
}
function redirectLogin() {
var expect = ctxPath + "/login.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function redirectIndex() {
var expect = ctxPath + "/index.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function tryLogin() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d == true) {
redirectIndex();
}
});
}
function tryLogout() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d != true) {
redirectLogin();
}
});
}
function validateLogin(d, fun) {
var _d;
if ("string" == typeof d) {
_d = JSON.parse(d);
} else {
_d = d;
}
if (_d.code == -999) {
redirectLogin();
return;
}
fun(_d);
}
function $get0(url, param, fun, async) {
if (fun == undefined) {
fun = param;
}
$.ajax({
cache: false,
type: "post",
url: ctxPath + url,
data: param,
async: async,
success: function (d) {
validateLogin(d, fun);
},
error: function (r) {
errorMsgbox("请求超时");
}
});
}
//同步请求
function $get(url, param, fun) {
$get0(url, param, fun, false);
}
//异步请求
function $geta(url, param, fun) {
$get0(url, param, fun, true);
}
function createCombobox(pid, id, list, cl) {
if (!cl) {
cl = "sel w150";
}
var p = "<select id='" + id + "' name='" + id + "' class='" + cl + "'>";
if (list.length > 0) {
p += "<option></option>";
for (var i in list) {
var item = list[i];
p += "<option value='" + item["VALUE"] + "'>" + item["NAME"] + "</option>"
}
} else {
p += "<option></option>";
}
p += "</select>";
$("#" + pid).html("");
$("#" + pid).append(p);
}
function createTable(pid, id, headers, buttonsFn, callbacks, list, cid) {
var buttons;
var hasButtons;
var isButtonsFn = false;
if ((typeof buttonsFn) == "function") {
hasButtons = true;
isButtonsFn = true;
} else if (buttonsFn.length > 0 && (typeof buttonsFn[0] == "string")) {
hasButtons = true;
buttons = buttonsFn;
}
if (!cid) {
cid = "ID";
}
var headersMap = [];
var p = "<table id=" + id + " class='showtablelist' style='width: 90%'>";
p += "<thead>";
p += "<tr>";
for (var i in headers) {
var header = headers[i];
var arr = header.split(":");
var width = "auto;";
if (arr.length == 2) {
headersMap.push(arr[1]);
} else if (arr.length == 3) {
headersMap.push(arr[1]);
width = arr[2] + "px;"
}
p += "<th nowrap='nowrap' style='width:" + width + "'>" + arr[0] + "</th>";
}
p += "</tr>";
p += "</thead>";
p += "<tbody>";
if (list.length > 0) {
for (var i in list) {
p += "<tr>";
var item = list[i];
if (hasButtons) {
if (isButtonsFn) {
buttons = buttonsFn(item);
}
var buttonsMap = [];
for (var i in buttons) {
buttonsMap.push(buttons[i].split(":"));
}
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
p += "<td nowrap='nowrap'>";
for (var j in buttonsMap) {
p += "<a href='javascript:void(0);' data-id='" + item[cid] + "' data-type='" + buttonsMap[j][1];
p += "' class='" + buttonsMap[j][2] + "'>" + buttonsMap[j][0] + "</a> ";
}
p += "</td>";
} else {
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
}
p += "</tr>";
}
$("#paging").show();
} else {
var noDtataTip = ctxPath + "/static/images/noDataTip.jpg";
p += "<tr>";
p += "<td class='tc' colspan='10'><img class='vm' src='" + noDtataTip + "'/></td>";
p += "</tr>";
$("#paging").hide();
}
p += "</tbody>";
p += "</table>";
p += "<br>";
$("#" + pid).html("");
$("#" + pid).append(p);
$("#" + pid).find("a").click(function () {
callbacks[$(this).data("type")].call(this, $(this).data("id"));
});
}
function createPage(url, param, callback) {
if (param != null) {
param = encodeURI(obj2httpParam(param));
}
$("#paging").myPagination({
cssStyle: 'bspagination',
currPage: 1,
pageNumber: 10,
ajax: {
on: true,
type: "POST",
url: ctxPath + url,
dataType: "json",
param: param,
ajaxStart: function () {
ZENG.msgbox.show(" 正在加载中,请稍后...", 6, 10000);
},
onClick: function (page) {
$.fn.debug(page);
},
ajaxStop: function () {
setTimeout(function () {
ZENG.msgbox.hide();
}, 1);
},
callback: function (d) {
validateLogin(d, callback);
}
}
});
}
function validateNotNull(param) {
for (var i in param) {
if (param[i] == '' || param[i] == null) {
errorMsgbox(i);
return false;
}
}
return true;
}
function validateNotNullForm(form) {
var v = true;
$("#" + form).find("input,select").each(function () {
if (this.type == "button") {
return;
}
if (!$(this).attr("hint") == "none") {
if (!$(this).val()) {
v = false;
errorMsgbox(this.name || this.id);
return false;
}
}
});
return v;
}
function mapList(list, id) {
if (!id) {
id = "ID";
}
var map = {};
for (var i in list) {
var item = list[i];
map[item[id]] = item;
}
return map;
}
function getInputValues(id) {
var map = {};
$("#" + id).find("input,select").each(function () {
if (this.type == "button") {
return;
}
map[this.name || this.id] = $(this).val();
});
return map;
}
function setInputValues(id, data, mapping) {
// $("#"+id).find("input").each(function(){
// if(this.type == "button"){
// return;
// }
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// this.value = "";
// return;
// }
// this.value = v;
// });
// $("#"+id).find("select").each(function(){
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// $(this).val("");
// return;
// }
// $(this).val(v);
// });
$("#" + id).find("input,select,textarea").each(function () {
var n = this.name || this.id;
var v = mapping == null ? data[n] : data[mapping[n]];
if (v == null || v == undefined) {
$(this).val("");
return;
}
$(this).val(v);
});
}
function errorMsgbox(msg) {
sysMsgbox(msg, true);
}
function msgbox(msg) {
sysMsgbox(msg, false);
}
function sysMsgbox(msg, error) {
var el = $("#sys-msgbox");
if (el.length == 0) {
$("body").append("<div id='sys-msgbox' title='系统提示' style='display:none;'></div>");
el = $("#sys-msgbox");
}
var color = error == true ? "red" : "black";
el.css("color", color);
el.html(msg);
setTimeout(function () {
el.dialog({
resizable: false,
height: "auto",
width: 400,
modal: true,
buttons: {
"关 闭": function () {
$(this).dialog("close"); | }
}).show();
}, 100);
}
function dialog(id, width, buttons) {
if (buttons == null) {
buttons = width;
width = 400;
}
buttons["关 闭"] = function () {
$(this).dialog("close");
}
$("#" + id).dialog({
resizable: false,
height: "auto",
width: width,
modal: true,
buttons: buttons
}).show();
}
function obj2httpParam(obj) {
if (typeof obj == "string") {
return obj;
}
var str = "";
for (var i in obj) {
str += i + "=" + obj[i] + "&";
}
return str;
}
function addColumn(list, c, nc, map) {
for (var i in list) {
var it = list[i];
it[nc] = map[it[c]];
}
}
function fillDate(list, c, nc, f) {
for (var i in list) {
var it = list[i];
if (it[c]) {
it[nc] = new Date(it[c]).Format(f);
}
}
} | } | random_line_split |
utils.js | String.prototype.replaceAll = function (oldChar, newChar) {
return this.replace(new RegExp(oldChar, "gm"), newChar);
}
function getCtxPath() {
var path = window.location.pathname;
return path.substring(0, path.indexOf("/", 1));
}
var ctxPath = "";
var ZENG = ZENG || null;
if (ZENG != null) {
ZENG.msgbox.loadingAnimationPath = (ctxPath + "/static/js/msgbox/loading.gif");
}
//对Date的扩展,将 Date 转化为指定格式的String
//月(M)、日(d)、小时(h)、分(m)、秒(s)、季度(q) 可以用 1-2 个占位符,
//年(y)可以用 1-4 个占位符,毫秒(S)只能用 1 个占位符(是 1-3 位的数字)
//例子:
//(new Date()).Format("yyyy-MM-dd hh:mm:ss.S") ==> 2006-07-02 08:09:04.423
//(new Date()).Format("yyyy-M-d h:m:s.S") ==> 2006-7-2 8:9:4.18
Date.prototype.Format = function (fmt) { // author: meizz
var o = {
"M+": this.getMonth() + 1, // 月份
"d+": this.getDate(), // 日
"h+": this.getHours(), // 小时
"m+": this.getMinutes(), // 分
"s+": this.getSeconds(), // 秒
"q+": Math.floor((this.getMonth() + 3) / 3), // 季度
"S": this.getMilliseconds()
// 毫秒
};
if (/(y+)/.test(fmt))
fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "")
.substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt))
fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k])
: (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
}
function createSelect($select, data) {
$select.empty();
for (var i = 0; i < data.length; i++) {
var d = data[i];
var option = $("<option>" + d.TEXT + "</option>").val(d.VALUE);
$select.append(option);
}
}
function getRootParent() {
var _this = window;
var _parent = window.parent;
for (; _this != _parent;) {
_this = _parent;
_parent = _this.parent;
}
return _parent;
}
function redirectLogin() {
var expect = ctxPath + "/login.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function redirectIndex() {
var expect = ctxPath + "/index.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function tryLogin() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d == true) {
redirectIndex();
}
});
}
function tryLogout() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d != true) {
redirectLogin();
}
});
}
function validateLogin(d, fun) {
var _d;
if ("string" == typeof d) {
_d = JSON.parse(d);
} else {
_d = d;
}
if (_d.code == -999) {
redirectLogin();
return;
}
fun(_d);
}
function $get0(url, param, fun, async) {
if (fun == undefined) {
fun = param;
}
$.ajax({
cache: false,
type: "post",
url: ctxPath + url,
data: param,
async: async,
success: function (d) {
validateLogin(d, fun);
},
error: function (r) {
errorMsgbox("请求超时");
}
});
}
//同步请求
function $get(url, param, fun) {
$get0(url, param, fun, false);
}
//异步请求
function $geta(url, param, fun) {
$get0(url, param, fun, true);
}
function createCombobox(pid, id, list, cl) {
if (!cl) {
cl = "sel w150";
}
var p = "<select id='" + id + "' name='" + id + "' class='" + cl + "'>";
if (list.length > 0) {
p += "<option></option>";
for (var i in list) {
var item = list[i];
p += "<option value='" + item["VALUE"] + "'>" + item["NAME"] + "</option>"
}
} else {
p += "<option></option>";
}
p += "</select>";
$("#" + pid).html("");
$("#" + pid).append(p);
}
function createTable(pid, id, headers, buttonsFn, callbacks, list, cid) {
var buttons;
var hasButtons;
var isButtonsFn = false;
if ((typeof buttonsFn) == "function") {
hasButtons = true;
isButtonsFn = true;
} else if (buttonsFn.length > 0 && (typeof buttonsFn[0] == "string")) {
hasButtons = true;
buttons = buttonsFn;
}
if (!cid) {
cid = "ID";
}
var headersMap = [];
var p = "<table id=" + id + " class='showtablelist' style='width: 90%'>";
p += "<thead>";
p += "<tr>";
for (var i in headers) {
var header = headers[i];
var arr = header.split(":");
var width = "auto;";
if (arr.length == 2) {
headersMap.push(arr[1]);
} else if (arr.length == 3) {
headersMap.push(arr[1]);
width = arr[2] + "px;"
}
p += "<th nowrap='nowrap' style='width:" + width + "'>" + arr[0] + "</th>";
}
p += "</tr>";
p += "</thead>";
p += "<tbody>";
if (list.length > 0) {
for (var i in list) {
p += "<tr>";
var item = list[i];
if (hasButtons) {
if (isButtonsFn) {
buttons = buttonsFn(item);
}
var buttonsMap = [];
for (var i in buttons) {
buttonsMap.push(buttons[i].split(":"));
}
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
p += "<td nowrap='nowrap'>";
for (var j in buttonsMap) {
p += "<a href='javascript:void(0);' data-id='" + item[cid] + "' data-type='" + buttonsMap[j][1];
p += "' class='" + buttonsMap[j][2] + "'>" + buttonsMap[j][0] + "</a> ";
}
p += "</td>";
} else {
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
}
p += "</tr>";
}
$("#paging").show();
} else {
var noDtataTip = ctxPath + "/static/images/noDataTip.jpg";
p += "<tr>";
p += "<td class='tc' colspan='10'><img class='vm' src='" + noDtataTip + "'/></td>";
p += "</tr>";
$("#paging").hide();
}
p += "</tbody>";
p += "</table>";
p += "<br>";
$("#" + pid).html("");
$("#" + pid).append(p);
$("#" + pid).find("a").click(function () {
callbacks[$(this).data("type")].call(this, $(this).data("id"));
});
}
function createPage(url, param, callback) {
if (param != null) {
param = encodeURI(obj2httpParam(param));
}
$("#paging").myPagination({
cssStyle: 'bspagination',
currPage: 1,
pageNumber: 10,
ajax: {
on: true,
type: "POST",
url: ctxPath + url,
dataType: "json",
param: param,
ajaxStart: function () {
ZENG.msgbox.show(" 正在加载中,请稍后...", 6, 10000);
},
onClick: function (page) {
$.fn.debug(page);
},
ajaxStop: function () {
setTimeout(function () {
ZENG.msgbox.hide();
}, 1);
},
callback: function (d) {
validateLogin(d, callback);
}
}
});
}
function validateNotNull(param) {
for (var i in param) {
if (param[i] == '' || param[i] == null) {
errorMsgbox(i);
return false;
}
}
return true;
}
function validateNotNullForm(form) {
var v = true;
$("#" + form).find("input,select").each(function () {
if (this.type == "button") {
return;
}
if (!$(this).attr("hint") == "none") {
if (!$(this).val()) {
v = false;
errorMsgbox(this.name || this.id);
return false;
}
}
});
return v;
}
function mapList(list, id) {
if (!id) {
id = "ID";
}
var map = {};
for (var i in list) {
var item = list[i];
map[item[id]] = item;
}
return map;
}
function getInputValues(id) {
var map = {};
$("#" + id).find("input,select").each(function () {
if (this.type == "button") {
return;
}
map[this.name || this.id] = $(this) | : data[mapping[this.name]];
// if(v == null || v == undefined){
// this.value = "";
// return;
// }
// this.value = v;
// });
// $("#"+id).find("select").each(function(){
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// $(this).val("");
// return;
// }
// $(this).val(v);
// });
$("#" + id).find("input,select,textarea").each(function () {
var n = this.name || this.id;
var v = mapping == null ? data[n] : data[mapping[n]];
if (v == null || v == undefined) {
$(this).val("");
return;
}
$(this).val(v);
});
}
function errorMsgbox(msg) {
sysMsgbox(msg, true);
}
function msgbox(msg) {
sysMsgbox(msg, false);
}
function sysMsgbox(msg, error) {
var el = $("#sys-msgbox");
if (el.length == 0) {
$("body").append("<div id='sys-msgbox' title='系统提示' style='display:none;'></div>");
el = $("#sys-msgbox");
}
var color = error == true ? "red" : "black";
el.css("color", color);
el.html(msg);
setTimeout(function () {
el.dialog({
resizable: false,
height: "auto",
width: 400,
modal: true,
buttons: {
"关 闭": function () {
$(this).dialog("close");
}
}
}).show();
}, 100);
}
function dialog(id, width, buttons) {
if (buttons == null) {
buttons = width;
width = 400;
}
buttons["关 闭"] = function () {
$(this).dialog("close");
}
$("#" + id).dialog({
resizable: false,
height: "auto",
width: width,
modal: true,
buttons: buttons
}).show();
}
function obj2httpParam(obj) {
if (typeof obj == "string") {
return obj;
}
var str = "";
for (var i in obj) {
str += i + "=" + obj[i] + "&";
}
return str;
}
function addColumn(list, c, nc, map) {
for (var i in list) {
var it = list[i];
it[nc] = map[it[c]];
}
}
function fillDate(list, c, nc, f) {
for (var i in list) {
var it = list[i];
if (it[c]) {
it[nc] = new Date(it[c]).Format(f);
}
}
}
| .val();
});
return map;
}
function setInputValues(id, data, mapping) {
// $("#"+id).find("input").each(function(){
// if(this.type == "button"){
// return;
// }
// var v = mapping == null ? data[this.name] | identifier_body |
utils.js | String.prototype.replaceAll = function (oldChar, newChar) {
return this.replace(new RegExp(oldChar, "gm"), newChar);
}
function getCtxPath() {
var path = window.location.pathname;
return path.substring(0, path.indexOf("/", 1));
}
var ctxPath = "";
var ZENG = ZENG || null;
if (ZENG != null) {
ZENG.msgbox.loadingAnimationPath = (ctxPath + "/static/js/msgbox/loading.gif");
}
//对Date的扩展,将 Date 转化为指定格式的String
//月(M)、日(d)、小时(h)、分(m)、秒(s)、季度(q) 可以用 1-2 个占位符,
//年(y)可以用 1-4 个占位符,毫秒(S)只能用 1 个占位符(是 1-3 位的数字)
//例子:
//(new Date()).Format("yyyy-MM-dd hh:mm:ss.S") ==> 2006-07-02 08:09:04.423
//(new Date()).Format("yyyy-M-d h:m:s.S") ==> 2006-7-2 8:9:4.18
Date.prototype.Format = function (fmt) { // author: meizz
var o = {
"M+": this.getMonth() + 1, // 月份
"d+": this.getDate(), // 日
"h+": this.getHours(), // 小时
"m+": this.getMinutes(), // 分
"s+": this.getSeconds(), // 秒
"q+": Math.floor((this.getMonth() + 3) / 3), // 季度
"S": this.getMilliseconds()
// 毫秒
};
if (/(y+)/.test(fmt))
fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "")
.substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt))
fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k])
: (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
}
function createSelect($select, data) {
$select.empty();
for (var i = 0; i < data.length; i++) {
var d = data[i];
var option = $("<option>" + d.TEXT + "</option>").val(d.VALUE);
$select.append(option);
}
}
function getRootParent() {
var _this = window;
var _parent = window.parent;
for (; _this != _parent;) {
_this = _parent;
_parent = _this.parent;
}
return _parent;
}
function redirectLogin() {
var expect = ctxPath + "/login.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function redirectIndex() {
var expect = ctxPath + "/index.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function tryLogin() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d == true) {
redirectIndex();
}
});
}
function tryLogout() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d != true) {
redirectLogin();
}
});
}
function validateLogin(d, fun) {
var _d;
if ("string" == typeof d) {
_d = JSON.parse(d);
} else {
_d = d;
}
if (_d.code == -999) {
redirectLogin();
return;
}
fun(_d);
}
function $get0(url, param, fun, async) {
if (fun == undefined) {
fun = param;
}
$.ajax({
cache: false,
type: "post",
url: ctxPath + url,
data: param,
async: async,
success: function (d) {
validateLogin(d, fun);
},
error: function (r) {
errorMsgbox("请求超时");
}
});
}
//同步请求
function $get(url, param, fun) {
$get0(url, param, fun, false);
}
//异步请求
function $geta(url, param, fun) {
$get0(url, param, fun, true);
}
function createCombobox(pid, id, list, cl) {
if (!cl) {
cl = "sel w150";
}
var p = "<select id='" + id + "' name='" + id + "' class='" + cl + "'>";
if (list.length > 0) {
p += "<option></option>";
for (var i in list) {
var item = list[i];
p += "<option value='" + item["VALUE"] + "'>" + item["NAME"] + "</option>"
}
} else {
p += "<option></option>";
}
p += "</select>";
$("#" + pid).html("");
$("#" + pid).append(p);
}
function createTable(pid, id, headers, buttonsFn, callbacks, list, cid) {
var buttons;
var hasButtons;
var isButtonsFn = false;
if ((typeof buttonsFn) == "function") {
hasButtons = true;
isButtonsFn = true;
} else if (buttonsFn.length > 0 && (typeof buttonsFn[0] == "string")) {
hasButtons = true;
buttons = buttonsFn;
}
if (!cid) {
cid = "ID";
}
var headersMap = [];
var p = "<table id=" + id + " class='showtablelist' style='width: 90%'>";
p += "<thead>";
p += "<tr>";
for (var i in headers) {
var header = headers[i];
var arr = header.split(":");
var width = "auto;";
if (arr.length == 2) {
headersMap.push(arr[1]);
} else if (arr.length == 3) {
headersMap.push(arr[1]);
width = arr[2] + "px;"
}
p += "<th nowrap='nowrap' style='width:" + width + "'>" + arr[0] + "</th>";
}
p += "</tr>";
p += "</thead>";
p += "<tbody>";
if (list.length > 0) {
for (var i in list) {
p += "<tr>";
var item = list[i];
if (hasButtons) {
if (isButtonsFn) {
buttons = buttonsFn(item);
}
var buttonsMap = [];
for (var i in buttons) {
buttonsMap.push(buttons[i].split(":"));
}
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
p += "<td nowrap='nowrap'>";
for (var j in buttonsMap) {
p += "<a href='javascript:void(0);' data-id='" + item[cid] + "' data-type='" + buttonsMap[j][1];
p += "' class='" + buttonsMap[j][2] + "'>" + buttonsMap[j][0] + "</a> ";
}
p += "</td>";
} else {
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
}
p += "</tr>";
}
$("#paging").show();
} else {
var noDtataTip = ctxPath + "/static/images/noDataTip.jpg";
p += "<tr>";
p += "<td class='tc' colspan='10'><img class='vm' src='" + noDtataTip + "'/></td>";
p += "</tr>";
$("#paging").hide();
}
p += "</tbody>";
p += "</table>";
p += "<br>";
$("#" + pid).html("");
$("#" + pid).append(p);
$("#" + pid).find("a").click(function () {
callbacks[$(this).data("type")].call(this, $(this).data("id"));
});
}
function createPage(url, param, callback) {
if (param != null) {
param = encodeURI(obj2httpParam(param));
}
$("#paging").myPagination({
cssStyle: 'bspagination',
currPage: 1,
pageNumber: 10,
ajax: {
on: true,
type: "POST",
url: ctxPath + url,
dataType: "json",
param: param,
ajaxStart: function () {
ZENG.msgbox.show(" 正在加载中,请稍后...", 6, 10000);
},
onClick: function (page) {
$.fn.debug(page);
},
ajaxStop: function () {
setTimeout(function () {
ZENG.msgbox.hide();
}, 1);
},
callback: function (d) {
validateLogin(d, callback);
}
}
});
}
function validateNotNull(param) {
for (var i in param) {
if (param[i] == '' || param[i] == null) {
errorMsgbox(i);
return false;
}
}
return tru | validateNotNullForm(form) {
var v = true;
$("#" + form).find("input,select").each(function () {
if (this.type == "button") {
return;
}
if (!$(this).attr("hint") == "none") {
if (!$(this).val()) {
v = false;
errorMsgbox(this.name || this.id);
return false;
}
}
});
return v;
}
function mapList(list, id) {
if (!id) {
id = "ID";
}
var map = {};
for (var i in list) {
var item = list[i];
map[item[id]] = item;
}
return map;
}
function getInputValues(id) {
var map = {};
$("#" + id).find("input,select").each(function () {
if (this.type == "button") {
return;
}
map[this.name || this.id] = $(this).val();
});
return map;
}
function setInputValues(id, data, mapping) {
// $("#"+id).find("input").each(function(){
// if(this.type == "button"){
// return;
// }
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// this.value = "";
// return;
// }
// this.value = v;
// });
// $("#"+id).find("select").each(function(){
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// $(this).val("");
// return;
// }
// $(this).val(v);
// });
$("#" + id).find("input,select,textarea").each(function () {
var n = this.name || this.id;
var v = mapping == null ? data[n] : data[mapping[n]];
if (v == null || v == undefined) {
$(this).val("");
return;
}
$(this).val(v);
});
}
function errorMsgbox(msg) {
sysMsgbox(msg, true);
}
function msgbox(msg) {
sysMsgbox(msg, false);
}
function sysMsgbox(msg, error) {
var el = $("#sys-msgbox");
if (el.length == 0) {
$("body").append("<div id='sys-msgbox' title='系统提示' style='display:none;'></div>");
el = $("#sys-msgbox");
}
var color = error == true ? "red" : "black";
el.css("color", color);
el.html(msg);
setTimeout(function () {
el.dialog({
resizable: false,
height: "auto",
width: 400,
modal: true,
buttons: {
"关 闭": function () {
$(this).dialog("close");
}
}
}).show();
}, 100);
}
function dialog(id, width, buttons) {
if (buttons == null) {
buttons = width;
width = 400;
}
buttons["关 闭"] = function () {
$(this).dialog("close");
}
$("#" + id).dialog({
resizable: false,
height: "auto",
width: width,
modal: true,
buttons: buttons
}).show();
}
function obj2httpParam(obj) {
if (typeof obj == "string") {
return obj;
}
var str = "";
for (var i in obj) {
str += i + "=" + obj[i] + "&";
}
return str;
}
function addColumn(list, c, nc, map) {
for (var i in list) {
var it = list[i];
it[nc] = map[it[c]];
}
}
function fillDate(list, c, nc, f) {
for (var i in list) {
var it = list[i];
if (it[c]) {
it[nc] = new Date(it[c]).Format(f);
}
}
}
| e;
}
function | identifier_name |
utils.js | String.prototype.replaceAll = function (oldChar, newChar) {
return this.replace(new RegExp(oldChar, "gm"), newChar);
}
function getCtxPath() {
var path = window.location.pathname;
return path.substring(0, path.indexOf("/", 1));
}
var ctxPath = "";
var ZENG = ZENG || null;
if (ZENG != null) {
ZENG.msgbox.loadingAnimationPath = (ctxPath + "/static/js/msgbox/loading.gif");
}
//对Date的扩展,将 Date 转化为指定格式的String
//月(M)、日(d)、小时(h)、分(m)、秒(s)、季度(q) 可以用 1-2 个占位符,
//年(y)可以用 1-4 个占位符,毫秒(S)只能用 1 个占位符(是 1-3 位的数字)
//例子:
//(new Date()).Format("yyyy-MM-dd hh:mm:ss.S") ==> 2006-07-02 08:09:04.423
//(new Date()).Format("yyyy-M-d h:m:s.S") ==> 2006-7-2 8:9:4.18
Date.prototype.Format = function (fmt) { // author: meizz
var o = {
"M+": this.getMonth() + 1, // 月份
"d+": this.getDate(), // 日
"h+": this.getHours(), // 小时
"m+": this.getMinutes(), // 分
"s+": this.getSeconds(), // 秒
"q+": Math.floor((this.getMonth() + 3) / 3), // 季度
"S": this.getMilliseconds()
// 毫秒
};
if (/(y+)/.test(fmt))
fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "")
.substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt))
fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k])
: (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
}
function createSelect($select, data) {
$select.empty();
for (var i = 0; i < data.length; i++) {
var d = data[i];
var option = $("<option>" + d.TEXT + "</option>").val(d.VALUE);
$select.append(option);
}
}
func |
_parent = _this.parent;
}
return _parent;
}
function redirectLogin() {
var expect = ctxPath + "/login.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function redirectIndex() {
var expect = ctxPath + "/index.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function tryLogin() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d == true) {
redirectIndex();
}
});
}
function tryLogout() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d != true) {
redirectLogin();
}
});
}
function validateLogin(d, fun) {
var _d;
if ("string" == typeof d) {
_d = JSON.parse(d);
} else {
_d = d;
}
if (_d.code == -999) {
redirectLogin();
return;
}
fun(_d);
}
function $get0(url, param, fun, async) {
if (fun == undefined) {
fun = param;
}
$.ajax({
cache: false,
type: "post",
url: ctxPath + url,
data: param,
async: async,
success: function (d) {
validateLogin(d, fun);
},
error: function (r) {
errorMsgbox("请求超时");
}
});
}
//同步请求
function $get(url, param, fun) {
$get0(url, param, fun, false);
}
//异步请求
function $geta(url, param, fun) {
$get0(url, param, fun, true);
}
function createCombobox(pid, id, list, cl) {
if (!cl) {
cl = "sel w150";
}
var p = "<select id='" + id + "' name='" + id + "' class='" + cl + "'>";
if (list.length > 0) {
p += "<option></option>";
for (var i in list) {
var item = list[i];
p += "<option value='" + item["VALUE"] + "'>" + item["NAME"] + "</option>"
}
} else {
p += "<option></option>";
}
p += "</select>";
$("#" + pid).html("");
$("#" + pid).append(p);
}
function createTable(pid, id, headers, buttonsFn, callbacks, list, cid) {
var buttons;
var hasButtons;
var isButtonsFn = false;
if ((typeof buttonsFn) == "function") {
hasButtons = true;
isButtonsFn = true;
} else if (buttonsFn.length > 0 && (typeof buttonsFn[0] == "string")) {
hasButtons = true;
buttons = buttonsFn;
}
if (!cid) {
cid = "ID";
}
var headersMap = [];
var p = "<table id=" + id + " class='showtablelist' style='width: 90%'>";
p += "<thead>";
p += "<tr>";
for (var i in headers) {
var header = headers[i];
var arr = header.split(":");
var width = "auto;";
if (arr.length == 2) {
headersMap.push(arr[1]);
} else if (arr.length == 3) {
headersMap.push(arr[1]);
width = arr[2] + "px;"
}
p += "<th nowrap='nowrap' style='width:" + width + "'>" + arr[0] + "</th>";
}
p += "</tr>";
p += "</thead>";
p += "<tbody>";
if (list.length > 0) {
for (var i in list) {
p += "<tr>";
var item = list[i];
if (hasButtons) {
if (isButtonsFn) {
buttons = buttonsFn(item);
}
var buttonsMap = [];
for (var i in buttons) {
buttonsMap.push(buttons[i].split(":"));
}
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
p += "<td nowrap='nowrap'>";
for (var j in buttonsMap) {
p += "<a href='javascript:void(0);' data-id='" + item[cid] + "' data-type='" + buttonsMap[j][1];
p += "' class='" + buttonsMap[j][2] + "'>" + buttonsMap[j][0] + "</a> ";
}
p += "</td>";
} else {
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
}
p += "</tr>";
}
$("#paging").show();
} else {
var noDtataTip = ctxPath + "/static/images/noDataTip.jpg";
p += "<tr>";
p += "<td class='tc' colspan='10'><img class='vm' src='" + noDtataTip + "'/></td>";
p += "</tr>";
$("#paging").hide();
}
p += "</tbody>";
p += "</table>";
p += "<br>";
$("#" + pid).html("");
$("#" + pid).append(p);
$("#" + pid).find("a").click(function () {
callbacks[$(this).data("type")].call(this, $(this).data("id"));
});
}
function createPage(url, param, callback) {
if (param != null) {
param = encodeURI(obj2httpParam(param));
}
$("#paging").myPagination({
cssStyle: 'bspagination',
currPage: 1,
pageNumber: 10,
ajax: {
on: true,
type: "POST",
url: ctxPath + url,
dataType: "json",
param: param,
ajaxStart: function () {
ZENG.msgbox.show(" 正在加载中,请稍后...", 6, 10000);
},
onClick: function (page) {
$.fn.debug(page);
},
ajaxStop: function () {
setTimeout(function () {
ZENG.msgbox.hide();
}, 1);
},
callback: function (d) {
validateLogin(d, callback);
}
}
});
}
function validateNotNull(param) {
for (var i in param) {
if (param[i] == '' || param[i] == null) {
errorMsgbox(i);
return false;
}
}
return true;
}
function validateNotNullForm(form) {
var v = true;
$("#" + form).find("input,select").each(function () {
if (this.type == "button") {
return;
}
if (!$(this).attr("hint") == "none") {
if (!$(this).val()) {
v = false;
errorMsgbox(this.name || this.id);
return false;
}
}
});
return v;
}
function mapList(list, id) {
if (!id) {
id = "ID";
}
var map = {};
for (var i in list) {
var item = list[i];
map[item[id]] = item;
}
return map;
}
function getInputValues(id) {
var map = {};
$("#" + id).find("input,select").each(function () {
if (this.type == "button") {
return;
}
map[this.name || this.id] = $(this).val();
});
return map;
}
function setInputValues(id, data, mapping) {
// $("#"+id).find("input").each(function(){
// if(this.type == "button"){
// return;
// }
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// this.value = "";
// return;
// }
// this.value = v;
// });
// $("#"+id).find("select").each(function(){
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// $(this).val("");
// return;
// }
// $(this).val(v);
// });
$("#" + id).find("input,select,textarea").each(function () {
var n = this.name || this.id;
var v = mapping == null ? data[n] : data[mapping[n]];
if (v == null || v == undefined) {
$(this).val("");
return;
}
$(this).val(v);
});
}
function errorMsgbox(msg) {
sysMsgbox(msg, true);
}
function msgbox(msg) {
sysMsgbox(msg, false);
}
function sysMsgbox(msg, error) {
var el = $("#sys-msgbox");
if (el.length == 0) {
$("body").append("<div id='sys-msgbox' title='系统提示' style='display:none;'></div>");
el = $("#sys-msgbox");
}
var color = error == true ? "red" : "black";
el.css("color", color);
el.html(msg);
setTimeout(function () {
el.dialog({
resizable: false,
height: "auto",
width: 400,
modal: true,
buttons: {
"关 闭": function () {
$(this).dialog("close");
}
}
}).show();
}, 100);
}
function dialog(id, width, buttons) {
if (buttons == null) {
buttons = width;
width = 400;
}
buttons["关 闭"] = function () {
$(this).dialog("close");
}
$("#" + id).dialog({
resizable: false,
height: "auto",
width: width,
modal: true,
buttons: buttons
}).show();
}
function obj2httpParam(obj) {
if (typeof obj == "string") {
return obj;
}
var str = "";
for (var i in obj) {
str += i + "=" + obj[i] + "&";
}
return str;
}
function addColumn(list, c, nc, map) {
for (var i in list) {
var it = list[i];
it[nc] = map[it[c]];
}
}
function fillDate(list, c, nc, f) {
for (var i in list) {
var it = list[i];
if (it[c]) {
it[nc] = new Date(it[c]).Format(f);
}
}
}
| tion getRootParent() {
var _this = window;
var _parent = window.parent;
for (; _this != _parent;) {
_this = _parent; | conditional_block |
bikeshare.py | import time
import pandas as pd
import numpy as np
from datetime import datetime
#from collections import Counter
def main():
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
#start of program after title, loop here if restarting program from the top
#Bicycle Picture
print(' o__ __o ,__o __o __o\n ,>/_ -\<, _-\_<, _`\<,_ _ \<_\n(*)`(*).....O/ O.....(*)/\'(*).....(*)/ (*).....(_)/(_)')
#Project Title
print(' ___ __ / \n| | _ | _ _ __ _ | _ (_ _|_ _ _ __ _ \n|^|(/_ | (_ (_)|||(/_ | (_) __) |_(/_\_/(/_| | _> \n _ __ _ _ o \n|_) o | _ (_ |_ _ __ _ | \ _ _|_ _ |_) __ _ | _ _ _|_\n|_) | |<(/_ __)| |(_| | (/_ |_/(_| |_(_| | | (_)_| (/_(_ |_')
#Welcome Statement
print('\nHello! Welcome to Steven Ling\'s udacity python project! \nLet\'s explore some US bikeshare data!\n\n')
#defining time intervals for display time function
intervals = (
('years',217728000), # 60 * 60 * 24 * 7 * 30 * 12
('months',18144000), # 60 * 60 * 24 * 7 * 30
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
#function to convert seconds to years,months,weeks,days,hours,seconds
def display_time(seconds, granularity=6):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def get_filters():
#City Choice Input
city_choice = input("Which city are you interested in?\n\nChoose a city by entering the corresponding number:\n1 for Chicago or\n2 for New York city or\n3 for Washington?")
global city
if city_choice == '1':
city ='chicago'
print('you have chosen Chicago!\n')
elif city_choice == '2':
city = 'new york city'
print('you have chosen New York city!\n')
elif city_choice == '3':
city = 'washington'
print('you have chosen Washington city!\n')
else:
print('This does not seem to be a valid choice!')
restart = input("Do you wish to reselect filters? y/n?\n").lower()
if restart == 'y':
get_filters()
else:
exit()
# TO DO: get user input for month (all, january, february, ... , june)
# Month Choice Input
global month
month =()
month_choice = input("Which month are you interested in?\n\nChoose a month by entering the following choices:\n (all, january, february, march, april, may, june) ")
valid_months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
month_choice = month_choice.lower()
if month_choice in valid_months:
month = month_choice
print ('For months, you have selected {}'.format(month))
else:
print('This does not seem to be a valid choice!')
restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
if restart_month == 'y':
get_filters()
else:
exit()
# Get user input for day of the week
global day
day=()
day_choice = input("which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
day_choice = day_choice.lower()
if day_choice in valid_days:
day = day_choice
print ('For days, you have selected {}'.format(day))
else:
print('This does not seem to be a valid choice!')
restart_days = input("Do you wish to repick filters? y/n?\n").lower()
if restart_days == 'y':
get_filters()
else:
exit()
print('-'*40)
return city, month, day
def load_data(city, month, day):
# load data file into a dataframe
global df
df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
# convert the Start Time and end Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
# extract month and day of week from Start Time to create new columns
df['Start_Hour'] = df['Start Time'].dt.hour
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['Start Time'] = df['Start Time'].dt.time
df['End Time'] = df['End Time'].dt.time
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def time_stats(df):
#Displays statistics on the most frequent times of travel.
print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display the most common month
most_common_month = df['month'].mode()[0]
print('Most Common month: \n', most_common_month)
#display the most common day of week
most_common_day = df['day_of_week'].mode()[0]
print('Most Common Day: \n', most_common_day)
#display the most common start hour
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:\n', most_common_start_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
#Displays statistics on the most popular stations and trip.
print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print('Most Common Start Station:{}\n'.format(most_common_start_station))
#print('Most Common Start Hour:', most_common_start_hour)
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:{}: '.format(most_common_start_hour))
#display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print('Most Common End Station:{}: '.format(most_common_end_station))
#display most frequent combination of start station and end station trip
time_delay_short()
most_common_start_end_station = df[['Start Station', 'End Station']].mode(0)
print('Most Common Start and End Station: \n',most_common_start_end_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
#Displays statistics on the total and average trip duration.
print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# TO DO: display total travel time
Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ' , display_time(Total_travel_time))
time_delay_short()
# TO DO: display mean travel time
Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ', display_time(Mean_travel_time))
time_delay_short()
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
#Displays statistics on bikeshare users.
print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# Display counts of user type
x = 'User Type'
print('\nCount of User Type:\n',df[x].value_counts())
time_delay_short()
# Display counts of gender
y = 'Gender'
print('\nCount of Gender:\n',df[y].value_counts())
# Display earliest, most recent, and most common year of birth
z = 'Birth Year'
currentYear = datetime.now().year
oldest_biker = currentYear - df[z].min()
print('\nOldest User is {} years old!'.format(oldest_biker))
print('Wow that\'s old!')
youngest_biker = currentYear - df[z].max()
print('\nYoungest User is {} years old!'.format(youngest_biker))
print('Wow that\'s young!')
common_year = currentYear - df[z].mode()
print('\nMost common age of users in data set is {} years old'.format(str(common_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_raw_data():
# get user input whether to displays cycle through 5 rows of data
raw_data_display = input("Would you like to see 5 records of the data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
if raw_data_display != 'pass':
i = 5
while raw_data_display !='pass':
print(df.iloc[i-5:i, :])
raw_data_display = input("Would you like to see the next 5 records of raw data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
i = i + 5
else:
print("....skipping ahead to descriptive stats\n")
def drop_na_values():
global df
# get number of rows in dataframe
numOfRows = df.shape[0]
print('\nThe raw data set is {} rows long!\n'.format(numOfRows))
time_delay_short()
print('\nAnalyzing for number of blank fields in the raw dataset...\n')
time_delay_short()
nan_count = df.isnull().sum()
print ('\nNumber of blank fields of each column in our dataset:\n', nan_count)
time_delay_short()
count_of_non_nan = df.count()
print ('\nCount of number of completed fields in our data set:\n', count_of_non_nan)
print ('\nWe will now drop the rows with blanks from the dataset so that the calculated statistics will not be skewed...\n')
df.dropna(axis = 0, inplace = True)
time_delay_short()
numOfRows = df.shape[0]
print('\nThe modified data set is now {} rows long!'.format(numOfRows))
#def time_delay_long():
#to add time delay to slow down the bombard of text to the user (and for fun!)
# time.sleep(1)
# print('...executing task...')
# time.sleep(2)
# print('.........................Complete!\n')
# time.sleep(1)
def | ():
#to add time delay to slow down the bombard of text to the user (and for fun!)
time.sleep(1)
print('...executing task...')
time.sleep(1)
print('....................Complete!\n')
time.sleep(1)
get_filters()
print('\nThe bike data will now be filtered by the following: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
load_data(city,month,day)
drop_na_values()
display_raw_data()
continue_choice = input("Time stats will now be displayed. Press any key to continue or type 'pass' to skip to station stats\n").lower()
if continue_choice != 'pass':
time_stats(df)
else:
print("....skipping time stats\n")
continue_choice = input("Station stats will now be displayed. Press any key to continue or type 'pass' to skip to trip duration stats\n").lower()
if continue_choice != 'pass':
station_stats(df)
else:
print("....skipping station_stats\n")
continue_choice = input("Trip duration stats will now be displayed. Press any key to continue or type 'pass' to skip to trip user stats\n").lower()
if continue_choice != 'pass':
trip_duration_stats(df)
else:
print("....skipping trip duration stats\n")
if city != "washington":
continue_choice = input("User stats will now be displayed. Press any key to continue or type 'pass' to skip\n").lower()
if continue_choice != 'pass':
user_stats(df)
else:
print("....skipping user stats\n")
else:
print('Washington data set contains no gender or user type data therefore there are no user stats to display for this city! T_T )')
#restart code
restart = input("Do you wish to try again? y/n\n").lower()
if restart == 'y':
main()
else:
exit()
main()
| time_delay_short | identifier_name |
bikeshare.py | import time
import pandas as pd
import numpy as np
from datetime import datetime
#from collections import Counter
def main():
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
#start of program after title, loop here if restarting program from the top
#Bicycle Picture
print(' o__ __o ,__o __o __o\n ,>/_ -\<, _-\_<, _`\<,_ _ \<_\n(*)`(*).....O/ O.....(*)/\'(*).....(*)/ (*).....(_)/(_)')
#Project Title
print(' ___ __ / \n| | _ | _ _ __ _ | _ (_ _|_ _ _ __ _ \n|^|(/_ | (_ (_)|||(/_ | (_) __) |_(/_\_/(/_| | _> \n _ __ _ _ o \n|_) o | _ (_ |_ _ __ _ | \ _ _|_ _ |_) __ _ | _ _ _|_\n|_) | |<(/_ __)| |(_| | (/_ |_/(_| |_(_| | | (_)_| (/_(_ |_')
#Welcome Statement
print('\nHello! Welcome to Steven Ling\'s udacity python project! \nLet\'s explore some US bikeshare data!\n\n')
#defining time intervals for display time function
intervals = (
('years',217728000), # 60 * 60 * 24 * 7 * 30 * 12
('months',18144000), # 60 * 60 * 24 * 7 * 30
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
#function to convert seconds to years,months,weeks,days,hours,seconds
def display_time(seconds, granularity=6):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def get_filters():
#City Choice Input
city_choice = input("Which city are you interested in?\n\nChoose a city by entering the corresponding number:\n1 for Chicago or\n2 for New York city or\n3 for Washington?")
global city
if city_choice == '1':
city ='chicago'
print('you have chosen Chicago!\n')
elif city_choice == '2':
city = 'new york city'
print('you have chosen New York city!\n')
elif city_choice == '3':
city = 'washington'
print('you have chosen Washington city!\n') | restart = input("Do you wish to reselect filters? y/n?\n").lower()
if restart == 'y':
get_filters()
else:
exit()
# TO DO: get user input for month (all, january, february, ... , june)
# Month Choice Input
global month
month =()
month_choice = input("Which month are you interested in?\n\nChoose a month by entering the following choices:\n (all, january, february, march, april, may, june) ")
valid_months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
month_choice = month_choice.lower()
if month_choice in valid_months:
month = month_choice
print ('For months, you have selected {}'.format(month))
else:
print('This does not seem to be a valid choice!')
restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
if restart_month == 'y':
get_filters()
else:
exit()
# Get user input for day of the week
global day
day=()
day_choice = input("which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
day_choice = day_choice.lower()
if day_choice in valid_days:
day = day_choice
print ('For days, you have selected {}'.format(day))
else:
print('This does not seem to be a valid choice!')
restart_days = input("Do you wish to repick filters? y/n?\n").lower()
if restart_days == 'y':
get_filters()
else:
exit()
print('-'*40)
return city, month, day
def load_data(city, month, day):
# load data file into a dataframe
global df
df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
# convert the Start Time and end Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
# extract month and day of week from Start Time to create new columns
df['Start_Hour'] = df['Start Time'].dt.hour
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['Start Time'] = df['Start Time'].dt.time
df['End Time'] = df['End Time'].dt.time
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def time_stats(df):
#Displays statistics on the most frequent times of travel.
print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display the most common month
most_common_month = df['month'].mode()[0]
print('Most Common month: \n', most_common_month)
#display the most common day of week
most_common_day = df['day_of_week'].mode()[0]
print('Most Common Day: \n', most_common_day)
#display the most common start hour
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:\n', most_common_start_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
#Displays statistics on the most popular stations and trip.
print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print('Most Common Start Station:{}\n'.format(most_common_start_station))
#print('Most Common Start Hour:', most_common_start_hour)
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:{}: '.format(most_common_start_hour))
#display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print('Most Common End Station:{}: '.format(most_common_end_station))
#display most frequent combination of start station and end station trip
time_delay_short()
most_common_start_end_station = df[['Start Station', 'End Station']].mode(0)
print('Most Common Start and End Station: \n',most_common_start_end_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
#Displays statistics on the total and average trip duration.
print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# TO DO: display total travel time
Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ' , display_time(Total_travel_time))
time_delay_short()
# TO DO: display mean travel time
Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ', display_time(Mean_travel_time))
time_delay_short()
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
#Displays statistics on bikeshare users.
print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# Display counts of user type
x = 'User Type'
print('\nCount of User Type:\n',df[x].value_counts())
time_delay_short()
# Display counts of gender
y = 'Gender'
print('\nCount of Gender:\n',df[y].value_counts())
# Display earliest, most recent, and most common year of birth
z = 'Birth Year'
currentYear = datetime.now().year
oldest_biker = currentYear - df[z].min()
print('\nOldest User is {} years old!'.format(oldest_biker))
print('Wow that\'s old!')
youngest_biker = currentYear - df[z].max()
print('\nYoungest User is {} years old!'.format(youngest_biker))
print('Wow that\'s young!')
common_year = currentYear - df[z].mode()
print('\nMost common age of users in data set is {} years old'.format(str(common_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_raw_data():
# get user input whether to displays cycle through 5 rows of data
raw_data_display = input("Would you like to see 5 records of the data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
if raw_data_display != 'pass':
i = 5
while raw_data_display !='pass':
print(df.iloc[i-5:i, :])
raw_data_display = input("Would you like to see the next 5 records of raw data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
i = i + 5
else:
print("....skipping ahead to descriptive stats\n")
def drop_na_values():
global df
# get number of rows in dataframe
numOfRows = df.shape[0]
print('\nThe raw data set is {} rows long!\n'.format(numOfRows))
time_delay_short()
print('\nAnalyzing for number of blank fields in the raw dataset...\n')
time_delay_short()
nan_count = df.isnull().sum()
print ('\nNumber of blank fields of each column in our dataset:\n', nan_count)
time_delay_short()
count_of_non_nan = df.count()
print ('\nCount of number of completed fields in our data set:\n', count_of_non_nan)
print ('\nWe will now drop the rows with blanks from the dataset so that the calculated statistics will not be skewed...\n')
df.dropna(axis = 0, inplace = True)
time_delay_short()
numOfRows = df.shape[0]
print('\nThe modified data set is now {} rows long!'.format(numOfRows))
#def time_delay_long():
#to add time delay to slow down the bombard of text to the user (and for fun!)
# time.sleep(1)
# print('...executing task...')
# time.sleep(2)
# print('.........................Complete!\n')
# time.sleep(1)
def time_delay_short():
#to add time delay to slow down the bombard of text to the user (and for fun!)
time.sleep(1)
print('...executing task...')
time.sleep(1)
print('....................Complete!\n')
time.sleep(1)
get_filters()
print('\nThe bike data will now be filtered by the following: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
load_data(city,month,day)
drop_na_values()
display_raw_data()
continue_choice = input("Time stats will now be displayed. Press any key to continue or type 'pass' to skip to station stats\n").lower()
if continue_choice != 'pass':
time_stats(df)
else:
print("....skipping time stats\n")
continue_choice = input("Station stats will now be displayed. Press any key to continue or type 'pass' to skip to trip duration stats\n").lower()
if continue_choice != 'pass':
station_stats(df)
else:
print("....skipping station_stats\n")
continue_choice = input("Trip duration stats will now be displayed. Press any key to continue or type 'pass' to skip to trip user stats\n").lower()
if continue_choice != 'pass':
trip_duration_stats(df)
else:
print("....skipping trip duration stats\n")
if city != "washington":
continue_choice = input("User stats will now be displayed. Press any key to continue or type 'pass' to skip\n").lower()
if continue_choice != 'pass':
user_stats(df)
else:
print("....skipping user stats\n")
else:
print('Washington data set contains no gender or user type data therefore there are no user stats to display for this city! T_T )')
#restart code
restart = input("Do you wish to try again? y/n\n").lower()
if restart == 'y':
main()
else:
exit()
main() | else:
print('This does not seem to be a valid choice!') | random_line_split |
bikeshare.py | import time
import pandas as pd
import numpy as np
from datetime import datetime
#from collections import Counter
def main():
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
#start of program after title, loop here if restarting program from the top
#Bicycle Picture
print(' o__ __o ,__o __o __o\n ,>/_ -\<, _-\_<, _`\<,_ _ \<_\n(*)`(*).....O/ O.....(*)/\'(*).....(*)/ (*).....(_)/(_)')
#Project Title
print(' ___ __ / \n| | _ | _ _ __ _ | _ (_ _|_ _ _ __ _ \n|^|(/_ | (_ (_)|||(/_ | (_) __) |_(/_\_/(/_| | _> \n _ __ _ _ o \n|_) o | _ (_ |_ _ __ _ | \ _ _|_ _ |_) __ _ | _ _ _|_\n|_) | |<(/_ __)| |(_| | (/_ |_/(_| |_(_| | | (_)_| (/_(_ |_')
#Welcome Statement
print('\nHello! Welcome to Steven Ling\'s udacity python project! \nLet\'s explore some US bikeshare data!\n\n')
#defining time intervals for display time function
intervals = (
('years',217728000), # 60 * 60 * 24 * 7 * 30 * 12
('months',18144000), # 60 * 60 * 24 * 7 * 30
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
#function to convert seconds to years,months,weeks,days,hours,seconds
def display_time(seconds, granularity=6):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def get_filters():
#City Choice Input
city_choice = input("Which city are you interested in?\n\nChoose a city by entering the corresponding number:\n1 for Chicago or\n2 for New York city or\n3 for Washington?")
global city
if city_choice == '1':
city ='chicago'
print('you have chosen Chicago!\n')
elif city_choice == '2':
city = 'new york city'
print('you have chosen New York city!\n')
elif city_choice == '3':
city = 'washington'
print('you have chosen Washington city!\n')
else:
|
# TO DO: get user input for month (all, january, february, ... , june)
# Month Choice Input
global month
month =()
month_choice = input("Which month are you interested in?\n\nChoose a month by entering the following choices:\n (all, january, february, march, april, may, june) ")
valid_months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
month_choice = month_choice.lower()
if month_choice in valid_months:
month = month_choice
print ('For months, you have selected {}'.format(month))
else:
print('This does not seem to be a valid choice!')
restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
if restart_month == 'y':
get_filters()
else:
exit()
# Get user input for day of the week
global day
day=()
day_choice = input("which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
day_choice = day_choice.lower()
if day_choice in valid_days:
day = day_choice
print ('For days, you have selected {}'.format(day))
else:
print('This does not seem to be a valid choice!')
restart_days = input("Do you wish to repick filters? y/n?\n").lower()
if restart_days == 'y':
get_filters()
else:
exit()
print('-'*40)
return city, month, day
def load_data(city, month, day):
# load data file into a dataframe
global df
df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
# convert the Start Time and end Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
# extract month and day of week from Start Time to create new columns
df['Start_Hour'] = df['Start Time'].dt.hour
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['Start Time'] = df['Start Time'].dt.time
df['End Time'] = df['End Time'].dt.time
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def time_stats(df):
#Displays statistics on the most frequent times of travel.
print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display the most common month
most_common_month = df['month'].mode()[0]
print('Most Common month: \n', most_common_month)
#display the most common day of week
most_common_day = df['day_of_week'].mode()[0]
print('Most Common Day: \n', most_common_day)
#display the most common start hour
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:\n', most_common_start_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
#Displays statistics on the most popular stations and trip.
print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print('Most Common Start Station:{}\n'.format(most_common_start_station))
#print('Most Common Start Hour:', most_common_start_hour)
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:{}: '.format(most_common_start_hour))
#display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print('Most Common End Station:{}: '.format(most_common_end_station))
#display most frequent combination of start station and end station trip
time_delay_short()
most_common_start_end_station = df[['Start Station', 'End Station']].mode(0)
print('Most Common Start and End Station: \n',most_common_start_end_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
#Displays statistics on the total and average trip duration.
print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# TO DO: display total travel time
Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ' , display_time(Total_travel_time))
time_delay_short()
# TO DO: display mean travel time
Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ', display_time(Mean_travel_time))
time_delay_short()
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
#Displays statistics on bikeshare users.
print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# Display counts of user type
x = 'User Type'
print('\nCount of User Type:\n',df[x].value_counts())
time_delay_short()
# Display counts of gender
y = 'Gender'
print('\nCount of Gender:\n',df[y].value_counts())
# Display earliest, most recent, and most common year of birth
z = 'Birth Year'
currentYear = datetime.now().year
oldest_biker = currentYear - df[z].min()
print('\nOldest User is {} years old!'.format(oldest_biker))
print('Wow that\'s old!')
youngest_biker = currentYear - df[z].max()
print('\nYoungest User is {} years old!'.format(youngest_biker))
print('Wow that\'s young!')
common_year = currentYear - df[z].mode()
print('\nMost common age of users in data set is {} years old'.format(str(common_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_raw_data():
# get user input whether to displays cycle through 5 rows of data
raw_data_display = input("Would you like to see 5 records of the data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
if raw_data_display != 'pass':
i = 5
while raw_data_display !='pass':
print(df.iloc[i-5:i, :])
raw_data_display = input("Would you like to see the next 5 records of raw data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
i = i + 5
else:
print("....skipping ahead to descriptive stats\n")
def drop_na_values():
global df
# get number of rows in dataframe
numOfRows = df.shape[0]
print('\nThe raw data set is {} rows long!\n'.format(numOfRows))
time_delay_short()
print('\nAnalyzing for number of blank fields in the raw dataset...\n')
time_delay_short()
nan_count = df.isnull().sum()
print ('\nNumber of blank fields of each column in our dataset:\n', nan_count)
time_delay_short()
count_of_non_nan = df.count()
print ('\nCount of number of completed fields in our data set:\n', count_of_non_nan)
print ('\nWe will now drop the rows with blanks from the dataset so that the calculated statistics will not be skewed...\n')
df.dropna(axis = 0, inplace = True)
time_delay_short()
numOfRows = df.shape[0]
print('\nThe modified data set is now {} rows long!'.format(numOfRows))
#def time_delay_long():
#to add time delay to slow down the bombard of text to the user (and for fun!)
# time.sleep(1)
# print('...executing task...')
# time.sleep(2)
# print('.........................Complete!\n')
# time.sleep(1)
def time_delay_short():
#to add time delay to slow down the bombard of text to the user (and for fun!)
time.sleep(1)
print('...executing task...')
time.sleep(1)
print('....................Complete!\n')
time.sleep(1)
get_filters()
print('\nThe bike data will now be filtered by the following: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
load_data(city,month,day)
drop_na_values()
display_raw_data()
continue_choice = input("Time stats will now be displayed. Press any key to continue or type 'pass' to skip to station stats\n").lower()
if continue_choice != 'pass':
time_stats(df)
else:
print("....skipping time stats\n")
continue_choice = input("Station stats will now be displayed. Press any key to continue or type 'pass' to skip to trip duration stats\n").lower()
if continue_choice != 'pass':
station_stats(df)
else:
print("....skipping station_stats\n")
continue_choice = input("Trip duration stats will now be displayed. Press any key to continue or type 'pass' to skip to trip user stats\n").lower()
if continue_choice != 'pass':
trip_duration_stats(df)
else:
print("....skipping trip duration stats\n")
if city != "washington":
continue_choice = input("User stats will now be displayed. Press any key to continue or type 'pass' to skip\n").lower()
if continue_choice != 'pass':
user_stats(df)
else:
print("....skipping user stats\n")
else:
print('Washington data set contains no gender or user type data therefore there are no user stats to display for this city! T_T )')
#restart code
restart = input("Do you wish to try again? y/n\n").lower()
if restart == 'y':
main()
else:
exit()
main()
| print('This does not seem to be a valid choice!')
restart = input("Do you wish to reselect filters? y/n?\n").lower()
if restart == 'y':
get_filters()
else:
exit() | conditional_block |
bikeshare.py | import time
import pandas as pd
import numpy as np
from datetime import datetime
#from collections import Counter
def main():
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
#start of program after title, loop here if restarting program from the top
#Bicycle Picture
print(' o__ __o ,__o __o __o\n ,>/_ -\<, _-\_<, _`\<,_ _ \<_\n(*)`(*).....O/ O.....(*)/\'(*).....(*)/ (*).....(_)/(_)')
#Project Title
print(' ___ __ / \n| | _ | _ _ __ _ | _ (_ _|_ _ _ __ _ \n|^|(/_ | (_ (_)|||(/_ | (_) __) |_(/_\_/(/_| | _> \n _ __ _ _ o \n|_) o | _ (_ |_ _ __ _ | \ _ _|_ _ |_) __ _ | _ _ _|_\n|_) | |<(/_ __)| |(_| | (/_ |_/(_| |_(_| | | (_)_| (/_(_ |_')
#Welcome Statement
print('\nHello! Welcome to Steven Ling\'s udacity python project! \nLet\'s explore some US bikeshare data!\n\n')
#defining time intervals for display time function
intervals = (
('years',217728000), # 60 * 60 * 24 * 7 * 30 * 12
('months',18144000), # 60 * 60 * 24 * 7 * 30
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
#function to convert seconds to years,months,weeks,days,hours,seconds
def display_time(seconds, granularity=6):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def get_filters():
#City Choice Input
city_choice = input("Which city are you interested in?\n\nChoose a city by entering the corresponding number:\n1 for Chicago or\n2 for New York city or\n3 for Washington?")
global city
if city_choice == '1':
city ='chicago'
print('you have chosen Chicago!\n')
elif city_choice == '2':
city = 'new york city'
print('you have chosen New York city!\n')
elif city_choice == '3':
city = 'washington'
print('you have chosen Washington city!\n')
else:
print('This does not seem to be a valid choice!')
restart = input("Do you wish to reselect filters? y/n?\n").lower()
if restart == 'y':
get_filters()
else:
exit()
# TO DO: get user input for month (all, january, february, ... , june)
# Month Choice Input
global month
month =()
month_choice = input("Which month are you interested in?\n\nChoose a month by entering the following choices:\n (all, january, february, march, april, may, june) ")
valid_months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
month_choice = month_choice.lower()
if month_choice in valid_months:
month = month_choice
print ('For months, you have selected {}'.format(month))
else:
print('This does not seem to be a valid choice!')
restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
if restart_month == 'y':
get_filters()
else:
exit()
# Get user input for day of the week
global day
day=()
day_choice = input("which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
day_choice = day_choice.lower()
if day_choice in valid_days:
day = day_choice
print ('For days, you have selected {}'.format(day))
else:
print('This does not seem to be a valid choice!')
restart_days = input("Do you wish to repick filters? y/n?\n").lower()
if restart_days == 'y':
get_filters()
else:
exit()
print('-'*40)
return city, month, day
def load_data(city, month, day):
# load data file into a dataframe
|
def time_stats(df):
#Displays statistics on the most frequent times of travel.
print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display the most common month
most_common_month = df['month'].mode()[0]
print('Most Common month: \n', most_common_month)
#display the most common day of week
most_common_day = df['day_of_week'].mode()[0]
print('Most Common Day: \n', most_common_day)
#display the most common start hour
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:\n', most_common_start_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
#Displays statistics on the most popular stations and trip.
print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print('Most Common Start Station:{}\n'.format(most_common_start_station))
#print('Most Common Start Hour:', most_common_start_hour)
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:{}: '.format(most_common_start_hour))
#display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print('Most Common End Station:{}: '.format(most_common_end_station))
#display most frequent combination of start station and end station trip
time_delay_short()
most_common_start_end_station = df[['Start Station', 'End Station']].mode(0)
print('Most Common Start and End Station: \n',most_common_start_end_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
#Displays statistics on the total and average trip duration.
print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# TO DO: display total travel time
Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ' , display_time(Total_travel_time))
time_delay_short()
# TO DO: display mean travel time
Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ', display_time(Mean_travel_time))
time_delay_short()
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
#Displays statistics on bikeshare users.
print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# Display counts of user type
x = 'User Type'
print('\nCount of User Type:\n',df[x].value_counts())
time_delay_short()
# Display counts of gender
y = 'Gender'
print('\nCount of Gender:\n',df[y].value_counts())
# Display earliest, most recent, and most common year of birth
z = 'Birth Year'
currentYear = datetime.now().year
oldest_biker = currentYear - df[z].min()
print('\nOldest User is {} years old!'.format(oldest_biker))
print('Wow that\'s old!')
youngest_biker = currentYear - df[z].max()
print('\nYoungest User is {} years old!'.format(youngest_biker))
print('Wow that\'s young!')
common_year = currentYear - df[z].mode()
print('\nMost common age of users in data set is {} years old'.format(str(common_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_raw_data():
# get user input whether to displays cycle through 5 rows of data
raw_data_display = input("Would you like to see 5 records of the data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
if raw_data_display != 'pass':
i = 5
while raw_data_display !='pass':
print(df.iloc[i-5:i, :])
raw_data_display = input("Would you like to see the next 5 records of raw data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
i = i + 5
else:
print("....skipping ahead to descriptive stats\n")
def drop_na_values():
global df
# get number of rows in dataframe
numOfRows = df.shape[0]
print('\nThe raw data set is {} rows long!\n'.format(numOfRows))
time_delay_short()
print('\nAnalyzing for number of blank fields in the raw dataset...\n')
time_delay_short()
nan_count = df.isnull().sum()
print ('\nNumber of blank fields of each column in our dataset:\n', nan_count)
time_delay_short()
count_of_non_nan = df.count()
print ('\nCount of number of completed fields in our data set:\n', count_of_non_nan)
print ('\nWe will now drop the rows with blanks from the dataset so that the calculated statistics will not be skewed...\n')
df.dropna(axis = 0, inplace = True)
time_delay_short()
numOfRows = df.shape[0]
print('\nThe modified data set is now {} rows long!'.format(numOfRows))
#def time_delay_long():
#to add time delay to slow down the bombard of text to the user (and for fun!)
# time.sleep(1)
# print('...executing task...')
# time.sleep(2)
# print('.........................Complete!\n')
# time.sleep(1)
def time_delay_short():
#to add time delay to slow down the bombard of text to the user (and for fun!)
time.sleep(1)
print('...executing task...')
time.sleep(1)
print('....................Complete!\n')
time.sleep(1)
get_filters()
print('\nThe bike data will now be filtered by the following: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
load_data(city,month,day)
drop_na_values()
display_raw_data()
continue_choice = input("Time stats will now be displayed. Press any key to continue or type 'pass' to skip to station stats\n").lower()
if continue_choice != 'pass':
time_stats(df)
else:
print("....skipping time stats\n")
continue_choice = input("Station stats will now be displayed. Press any key to continue or type 'pass' to skip to trip duration stats\n").lower()
if continue_choice != 'pass':
station_stats(df)
else:
print("....skipping station_stats\n")
continue_choice = input("Trip duration stats will now be displayed. Press any key to continue or type 'pass' to skip to trip user stats\n").lower()
if continue_choice != 'pass':
trip_duration_stats(df)
else:
print("....skipping trip duration stats\n")
if city != "washington":
continue_choice = input("User stats will now be displayed. Press any key to continue or type 'pass' to skip\n").lower()
if continue_choice != 'pass':
user_stats(df)
else:
print("....skipping user stats\n")
else:
print('Washington data set contains no gender or user type data therefore there are no user stats to display for this city! T_T )')
#restart code
restart = input("Do you wish to try again? y/n\n").lower()
if restart == 'y':
main()
else:
exit()
main()
| global df
df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
# convert the Start Time and end Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
# extract month and day of week from Start Time to create new columns
df['Start_Hour'] = df['Start Time'].dt.hour
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
df['Start Time'] = df['Start Time'].dt.time
df['End Time'] = df['End Time'].dt.time
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df | identifier_body |
MyGraph.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 01:33:42 2017
@author: miguelrocha
"""
## Graph represented as adjacency list using a dictionary
## keys are vertices
## values of the dictionary represent the list of adjacent vertices of the key node
class MyGraph:
def __init__(self, g = {}):
''' Constructor - takes dictionary to fill the graph as input; default is empty dictionary '''
self.graph = g #unico atributo (g = dicionario)
def print_graph(self):
''' Prints the content of the graph as adjacency list '''
for v in self.graph.keys():#para cada key no dicionario (vertice)
print (v, " -> ", self.graph[v])#para cada key no dicionario (vertice)
## get basic info
def get_nodes(self):#vai buscar os vetices(nos)
''' Returns list of nodes in the graph '''
return list(self.graph.keys())#devolve uma lista com os vertices
def get_edges(self): #buscar as arestas(pares de vertices, ou seja, uma aresta liga dois vertices)
''' Returns edges in the graph as a list of tuples (origin, destination) '''
edges = []
for v in self.graph.keys():#para cada key v
for d in self.graph[v]:#para cada value de v
edges.append((v,d))#acrescentar a lista as arestas (vertice v que se ligou ao vertice x)
return edges#devolver a lista
def size(self):##tamanho do grafo
''' Returns size of the graph : number of nodes, number of edges '''
return len(self.get_nodes()), len(self.get_edges())#usa o get_nodes e o get_edges para ter o tamanho do grafo
## add nodes and edges
def add_vertex(self, v):#adicionar vertice(no)
''' Add a vertex to the graph; tests if vertex exists not adding if it does '''
if v not in self.graph.keys():
self.graph[v] = []#adicionar uma key ao dicitionary
def add_edge(self, o, d):#(o,d) vertices
''' Add edge to the graph; if vertices do not exist, they are added to the graph '''
if o not in self.graph.keys():#confirmar se os vertices o e d nao estao no dicionario
self.add_vertex(o) #adicionar vertice o
if d not in self.graph.keys():
self.add_vertex(d) #adicionar vertice d
if d not in self.graph[o]:#confirmar se d e um value de o
self.graph[o].append(d) #adicionar o value d ao o
## successors, predecessors, adjacent nodes
def get_successors(self, v):
return list(self.graph[v]) # needed to avoid list being overwritten of result of the function is used
def get_predecessors(self, v):
pre = []#abrir lista de antecessor
for k in self.graph.keys(): #percorrer as keys do dicionario
if v in self.graph[k]: #OU if self.graph[i]==v: #verificar se v e um value de i
pre.append(k)#adicionar a key com value v a lista
return pre #retornar a lista com os antecessor
def get_adjacents(self, v):
'''Da lista de vertices(nos) adjacentes do vertice(no) v ->dois vertices sao adjacentes se um e sucessor do outro'''
suc = self.get_successors(v)#buscar os sucessores de v
pred = self.get_predecessors(v)#buscar os antecessor de v
res = pred #res e igual a lista de antecessores (podia ser ao contrario)
for p in suc: #percorrer a lista de sucessores
if p not in res: #verificar se nao esta na lista
res.append(p)#adicionar todos os sucessores de v a lista de antecessores se nao estiver na lista
return res #retornar res
## degrees
def out_degree(self, v):#calcula grau de saída do vertice(no) v
#self.get_successors(v) -> lista de todos os arcos que saiem do vertice v
return len(self.graph[v])#contagem de todos os arcos que saiem do vertice v
def in_degree(self, v):#calcula grau de entrada do vertice(no) v
#self.get_predecessors(v) -> lista de todos os arcos que entram do vertice v
return len(self.get_predecessors(v))
def degree(self, v):#O grau de um vértice e dado pelo numero de arestas que lhe sao incidentes
#self.get_adjacents(v) -> ver os sucessores e os predecessores para dar lista de adjacentes
return len(self.get_adjacents(v))#contar os adjacentes da lista
def all_degrees(self, deg_type = "inout"):#tudo o que sai e tudo o que entra
''' Cálculo de graus de entrada e saída (ou ambos) para todos os nós da rede.
deg_type can be "in", "out", or "inout" '''
degs = {}
for v in self.graph.keys():#para cada key no grafo
if deg_type == "out" or deg_type == "inout":#se for graus de saida ou de entrada/saida
degs[v] = len(self.graph[v])#inicializar o número do dicionario com o valor de graus de saida
else: degs[v] = 0
if deg_type == "in" or deg_type == "inout":#se for graus de entrada ou de entrada/saida
for v in self.graph.keys():#para cada key (metabolito ou reação) no grafo
for d in self.graph[v]:#para cada value de v
if deg_type == "in" or v not in self.graph[d]:#se in ou v, não for um value de d no grafo
#-> se nao estiver nos values de d quer dizer que é de entrada(antecessor) e nao de saida(sucessor)
degs[d] = degs[d] + 1 #adicionar + 1 ao value de d no dicionario degs
return degs #retorna todas as keys com os seus respetivos graus (entrada + saida)
def highest_degrees(self, all_deg= None, deg_type = "inout", top= 10):#vai ver o top 10
'''Vai buscar o top 10 de nos com maior grau'''
if all_deg is None: #percorrer todos graus
all_deg = self.all_degrees(deg_type)#ir buscar o dicionario a all_degrees
ord_deg = sorted(list(all_deg.items()), key=lambda x : x[1], reverse = True)
#por por ordem o dicionario do mmaior para o mais pequeno, neste caso trasnforma em lista .items para por em tuplo (key,value) so assim consegue
#por por ordem os graus
return list(map(lambda x:x[0], ord_deg[:top]))#retorna uma lista com os nos com os 10 primeiros
#x[0]-> key; x[1]-> values
## topological metrics over degrees
def mean_degree(self, deg_type = "inout"):#media dos graus
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
return sum(degs.values()) / float(len(degs))#soma de todos os valores do dicionario e fazer a media de nos do grafico
def prob_degree(self, deg_type = "inout"):#probabilidade desse grau existir no grafo
'''Para cada grau quantos nós é que tenho'''
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
res = {}#abrir dicionario
for k in degs.keys():#percorrer todas as keys de degs
if degs[k] in res.keys():#ver se tem um determinado k(grau) nas keys de res
res[degs[k]] += 1 #adicionar esse k + 1 ao dicionario res
else:#caso contrario
res[degs[k]] = 1 | n res.keys():
res[k] /= float(len(degs))#probabilidade dos graus
return res
'''EXEMPLO:
c ={'a':1,'b':1,'c':3}
v={}
for k in c.keys():
if c[k] in v.keys():
v[c[k]] += 1
else:
v[c[k]] =1
v ={1: 2, 3: 1} '''
## BFS and DFS searches
def reachable_bfs(self, v):
'''Começa pelo nó origem, depois explora todos os seus sucessores,
depois os sucessores destes, e assim sucessivamente até todos os nós
atingíveis terem sido explorados'''
'''de cima para baixo'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node) #se o node for diferente de v adicionar a res
for elem in self.graph[node]:#ver os values de node
if elem not in res and elem not in l and elem != node:#se esse value nao estiver em res, l e for diferente de node
l.append(elem)#adicionar a l
return res
def reachable_dfs(self, v):
'''Começa pelo nó origem e explora o 1º sucessor,
seguido pelo 1º sucessor deste e assim sucessivamente
até não haver mais sucessores e ter que se fazer “backtracking”'''
'''da esquerda para a direita'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node)
s = 0 #contagem
for elem in self.graph[node]:
if elem not in res and elem not in l:
l.insert(s, elem)#s=posicao, elemento
s += 1
return res
def distance(self, s, d):#retorna distancia entre vertices(nos) s e d
if s == d:
return 0
l = [(s,0)]#lista com o no e a distancia de origem
visited = [s]#vertices visitados para obter o caminho
while len(l) > 0:
node, dist = l.pop(0)#removes the item at the given index from the list and returns the removed item(isolar o 1º no na queue)
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return dist + 1 #se o primeiro value for d retornar logo a distancia
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,dist+1))#vamos adicionar a lista l (caminho)
visited.append(elem)#adicionar o no (que ja foi visitado)
return None #retorna None se nao e atingivel
def shortest_path(self, s, d):#retorna caminho mais curto entre s e d (lista de nos por onde passa)
'''Retorna caminho mais curto entre s e d (lista de nós por onde passa)'''
if s == d: return 0
l = [(s,[])]#lista de nos por onde passa que comeca na de origem
visited = [s]#vertices visitados (nos atingidos)
while len(l) > 0:
node, preds = l.pop(0)#removes the item at the given index from the list and returns the removed item
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return preds+[node,elem] #se o primeiro for logo d retorna o caminho mais curto
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,preds+[node]))#adicionar a l
visited.append(elem)#acrescentar vertice a lista de vetices visitados
return None#retorna None se nao e atingivel o caminho mais curto
def reachable_with_dist(self, s):
'''Retorna lista de nós atingíveis a partir de s com respetiva distância(lista de pares nó, distância)'''
#na primeira iteracao faz o for logo
res = []#lista de nós atingíveis a partir de s com respetiva distância
l = [(s,0)] #lista com tuplo com s e a distancia de s a s (0)
while len(l) > 0:
node, dist = l.pop(0)
if node != s: #vai ver se e diferente de s
res.append((node,dist))# nao conta o s
for elem in self.graph[node]:#vai ver onde e que o node s se esta a ligar
if not is_in_tuple_list(l,elem) and not is_in_tuple_list(res,elem): #vai ver se o p se encontra dentro de l ou em res
l.append((elem,dist+1))#adiciona o vertice a que se liga
return res
## mean distances ignoring unreachable nodes
def mean_distances(self):
tot = 0 #total
num_reachable = 0 #numero de vetores ligados entre si
for k in self.graph.keys():
distsk = self.reachable_with_dist(k)#[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
for _, dist in distsk:
tot += dist
num_reachable += len(distsk)#todas as proporçoes de nos atingiveis
meandist = float(tot) / num_reachable #media das distancias de ligacao
n = len(self.get_nodes()) #contagem de todos os nos que tem
return meandist, float(num_reachable)/((n-1)*n) #meandist->distância média,num_reachable proporção de nos atingiveis(num_reachable) / nº de ligacoes esperadas ((n-1)*n))
def closeness_centrality(self, node):#node = s
'''Baseadas nos nós que estão mais próximos dos restantes'''
dist = self.reachable_with_dist(node) #[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
if len(dist)==0:
return 0.0 #centralidade mais proxima e 0
s = 0.0 #distancia
for d in dist: #d = ( , )
s += d[1] #tuplo (t,6)
return len(dist) / s #todos os nos a dividir pela distancia total
#Centralidade mais proxima = todos os tuplos (vertice com ligacao a esse vertice)/distancia total
def highest_closeness(self, top = 10):
'''Centralidade mais alta -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a centralidade mais proxima
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.closeness_centrality(k)# o value de k = a centralidade mais proxima da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem a centralidade mais proxima(transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def betweenness_centrality(self, node):
'''Baseadas na proporção de caminhos mais curtos entre todos os nós que passam pelo nó'''
'''Soma de todas as distancia possiveis '''
total_sp = 0 #todos os caminhos curtos que existem
sps_with_node = 0 #caminhos curtos que passam pelo node
for s in self.graph.keys():
for t in self.graph.keys():
if s != t and s != node and t != node:
sp = self.shortest_path(s, t)#retorna os caminhos dos nos de s a t
if sp is not None:# ou seja, se existir um caminho
total_sp += 1 #somar 1 aos caminhos totais
if node in sp: #se node se encontrar no sp
sps_with_node += 1 #ver se nesse caminho o meu node existe
return sps_with_node / total_sp #caminhos curtos que passam pelo node/ caminhos curtos totais
def highest_betweenness(self, top = 10):
'''Centralidade mais alta no betweenes -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a betweenness_centrality
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.betweenness_centrality(k)# o value de k = a cbetweenness_centrality da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem da betweenness_centrality (transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def centralidade_de_grau_no(self,v):
'''A centralidade de grau de um vertice e dada pelo seu grau'''
alldegree = self.all_degrees()
return(alldegree[v]) #vai buscar o grau do no v
## cycles
def node_has_cycle (self, v):
l = [v]
res = False
visited = [v]
while len(l) > 0:
node = l.pop(0)
for elem in self.graph[node]:
if elem == v: return True
elif elem not in visited:
l.append(elem)
visited.append(elem)
return res
def has_cycle(self):
res = False
for v in self.graph.keys():
if self.node_has_cycle(v): return True
return res
## clustering
def clustering_coef(self, v):#nova função
adjs = self.get_adjacents(v) #lista de vertices
if len(adjs) <=1:#se isto acontecer quer dizer que nao existe agrupamento
return 0.0 #entao o coeficiente e 0
ligs = 0 #ligacoes
for i in adjs:#vai ao primeiro elemento de adjs (um no)
for j in adjs:# vai ao primeiro elemento de adjs(um no)
if i != j:#na primeira iteracao nao se vai verificar isto entao volta para o inicio do for
if j in self.graph[i] or i in self.graph[j]: #se j for um value de i e se i for um value de j
ligs = ligs + 1 #adicionar aos ligantes
return float(ligs)/(len(adjs)*(len(adjs)-1))#nº de arcos existentes entre vizinhos do no / nº total de arcos que poderiam existir entre vizinhos do no
'''EXEMPLO:
l = [1,2,3]
for i in l:
for j in l:
if i != j:
print('soma:',i+j)
RESULTADO:
i: 1,
j: 1, j: 2 -> soma: 3
j: 3 -> soma: 4
i: 2
j: 1-> soma: 3
j: 2, j: 3 -> soma: 5'''
def all_clustering_coefs(self):#nova função
ccs = {}#dicionario com todos os coeficientes de clustering
for k in self.graph.keys():#percorrer os nos
ccs[k] = self.clustering_coef(k)#adicionar ao k(no) o seu valor de clustering
return ccs
def mean_clustering_coef(self):#nova função
'''Média sobre todos os nós'''
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
return sum(ccs.values()) / float(len(ccs))# soma dos values/total de elemntos em ccs
def mean_clustering_perdegree(self, deg_type = "inout"):#nova função
'''Média dos coeficientes considerando nós de grau k.'''
degs = self.all_degrees(deg_type)#dicionario com as keys com os seus respetivos graus (entrada + saida) -> {no: graus(entrada + saida)}
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
degs_k = {}#{grau: no}
for k in degs.keys():#percorrer os no
if degs[k] in degs_k.keys(): #se o degs[k] (value- graus) for uma key em degs_k
degs_k[degs[k]].append(k)#adicionar a key de degs_k (grau) o value k (no)
else: degs_k[degs[k]] = [k]#caso contrario adicionar a key (degs[k]) o value k
ck = {}#{grau: media coeficiente}
for k in degs_k.keys():#para cada grau(key) em degs_k
tot = 0
for v in degs_k[k]:#para cada no(value) em degs_k -> buscar todos os nos com aquele grau
tot += ccs[v]# buscar o coeficiente de todos os nos com aquele grau
ck[k] = float(tot) / len(degs_k[k])#media do clustering, adicionar ao dicionario o {grau: media do coeficiente por grau}
return ck
def is_in_tuple_list(tl, val):
res = False
for (x,y) in tl:
if val == x: return True
return res
if __name__ == "__main__":
gr = MyGraph()
gr.add_vertex(1)
gr.add_vertex(2)
gr.add_vertex(3)
gr.add_vertex(4)
gr.add_edge(1,2)
gr.add_edge(2,3)
gr.add_edge(3,2)
gr.add_edge(3,4)
gr.add_edge(4,2)
gr.print_graph()
print(gr.size())
print (gr.get_successors(2))
print (gr.get_predecessors(2))
print("ADJACENTES:")
print (gr.get_adjacents(2))
print (gr.in_degree(2))
print (gr.out_degree(2))
print (gr.degree(2))
print(gr.all_degrees("inout"))
print(gr.all_degrees("in"))
print(gr.all_degrees("out"))
gr2 = MyGraph({1:[2,3,4], 2:[5,6],3:[6,8],4:[8],5:[7],6:[],7:[],8:[]})
print(gr2.reachable_bfs(1))
print(gr2.reachable_dfs(1))
print(gr2.distance(1,7))
print(gr2.shortest_path(1,7))
print(gr2.distance(1,8))
print(gr2.shortest_path(1,8))
print(gr2.distance(6,1))
print(gr2.shortest_path(6,1))
print(gr2.reachable_with_dist(1))
print(gr.has_cycle())
print(gr2.has_cycle())
print(gr.mean_degree())
print(gr.prob_degree())
print(gr.mean_distances())
print("CLUSTERING COEF:")
print (gr.clustering_coef(1))
print("CLUSTERING COEF:")
print (gr.clustering_coef(2))
| #adicionar ao dicionario esse k(grau) = 1
#degs[k]= key e res[degs[k]] = value
for k i | conditional_block |
MyGraph.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 01:33:42 2017
@author: miguelrocha
"""
## Graph represented as adjacency list using a dictionary
## keys are vertices
## values of the dictionary represent the list of adjacent vertices of the key node
class MyGraph:
def __init__(self, g = {}):
''' Constructor - takes dictionary to fill the graph as input; default is empty dictionary '''
self.graph = g #unico atributo (g = dicionario)
def print_graph(self):
''' Prints the content of the graph as adjacency list '''
for v in self.graph.keys():#para cada key no dicionario (vertice)
print (v, " -> ", self.graph[v])#para cada key no dicionario (vertice)
## get basic info
def get_nodes(self):#vai buscar os vetices(nos)
''' Returns list of nodes in the graph '''
return list(self.graph.keys())#devolve uma lista com os vertices
def get_edges(self): #buscar as arestas(pares de vertices, ou seja, uma aresta liga dois vertices)
''' Returns edges in the graph as a list of tuples (origin, destination) '''
edges = []
for v in self.graph.keys():#para cada key v
for d in self.graph[v]:#para cada value de v
edges.append((v,d))#acrescentar a lista as arestas (vertice v que se ligou ao vertice x)
return edges#devolver a lista
def size(self):##tamanho do grafo
''' Returns size of the graph : number of nodes, number of edges '''
return len(self.get_nodes()), len(self.get_edges())#usa o get_nodes e o get_edges para ter o tamanho do grafo
## add nodes and edges
def add_vertex(self, v):#adicionar vertice(no)
''' Add a vertex to the graph; tests if vertex exists not adding if it does '''
if v not in self.graph.keys():
self.graph[v] = []#adicionar uma key ao dicitionary
def add_edge(self, o, d):#(o,d) vertices
''' Add edge to the graph; if vertices do not exist, they are added to the graph '''
if o not in self.graph.keys():#confirmar se os vertices o e d nao estao no dicionario
self.add_vertex(o) #adicionar vertice o
if d not in self.graph.keys():
self.add_vertex(d) #adicionar vertice d
if d not in self.graph[o]:#confirmar se d e um value de o
self.graph[o].append(d) #adicionar o value d ao o
## successors, predecessors, adjacent nodes
def get_successors(self, v):
return list(self.graph[v]) # needed to avoid list being overwritten of result of the function is used
def get_predecessors(self, v):
pre = []#abrir lista de antecessor
for k in self.graph.keys(): #percorrer as keys do dicionario
if v in self.graph[k]: #OU if self.graph[i]==v: #verificar se v e um value de i
pre.append(k)#adicionar a key com value v a lista
return pre #retornar a lista com os antecessor
def get_adjacents(self, v):
'''Da lista de vertices(nos) adjacentes do vertice(no) v ->dois vertices sao adjacentes se um e sucessor do outro'''
suc = self.get_successors(v)#buscar os sucessores de v
pred = self.get_predecessors(v)#buscar os antecessor de v
res = pred #res e igual a lista de antecessores (podia ser ao contrario)
for p in suc: #percorrer a lista de sucessores
if p not in res: #verificar se nao esta na lista
res.append(p)#adicionar todos os sucessores de v a lista de antecessores se nao estiver na lista
return res #retornar res
## degrees
def out_degree(self, v):#calcula grau de saída do vertice(no) v
#self.get_successors(v) -> lista de todos os arcos que saiem do vertice v
return len(self.graph[v])#contagem de todos os arcos que saiem do vertice v
def in_degree(self, v):#calcula grau de entrada do vertice(no) v
#self.get_predecessors(v) -> lista de todos os arcos que entram do vertice v
return len(self.get_predecessors(v))
def degree(self, v):#O grau de um vértice e dado pelo numero de arestas que lhe sao incidentes
#self.get_adjacents(v) -> ver os sucessores e os predecessores para dar lista de adjacentes
return len(self.get_adjacents(v))#contar os adjacentes da lista
def all_degrees(self, deg_type = "inout"):#tudo o que sai e tudo o que entra
''' Cálculo de graus de entrada e saída (ou ambos) para todos os nós da rede.
deg_type can be "in", "out", or "inout" '''
degs = {}
for v in self.graph.keys():#para cada key no grafo
if deg_type == "out" or deg_type == "inout":#se for graus de saida ou de entrada/saida
degs[v] = len(self.graph[v])#inicializar o número do dicionario com o valor de graus de saida
else: degs[v] = 0
if deg_type == "in" or deg_type == "inout":#se for graus de entrada ou de entrada/saida
for v in self.graph.keys():#para cada key (metabolito ou reação) no grafo
for d in self.graph[v]:#para cada value de v
if deg_type == "in" or v not in self.graph[d]:#se in ou v, não for um value de d no grafo
#-> se nao estiver nos values de d quer dizer que é de entrada(antecessor) e nao de saida(sucessor)
degs[d] = degs[d] + 1 #adicionar + 1 ao value de d no dicionario degs
return degs #retorna todas as keys com os seus respetivos graus (entrada + saida)
def highest_degrees(self, all_deg= None, deg_type = "inout", top= 10):#vai ver o top 10
'''Vai buscar o top 10 de nos com maior grau'''
if all_deg is None: #percorrer todos graus
all_deg = self.all_degrees(deg_type)#ir buscar o dicionario a all_degrees
ord_deg = sorted(list(all_deg.items()), key=lambda x : x[1], reverse = True)
#por por ordem o dicionario do mmaior para o mais pequeno, neste caso trasnforma em lista .items para por em tuplo (key,value) so assim consegue
#por por ordem os graus
return list(map(lambda x:x[0], ord_deg[:top]))#retorna uma lista com os nos com os 10 primeiros
#x[0]-> key; x[1]-> values
## topological metrics over degrees
def mean_degree(self, deg_type = "inout"):#media dos graus
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
return sum(degs.values()) / float(len(degs))#soma de todos os valores do dicionario e fazer a media de nos do grafico
def prob_degree(self, deg_type = "inout"):#probabilidade desse grau existir no grafo
'''Para cada grau quantos nós é que tenho'''
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
res = {}#abrir dicionario
for k in degs.keys():#percorrer todas as keys de degs
if degs[k] in res.keys():#ver se tem um determinado k(grau) nas keys de res
res[degs[k]] += 1 #adicionar esse k + 1 ao dicionario res
else:#caso contrario
res[degs[k]] = 1 #adicionar ao dicionario esse k(grau) = 1
#degs[k]= key e res[degs[k]] = value
for k in res.keys():
res[k] /= float(len(degs))#probabilidade dos graus
return res
'''EXEMPLO:
c ={'a':1,'b':1,'c':3}
v={}
for k in c.keys():
if c[k] in v.keys():
v[c[k]] += 1
else:
v[c[k]] =1
v ={1: 2, 3: 1} '''
## BFS and DFS searches
def reachable_bfs(self, v):
'''Começa pelo nó origem, depois explora todos os seus sucessores,
depois os sucessores destes, e assim sucessivamente até todos os nós
atingíveis terem sido explorados'''
'''de cima para baixo'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node) #se o node for diferente de v adicionar a res
for elem in self.graph[node]:#ver os values de node
if elem not in res and elem not in l and elem != node:#se esse value nao estiver em res, l e for diferente de node
l.append(elem)#adicionar a l
return res
def reachable_dfs(self, v):
'''Começa pelo nó origem e explora o 1º sucessor,
seguido pelo 1º sucessor deste e assim sucessivamente
até não haver mais sucessores e ter que se fazer “backtracking”'''
'''da esquerda para a direita'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node)
s = 0 #contagem
for elem in self.graph[node]:
if elem not in res and elem not in l:
l.insert(s, elem)#s=posicao, elemento
s += 1
return res
def distance(self, s, d):#retorna distancia entre vertices(nos) s e d
if s == d:
return 0
l = [(s,0)]#lista com o no e a distancia de origem
visited = [s]#vertices visitados para obter o caminho
while len(l) > 0:
node, dist = l.pop(0)#removes the item at the given index from the list and returns the removed item(isolar o 1º no na queue)
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return dist + 1 #se o primeiro value for d retornar logo a distancia
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,dist+1))#vamos adicionar a lista l (caminho)
visited.append(elem)#adicionar o no (que ja foi visitado)
return None #retorna None se nao e atingivel
def shortest_path(self, s, d):#retorna caminho mais curto entre s e d (lista de nos por onde passa)
'''Retorna caminho mais curto entre s e d (lista de nós por onde passa)'''
if s == d: return 0
l = [(s,[])]#lista de nos por onde passa que comeca na de origem
visited = [s]#vertices visitados (nos atingidos)
while len(l) > 0:
node, preds = l.pop(0)#removes the item at the given index from the list and returns the removed item
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return preds+[node,elem] #se o primeiro for logo d retorna o caminho mais curto
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,preds+[node]))#adicionar a l
visited.append(elem)#acrescentar vertice a lista de vetices visitados
return None#retorna None se nao e atingivel o caminho mais curto
def reachable_with_dist(self, s):
'''Retorna lista de nós atingíveis a partir de s com respetiva distância(lista de pares nó, distância)'''
#na primeira iteracao faz o for logo
res = []#lista de nós atingíveis a partir de s com respetiva distância
l = [(s,0)] #lista com tuplo com s e a distancia de s a s (0)
while len(l) > 0:
node, dist = l.pop(0)
if node != s: #vai ver se e diferente de s
res.append((node,dist))# nao conta o s
for elem in self.graph[node]:#vai ver onde e que o node s se esta a ligar
if not is_in_tuple_list(l,elem) and not is_in_tuple_list(res,elem): #vai ver se o p se encontra dentro de l ou em res
l.append((elem,dist+1))#adiciona o vertice a que se liga
return res
## mean distances ignoring unreachable nodes
def mean_distances(self):
tot = 0 #total
num_reachable = 0 #numero de vetores ligados entre si
for k in self.graph.keys():
distsk = self.reachable_with_dist(k)#[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
for _, dist in distsk:
tot += dist
num_reachable += len(distsk)#todas as proporçoes de nos atingiveis
meandist = float(tot) / num_reachable #media das distancias de ligacao
n = len(self.get_nodes()) #contagem de todos os nos que tem
return meandist, float(num_reachable)/((n-1)*n) #meandist->distância média,num_reachable proporção de nos atingiveis(num_reachable) / nº de ligacoes esperadas ((n-1)*n))
def closeness_centrality(self, node):#node = s
'''Baseadas nos nós que estão mais próximos dos restantes'''
dist = self.reachable_with_dist(node) #[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
if len(dist)==0:
return 0.0 #centralidade mais proxima e 0
s = 0.0 #distancia
for d in dist: #d = ( , )
s += d[1] #tuplo (t,6)
return len(dist) / s #todos os nos a dividir pela distancia total
#Centralidade mais proxima = todos os tuplos (vertice com ligacao a esse vertice)/distancia total
def highest_closeness(self, top = 10):
'''Centralidade mais alta -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a centralidade mais proxima
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.closeness_centrality(k)# o value de k = a centralidade mais proxima da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem a centralidade mais proxima(transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def betweenness_centrality(self, node):
'''Baseadas na proporção de caminhos mais curtos entre todos os nós que passam pelo nó'''
'''Soma de todas as distancia possiveis '''
total_sp = 0 #todos os caminhos curtos que existem
sps_with_node = 0 #caminhos curtos que passam pelo node
for s in self.graph.keys():
for t in self.graph.keys():
if s != t and s != node and t != node:
sp = self.shortest_path(s, t)#retorna os caminhos dos nos de s a t
if sp is not None:# ou seja, se existir um caminho
total_sp += 1 #somar 1 aos caminhos totais
if node in sp: #se node se encontrar no sp
sps_with_node += 1 #ver se nesse caminho o meu node existe
return sps_with_node / total_sp #caminhos curtos que passam pelo node/ caminhos curtos totais
def highest_betweenness(self, top = 10):
'''Centralidade mais alta no betweenes -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a betweenness_centrality
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.betweenness_centrality(k)# o value de k = a cbetweenness_centrality da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem da betweenness_centrality (transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def centralidade_de_grau_no(self,v):
'''A centralidade de grau de um vertice e dada pelo seu grau'''
alldegree = self.all_degrees()
return(alldegree[v]) #vai buscar o grau do no v
## cycles
def node_has_cycle (self, v):
l = [v]
res = False
visited = [v]
while len(l) > 0:
node = l.pop(0)
for elem in self.graph[node]:
if elem == v: return True
elif elem not in visited:
l.append(elem)
visited.append(elem)
return res
def has_cycle(self):
res = False
for v in self.graph.keys():
if self.node_has_cycle(v): return True
return res
## clustering
def clustering_coef(self, v):#nova função
adjs = self.get_adjacents(v) #lista de vertices
if len(adjs) <=1:#se isto acontecer quer dizer que nao existe agrupamento
return 0.0 #entao o coeficiente e 0
ligs = 0 #ligacoes
for i in adjs:#vai ao primeiro elemento de adjs (um no)
for j in adjs:# vai ao primeiro elemento de adjs(um no)
if i != j:#na primeira iteracao nao se vai verificar isto entao volta para o inicio do for
if j in self.graph[i] or i in self.graph[j]: #se j for um value de i e se i for um value de j
ligs = ligs + 1 #adicionar aos ligantes
return float(ligs)/(len(adjs)*(len(adjs)-1))#nº de arcos existentes entre vizinhos do no / nº total de arcos que poderiam existir entre vizinhos do no
'''EXEMPLO:
l = [1,2,3]
for i in l:
for j in l:
if i != j:
print('soma:',i+j)
RESULTADO:
i: 1,
j: 1, j: 2 -> soma: 3
j: 3 -> soma: 4
i: 2
j: 1-> soma: 3
j: 2, j: 3 -> soma: 5'''
def all_clustering_coefs(self):#nova função
ccs = {}#dicionario com todos os coeficientes de clustering
for k in self.graph.keys():#percorrer os nos
ccs[k] = self.clustering_coef(k)#adicionar ao k(no) o seu valor de clustering
return ccs
def mean_clustering_coef(self):#nova função
'''Média sobre todos os nós'''
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
return sum(ccs.values()) / float(len(ccs))# soma dos values/total de elemntos em ccs
def mean_clustering_perdegree(self, deg_type = "inout"):#nova função
'''Média dos coeficientes considerando nós de grau k.'''
d | l:
if val == x: return True
return res
if __name__ == "__main__":
gr = MyGraph()
gr.add_vertex(1)
gr.add_vertex(2)
gr.add_vertex(3)
gr.add_vertex(4)
gr.add_edge(1,2)
gr.add_edge(2,3)
gr.add_edge(3,2)
gr.add_edge(3,4)
gr.add_edge(4,2)
gr.print_graph()
print(gr.size())
print (gr.get_successors(2))
print (gr.get_predecessors(2))
print("ADJACENTES:")
print (gr.get_adjacents(2))
print (gr.in_degree(2))
print (gr.out_degree(2))
print (gr.degree(2))
print(gr.all_degrees("inout"))
print(gr.all_degrees("in"))
print(gr.all_degrees("out"))
gr2 = MyGraph({1:[2,3,4], 2:[5,6],3:[6,8],4:[8],5:[7],6:[],7:[],8:[]})
print(gr2.reachable_bfs(1))
print(gr2.reachable_dfs(1))
print(gr2.distance(1,7))
print(gr2.shortest_path(1,7))
print(gr2.distance(1,8))
print(gr2.shortest_path(1,8))
print(gr2.distance(6,1))
print(gr2.shortest_path(6,1))
print(gr2.reachable_with_dist(1))
print(gr.has_cycle())
print(gr2.has_cycle())
print(gr.mean_degree())
print(gr.prob_degree())
print(gr.mean_distances())
print("CLUSTERING COEF:")
print (gr.clustering_coef(1))
print("CLUSTERING COEF:")
print (gr.clustering_coef(2))
| egs = self.all_degrees(deg_type)#dicionario com as keys com os seus respetivos graus (entrada + saida) -> {no: graus(entrada + saida)}
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
degs_k = {}#{grau: no}
for k in degs.keys():#percorrer os no
if degs[k] in degs_k.keys(): #se o degs[k] (value- graus) for uma key em degs_k
degs_k[degs[k]].append(k)#adicionar a key de degs_k (grau) o value k (no)
else: degs_k[degs[k]] = [k]#caso contrario adicionar a key (degs[k]) o value k
ck = {}#{grau: media coeficiente}
for k in degs_k.keys():#para cada grau(key) em degs_k
tot = 0
for v in degs_k[k]:#para cada no(value) em degs_k -> buscar todos os nos com aquele grau
tot += ccs[v]# buscar o coeficiente de todos os nos com aquele grau
ck[k] = float(tot) / len(degs_k[k])#media do clustering, adicionar ao dicionario o {grau: media do coeficiente por grau}
return ck
def is_in_tuple_list(tl, val):
res = False
for (x,y) in t | identifier_body |
MyGraph.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 01:33:42 2017
@author: miguelrocha
"""
## Graph represented as adjacency list using a dictionary
## keys are vertices
## values of the dictionary represent the list of adjacent vertices of the key node
class MyGraph:
def __init__(self, g = {}):
''' Constructor - takes dictionary to fill the graph as input; default is empty dictionary '''
self.graph = g #unico atributo (g = dicionario)
def print_graph(self):
''' Prints the content of the graph as adjacency list '''
for v in self.graph.keys():#para cada key no dicionario (vertice)
print (v, " -> ", self.graph[v])#para cada key no dicionario (vertice)
## get basic info
def get_nodes(self):#vai buscar os vetices(nos)
''' Returns list of nodes in the graph '''
return list(self.graph.keys())#devolve uma lista com os vertices
def get_edges(self): #buscar as arestas(pares de vertices, ou seja, uma aresta liga dois vertices)
''' Returns edges in the graph as a list of tuples (origin, destination) '''
edges = []
for v in self.graph.keys():#para cada key v
for d in self.graph[v]:#para cada value de v
edges.append((v,d))#acrescentar a lista as arestas (vertice v que se ligou ao vertice x)
return edges#devolver a lista
def size(self):##tamanho do grafo
''' Returns size of the graph : number of nodes, number of edges '''
return len(self.get_nodes()), len(self.get_edges())#usa o get_nodes e o get_edges para ter o tamanho do grafo
## add nodes and edges
def add_vertex(self, v):#adicionar vertice(no)
''' Add a vertex to the graph; tests if vertex exists not adding if it does '''
if v not in self.graph.keys():
self.graph[v] = []#adicionar uma key ao dicitionary
def add_edge(self, o, d):#(o,d) vertices
''' Add edge to the graph; if vertices do not exist, they are added to the graph '''
if o not in self.graph.keys():#confirmar se os vertices o e d nao estao no dicionario
self.add_vertex(o) #adicionar vertice o
if d not in self.graph.keys():
self.add_vertex(d) #adicionar vertice d
if d not in self.graph[o]:#confirmar se d e um value de o
self.graph[o].append(d) #adicionar o value d ao o
## successors, predecessors, adjacent nodes
def get_successors(self, v):
return list(self.graph[v]) # needed to avoid list being overwritten of result of the function is used
def get_predecessors(self, v):
pre = []#abrir lista de antecessor
for k in self.graph.keys(): #percorrer as keys do dicionario
if v in self.graph[k]: #OU if self.graph[i]==v: #verificar se v e um value de i
pre.append(k)#adicionar a key com value v a lista
return pre #retornar a lista com os antecessor
def get_adjacents(self, v):
'''Da lista de vertices(nos) adjacentes do vertice(no) v ->dois vertices sao adjacentes se um e sucessor do outro'''
suc = self.get_successors(v)#buscar os sucessores de v
pred = self.get_predecessors(v)#buscar os antecessor de v
res = pred #res e igual a lista de antecessores (podia ser ao contrario)
for p in suc: #percorrer a lista de sucessores
if p not in res: #verificar se nao esta na lista
res.append(p)#adicionar todos os sucessores de v a lista de antecessores se nao estiver na lista
return res #retornar res
## degrees
def out_degree(self, v):#calcula grau de saída do vertice(no) v
#self.get_successors(v) -> lista de todos os arcos que saiem do vertice v
return len(self.graph[v])#contagem de todos os arcos que saiem do vertice v
def in_degree(self, v):#calcula grau de entrada do vertice(no) v
#self.get_predecessors(v) -> lista de todos os arcos que entram do vertice v
return len(self.get_predecessors(v))
def degree(self, v):#O grau de um vértice e dado pelo numero de arestas que lhe sao incidentes
#self.get_adjacents(v) -> ver os sucessores e os predecessores para dar lista de adjacentes
return len(self.get_adjacents(v))#contar os adjacentes da lista
def all_degrees(self, deg_type = "inout"):#tudo o que sai e tudo o que entra
''' Cálculo de graus de entrada e saída (ou ambos) para todos os nós da rede.
deg_type can be "in", "out", or "inout" '''
degs = {}
for v in self.graph.keys():#para cada key no grafo
if deg_type == "out" or deg_type == "inout":#se for graus de saida ou de entrada/saida
degs[v] = len(self.graph[v])#inicializar o número do dicionario com o valor de graus de saida
else: degs[v] = 0
if deg_type == "in" or deg_type == "inout":#se for graus de entrada ou de entrada/saida
for v in self.graph.keys():#para cada key (metabolito ou reação) no grafo
for d in self.graph[v]:#para cada value de v
if deg_type == "in" or v not in self.graph[d]:#se in ou v, não for um value de d no grafo
#-> se nao estiver nos values de d quer dizer que é de entrada(antecessor) e nao de saida(sucessor)
degs[d] = degs[d] + 1 #adicionar + 1 ao value de d no dicionario degs
return degs #retorna todas as keys com os seus respetivos graus (entrada + saida)
def highest_degrees(self, all_deg= None, deg_type = "inout", top= 10):#vai ver o top 10
'''Vai buscar o top 10 de nos com maior grau'''
if all_deg is None: #percorrer todos graus
all_deg = self.all_degrees(deg_type)#ir buscar o dicionario a all_degrees
ord_deg = sorted(list(all_deg.items()), key=lambda x : x[1], reverse = True)
#por por ordem o dicionario do mmaior para o mais pequeno, neste caso trasnforma em lista .items para por em tuplo (key,value) so assim consegue
#por por ordem os graus
return list(map(lambda x:x[0], ord_deg[:top]))#retorna uma lista com os nos com os 10 primeiros
#x[0]-> key; x[1]-> values
## topological metrics over degrees
def mean_degree(self, deg_type = "inout"):#media dos graus
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
return sum(degs.values()) / float(len(degs))#soma de todos os valores do dicionario e fazer a media de nos do grafico
def prob_degree(self, deg_type = "inout"):#probabilidade desse grau existir no grafo
'''Para cada grau quantos nós é que tenho'''
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
res = {}#abrir dicionario
for k in degs.keys():#percorrer todas as keys de degs
if degs[k] in res.keys():#ver se tem um determinado k(grau) nas keys de res
res[degs[k]] += 1 #adicionar esse k + 1 ao dicionario res
else:#caso contrario
res[degs[k]] = 1 #adicionar ao dicionario esse k(grau) = 1
#degs[k]= key e res[degs[k]] = value
for k in res.keys():
res[k] /= float(len(degs))#probabilidade dos graus
return res
'''EXEMPLO:
c ={'a':1,'b':1,'c':3}
v={}
for k in c.keys():
if c[k] in v.keys():
v[c[k]] += 1
else:
v[c[k]] =1
v ={1: 2, 3: 1} '''
## BFS and DFS searches
def reachable_bfs(self, v):
'''Começa pelo nó origem, depois explora todos os seus sucessores,
depois os sucessores destes, e assim sucessivamente até todos os nós
atingíveis terem sido explorados'''
'''de cima para baixo'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node) #se o node for diferente de v adicionar a res
for elem in self.graph[node]:#ver os values de node
if elem not in res and elem not in l and elem != node:#se esse value nao estiver em res, l e for diferente de node
l.append(elem)#adicionar a l
return res
def reachable_dfs(self, v):
'''Começa pelo nó origem e explora o 1º sucessor,
seguido pelo 1º sucessor deste e assim sucessivamente
até não haver mais sucessores e ter que se fazer “backtracking”'''
'''da esquerda para a direita'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node)
s = 0 #contagem
for elem in self.graph[node]:
if elem not in res and elem not in l:
l.insert(s, elem)#s=posicao, elemento
s += 1
return res
def distance(self, s, d):#retorna distancia entre vertices(nos) s e d
if s == d:
return 0
l = [(s,0)]#lista com o no e a distancia de origem
visited = [s]#vertices visitados para obter o caminho
while len(l) > 0:
node, dist = l.pop(0)#removes the item at the given index from the list and returns the removed item(isolar o 1º no na queue)
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return dist + 1 #se o primeiro value for d retornar logo a distancia
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,dist+1))#vamos adicionar a lista l (caminho)
visited.append(elem)#adicionar o no (que ja foi visitado)
return None #retorna None se nao e atingivel
def shortest_path(self, s, d):#retor | is curto entre s e d (lista de nos por onde passa)
'''Retorna caminho mais curto entre s e d (lista de nós por onde passa)'''
if s == d: return 0
l = [(s,[])]#lista de nos por onde passa que comeca na de origem
visited = [s]#vertices visitados (nos atingidos)
while len(l) > 0:
node, preds = l.pop(0)#removes the item at the given index from the list and returns the removed item
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return preds+[node,elem] #se o primeiro for logo d retorna o caminho mais curto
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,preds+[node]))#adicionar a l
visited.append(elem)#acrescentar vertice a lista de vetices visitados
return None#retorna None se nao e atingivel o caminho mais curto
def reachable_with_dist(self, s):
'''Retorna lista de nós atingíveis a partir de s com respetiva distância(lista de pares nó, distância)'''
#na primeira iteracao faz o for logo
res = []#lista de nós atingíveis a partir de s com respetiva distância
l = [(s,0)] #lista com tuplo com s e a distancia de s a s (0)
while len(l) > 0:
node, dist = l.pop(0)
if node != s: #vai ver se e diferente de s
res.append((node,dist))# nao conta o s
for elem in self.graph[node]:#vai ver onde e que o node s se esta a ligar
if not is_in_tuple_list(l,elem) and not is_in_tuple_list(res,elem): #vai ver se o p se encontra dentro de l ou em res
l.append((elem,dist+1))#adiciona o vertice a que se liga
return res
## mean distances ignoring unreachable nodes
def mean_distances(self):
tot = 0 #total
num_reachable = 0 #numero de vetores ligados entre si
for k in self.graph.keys():
distsk = self.reachable_with_dist(k)#[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
for _, dist in distsk:
tot += dist
num_reachable += len(distsk)#todas as proporçoes de nos atingiveis
meandist = float(tot) / num_reachable #media das distancias de ligacao
n = len(self.get_nodes()) #contagem de todos os nos que tem
return meandist, float(num_reachable)/((n-1)*n) #meandist->distância média,num_reachable proporção de nos atingiveis(num_reachable) / nº de ligacoes esperadas ((n-1)*n))
def closeness_centrality(self, node):#node = s
'''Baseadas nos nós que estão mais próximos dos restantes'''
dist = self.reachable_with_dist(node) #[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
if len(dist)==0:
return 0.0 #centralidade mais proxima e 0
s = 0.0 #distancia
for d in dist: #d = ( , )
s += d[1] #tuplo (t,6)
return len(dist) / s #todos os nos a dividir pela distancia total
#Centralidade mais proxima = todos os tuplos (vertice com ligacao a esse vertice)/distancia total
def highest_closeness(self, top = 10):
'''Centralidade mais alta -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a centralidade mais proxima
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.closeness_centrality(k)# o value de k = a centralidade mais proxima da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem a centralidade mais proxima(transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def betweenness_centrality(self, node):
'''Baseadas na proporção de caminhos mais curtos entre todos os nós que passam pelo nó'''
'''Soma de todas as distancia possiveis '''
total_sp = 0 #todos os caminhos curtos que existem
sps_with_node = 0 #caminhos curtos que passam pelo node
for s in self.graph.keys():
for t in self.graph.keys():
if s != t and s != node and t != node:
sp = self.shortest_path(s, t)#retorna os caminhos dos nos de s a t
if sp is not None:# ou seja, se existir um caminho
total_sp += 1 #somar 1 aos caminhos totais
if node in sp: #se node se encontrar no sp
sps_with_node += 1 #ver se nesse caminho o meu node existe
return sps_with_node / total_sp #caminhos curtos que passam pelo node/ caminhos curtos totais
def highest_betweenness(self, top = 10):
'''Centralidade mais alta no betweenes -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a betweenness_centrality
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.betweenness_centrality(k)# o value de k = a cbetweenness_centrality da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem da betweenness_centrality (transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def centralidade_de_grau_no(self,v):
'''A centralidade de grau de um vertice e dada pelo seu grau'''
alldegree = self.all_degrees()
return(alldegree[v]) #vai buscar o grau do no v
## cycles
def node_has_cycle (self, v):
l = [v]
res = False
visited = [v]
while len(l) > 0:
node = l.pop(0)
for elem in self.graph[node]:
if elem == v: return True
elif elem not in visited:
l.append(elem)
visited.append(elem)
return res
def has_cycle(self):
res = False
for v in self.graph.keys():
if self.node_has_cycle(v): return True
return res
## clustering
def clustering_coef(self, v):#nova função
adjs = self.get_adjacents(v) #lista de vertices
if len(adjs) <=1:#se isto acontecer quer dizer que nao existe agrupamento
return 0.0 #entao o coeficiente e 0
ligs = 0 #ligacoes
for i in adjs:#vai ao primeiro elemento de adjs (um no)
for j in adjs:# vai ao primeiro elemento de adjs(um no)
if i != j:#na primeira iteracao nao se vai verificar isto entao volta para o inicio do for
if j in self.graph[i] or i in self.graph[j]: #se j for um value de i e se i for um value de j
ligs = ligs + 1 #adicionar aos ligantes
return float(ligs)/(len(adjs)*(len(adjs)-1))#nº de arcos existentes entre vizinhos do no / nº total de arcos que poderiam existir entre vizinhos do no
'''EXEMPLO:
l = [1,2,3]
for i in l:
for j in l:
if i != j:
print('soma:',i+j)
RESULTADO:
i: 1,
j: 1, j: 2 -> soma: 3
j: 3 -> soma: 4
i: 2
j: 1-> soma: 3
j: 2, j: 3 -> soma: 5'''
def all_clustering_coefs(self):#nova função
ccs = {}#dicionario com todos os coeficientes de clustering
for k in self.graph.keys():#percorrer os nos
ccs[k] = self.clustering_coef(k)#adicionar ao k(no) o seu valor de clustering
return ccs
def mean_clustering_coef(self):#nova função
'''Média sobre todos os nós'''
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
return sum(ccs.values()) / float(len(ccs))# soma dos values/total de elemntos em ccs
def mean_clustering_perdegree(self, deg_type = "inout"):#nova função
'''Média dos coeficientes considerando nós de grau k.'''
degs = self.all_degrees(deg_type)#dicionario com as keys com os seus respetivos graus (entrada + saida) -> {no: graus(entrada + saida)}
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
degs_k = {}#{grau: no}
for k in degs.keys():#percorrer os no
if degs[k] in degs_k.keys(): #se o degs[k] (value- graus) for uma key em degs_k
degs_k[degs[k]].append(k)#adicionar a key de degs_k (grau) o value k (no)
else: degs_k[degs[k]] = [k]#caso contrario adicionar a key (degs[k]) o value k
ck = {}#{grau: media coeficiente}
for k in degs_k.keys():#para cada grau(key) em degs_k
tot = 0
for v in degs_k[k]:#para cada no(value) em degs_k -> buscar todos os nos com aquele grau
tot += ccs[v]# buscar o coeficiente de todos os nos com aquele grau
ck[k] = float(tot) / len(degs_k[k])#media do clustering, adicionar ao dicionario o {grau: media do coeficiente por grau}
return ck
def is_in_tuple_list(tl, val):
res = False
for (x,y) in tl:
if val == x: return True
return res
if __name__ == "__main__":
gr = MyGraph()
gr.add_vertex(1)
gr.add_vertex(2)
gr.add_vertex(3)
gr.add_vertex(4)
gr.add_edge(1,2)
gr.add_edge(2,3)
gr.add_edge(3,2)
gr.add_edge(3,4)
gr.add_edge(4,2)
gr.print_graph()
print(gr.size())
print (gr.get_successors(2))
print (gr.get_predecessors(2))
print("ADJACENTES:")
print (gr.get_adjacents(2))
print (gr.in_degree(2))
print (gr.out_degree(2))
print (gr.degree(2))
print(gr.all_degrees("inout"))
print(gr.all_degrees("in"))
print(gr.all_degrees("out"))
gr2 = MyGraph({1:[2,3,4], 2:[5,6],3:[6,8],4:[8],5:[7],6:[],7:[],8:[]})
print(gr2.reachable_bfs(1))
print(gr2.reachable_dfs(1))
print(gr2.distance(1,7))
print(gr2.shortest_path(1,7))
print(gr2.distance(1,8))
print(gr2.shortest_path(1,8))
print(gr2.distance(6,1))
print(gr2.shortest_path(6,1))
print(gr2.reachable_with_dist(1))
print(gr.has_cycle())
print(gr2.has_cycle())
print(gr.mean_degree())
print(gr.prob_degree())
print(gr.mean_distances())
print("CLUSTERING COEF:")
print (gr.clustering_coef(1))
print("CLUSTERING COEF:")
print (gr.clustering_coef(2))
| na caminho ma | identifier_name |
MyGraph.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 01:33:42 2017
@author: miguelrocha
"""
## Graph represented as adjacency list using a dictionary
## keys are vertices
## values of the dictionary represent the list of adjacent vertices of the key node
class MyGraph:
def __init__(self, g = {}):
''' Constructor - takes dictionary to fill the graph as input; default is empty dictionary '''
self.graph = g #unico atributo (g = dicionario)
def print_graph(self):
''' Prints the content of the graph as adjacency list '''
for v in self.graph.keys():#para cada key no dicionario (vertice)
print (v, " -> ", self.graph[v])#para cada key no dicionario (vertice)
## get basic info
def get_nodes(self):#vai buscar os vetices(nos)
''' Returns list of nodes in the graph '''
return list(self.graph.keys())#devolve uma lista com os vertices
def get_edges(self): #buscar as arestas(pares de vertices, ou seja, uma aresta liga dois vertices)
''' Returns edges in the graph as a list of tuples (origin, destination) '''
edges = []
for v in self.graph.keys():#para cada key v
for d in self.graph[v]:#para cada value de v
edges.append((v,d))#acrescentar a lista as arestas (vertice v que se ligou ao vertice x)
return edges#devolver a lista
def size(self):##tamanho do grafo
''' Returns size of the graph : number of nodes, number of edges '''
return len(self.get_nodes()), len(self.get_edges())#usa o get_nodes e o get_edges para ter o tamanho do grafo
## add nodes and edges
def add_vertex(self, v):#adicionar vertice(no)
''' Add a vertex to the graph; tests if vertex exists not adding if it does '''
if v not in self.graph.keys():
self.graph[v] = []#adicionar uma key ao dicitionary
def add_edge(self, o, d):#(o,d) vertices
''' Add edge to the graph; if vertices do not exist, they are added to the graph '''
if o not in self.graph.keys():#confirmar se os vertices o e d nao estao no dicionario
self.add_vertex(o) #adicionar vertice o
if d not in self.graph.keys():
self.add_vertex(d) #adicionar vertice d
if d not in self.graph[o]:#confirmar se d e um value de o
self.graph[o].append(d) #adicionar o value d ao o
## successors, predecessors, adjacent nodes
def get_successors(self, v):
return list(self.graph[v]) # needed to avoid list being overwritten of result of the function is used
def get_predecessors(self, v):
pre = []#abrir lista de antecessor
for k in self.graph.keys(): #percorrer as keys do dicionario
if v in self.graph[k]: #OU if self.graph[i]==v: #verificar se v e um value de i
pre.append(k)#adicionar a key com value v a lista
return pre #retornar a lista com os antecessor
def get_adjacents(self, v):
'''Da lista de vertices(nos) adjacentes do vertice(no) v ->dois vertices sao adjacentes se um e sucessor do outro'''
suc = self.get_successors(v)#buscar os sucessores de v
pred = self.get_predecessors(v)#buscar os antecessor de v
res = pred #res e igual a lista de antecessores (podia ser ao contrario)
for p in suc: #percorrer a lista de sucessores
if p not in res: #verificar se nao esta na lista
res.append(p)#adicionar todos os sucessores de v a lista de antecessores se nao estiver na lista
return res #retornar res
## degrees
def out_degree(self, v):#calcula grau de saída do vertice(no) v
#self.get_successors(v) -> lista de todos os arcos que saiem do vertice v
return len(self.graph[v])#contagem de todos os arcos que saiem do vertice v
def in_degree(self, v):#calcula grau de entrada do vertice(no) v
#self.get_predecessors(v) -> lista de todos os arcos que entram do vertice v
return len(self.get_predecessors(v))
def degree(self, v):#O grau de um vértice e dado pelo numero de arestas que lhe sao incidentes
#self.get_adjacents(v) -> ver os sucessores e os predecessores para dar lista de adjacentes
return len(self.get_adjacents(v))#contar os adjacentes da lista
def all_degrees(self, deg_type = "inout"):#tudo o que sai e tudo o que entra
''' Cálculo de graus de entrada e saída (ou ambos) para todos os nós da rede.
deg_type can be "in", "out", or "inout" '''
degs = {}
for v in self.graph.keys():#para cada key no grafo
if deg_type == "out" or deg_type == "inout":#se for graus de saida ou de entrada/saida
degs[v] = len(self.graph[v])#inicializar o número do dicionario com o valor de graus de saida
else: degs[v] = 0
if deg_type == "in" or deg_type == "inout":#se for graus de entrada ou de entrada/saida
for v in self.graph.keys():#para cada key (metabolito ou reação) no grafo
for d in self.graph[v]:#para cada value de v
if deg_type == "in" or v not in self.graph[d]:#se in ou v, não for um value de d no grafo
#-> se nao estiver nos values de d quer dizer que é de entrada(antecessor) e nao de saida(sucessor)
degs[d] = degs[d] + 1 #adicionar + 1 ao value de d no dicionario degs
return degs #retorna todas as keys com os seus respetivos graus (entrada + saida)
def highest_degrees(self, all_deg= None, deg_type = "inout", top= 10):#vai ver o top 10
'''Vai buscar o top 10 de nos com maior grau'''
if all_deg is None: #percorrer todos graus
all_deg = self.all_degrees(deg_type)#ir buscar o dicionario a all_degrees
ord_deg = sorted(list(all_deg.items()), key=lambda x : x[1], reverse = True)
#por por ordem o dicionario do mmaior para o mais pequeno, neste caso trasnforma em lista .items para por em tuplo (key,value) so assim consegue
#por por ordem os graus
return list(map(lambda x:x[0], ord_deg[:top]))#retorna uma lista com os nos com os 10 primeiros
#x[0]-> key; x[1]-> values
## topological metrics over degrees
def mean_degree(self, deg_type = "inout"):#media dos graus
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
return sum(degs.values()) / float(len(degs))#soma de todos os valores do dicionario e fazer a media de nos do grafico
def prob_degree(self, deg_type = "inout"):#probabilidade desse grau existir no grafo
'''Para cada grau quantos nós é que tenho'''
degs = self.all_degrees(deg_type)#calculo dos graus de entrada e saída (ou ambos) para todos os nós da rede
res = {}#abrir dicionario
for k in degs.keys():#percorrer todas as keys de degs
if degs[k] in res.keys():#ver se tem um determinado k(grau) nas keys de res
res[degs[k]] += 1 #adicionar esse k + 1 ao dicionario res
else:#caso contrario
res[degs[k]] = 1 #adicionar ao dicionario esse k(grau) = 1
#degs[k]= key e res[degs[k]] = value
for k in res.keys():
res[k] /= float(len(degs))#probabilidade dos graus
return res
'''EXEMPLO:
c ={'a':1,'b':1,'c':3}
v={}
for k in c.keys():
if c[k] in v.keys():
v[c[k]] += 1
else:
v[c[k]] =1
v ={1: 2, 3: 1} '''
## BFS and DFS searches
def reachable_bfs(self, v):
'''Começa pelo nó origem, depois explora todos os seus sucessores,
depois os sucessores destes, e assim sucessivamente até todos os nós
atingíveis terem sido explorados'''
'''de cima para baixo'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node) #se o node for diferente de v adicionar a res
for elem in self.graph[node]:#ver os values de node
if elem not in res and elem not in l and elem != node:#se esse value nao estiver em res, l e for diferente de node
l.append(elem)#adicionar a l
return res
def reachable_dfs(self, v):
'''Começa pelo nó origem e explora o 1º sucessor,
seguido pelo 1º sucessor deste e assim sucessivamente
até não haver mais sucessores e ter que se fazer “backtracking”'''
'''da esquerda para a direita'''
l = [v]#comeca pelo no de origem
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node)
s = 0 #contagem
for elem in self.graph[node]:
if elem not in res and elem not in l:
l.insert(s, elem)#s=posicao, elemento
s += 1
return res
def distance(self, s, d):#retorna distancia entre vertices(nos) s e d
if s == d:
return 0
l = [(s,0)]#lista com o no e a distancia de origem
visited = [s]#vertices visitados para obter o caminho
while len(l) > 0:
node, dist = l.pop(0)#removes the item at the given index from the list and returns the removed item(isolar o 1º no na queue)
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return dist + 1 #se o primeiro value for d retornar logo a distancia
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,dist+1))#vamos adicionar a lista l (caminho)
visited.append(elem)#adicionar o no (que ja foi visitado)
return None #retorna None se nao e atingivel
def shortest_path(self, s, d):#retorna caminho mais curto entre s e d (lista de nos por onde passa)
'''Retorna caminho mais curto entre s e d (lista de nós por onde passa)'''
if s == d: return 0
l = [(s,[])]#lista de nos por onde passa que comeca na de origem
visited = [s]#vertices visitados (nos atingidos)
while len(l) > 0:
node, preds = l.pop(0)#removes the item at the given index from the list and returns the removed item
for elem in self.graph[node]:#percorrer os values do no de origem
if elem == d: return preds+[node,elem] #se o primeiro for logo d retorna o caminho mais curto
elif elem not in visited:#se o value nao estiver em visitado
l.append((elem,preds+[node]))#adicionar a l
visited.append(elem)#acrescentar vertice a lista de vetices visitados
return None#retorna None se nao e atingivel o caminho mais curto
def reachable_with_dist(self, s):
'''Retorna lista de nós atingíveis a partir de s com respetiva distância(lista de pares nó, distância)'''
#na primeira iteracao faz o for logo
res = []#lista de nós atingíveis a partir de s com respetiva distância
l = [(s,0)] #lista com tuplo com s e a distancia de s a s (0)
while len(l) > 0:
node, dist = l.pop(0)
if node != s: #vai ver se e diferente de s
res.append((node,dist))# nao conta o s
for elem in self.graph[node]:#vai ver onde e que o node s se esta a ligar
if not is_in_tuple_list(l,elem) and not is_in_tuple_list(res,elem): #vai ver se o p se encontra dentro de l ou em res
l.append((elem,dist+1))#adiciona o vertice a que se liga
return res
## mean distances ignoring unreachable nodes
def mean_distances(self):
tot = 0 #total
num_reachable = 0 #numero de vetores ligados entre si
for k in self.graph.keys():
distsk = self.reachable_with_dist(k)#[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
for _, dist in distsk:
tot += dist
num_reachable += len(distsk)#todas as proporçoes de nos atingiveis
meandist = float(tot) / num_reachable #media das distancias de ligacao
n = len(self.get_nodes()) #contagem de todos os nos que tem
return meandist, float(num_reachable)/((n-1)*n) #meandist->distância média,num_reachable proporção de nos atingiveis(num_reachable) / nº de ligacoes esperadas ((n-1)*n))
def closeness_centrality(self, node):#node = s
'''Baseadas nos nós que estão mais próximos dos restantes'''
dist = self.reachable_with_dist(node) #[(no,dist)]->lista de nos atingiveis a partir de s com respetiva distancia
if len(dist)==0:
return 0.0 #centralidade mais proxima e 0
s = 0.0 #distancia
for d in dist: #d = ( , )
s += d[1] #tuplo (t,6)
return len(dist) / s #todos os nos a dividir pela distancia total
#Centralidade mais proxima = todos os tuplos (vertice com ligacao a esse vertice)/distancia total
def highest_closeness(self, top = 10):
'''Centralidade mais alta -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a centralidade mais proxima
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.closeness_centrality(k)# o value de k = a centralidade mais proxima da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem a centralidade mais proxima(transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def betweenness_centrality(self, node):
'''Baseadas na proporção de caminhos mais curtos entre todos os nós que passam pelo nó'''
'''Soma de todas as distancia possiveis '''
total_sp = 0 #todos os caminhos curtos que existem
sps_with_node = 0 #caminhos curtos que passam pelo node
for s in self.graph.keys():
for t in self.graph.keys():
if s != t and s != node and t != node:
sp = self.shortest_path(s, t)#retorna os caminhos dos nos de s a t
if sp is not None:# ou seja, se existir um caminho
total_sp += 1 #somar 1 aos caminhos totais
if node in sp: #se node se encontrar no sp
sps_with_node += 1 #ver se nesse caminho o meu node existe
return sps_with_node / total_sp #caminhos curtos que passam pelo node/ caminhos curtos totais
def highest_betweenness(self, top = 10):
'''Centralidade mais alta no betweenes -> top 10'''
cc = {} #abrir o dicionario com todas as keys do grafo e a betweenness_centrality
for k in self.graph.keys():#para todas as keys no grafo
cc[k] = self.betweenness_centrality(k)# o value de k = a cbetweenness_centrality da key do grafo
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #ordenar o dicionario em ordem da betweenness_centrality (transformar em lista)
return list(map(lambda x:x[0], ord_cl[:top])) #retornar os vertices com o top 10
def centralidade_de_grau_no(self,v):
'''A centralidade de grau de um vertice e dada pelo seu grau'''
alldegree = self.all_degrees()
return(alldegree[v]) #vai buscar o grau do no v
## cycles
def node_has_cycle (self, v):
l = [v]
res = False
visited = [v]
while len(l) > 0:
node = l.pop(0)
for elem in self.graph[node]:
if elem == v: return True
elif elem not in visited:
l.append(elem)
visited.append(elem)
return res
def has_cycle(self):
res = False
for v in self.graph.keys():
if self.node_has_cycle(v): return True
return res
## clustering
def clustering_coef(self, v):#nova função
adjs = self.get_adjacents(v) #lista de vertices
if len(adjs) <=1:#se isto acontecer quer dizer que nao existe agrupamento
return 0.0 #entao o coeficiente e 0
ligs = 0 #ligacoes
for i in adjs:#vai ao primeiro elemento de adjs (um no)
for j in adjs:# vai ao primeiro elemento de adjs(um no)
if i != j:#na primeira iteracao nao se vai verificar isto entao volta para o inicio do for
if j in self.graph[i] or i in self.graph[j]: #se j for um value de i e se i for um value de j
ligs = ligs + 1 #adicionar aos ligantes
return float(ligs)/(len(adjs)*(len(adjs)-1))#nº de arcos existentes entre vizinhos do no / nº total de arcos que poderiam existir entre vizinhos do no
'''EXEMPLO:
l = [1,2,3]
for i in l:
for j in l:
if i != j:
print('soma:',i+j)
RESULTADO:
i: 1,
j: 1, j: 2 -> soma: 3
j: 3 -> soma: 4
i: 2
j: 1-> soma: 3
j: 2, j: 3 -> soma: 5'''
def all_clustering_coefs(self):#nova função
ccs = {}#dicionario com todos os coeficientes de clustering
for k in self.graph.keys():#percorrer os nos
ccs[k] = self.clustering_coef(k)#adicionar ao k(no) o seu valor de clustering
return ccs | ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
return sum(ccs.values()) / float(len(ccs))# soma dos values/total de elemntos em ccs
def mean_clustering_perdegree(self, deg_type = "inout"):#nova função
'''Média dos coeficientes considerando nós de grau k.'''
degs = self.all_degrees(deg_type)#dicionario com as keys com os seus respetivos graus (entrada + saida) -> {no: graus(entrada + saida)}
ccs = self.all_clustering_coefs()#vai buscar um dicionario com {no: coeficiente}
degs_k = {}#{grau: no}
for k in degs.keys():#percorrer os no
if degs[k] in degs_k.keys(): #se o degs[k] (value- graus) for uma key em degs_k
degs_k[degs[k]].append(k)#adicionar a key de degs_k (grau) o value k (no)
else: degs_k[degs[k]] = [k]#caso contrario adicionar a key (degs[k]) o value k
ck = {}#{grau: media coeficiente}
for k in degs_k.keys():#para cada grau(key) em degs_k
tot = 0
for v in degs_k[k]:#para cada no(value) em degs_k -> buscar todos os nos com aquele grau
tot += ccs[v]# buscar o coeficiente de todos os nos com aquele grau
ck[k] = float(tot) / len(degs_k[k])#media do clustering, adicionar ao dicionario o {grau: media do coeficiente por grau}
return ck
def is_in_tuple_list(tl, val):
res = False
for (x,y) in tl:
if val == x: return True
return res
if __name__ == "__main__":
gr = MyGraph()
gr.add_vertex(1)
gr.add_vertex(2)
gr.add_vertex(3)
gr.add_vertex(4)
gr.add_edge(1,2)
gr.add_edge(2,3)
gr.add_edge(3,2)
gr.add_edge(3,4)
gr.add_edge(4,2)
gr.print_graph()
print(gr.size())
print (gr.get_successors(2))
print (gr.get_predecessors(2))
print("ADJACENTES:")
print (gr.get_adjacents(2))
print (gr.in_degree(2))
print (gr.out_degree(2))
print (gr.degree(2))
print(gr.all_degrees("inout"))
print(gr.all_degrees("in"))
print(gr.all_degrees("out"))
gr2 = MyGraph({1:[2,3,4], 2:[5,6],3:[6,8],4:[8],5:[7],6:[],7:[],8:[]})
print(gr2.reachable_bfs(1))
print(gr2.reachable_dfs(1))
print(gr2.distance(1,7))
print(gr2.shortest_path(1,7))
print(gr2.distance(1,8))
print(gr2.shortest_path(1,8))
print(gr2.distance(6,1))
print(gr2.shortest_path(6,1))
print(gr2.reachable_with_dist(1))
print(gr.has_cycle())
print(gr2.has_cycle())
print(gr.mean_degree())
print(gr.prob_degree())
print(gr.mean_distances())
print("CLUSTERING COEF:")
print (gr.clustering_coef(1))
print("CLUSTERING COEF:")
print (gr.clustering_coef(2)) |
def mean_clustering_coef(self):#nova função
'''Média sobre todos os nós''' | random_line_split |
queries.py | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))
except:
pass
finally:
cursor.close()
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def | (self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be > 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if not which in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving selection information for those turns"
cursor.execute("""
-- use this min eventid to select the first choice in each round
CREATE TEMP TABLE first_sels AS
SELECT
userid, playid, id as selid, eventid,
target_r, target_g, target_b,
specimen_r, specimen_g, specimen_b,
target_lab_l, target_lab_a, target_lab_b,
specimen_lab_l, specimen_lab_a, specimen_lab_b,
is_first_pick,
target_h,
target_s,
target_v,
specimen_h,
correct
%s
FROM
selectionevents
INNER JOIN sel_cts
ON selectionevents.eventid = sel_cts.min_event_id
WHERE userid <> %d
""" % (added, unknown_user_id)
)
# restrict to subset of users with at least min_turns
if min_turns:
cursor.execute(
"""
CREATE TEMP TABLE enough_plays as
SELECT userid FROM first_sels GROUP BY userid HAVING count(*) >= %s
""" % min_turns
)
cursor.execute('DELETE FROM first_sels WHERE NOT userid IN (SELECT userid FROM enough_plays)')
cursor.close()
# filter to type of selections requested
if which == 'all':
results = read_sql('SELECT * FROM first_sels', self.conn)
elif which == 'correct':
results = read_sql('SELECT * FROM first_sels WHERE correct', self.conn)
else:
results = read_sql('SELECT * FROM first_sels WHERE NOT correct', self.conn)
self.cache[key] = results.copy()
return results
def execute_adhoc(self, query, use_cache=True):
"""
Execute ad-hoc queries over the Specimen database.
:param query: String SQL query
"""
key = query
if use_cache and key in self.cache:
return self.cache[key].copy()
results = read_sql(query, self.conn)
self.cache[key] = results.copy()
return results
| create_reference_ids_table | identifier_name |
queries.py | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))
except:
pass
finally:
cursor.close()
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
|
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be > 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if not which in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving selection information for those turns"
cursor.execute("""
-- use this min eventid to select the first choice in each round
CREATE TEMP TABLE first_sels AS
SELECT
userid, playid, id as selid, eventid,
target_r, target_g, target_b,
specimen_r, specimen_g, specimen_b,
target_lab_l, target_lab_a, target_lab_b,
specimen_lab_l, specimen_lab_a, specimen_lab_b,
is_first_pick,
target_h,
target_s,
target_v,
specimen_h,
correct
%s
FROM
selectionevents
INNER JOIN sel_cts
ON selectionevents.eventid = sel_cts.min_event_id
WHERE userid <> %d
""" % (added, unknown_user_id)
)
# restrict to subset of users with at least min_turns
if min_turns:
cursor.execute(
"""
CREATE TEMP TABLE enough_plays as
SELECT userid FROM first_sels GROUP BY userid HAVING count(*) >= %s
""" % min_turns
)
cursor.execute('DELETE FROM first_sels WHERE NOT userid IN (SELECT userid FROM enough_plays)')
cursor.close()
# filter to type of selections requested
if which == 'all':
results = read_sql('SELECT * FROM first_sels', self.conn)
elif which == 'correct':
results = read_sql('SELECT * FROM first_sels WHERE correct', self.conn)
else:
results = read_sql('SELECT * FROM first_sels WHERE NOT correct', self.conn)
self.cache[key] = results.copy()
return results
def execute_adhoc(self, query, use_cache=True):
"""
Execute ad-hoc queries over the Specimen database.
:param query: String SQL query
"""
key = query
if use_cache and key in self.cache:
return self.cache[key].copy()
results = read_sql(query, self.conn)
self.cache[key] = results.copy()
return results
| self._drop_tables(['user_country_freqs', 'user_and_likely_country']) | conditional_block |
queries.py | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
|
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be > 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if not which in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving selection information for those turns"
cursor.execute("""
-- use this min eventid to select the first choice in each round
CREATE TEMP TABLE first_sels AS
SELECT
userid, playid, id as selid, eventid,
target_r, target_g, target_b,
specimen_r, specimen_g, specimen_b,
target_lab_l, target_lab_a, target_lab_b,
specimen_lab_l, specimen_lab_a, specimen_lab_b,
is_first_pick,
target_h,
target_s,
target_v,
specimen_h,
correct
%s
FROM
selectionevents
INNER JOIN sel_cts
ON selectionevents.eventid = sel_cts.min_event_id
WHERE userid <> %d
""" % (added, unknown_user_id)
)
# restrict to subset of users with at least min_turns
if min_turns:
cursor.execute(
"""
CREATE TEMP TABLE enough_plays as
SELECT userid FROM first_sels GROUP BY userid HAVING count(*) >= %s
""" % min_turns
)
cursor.execute('DELETE FROM first_sels WHERE NOT userid IN (SELECT userid FROM enough_plays)')
cursor.close()
# filter to type of selections requested
if which == 'all':
results = read_sql('SELECT * FROM first_sels', self.conn)
elif which == 'correct':
results = read_sql('SELECT * FROM first_sels WHERE correct', self.conn)
else:
results = read_sql('SELECT * FROM first_sels WHERE NOT correct', self.conn)
self.cache[key] = results.copy()
return results
def execute_adhoc(self, query, use_cache=True):
"""
Execute ad-hoc queries over the Specimen database.
:param query: String SQL query
"""
key = query
if use_cache and key in self.cache:
return self.cache[key].copy()
results = read_sql(query, self.conn)
self.cache[key] = results.copy()
return results
| """
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))
except:
pass
finally:
cursor.close() | identifier_body |
queries.py | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))
except:
pass
finally:
cursor.close()
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe | """
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be > 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if not which in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving selection information for those turns"
cursor.execute("""
-- use this min eventid to select the first choice in each round
CREATE TEMP TABLE first_sels AS
SELECT
userid, playid, id as selid, eventid,
target_r, target_g, target_b,
specimen_r, specimen_g, specimen_b,
target_lab_l, target_lab_a, target_lab_b,
specimen_lab_l, specimen_lab_a, specimen_lab_b,
is_first_pick,
target_h,
target_s,
target_v,
specimen_h,
correct
%s
FROM
selectionevents
INNER JOIN sel_cts
ON selectionevents.eventid = sel_cts.min_event_id
WHERE userid <> %d
""" % (added, unknown_user_id)
)
# restrict to subset of users with at least min_turns
if min_turns:
cursor.execute(
"""
CREATE TEMP TABLE enough_plays as
SELECT userid FROM first_sels GROUP BY userid HAVING count(*) >= %s
""" % min_turns
)
cursor.execute('DELETE FROM first_sels WHERE NOT userid IN (SELECT userid FROM enough_plays)')
cursor.close()
# filter to type of selections requested
if which == 'all':
results = read_sql('SELECT * FROM first_sels', self.conn)
elif which == 'correct':
results = read_sql('SELECT * FROM first_sels WHERE correct', self.conn)
else:
results = read_sql('SELECT * FROM first_sels WHERE NOT correct', self.conn)
self.cache[key] = results.copy()
return results
def execute_adhoc(self, query, use_cache=True):
"""
Execute ad-hoc queries over the Specimen database.
:param query: String SQL query
"""
key = query
if use_cache and key in self.cache:
return self.cache[key].copy()
results = read_sql(query, self.conn)
self.cache[key] = results.copy()
return results | random_line_split | |
retryable.go | package retryable
import (
"bytes"
"context"
"sync"
// crypto libraries included for go-digest
_ "crypto/sha256"
_ "crypto/sha512"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// Retryable is used to create requests with built in retry capabilities
type Retryable interface {
DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error)
BackoffClear()
BackoffUntil() time.Time
}
// Response is used to handle the result of a request
type Response interface {
io.ReadCloser
HTTPResponse() *http.Response
HTTPResponses() ([]*http.Response, error)
}
// Auth is used to process Www-Authenticate header and update request with Authorization header
type Auth interface {
AddScope(host, scope string) error
HandleResponse(*http.Response) error
UpdateRequest(*http.Request) error
}
// Opts injects options into NewRetryable
type Opts func(*retryable)
// OptsReq injects options into NewRequest
type OptsReq func(*request)
type retryable struct {
httpClient *http.Client
auth Auth
rootCAPool [][]byte
limit int
delayInit time.Duration
delayMax time.Duration
backoffNeeded bool
backoffCur int
backoffUntil time.Time
log *logrus.Logger
useragent string
mu sync.Mutex
}
var defaultDelayInit, _ = time.ParseDuration("1s")
var defaultDelayMax, _ = time.ParseDuration("30s")
var defaultLimit = 3
// NewRetryable returns a retryable interface
func NewRetryable(opts ...Opts) Retryable {
r := &retryable{
httpClient: &http.Client{},
limit: defaultLimit,
delayInit: defaultDelayInit,
delayMax: defaultDelayMax,
log: &logrus.Logger{Out: ioutil.Discard},
rootCAPool: [][]byte{},
}
for _, opt := range opts {
opt(r)
}
// inject certificates from user
if len(r.rootCAPool) > 0 {
var tlsc *tls.Config
if r.httpClient.Transport == nil {
r.httpClient.Transport = &http.Transport{}
}
t, ok := r.httpClient.Transport.(*http.Transport)
if ok {
if t.TLSClientConfig != nil {
tlsc = t.TLSClientConfig.Clone()
} else {
tlsc = &tls.Config{}
}
if tlsc.RootCAs == nil {
rootPool, err := x509.SystemCertPool()
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
}).Warn("Failed to load system cert pool")
}
tlsc.RootCAs = rootPool
}
for _, ca := range r.rootCAPool {
if ok := tlsc.RootCAs.AppendCertsFromPEM(ca); !ok {
r.log.WithFields(logrus.Fields{
"cert": string(ca),
}).Warn("Failed to load root certificate")
}
}
t.TLSClientConfig = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay initial time to wait between retries (increased with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
// BackoffUntil returns the time until the next backoff would complete
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq |
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
if httpErr != nil {
req.r.backoffSet(nil)
req.nextURL(true)
continue
}
// check the response
lastURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
statusCode := lastResp.StatusCode
removeURL := false
runBackoff := false
switch {
case 200 <= statusCode && statusCode < 300:
// all 200 status codes are successful
req.r.BackoffClear()
return nil
case statusCode == http.StatusUnauthorized:
err := req.handleAuth()
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
runBackoff = true
removeURL = true
}
case statusCode == http.StatusForbidden:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Forbidden")
runBackoff = true
removeURL = true
case statusCode == http.StatusNotFound:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Not found")
removeURL = true
case statusCode == http.StatusTooManyRequests:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Rate limit exceeded")
runBackoff = true
case statusCode == http.StatusRequestTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Timeout")
runBackoff = true
case statusCode == http.StatusGatewayTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Gateway timeout")
runBackoff = true
default:
body, _ := ioutil.ReadAll(lastResp.Body)
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
"Body": string(body),
}).Debug("Unexpected status")
runBackoff = true
removeURL = true
}
// remove url and trigger backoff if needed
if removeURL {
req.nextURL(removeURL)
}
if runBackoff {
req.r.backoffSet(lastResp)
}
}
}
func (req *request) handleAuth() error {
curURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
// for unauthorized requests, try to setup auth and retry without backoff
if req.r.auth == nil {
return ErrUnauthorized
}
err := req.r.auth.HandleResponse(lastResp)
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": curURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
return err
}
return nil
}
func (req *request) httpDo() error {
// build the http reqest for the current mirror url
httpReq, err := http.NewRequestWithContext(req.context, req.method, req.urls[req.curURL].String(), nil)
if err != nil {
return err
}
if req.getBody != nil {
httpReq.Body, err = req.getBody()
if err != nil {
return err
}
httpReq.GetBody = req.getBody
httpReq.ContentLength = req.contentLen
}
if len(req.header) > 0 {
httpReq.Header = req.header
}
if req.r.useragent != "" && httpReq.Header.Get("User-Agent") == "" {
httpReq.Header.Add("User-Agent", req.r.useragent)
}
if req.offset > 0 {
// TODO: implement range requests
return ErrNotImplemented
}
// include auth header
if req.r.auth != nil {
err = req.r.auth.UpdateRequest(httpReq)
if err != nil {
return err
}
}
req.log.WithFields(logrus.Fields{
"method": req.method,
"url": req.urls[req.curURL].String(),
"withAuth": (len(httpReq.Header.Values("Authorization")) > 0),
}).Debug("Sending request")
resp, err := req.r.httpClient.Do(httpReq)
if err != nil {
return err
}
req.responses = append(req.responses, resp)
// update reader
if req.digester == nil {
req.reader = resp.Body
} else {
req.reader = io.TeeReader(resp.Body, req.digester.Hash())
}
return nil
}
func (req *request) nextURL(removeLast bool) {
// next mirror based on whether remove flag is set
if removeLast {
req.urls = append(req.urls[:req.curURL], req.urls[req.curURL+1:]...)
if req.curURL >= len(req.urls) {
req.curURL = 0
}
} else {
if len(req.urls) > 0 {
req.curURL = (req.curURL + 1) % len(req.urls)
} else {
req.curURL = 0
}
}
}
func (req *request) Read(b []byte) (int, error) {
// if done, return eof
if req.done {
return 0, io.EOF
}
// if no responses, error
if len(req.responses) == 0 {
return 0, ErrNotFound
}
// fetch block
lastResp := req.responses[len(req.responses)-1]
i, err := req.reader.Read(b)
req.curRead += int64(i)
if err == io.EOF && lastResp.ContentLength > 0 {
if lastResp.Request.Method == "HEAD" {
// no body on a head request
req.done = true
} else if req.curRead < lastResp.ContentLength {
// TODO: handle early EOF or other failed connection with a retry
// req.offset += req.curRead
// err = req.retryLoop()
// if err != nil {
// return i, err
// }
req.log.WithFields(logrus.Fields{
"curRead": req.curRead,
"contentLen": lastResp.ContentLength,
}).Debug("EOF before reading all content, retrying")
return i, err
} else if req.curRead >= lastResp.ContentLength {
req.done = true
}
}
// if eof, verify digest, set error on mismatch
if req.digester != nil && err == io.EOF && req.digest != req.digester.Digest() {
req.log.WithFields(logrus.Fields{
"expected": req.digest,
"computed": req.digester.Digest(),
}).Warn("Digest mismatch")
req.done = true
return i, ErrDigestMismatch
}
// pass through read on the last response
return i, err
}
func (req *request) Close() error {
// if no responses, error
if req.reader == nil || len(req.responses) == 0 {
return ErrNotFound
}
// pass through close to last request, mark as done
lastResp := req.responses[len(req.responses)-1]
req.done = true
return lastResp.Body.Close()
}
func (req *request) HTTPResponse() *http.Response {
if len(req.responses) > 0 {
return req.responses[len(req.responses)-1]
}
return nil
}
func (req *request) HTTPResponses() ([]*http.Response, error) {
if len(req.responses) > 0 {
return req.responses, nil
}
return nil, ErrNotFound
}
| {
return func(req *request) {
req.getBody = getbody
}
} | identifier_body |
retryable.go | package retryable
import (
"bytes"
"context"
"sync"
// crypto libraries included for go-digest
_ "crypto/sha256"
_ "crypto/sha512"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// Retryable is used to create requests with built in retry capabilities
type Retryable interface {
DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error)
BackoffClear()
BackoffUntil() time.Time
}
// Response is used to handle the result of a request
type Response interface {
io.ReadCloser
HTTPResponse() *http.Response
HTTPResponses() ([]*http.Response, error)
}
// Auth is used to process Www-Authenticate header and update request with Authorization header
type Auth interface {
AddScope(host, scope string) error
HandleResponse(*http.Response) error
UpdateRequest(*http.Request) error
}
// Opts injects options into NewRetryable
type Opts func(*retryable)
// OptsReq injects options into NewRequest
type OptsReq func(*request)
type retryable struct {
httpClient *http.Client
auth Auth
rootCAPool [][]byte
limit int
delayInit time.Duration
delayMax time.Duration
backoffNeeded bool
backoffCur int
backoffUntil time.Time
log *logrus.Logger
useragent string
mu sync.Mutex
}
var defaultDelayInit, _ = time.ParseDuration("1s")
var defaultDelayMax, _ = time.ParseDuration("30s")
var defaultLimit = 3
// NewRetryable returns a retryable interface
func NewRetryable(opts ...Opts) Retryable {
r := &retryable{
httpClient: &http.Client{},
limit: defaultLimit,
delayInit: defaultDelayInit,
delayMax: defaultDelayMax,
log: &logrus.Logger{Out: ioutil.Discard},
rootCAPool: [][]byte{},
}
for _, opt := range opts {
opt(r)
}
// inject certificates from user
if len(r.rootCAPool) > 0 {
var tlsc *tls.Config
if r.httpClient.Transport == nil {
r.httpClient.Transport = &http.Transport{}
}
t, ok := r.httpClient.Transport.(*http.Transport)
if ok {
if t.TLSClientConfig != nil {
tlsc = t.TLSClientConfig.Clone()
} else {
tlsc = &tls.Config{}
}
if tlsc.RootCAs == nil {
rootPool, err := x509.SystemCertPool()
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
}).Warn("Failed to load system cert pool")
}
tlsc.RootCAs = rootPool
}
for _, ca := range r.rootCAPool {
if ok := tlsc.RootCAs.AppendCertsFromPEM(ca); !ok {
r.log.WithFields(logrus.Fields{
"cert": string(ca),
}).Warn("Failed to load root certificate")
}
}
t.TLSClientConfig = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay initial time to wait between retries (increased with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
// BackoffUntil returns the time until the next backoff would complete
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq {
return func(req *request) {
req.getBody = getbody
}
}
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) |
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
if httpErr != nil {
req.r.backoffSet(nil)
req.nextURL(true)
continue
}
// check the response
lastURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
statusCode := lastResp.StatusCode
removeURL := false
runBackoff := false
switch {
case 200 <= statusCode && statusCode < 300:
// all 200 status codes are successful
req.r.BackoffClear()
return nil
case statusCode == http.StatusUnauthorized:
err := req.handleAuth()
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
runBackoff = true
removeURL = true
}
case statusCode == http.StatusForbidden:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Forbidden")
runBackoff = true
removeURL = true
case statusCode == http.StatusNotFound:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Not found")
removeURL = true
case statusCode == http.StatusTooManyRequests:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Rate limit exceeded")
runBackoff = true
case statusCode == http.StatusRequestTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Timeout")
runBackoff = true
case statusCode == http.StatusGatewayTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Gateway timeout")
runBackoff = true
default:
body, _ := ioutil.ReadAll(lastResp.Body)
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
"Body": string(body),
}).Debug("Unexpected status")
runBackoff = true
removeURL = true
}
// remove url and trigger backoff if needed
if removeURL {
req.nextURL(removeURL)
}
if runBackoff {
req.r.backoffSet(lastResp)
}
}
}
func (req *request) handleAuth() error {
curURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
// for unauthorized requests, try to setup auth and retry without backoff
if req.r.auth == nil {
return ErrUnauthorized
}
err := req.r.auth.HandleResponse(lastResp)
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": curURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
return err
}
return nil
}
func (req *request) httpDo() error {
// build the http reqest for the current mirror url
httpReq, err := http.NewRequestWithContext(req.context, req.method, req.urls[req.curURL].String(), nil)
if err != nil {
return err
}
if req.getBody != nil {
httpReq.Body, err = req.getBody()
if err != nil {
return err
}
httpReq.GetBody = req.getBody
httpReq.ContentLength = req.contentLen
}
if len(req.header) > 0 {
httpReq.Header = req.header
}
if req.r.useragent != "" && httpReq.Header.Get("User-Agent") == "" {
httpReq.Header.Add("User-Agent", req.r.useragent)
}
if req.offset > 0 {
// TODO: implement range requests
return ErrNotImplemented
}
// include auth header
if req.r.auth != nil {
err = req.r.auth.UpdateRequest(httpReq)
if err != nil {
return err
}
}
req.log.WithFields(logrus.Fields{
"method": req.method,
"url": req.urls[req.curURL].String(),
"withAuth": (len(httpReq.Header.Values("Authorization")) > 0),
}).Debug("Sending request")
resp, err := req.r.httpClient.Do(httpReq)
if err != nil {
return err
}
req.responses = append(req.responses, resp)
// update reader
if req.digester == nil {
req.reader = resp.Body
} else {
req.reader = io.TeeReader(resp.Body, req.digester.Hash())
}
return nil
}
func (req *request) nextURL(removeLast bool) {
// next mirror based on whether remove flag is set
if removeLast {
req.urls = append(req.urls[:req.curURL], req.urls[req.curURL+1:]...)
if req.curURL >= len(req.urls) {
req.curURL = 0
}
} else {
if len(req.urls) > 0 {
req.curURL = (req.curURL + 1) % len(req.urls)
} else {
req.curURL = 0
}
}
}
func (req *request) Read(b []byte) (int, error) {
// if done, return eof
if req.done {
return 0, io.EOF
}
// if no responses, error
if len(req.responses) == 0 {
return 0, ErrNotFound
}
// fetch block
lastResp := req.responses[len(req.responses)-1]
i, err := req.reader.Read(b)
req.curRead += int64(i)
if err == io.EOF && lastResp.ContentLength > 0 {
if lastResp.Request.Method == "HEAD" {
// no body on a head request
req.done = true
} else if req.curRead < lastResp.ContentLength {
// TODO: handle early EOF or other failed connection with a retry
// req.offset += req.curRead
// err = req.retryLoop()
// if err != nil {
// return i, err
// }
req.log.WithFields(logrus.Fields{
"curRead": req.curRead,
"contentLen": lastResp.ContentLength,
}).Debug("EOF before reading all content, retrying")
return i, err
} else if req.curRead >= lastResp.ContentLength {
req.done = true
}
}
// if eof, verify digest, set error on mismatch
if req.digester != nil && err == io.EOF && req.digest != req.digester.Digest() {
req.log.WithFields(logrus.Fields{
"expected": req.digest,
"computed": req.digester.Digest(),
}).Warn("Digest mismatch")
req.done = true
return i, ErrDigestMismatch
}
// pass through read on the last response
return i, err
}
func (req *request) Close() error {
// if no responses, error
if req.reader == nil || len(req.responses) == 0 {
return ErrNotFound
}
// pass through close to last request, mark as done
lastResp := req.responses[len(req.responses)-1]
req.done = true
return lastResp.Body.Close()
}
func (req *request) HTTPResponse() *http.Response {
if len(req.responses) > 0 {
return req.responses[len(req.responses)-1]
}
return nil
}
func (req *request) HTTPResponses() ([]*http.Response, error) {
if len(req.responses) > 0 {
return req.responses, nil
}
return nil, ErrNotFound
}
| {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
} | conditional_block |
retryable.go | package retryable
import (
"bytes"
"context"
"sync"
// crypto libraries included for go-digest
_ "crypto/sha256"
_ "crypto/sha512"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// Retryable is used to create requests with built in retry capabilities
type Retryable interface {
DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error)
BackoffClear()
BackoffUntil() time.Time
}
// Response is used to handle the result of a request
type Response interface {
io.ReadCloser
HTTPResponse() *http.Response
HTTPResponses() ([]*http.Response, error)
}
// Auth is used to process Www-Authenticate header and update request with Authorization header
type Auth interface {
AddScope(host, scope string) error
HandleResponse(*http.Response) error
UpdateRequest(*http.Request) error
}
// Opts injects options into NewRetryable
type Opts func(*retryable)
// OptsReq injects options into NewRequest
type OptsReq func(*request)
type retryable struct {
httpClient *http.Client
auth Auth
rootCAPool [][]byte
limit int
delayInit time.Duration
delayMax time.Duration
backoffNeeded bool
backoffCur int
backoffUntil time.Time
log *logrus.Logger
useragent string
mu sync.Mutex
}
var defaultDelayInit, _ = time.ParseDuration("1s")
var defaultDelayMax, _ = time.ParseDuration("30s")
var defaultLimit = 3
// NewRetryable returns a retryable interface
func NewRetryable(opts ...Opts) Retryable {
r := &retryable{
httpClient: &http.Client{},
limit: defaultLimit,
delayInit: defaultDelayInit,
delayMax: defaultDelayMax,
log: &logrus.Logger{Out: ioutil.Discard},
rootCAPool: [][]byte{},
}
for _, opt := range opts {
opt(r)
}
// inject certificates from user
if len(r.rootCAPool) > 0 {
var tlsc *tls.Config
if r.httpClient.Transport == nil {
r.httpClient.Transport = &http.Transport{}
}
t, ok := r.httpClient.Transport.(*http.Transport)
if ok {
if t.TLSClientConfig != nil {
tlsc = t.TLSClientConfig.Clone()
} else {
tlsc = &tls.Config{}
}
if tlsc.RootCAs == nil {
rootPool, err := x509.SystemCertPool()
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
}).Warn("Failed to load system cert pool")
}
tlsc.RootCAs = rootPool
}
for _, ca := range r.rootCAPool {
if ok := tlsc.RootCAs.AppendCertsFromPEM(ca); !ok {
r.log.WithFields(logrus.Fields{
"cert": string(ca),
}).Warn("Failed to load root certificate")
}
}
t.TLSClientConfig = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay initial time to wait between retries (increased with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
// BackoffUntil returns the time until the next backoff would complete
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq {
return func(req *request) {
req.getBody = getbody
}
}
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
if httpErr != nil {
req.r.backoffSet(nil)
req.nextURL(true)
continue
}
// check the response
lastURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
statusCode := lastResp.StatusCode
removeURL := false
runBackoff := false
switch {
case 200 <= statusCode && statusCode < 300:
// all 200 status codes are successful
req.r.BackoffClear()
return nil
case statusCode == http.StatusUnauthorized:
err := req.handleAuth()
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
runBackoff = true
removeURL = true
}
case statusCode == http.StatusForbidden:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Forbidden")
runBackoff = true
removeURL = true
case statusCode == http.StatusNotFound:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Not found")
removeURL = true
case statusCode == http.StatusTooManyRequests:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Rate limit exceeded")
runBackoff = true
case statusCode == http.StatusRequestTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Timeout")
runBackoff = true
case statusCode == http.StatusGatewayTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Gateway timeout")
runBackoff = true
default:
body, _ := ioutil.ReadAll(lastResp.Body)
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
"Body": string(body),
}).Debug("Unexpected status")
runBackoff = true
removeURL = true
}
// remove url and trigger backoff if needed
if removeURL {
req.nextURL(removeURL)
}
if runBackoff {
req.r.backoffSet(lastResp)
}
}
}
func (req *request) handleAuth() error {
curURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
// for unauthorized requests, try to setup auth and retry without backoff
if req.r.auth == nil {
return ErrUnauthorized
}
err := req.r.auth.HandleResponse(lastResp)
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": curURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
return err
}
return nil
}
func (req *request) httpDo() error {
// build the http reqest for the current mirror url
httpReq, err := http.NewRequestWithContext(req.context, req.method, req.urls[req.curURL].String(), nil)
if err != nil {
return err
}
if req.getBody != nil {
httpReq.Body, err = req.getBody()
if err != nil {
return err
}
httpReq.GetBody = req.getBody
httpReq.ContentLength = req.contentLen
}
if len(req.header) > 0 {
httpReq.Header = req.header
}
if req.r.useragent != "" && httpReq.Header.Get("User-Agent") == "" {
httpReq.Header.Add("User-Agent", req.r.useragent)
}
if req.offset > 0 {
// TODO: implement range requests
return ErrNotImplemented
}
// include auth header
if req.r.auth != nil {
err = req.r.auth.UpdateRequest(httpReq)
if err != nil {
return err
}
}
req.log.WithFields(logrus.Fields{
"method": req.method,
"url": req.urls[req.curURL].String(),
"withAuth": (len(httpReq.Header.Values("Authorization")) > 0),
}).Debug("Sending request")
resp, err := req.r.httpClient.Do(httpReq)
if err != nil {
return err
}
req.responses = append(req.responses, resp)
// update reader
if req.digester == nil {
req.reader = resp.Body
} else {
req.reader = io.TeeReader(resp.Body, req.digester.Hash())
}
return nil
}
func (req *request) nextURL(removeLast bool) {
// next mirror based on whether remove flag is set
if removeLast {
req.urls = append(req.urls[:req.curURL], req.urls[req.curURL+1:]...)
if req.curURL >= len(req.urls) {
req.curURL = 0
}
} else {
if len(req.urls) > 0 {
req.curURL = (req.curURL + 1) % len(req.urls)
} else {
req.curURL = 0
}
}
}
func (req *request) Read(b []byte) (int, error) {
// if done, return eof
if req.done {
return 0, io.EOF
}
// if no responses, error
if len(req.responses) == 0 {
return 0, ErrNotFound
}
// fetch block
lastResp := req.responses[len(req.responses)-1]
i, err := req.reader.Read(b)
req.curRead += int64(i)
if err == io.EOF && lastResp.ContentLength > 0 {
if lastResp.Request.Method == "HEAD" {
// no body on a head request
req.done = true
} else if req.curRead < lastResp.ContentLength {
// TODO: handle early EOF or other failed connection with a retry
// req.offset += req.curRead
// err = req.retryLoop()
// if err != nil {
// return i, err
// }
req.log.WithFields(logrus.Fields{
"curRead": req.curRead,
"contentLen": lastResp.ContentLength,
}).Debug("EOF before reading all content, retrying")
return i, err
} else if req.curRead >= lastResp.ContentLength {
req.done = true
}
}
// if eof, verify digest, set error on mismatch
if req.digester != nil && err == io.EOF && req.digest != req.digester.Digest() {
req.log.WithFields(logrus.Fields{
"expected": req.digest,
"computed": req.digester.Digest(),
}).Warn("Digest mismatch")
req.done = true
return i, ErrDigestMismatch
}
// pass through read on the last response
return i, err
}
func (req *request) | () error {
// if no responses, error
if req.reader == nil || len(req.responses) == 0 {
return ErrNotFound
}
// pass through close to last request, mark as done
lastResp := req.responses[len(req.responses)-1]
req.done = true
return lastResp.Body.Close()
}
func (req *request) HTTPResponse() *http.Response {
if len(req.responses) > 0 {
return req.responses[len(req.responses)-1]
}
return nil
}
func (req *request) HTTPResponses() ([]*http.Response, error) {
if len(req.responses) > 0 {
return req.responses, nil
}
return nil, ErrNotFound
}
| Close | identifier_name |
retryable.go | package retryable
import (
"bytes"
"context"
"sync"
// crypto libraries included for go-digest
_ "crypto/sha256"
_ "crypto/sha512"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
// Retryable is used to create requests with built in retry capabilities
type Retryable interface {
DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error)
BackoffClear()
BackoffUntil() time.Time
}
// Response is used to handle the result of a request
type Response interface {
io.ReadCloser
HTTPResponse() *http.Response
HTTPResponses() ([]*http.Response, error)
}
// Auth is used to process Www-Authenticate header and update request with Authorization header
type Auth interface {
AddScope(host, scope string) error
HandleResponse(*http.Response) error
UpdateRequest(*http.Request) error
}
// Opts injects options into NewRetryable
type Opts func(*retryable)
// OptsReq injects options into NewRequest
type OptsReq func(*request)
type retryable struct {
httpClient *http.Client
auth Auth
rootCAPool [][]byte
limit int
delayInit time.Duration
delayMax time.Duration
backoffNeeded bool
backoffCur int
backoffUntil time.Time
log *logrus.Logger
useragent string
mu sync.Mutex
}
var defaultDelayInit, _ = time.ParseDuration("1s")
var defaultDelayMax, _ = time.ParseDuration("30s")
var defaultLimit = 3
// NewRetryable returns a retryable interface
func NewRetryable(opts ...Opts) Retryable {
r := &retryable{
httpClient: &http.Client{},
limit: defaultLimit,
delayInit: defaultDelayInit,
delayMax: defaultDelayMax,
log: &logrus.Logger{Out: ioutil.Discard},
rootCAPool: [][]byte{},
}
for _, opt := range opts {
opt(r)
}
// inject certificates from user
if len(r.rootCAPool) > 0 {
var tlsc *tls.Config
if r.httpClient.Transport == nil {
r.httpClient.Transport = &http.Transport{}
}
t, ok := r.httpClient.Transport.(*http.Transport)
if ok {
if t.TLSClientConfig != nil {
tlsc = t.TLSClientConfig.Clone()
} else {
tlsc = &tls.Config{}
}
if tlsc.RootCAs == nil {
rootPool, err := x509.SystemCertPool()
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
}).Warn("Failed to load system cert pool")
}
tlsc.RootCAs = rootPool
}
for _, ca := range r.rootCAPool {
if ok := tlsc.RootCAs.AppendCertsFromPEM(ca); !ok {
r.log.WithFields(logrus.Fields{ | }).Warn("Failed to load root certificate")
}
}
t.TLSClientConfig = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay initial time to wait between retries (increased with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
// BackoffUntil returns the time until the next backoff would complete
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq {
return func(req *request) {
req.getBody = getbody
}
}
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
if httpErr != nil {
req.r.backoffSet(nil)
req.nextURL(true)
continue
}
// check the response
lastURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
statusCode := lastResp.StatusCode
removeURL := false
runBackoff := false
switch {
case 200 <= statusCode && statusCode < 300:
// all 200 status codes are successful
req.r.BackoffClear()
return nil
case statusCode == http.StatusUnauthorized:
err := req.handleAuth()
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
runBackoff = true
removeURL = true
}
case statusCode == http.StatusForbidden:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Forbidden")
runBackoff = true
removeURL = true
case statusCode == http.StatusNotFound:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Not found")
removeURL = true
case statusCode == http.StatusTooManyRequests:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Rate limit exceeded")
runBackoff = true
case statusCode == http.StatusRequestTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Timeout")
runBackoff = true
case statusCode == http.StatusGatewayTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Gateway timeout")
runBackoff = true
default:
body, _ := ioutil.ReadAll(lastResp.Body)
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
"Body": string(body),
}).Debug("Unexpected status")
runBackoff = true
removeURL = true
}
// remove url and trigger backoff if needed
if removeURL {
req.nextURL(removeURL)
}
if runBackoff {
req.r.backoffSet(lastResp)
}
}
}
// handleAuth reacts to a 401 from the most recent response by handing the
// challenge to the configured authenticator, so the request can be retried
// without backoff. Returns ErrUnauthorized when no authenticator is set.
func (req *request) handleAuth() error {
	curURL := req.urls[req.curURL]
	resp := req.responses[len(req.responses)-1]
	// without an authenticator there is nothing to do with a 401
	if req.r.auth == nil {
		return ErrUnauthorized
	}
	if err := req.r.auth.HandleResponse(resp); err != nil {
		req.log.WithFields(logrus.Fields{
			"URL": curURL.String(),
			"Err": err,
		}).Warn("Failed to handle auth request")
		return err
	}
	return nil
}
// httpDo builds and sends a single HTTP request against the current mirror
// URL, appends the response to req.responses, and wires req.reader to the
// response body (teeing into the digester when a digest check is requested).
func (req *request) httpDo() error {
	// build the http request for the current mirror url
	httpReq, err := http.NewRequestWithContext(req.context, req.method, req.urls[req.curURL].String(), nil)
	if err != nil {
		return err
	}
	if req.getBody != nil {
		httpReq.Body, err = req.getBody()
		if err != nil {
			return err
		}
		// GetBody lets the transport replay the body on redirects/retries
		httpReq.GetBody = req.getBody
		httpReq.ContentLength = req.contentLen
	}
	if len(req.header) > 0 {
		// Clone the caller-provided header map: the Add/UpdateRequest calls
		// below mutate httpReq.Header, and assigning req.header directly
		// (as before) leaked per-attempt headers (User-Agent, Authorization)
		// back into the shared map across retries and mirrors.
		httpReq.Header = req.header.Clone()
	}
	if req.r.useragent != "" && httpReq.Header.Get("User-Agent") == "" {
		httpReq.Header.Add("User-Agent", req.r.useragent)
	}
	if req.offset > 0 {
		// TODO: implement range requests
		return ErrNotImplemented
	}
	// include auth header
	if req.r.auth != nil {
		err = req.r.auth.UpdateRequest(httpReq)
		if err != nil {
			return err
		}
	}
	req.log.WithFields(logrus.Fields{
		"method":   req.method,
		"url":      req.urls[req.curURL].String(),
		"withAuth": (len(httpReq.Header.Values("Authorization")) > 0),
	}).Debug("Sending request")
	resp, err := req.r.httpClient.Do(httpReq)
	if err != nil {
		return err
	}
	req.responses = append(req.responses, resp)
	// update reader: verify content by teeing reads into the digest hash
	if req.digester == nil {
		req.reader = resp.Body
	} else {
		req.reader = io.TeeReader(resp.Body, req.digester.Hash())
	}
	return nil
}
// nextURL advances to the next mirror URL. When removeLast is true the
// current URL is dropped from the candidate list first; otherwise the index
// simply rotates through the remaining mirrors.
func (req *request) nextURL(removeLast bool) {
	if removeLast {
		// drop the failing url in place, preserving the order of the rest
		req.urls = append(req.urls[:req.curURL], req.urls[req.curURL+1:]...)
		if req.curURL >= len(req.urls) {
			req.curURL = 0
		}
		return
	}
	// rotate to the following mirror, wrapping back to the start
	if n := len(req.urls); n > 0 {
		req.curURL = (req.curURL + 1) % n
	} else {
		req.curURL = 0
	}
}
// Read implements io.Reader over the body of the most recent response.
// It tracks bytes consumed (req.curRead) so a short body — EOF before
// Content-Length bytes — can be detected, and verifies the content digest
// once the stream is exhausted.
func (req *request) Read(b []byte) (int, error) {
	// if done, return eof
	if req.done {
		return 0, io.EOF
	}
	// if no responses, error
	if len(req.responses) == 0 {
		return 0, ErrNotFound
	}
	// fetch block
	lastResp := req.responses[len(req.responses)-1]
	i, err := req.reader.Read(b)
	req.curRead += int64(i)
	// only run the short-read checks when the server declared a length
	if err == io.EOF && lastResp.ContentLength > 0 {
		if lastResp.Request.Method == "HEAD" {
			// no body on a head request
			req.done = true
		} else if req.curRead < lastResp.ContentLength {
			// TODO: handle early EOF or other failed connection with a retry
			// req.offset += req.curRead
			// err = req.retryLoop()
			// if err != nil {
			// return i, err
			// }
			// NOTE(review): the log message says "retrying" but the retry
			// path above is commented out — the short read's EOF is passed
			// straight through to the caller.
			req.log.WithFields(logrus.Fields{
				"curRead":    req.curRead,
				"contentLen": lastResp.ContentLength,
			}).Debug("EOF before reading all content, retrying")
			return i, err
		} else if req.curRead >= lastResp.ContentLength {
			// full body consumed; subsequent calls return io.EOF immediately
			req.done = true
		}
	}
	// if eof, verify digest, set error on mismatch
	if req.digester != nil && err == io.EOF && req.digest != req.digester.Digest() {
		req.log.WithFields(logrus.Fields{
			"expected": req.digest,
			"computed": req.digester.Digest(),
		}).Warn("Digest mismatch")
		req.done = true
		return i, ErrDigestMismatch
	}
	// pass through read on the last response
	return i, err
}
// Close marks the request as done and closes the body of the most recent
// response. Returns ErrNotFound when no response was ever received.
func (req *request) Close() error {
	if req.reader == nil || len(req.responses) == 0 {
		// nothing was ever opened
		return ErrNotFound
	}
	req.done = true
	return req.responses[len(req.responses)-1].Body.Close()
}
// HTTPResponse returns the most recent response, or nil when no request
// has completed yet.
func (req *request) HTTPResponse() *http.Response {
	n := len(req.responses)
	if n == 0 {
		return nil
	}
	return req.responses[n-1]
}
func (req *request) HTTPResponses() ([]*http.Response, error) {
if len(req.responses) > 0 {
return req.responses, nil
}
return nil, ErrNotFound
} | "cert": string(ca), | random_line_split |
Telecom_customer_churn.py | #!/usr/bin/env python
# coding: utf-8
# # Analysis on Telcom Customer Churn
# Context:
# -----------
#
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs."
#
# Content:
# ------------
#
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
# ------------------------------------------------------------
#
# Customers who left within the last month – the column is called Churn
#
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
#
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
#
# Demographic info about customers – gender, age range, and if they have partners and dependents
# # Exploratory Data Analysis
# In[1]:
#import library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#reading the file
df=pd.read_csv("telecom_customer_churn.csv")
#pd.options.display.max_columns = 30
#pd.options.display.max_rows = None
#
# Context
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs." [IBM Sample Data Sets]
#
# Content
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
#
# Customers who left within the last month – the column is called Churn
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
# Demographic info about customers – gender, age range, and if they have partners and dependents
#
# In[3]:
#first 5 rows are obtained
df.head()
# In[4]:
#shape gives the no:of rows and columns
df.shape
# In[5]:
#gives the general info about the dataset like the non-null counts and the datatype of each column
df.info()
# In[6]:
df.dtypes
#total charges is seen to be object.
# In[7]:
#shows the list of columns
df.columns
# In[8]:
#to find whether there is any null values for each column
df.isnull().sum()
#no null values
# In[9]:
#df.TotalCharges = df.TotalCharges.astype('float64')
#gives errors as this column is having spaces instead of null values .So need to convert to 0's and type cast accordingly
# In[10]:
#showing the rows having spaces for the column total charges
df[df["TotalCharges"]==" "][["TotalCharges"]]
# In[11]:
# Replacing the missing values with 0 and converting the object column to a float value.
df['TotalCharges'] = df['TotalCharges'].replace(" ", 0).astype('float32')
# In[12]:
#checking whether the column is converted into float type
df.info()
# In[13]:
#summary of numerical columns
df.describe()
# In[14]:
#summary of categorical columns
df.describe(include = object)
# In[15]:
#The columns having "No internet service" are replaced to "No"
#The column "MultipleLines" having "No phone service" are replaced to "No"
replace_cols=["OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies"]
for i in replace_cols:
df[i] = df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (people with high tenure or low tenure) are getting churned.We need to know if recently joined customers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance for churning.We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charge, total charge should increase according to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more tending to churn
#
# 5)PaymentMethod - to check whether payment method is creating any transaction issues which is causing churning.Which among them is causing issue
#
# 6)PaperlessBilling - to see how many customers using paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that company is providing like phone,internet,multiple lines, etc.check which particular service or which all services is giving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a churning tendency more and high monthly charges leads to churning
"""
def kde(feature):
plt.figure(figsize=(9,4) | hlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning.This means that new joining customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univarate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increases, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges is showing high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge when compared to others and so is the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning is there for people having phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges as both are proportional and taking anyone of this would be only required
# **Summary:As monthly charges and total charges increases, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that there are less senior citizen people(1142 senior citizens) joined when compared to youngsters
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here among the senior citizens, around 42% have churned, whereas youngsters have churned less (among youngsters, only 24% churned)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted electronic check more when compared to other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who have churned
#whereas the average is less for people who haven't churned around 65 dollars
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparitively very less.ie, around 16%.Among these 16%, around 48% are churned .
# When checked their monthly charges, it looks comparitively higher for people who churned among the senior citizens
# Also, the payment method used was electronic check.We need to further analyse whether electronic check is creating an issue for them causing churning**
# # All other services including:
# **OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies**
#
# In[41]:
replace_cols=['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']
#To display these columns together with subplots using for loop
x=0
y=0
num=0
plt.tight_layout()
fig, axes =plt.subplots(2,3,figsize=(15,8))
for x in range(2):
for y in range(3):
sns.countplot(x=replace_cols[num],data=df,hue = "Churn",ax=axes[x,y],palette="coolwarm")
num +=1
#for people who have opted the services, the churning rate(shown in pink) is not higher with respect to the
#churning rate of people who haven't opted in an overall view
# In[42]:
#univariate crosstab
df_service=df[['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']]
#def crosstab(df_service):
for idx, variable in enumerate(df_service.columns):
#univariate crosstab
other_services_Crosstab = pd.crosstab(df_service[variable],columns = df.Churn)
row_tot = other_services_Crosstab.sum(axis=1)
other_services_Crosstab_prop = round(other_services_Crosstab.div(row_tot,axis=0)*100)
print("Showing the percentage of churn happened for people opting {}".format(variable))
print("-----------------------------------------------------------------------------------")
print(other_services_Crosstab_prop)
print("-----------------------------------------------------------------------------------")
#churn yes and service yes is checked here
#for people who have opted the services, the churning rate is not higher as expected
# **Summary:**
#
# **As the churning rate is less for yes category of the services, we consider this to be not effecting churning**
# # Contract
# In[43]:
#people with monthly contract showing high churning rate.
sns.countplot(x="Contract",data=df,hue="Churn",palette="coolwarm")
# In[44]:
#monthly charges is high for for all kind of contracts
sns.boxplot(x="Contract",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **summary:Month to month contract gives churning.The reason might be they can leave the service if they are not interested.So people thinking that in mind might have chosen monthly contract.Churning is very low for one year and two year contract even if the monthly charges are high**
# # PaperlessBilling
# In[45]:
#around 60% of the customers have chosen paperless billing
round(pd.crosstab(df.PaperlessBilling,columns="frequency")/len(df)*100,2)
# In[46]:
sns.countplot(x="PaperlessBilling",data=df,palette="coolwarm")
# In[47]:
"""
Churn rate is more for people opted paperless billing.
"""
sns.countplot(x="PaperlessBilling",hue="Churn",data=df,palette="coolwarm")
print("Among the people who chose paperless billing",round(1400/(1400+2771)*100),"% are churned")
print("Among the people who didn't choose paperless billing",round(469/(469+2403)*100),"% are only churned")
df.groupby(["PaperlessBilling","Churn"])["PaperlessBilling"].agg(["count"])
# **Summary:Paperless billing is very common for customers and people opted this are having high churning**
# # Payment Method
# In[48]:
#Checking the count of different payment methods
print(df["PaymentMethod"].value_counts())
sns.countplot(x="PaymentMethod",data=df,palette="coolwarm")
plt.tight_layout()
plt.xticks(rotation=45)
#electronic check is more used
# In[49]:
PaymentMethod_Crosstab = pd.crosstab(df.PaymentMethod, columns=df.Churn)
row_tot = PaymentMethod_Crosstab.sum(axis=1)
PaymentMethod_Crosstab_prop = round(PaymentMethod_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print(PaymentMethod_Crosstab_prop)
PaymentMethod_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#Electronic check payment method is giving more churning.
#among the people who have opted electronic check, around 45% are churned
# **Summary:Electronic check is causing more churning of customers even if it is more preferred.This might be because of loading issues due to traffic or there might be other complaints.**
# In[50]:
#Using a heatmap to find the correlation between the numerical columns
tc=df.corr()
sns.heatmap(tc,xticklabels=True,annot=True,cmap="coolwarm")
#here we see tenure and monthly charges show the highest churning
# # Summary on EDA
# Variables causing Churning:
#
# 1)tenure
#
# 2)Monthly Charges
#
# 3)Total Charges
#
# 4)Internet Service-Fibre optic service
#
# 5)Senior Citizen due to monthly charges and payment method which is electronic check
#
# 6)payment method-electronic check
#
# 7)Contract-Month to Month
#
# 8)PaperlessBilling
# # Converting Categorical columns to numerical columns
# In[51]:
#Creating dummy values for the categorical columns for visualization and modelling purpose
dummy=["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","PaperlessBilling","PaymentMethod","Churn"]
df=pd.get_dummies(df, prefix=dummy, columns=dummy,drop_first=True)#to get either one of the columns.ie, yes or no
df.head()
# In[52]:
contract = pd.get_dummies(df["Contract"])
contract[["Month-to-month"]].head()
# In[53]:
df = pd.concat([df,contract],axis=1)
# In[54]:
df.rename(columns = {'Month-to-month' : 'Month_to_month', 'PaymentMethod_Electronic check' : 'PaymentMethod_Electronic_check','InternetService_Fiber optic':'InternetService_Fiber_optic'}, inplace = True)
# In[55]:
#Splitting dataset into X using the important features
X=df[["TotalCharges","tenure","SeniorCitizen","MonthlyCharges",'Month_to_month']]
#target column(dependent coumn) is taken i.e churn/not churn
y = df["Churn_Yes"]
# In[56]:
X
# In[57]:
X.dtypes
# # Train Test Split
# In[58]:
#Splitting the whole dataset into 2:Train and Test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
# # Feature Scaling
# In[59]:
"""
Using a standard scaler to scale all columns into a small range which makes prediction more easier and
decreses the chances of the model getting biased.
"""
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train) #Fitting and transforming the X train dataset
X_test=sc.transform(X_test)#Transforming the test dataset
# # Modelling Using Classification Models and Checking Performance
# In[60]:
#Classification Model-1
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression()
#fitting the train values( both X and y )
classifier.fit(X_train,y_train)
#Predicting the y valus for the X_test
y_pred=classifier.predict(X_test)
#Performance check using Confusion matrix,accuracy score and Classification Report
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
print(confusion_matrix(y_pred,y_test))
print("\n") #correct prediction= 1425+250, wrong prediction=135+303
print(classification_report(y_pred,y_test))
print("\n") # to get the values of precision,recall,f1-score and support
print("accuracy score of Logistic Regression : ",round(accuracy_score(y_pred,y_test),2))
#Accuracy=( no:of correct predictions)/(Total no:of predictions)
# In[61]:
import pickle
# In[62]:
pkl_file = open("classifiertelcom.pkl", "wb") # create a binary file, open it and then only we can save the model
# wb -> opening file in writing mode
pickle.dump(classifier, pkl_file) # saving of the trained model into pkl file
# close the file
pkl_file.close()
# In[63]:
get_ipython().system('ls')
# In[64]:
classifier.predict([[30,10,0,40,1]])
# In[ ]:
| )
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("Mont | identifier_body |
Telecom_customer_churn.py | #!/usr/bin/env python
# coding: utf-8
# # Analysis on Telcom Customer Churn
# Context:
# -----------
#
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs."
#
# Content:
# ------------
#
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
# ------------------------------------------------------------
#
# Customers who left within the last month – the column is called Churn
#
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
#
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
#
# Demographic info about customers – gender, age range, and if they have partners and dependents
# # Exploratory Data Analysis
# In[1]:
#import library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#reading the file
df=pd.read_csv("telecom_customer_churn.csv")
#pd.options.display.max_columns = 30
#pd.options.display.max_rows = None
#
# Context
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs." [IBM Sample Data Sets]
#
# Content
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
#
# Customers who left within the last month – the column is called Churn
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
# Demographic info about customers – gender, age range, and if they have partners and dependents
#
# In[3]:
#first 5 rows are obtained
df.head()
# In[4]:
#shape gives the no:of rows and columns
df.shape
# In[5]:
#gives the general info about the dataset like the non-null counts and the datatype of each column
df.info()
# In[6]:
df.dtypes
#total charges is seen to be object.
# In[7]:
#shows the list of columns
df.columns
# In[8]:
#to find whether there is any null values for each column
df.isnull().sum()
#no null values
# In[9]:
#df.TotalCharges = df.TotalCharges.astype('float64')
#gives errors as this column is having spaces instead of null values .So need to convert to 0's and type cast accordingly
# In[10]:
#showing the rows having spaces for the column total charges
df[df["TotalCharges"]==" "][["TotalCharges"]]
# In[11]:
# Replacing the missing values with 0 and converting the object column to a float value.
df['TotalCharges'] = df['TotalCharges'].replace(" ", 0).astype('float32')
# In[12]:
#checking whether the column is converted into float type
df.info()
# In[13]:
#summary of numerical columns
df.describe()
# In[14]:
#summary of categorical columns
df.describe(include = object)
# In[15]:
#The columns having "No internet service" are replaced to "No"
#The column "MultipleLines" having "No phone service" are replaced to "No"
replace_cols=["OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies"]
for i in replace_cols:
df[i] = df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (people with high tenure or low tenure) are getting churned.We need to know if recently joining cstomers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance for churning.We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charge, total charge should increase accoding to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more tending to churn
#
# 5)PaymentMethod - to check whether payment method is creating any transaction issues which is causing churning.Which among them is causing issue
#
# 6)PaperlessBilling - to see how many customers using paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that company is providing like phone,internet,multiple lines, etc.check which particular service or which all services is giving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a churning tendency more and high monthly charges leads to churning
"""
def kde(feature):
plt.fi | e(figsize=(9,4))
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("MonthlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysisanalysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning.This means that new joining customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univarate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increases, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges is showing high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge when compared to others and so is the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning is there for people having phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges as both are proportional and taking anyone of this would be only required
# **Summary:As monthly charges and total charges increases, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that there are less senior citizen people(1142 senior citizens) joined when compared to youngsters
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here among the senior citzens,around 42% has churned where as youngsters have churned less(among youngsters, 24% only churned)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted electronic check more when compared to other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who have churned
#whereas the average is less for people who haven't churned around 65 dollars
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparitively very less.ie, around 16%.Among these 16%, around 48% are churned .
# When checked their monthly charges, it looks comparitively higher for people who churned among the senior citizens
# Also, the payment method used was electronic check.We need to further analyse whether electronic check is creating an issue for them causing churning**
# # All other services including:
# **OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies**
#
# In[41]:
replace_cols=['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']
#To display these columns together with subplots using for loop
x=0
y=0
num=0
plt.tight_layout()
fig, axes =plt.subplots(2,3,figsize=(15,8))
for x in range(2):
for y in range(3):
sns.countplot(x=replace_cols[num],data=df,hue = "Churn",ax=axes[x,y],palette="coolwarm")
num +=1
#for people who have opted the services, the churning rate(shown in pink) is not higher with respect to the
#churning rate of people who haven't opted in an overall view
# In[42]:
#univariate crosstab
df_service=df[['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']]
#def crosstab(df_service):
for idx, variable in enumerate(df_service.columns):
#univariate crosstab
other_services_Crosstab = pd.crosstab(df_service[variable],columns = df.Churn)
row_tot = other_services_Crosstab.sum(axis=1)
other_services_Crosstab_prop = round(other_services_Crosstab.div(row_tot,axis=0)*100)
print("Showing the percentage of churn happened for people opting {}".format(variable))
print("-----------------------------------------------------------------------------------")
print(other_services_Crosstab_prop)
print("-----------------------------------------------------------------------------------")
#churn yes and service yes is checked here
#for people who have opted the services, the churning rate is not higher as expected
# **Summary:**
#
# **As the churning rate is less for yes category of the services, we consider this to be not effecting churning**
# # Contract
# In[43]:
#people with monthly contract showing high churning rate.
sns.countplot(x="Contract",data=df,hue="Churn",palette="coolwarm")
# In[44]:
#monthly charges is high for for all kind of contracts
sns.boxplot(x="Contract",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **summary:Month to month contract gives churning.The reason might be they can leave the service if they are not interested.So people thinking that in mind might have chosen monthly contract.Churning is very low for one year and two year contract even if the monthly charges are high**
# # PaperlessBilling
# In[45]:
#around 60% of the customers have chosen paperless billing
round(pd.crosstab(df.PaperlessBilling,columns="frequency")/len(df)*100,2)
# In[46]:
sns.countplot(x="PaperlessBilling",data=df,palette="coolwarm")
# In[47]:
"""
Churn rate is more for people opted paperless billing.
"""
sns.countplot(x="PaperlessBilling",hue="Churn",data=df,palette="coolwarm")
print("Among the people who chose paperless billing",round(1400/(1400+2771)*100),"% are churned")
print("Among the people who didn't choose paperless billing",round(469/(469+2403)*100),"% are only churned")
df.groupby(["PaperlessBilling","Churn"])["PaperlessBilling"].agg(["count"])
# **Summary:Paperless billing is very common for customers and people opted this are having high churning**
# # Payment Method
# In[48]:
#Checking the count of different payment methods
print(df["PaymentMethod"].value_counts())
sns.countplot(x="PaymentMethod",data=df,palette="coolwarm")
plt.tight_layout()
plt.xticks(rotation=45)
#electronic check is more used
# In[49]:
PaymentMethod_Crosstab = pd.crosstab(df.PaymentMethod, columns=df.Churn)
row_tot = PaymentMethod_Crosstab.sum(axis=1)
PaymentMethod_Crosstab_prop = round(PaymentMethod_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print(PaymentMethod_Crosstab_prop)
PaymentMethod_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#Electronic check payment method is giving more churning.
#among the people who have opted electronic check, around 45% are churned
# **Summary:Electronic check is causing more churning of customers even if it is more preferred.This might be because of loading issues due to traffic or there might be other complaints.**
# In[50]:
#Using a heatmap to find the correlation between the numerical columns
tc=df.corr()
sns.heatmap(tc,xticklabels=True,annot=True,cmap="coolwarm")
#here we see tenure and monthly charges show the highest churning
# # Summary on EDA
# Variables causing Churning:
#
# 1)tenure
#
# 2)Monthly Charges
#
# 3)Total Charges
#
# 4)Internet Service-Fibre optic service
#
# 5)Senior Citizen due to monthly charges and payment method which is electronic check
#
# 6)payment method-electronic check
#
# 7)Contract-Month to Month
#
# 8)PaperlessBilling
# # Converting Categorical columns to numerical columns
# In[51]:
#Creating dummy values for the categorical columns for visualization and modelling purpose
dummy=["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","PaperlessBilling","PaymentMethod","Churn"]
df=pd.get_dummies(df, prefix=dummy, columns=dummy,drop_first=True)#to get either one of the columns.ie, yes or no
df.head()
# In[52]:
contract = pd.get_dummies(df["Contract"])
contract[["Month-to-month"]].head()
# In[53]:
df = pd.concat([df,contract],axis=1)
# In[54]:
df.rename(columns = {'Month-to-month' : 'Month_to_month', 'PaymentMethod_Electronic check' : 'PaymentMethod_Electronic_check','InternetService_Fiber optic':'InternetService_Fiber_optic'}, inplace = True)
# In[55]:
#Splitting dataset into X using the important features
X=df[["TotalCharges","tenure","SeniorCitizen","MonthlyCharges",'Month_to_month']]
#target column(dependent coumn) is taken i.e churn/not churn
y = df["Churn_Yes"]
# In[56]:
X
# In[57]:
X.dtypes
# # Train Test Split
# In[58]:
#Splitting the whole dataset into 2:Train and Test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
# # Feature Scaling
# In[59]:
"""
Using a standard scaler to scale all columns into a small range which makes prediction more easier and
decreses the chances of the model getting biased.
"""
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train) #Fitting and transforming the X train dataset
X_test=sc.transform(X_test)#Transforming the test dataset
# # Modelling Using Classification Models and Checking Performance
# In[60]:
#Classification Model-1
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression()
#fitting the train values( both X and y )
classifier.fit(X_train,y_train)
#Predicting the y valus for the X_test
y_pred=classifier.predict(X_test)
#Performance check using Confusion matrix,accuracy score and Classification Report
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
print(confusion_matrix(y_pred,y_test))
print("\n") #correct prediction= 1425+250, wrong prediction=135+303
print(classification_report(y_pred,y_test))
print("\n") # to get the values of precision,recall,f1-score and support
print("accuracy score of Logistic Regression : ",round(accuracy_score(y_pred,y_test),2))
#Accuracy=( no:of correct predictions)/(Total no:of predictions)
# In[61]:
import pickle
# In[62]:
pkl_file = open("classifiertelcom.pkl", "wb") # create a binary file, open it and then only we can save the model
# wb -> opening file in writing mode
pickle.dump(classifier, pkl_file) # saving of the trained model into pkl file
# close the file
pkl_file.close()
# In[63]:
get_ipython().system('ls')
# In[64]:
classifier.predict([[30,10,0,40,1]])
# In[ ]:
| gur | identifier_name |
Telecom_customer_churn.py | #!/usr/bin/env python
# coding: utf-8
# # Analysis on Telcom Customer Churn
# Context:
# -----------
#
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs."
#
# Content:
# ------------
#
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
# ------------------------------------------------------------
#
# Customers who left within the last month – the column is called Churn
#
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
#
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
#
# Demographic info about customers – gender, age range, and if they have partners and dependents
# # Exploratory Data Analysis
# In[1]:
#import library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#reading the file
df=pd.read_csv("telecom_customer_churn.csv")
#pd.options.display.max_columns = 30
#pd.options.display.max_rows = None
#
# Context
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs." [IBM Sample Data Sets]
#
# Content
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
#
# Customers who left within the last month – the column is called Churn
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
# Demographic info about customers – gender, age range, and if they have partners and dependents
#
# In[3]:
#first 5 rows are obtained
df.head()
# In[4]:
#shape gives the no:of rows and columns
df.shape
# In[5]:
#gives the general info about the dataset like the non-null counts and the datatype of each column
df.info()
# In[6]:
df.dtypes
#total charges is seen to be object.
# In[7]:
#shows the list of columns
df.columns
# In[8]:
#to find whether there is any null values for each column
df.isnull().sum()
#no null values
# In[9]:
#df.TotalCharges = df.TotalCharges.astype('float64')
#gives errors as this column is having spaces instead of null values .So need to convert to 0's and type cast accordingly
# In[10]:
#showing the rows having spaces for the column total charges
df[df["TotalCharges"]==" "][["TotalCharges"]]
# In[11]:
# Replacing the mssing value into 0 and converting the object into float value.
df['TotalCharges'] = df['TotalCharges'].replace(" ", 0).astype('float32')
# In[12]:
#checking whether the column is converted into float type
df.info()
# In[13]:
#summary of numerical columns
df.describe()
# In[14]:
#summary of categorical columns
df.describe(include = object)
# In[15]:
#The columns having "No internet service" are replaced to "No"
#The column "MultipleLines" having "No phone service" are replaced to "No"
replace_cols=["OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies"]
for i in replace_cols:
df[i] = df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (people with high tenure or low tenure) are getting churned.We need to know if recently joining cstomers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance for churning.We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charge, total charge should increase accoding to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more tending to churn
#
# 5)PaymentMethod - to check whether payment method is creating any transaction issues which is causing churning.Which among them is causing issue
#
# 6)PaperlessBilling - to see how many customers using paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that company is providing like phone,internet,multiple lines, etc.check which particular service or which all services is giving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a churning tendency more and high monthly charges leads to churning
"""
def kde(feature):
plt.figure(figsize=(9,4))
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("MonthlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysisanalysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning.This means that new joining customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univarate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increases, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges is showing high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge when compared to others and so is the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning is there for people having phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges as both are proportional and taking anyone of this would be only required
# **Summary:As monthly charges and total charges increases, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that there are less senior citizen people(1142 senior citizens) joined when compared to youngsters
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here among the senior citzens,around 42% has churned where as youngsters have churned less(among youngsters, 24% only churned)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted electronic check more when compared to other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who have churned
#whereas the average is less for people who haven't churned around 65 dollars
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparitively very less.ie, around 16%.Among these 16%, around 48% are churned .
# When checked their monthly charges, it looks comparitively higher for people who churned among the senior citizens
# Also, the payment method used was electronic check.We need to further analyse whether electronic check is creating an issue for them causing churning**
# # All other services including:
# **OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies**
#
# In[41]:
replace_cols=['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']
#To display these columns together with subplots using for loop
x=0
y=0
num=0
plt.tight_layout()
fig, axes =plt.subplots(2,3,figsize=(15,8))
for x in range(2):
for y in range(3):
| ted the services, the churning rate(shown in pink) is not higher with respect to the
#churning rate of people who haven't opted in an overall view
# In[42]:
#univariate crosstab
df_service=df[['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']]
#def crosstab(df_service):
for idx, variable in enumerate(df_service.columns):
#univariate crosstab
other_services_Crosstab = pd.crosstab(df_service[variable],columns = df.Churn)
row_tot = other_services_Crosstab.sum(axis=1)
other_services_Crosstab_prop = round(other_services_Crosstab.div(row_tot,axis=0)*100)
print("Showing the percentage of churn happened for people opting {}".format(variable))
print("-----------------------------------------------------------------------------------")
print(other_services_Crosstab_prop)
print("-----------------------------------------------------------------------------------")
#churn yes and service yes is checked here
#for people who have opted the services, the churning rate is not higher as expected
# **Summary:**
#
# **As the churning rate is less for yes category of the services, we consider this to be not effecting churning**
# # Contract
# In[43]:
#people with monthly contract showing high churning rate.
sns.countplot(x="Contract",data=df,hue="Churn",palette="coolwarm")
# In[44]:
#monthly charges is high for for all kind of contracts
sns.boxplot(x="Contract",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **summary:Month to month contract gives churning.The reason might be they can leave the service if they are not interested.So people thinking that in mind might have chosen monthly contract.Churning is very low for one year and two year contract even if the monthly charges are high**
# # PaperlessBilling
# In[45]:
#around 60% of the customers have chosen paperless billing
round(pd.crosstab(df.PaperlessBilling,columns="frequency")/len(df)*100,2)
# In[46]:
sns.countplot(x="PaperlessBilling",data=df,palette="coolwarm")
# In[47]:
"""
Churn rate is more for people opted paperless billing.
"""
sns.countplot(x="PaperlessBilling",hue="Churn",data=df,palette="coolwarm")
print("Among the people who chose paperless billing",round(1400/(1400+2771)*100),"% are churned")
print("Among the people who didn't choose paperless billing",round(469/(469+2403)*100),"% are only churned")
df.groupby(["PaperlessBilling","Churn"])["PaperlessBilling"].agg(["count"])
# **Summary:Paperless billing is very common for customers and people opted this are having high churning**
# # Payment Method
# In[48]:
#Checking the count of different payment methods
print(df["PaymentMethod"].value_counts())
sns.countplot(x="PaymentMethod",data=df,palette="coolwarm")
plt.tight_layout()
plt.xticks(rotation=45)
#electronic check is more used
# In[49]:
PaymentMethod_Crosstab = pd.crosstab(df.PaymentMethod, columns=df.Churn)
row_tot = PaymentMethod_Crosstab.sum(axis=1)
PaymentMethod_Crosstab_prop = round(PaymentMethod_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print(PaymentMethod_Crosstab_prop)
PaymentMethod_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#Electronic check payment method is giving more churning.
#among the people who have opted electronic check, around 45% are churned
# **Summary:Electronic check is causing more churning of customers even if it is more preferred.This might be because of loading issues due to traffic or there might be other complaints.**
# In[50]:
#Using a heatmap to find the correlation between the numerical columns
tc=df.corr()
sns.heatmap(tc,xticklabels=True,annot=True,cmap="coolwarm")
#here we see tenure and monthly charges show the highest churning
# # Summary on EDA
# Variables causing Churning:
#
# 1)tenure
#
# 2)Monthly Charges
#
# 3)Total Charges
#
# 4)Internet Service-Fibre optic service
#
# 5)Senior Citizen due to monthly charges and payment method which is electronic check
#
# 6)payment method-electronic check
#
# 7)Contract-Month to Month
#
# 8)PaperlessBilling
# # Converting Categorical columns to numerical columns
# In[51]:
#Creating dummy values for the categorical columns for visualization and modelling purpose
dummy=["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","PaperlessBilling","PaymentMethod","Churn"]
df=pd.get_dummies(df, prefix=dummy, columns=dummy,drop_first=True)#to get either one of the columns.ie, yes or no
df.head()
# In[52]:
contract = pd.get_dummies(df["Contract"])
contract[["Month-to-month"]].head()
# In[53]:
df = pd.concat([df,contract],axis=1)
# In[54]:
df.rename(columns = {'Month-to-month' : 'Month_to_month', 'PaymentMethod_Electronic check' : 'PaymentMethod_Electronic_check','InternetService_Fiber optic':'InternetService_Fiber_optic'}, inplace = True)
# In[55]:
#Splitting dataset into X using the important features
X=df[["TotalCharges","tenure","SeniorCitizen","MonthlyCharges",'Month_to_month']]
#target column(dependent coumn) is taken i.e churn/not churn
y = df["Churn_Yes"]
# In[56]:
X
# In[57]:
X.dtypes
# # Train Test Split
# In[58]:
#Splitting the whole dataset into 2:Train and Test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
# # Feature Scaling
# In[59]:
"""
Using a standard scaler to scale all columns into a small range which makes prediction more easier and
decreses the chances of the model getting biased.
"""
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train) #Fitting and transforming the X train dataset
X_test=sc.transform(X_test)#Transforming the test dataset
# # Modelling Using Classification Models and Checking Performance
# In[60]:
#Classification Model-1
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression()
#fitting the train values( both X and y )
classifier.fit(X_train,y_train)
#Predicting the y valus for the X_test
y_pred=classifier.predict(X_test)
#Performance check using Confusion matrix,accuracy score and Classification Report
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
print(confusion_matrix(y_pred,y_test))
print("\n") #correct prediction= 1425+250, wrong prediction=135+303
print(classification_report(y_pred,y_test))
print("\n") # to get the values of precision,recall,f1-score and support
print("accuracy score of Logistic Regression : ",round(accuracy_score(y_pred,y_test),2))
#Accuracy=( no:of correct predictions)/(Total no:of predictions)
# In[61]:
import pickle
# In[62]:
pkl_file = open("classifiertelcom.pkl", "wb") # create a binary file, open it and then only we can save the model
# wb -> opening file in writing mode
pickle.dump(classifier, pkl_file) # saving of the trained model into pkl file
# close the file
pkl_file.close()
# In[63]:
get_ipython().system('ls')
# In[64]:
classifier.predict([[30,10,0,40,1]])
# In[ ]:
| sns.countplot(x=replace_cols[num],data=df,hue = "Churn",ax=axes[x,y],palette="coolwarm")
num +=1
#for people who have op | conditional_block |
Telecom_customer_churn.py | #!/usr/bin/env python
# coding: utf-8
# # Analysis on Telcom Customer Churn
# Context:
# -----------
#
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs."
#
# Content:
# ------------
#
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
# ------------------------------------------------------------
#
# Customers who left within the last month – the column is called Churn
#
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
#
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
#
# Demographic info about customers – gender, age range, and if they have partners and dependents
# # Exploratory Data Analysis
# In[1]:
#import library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#reading the file
df=pd.read_csv("telecom_customer_churn.csv")
#pd.options.display.max_columns = 30
#pd.options.display.max_rows = None
#
# Context
# "Predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs." [IBM Sample Data Sets]
#
# Content
# Each row represents a customer, each column contains customer’s attributes described on the column Metadata.
#
# The data set includes information about:
#
# Customers who left within the last month – the column is called Churn
# Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
# Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
# Demographic info about customers – gender, age range, and if they have partners and dependents
#
# In[3]:
#first 5 rows are obtained
df.head()
# In[4]:
#shape gives the no:of rows and columns
df.shape
# In[5]:
#gives the general info about the dataset like the non-null counts and the datatype of each column
df.info()
# In[6]:
df.dtypes
#total charges is seen to be object.
# In[7]:
#shows the list of columns
df.columns
# In[8]:
#to find whether there is any null values for each column
df.isnull().sum()
#no null values
# In[9]:
#df.TotalCharges = df.TotalCharges.astype('float64')
#gives errors as this column is having spaces instead of null values .So need to convert to 0's and type cast accordingly
# In[10]:
#showing the rows having spaces for the column total charges
df[df["TotalCharges"]==" "][["TotalCharges"]]
# In[11]:
# Replacing the mssing value into 0 and converting the object into float value.
df['TotalCharges'] = df['TotalCharges'].replace(" ", 0).astype('float32')
# In[12]:
#checking whether the column is converted into float type
df.info()
# In[13]:
#summary of numerical columns
df.describe()
# In[14]:
#summary of categorical columns
df.describe(include = object)
# In[15]:
#The columns having "No internet service" are replaced to "No"
#The column "MultipleLines" having "No phone service" are replaced to "No"
replace_cols=["OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies"]
for i in replace_cols:
df[i] = df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (people with high tenure or low tenure) are getting churned.We need to know if recently joining cstomers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance for churning.We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charge, total charge should increase accoding to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more tending to churn
#
# 5)PaymentMethod - to check whether payment method is creating any transaction issues which is causing churning.Which among them is causing issue
#
# 6)PaperlessBilling - to see how many customers using paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that company is providing like phone,internet,multiple lines, etc.check which particular service or which all services is giving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a churning tendency more and high monthly charges leads to churning
"""
def kde(feature):
plt.figure(figsize=(9,4))
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("MonthlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysisanalysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning.This means that new joining customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univarate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increases, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges is showing high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge when compared to others and so is the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning is there for people having phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges as both are proportional and taking anyone of this would be only required
# **Summary:As monthly charges and total charges increases, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that there are less senior citizen people(1142 senior citizens) joined when compared to youngsters
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here among the senior citzens,around 42% has churned where as youngsters have churned less(among youngsters, 24% only churned)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop) |
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted electronic check more when compared to other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who have churned
#whereas the average is less for people who haven't churned around 65 dollars
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparitively very less.ie, around 16%.Among these 16%, around 48% are churned .
# When checked their monthly charges, it looks comparitively higher for people who churned among the senior citizens
# Also, the payment method used was electronic check.We need to further analyse whether electronic check is creating an issue for them causing churning**
# # All other services including:
# **OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies**
#
# In[41]:
replace_cols=['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']
#To display these columns together with subplots using for loop
x=0
y=0
num=0
plt.tight_layout()
fig, axes =plt.subplots(2,3,figsize=(15,8))
for x in range(2):
for y in range(3):
sns.countplot(x=replace_cols[num],data=df,hue = "Churn",ax=axes[x,y],palette="coolwarm")
num +=1
#for people who have opted the services, the churning rate(shown in pink) is not higher with respect to the
#churning rate of people who haven't opted in an overall view
# In[42]:
#univariate crosstab
df_service=df[['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']]
#def crosstab(df_service):
for idx, variable in enumerate(df_service.columns):
#univariate crosstab
other_services_Crosstab = pd.crosstab(df_service[variable],columns = df.Churn)
row_tot = other_services_Crosstab.sum(axis=1)
other_services_Crosstab_prop = round(other_services_Crosstab.div(row_tot,axis=0)*100)
print("Showing the percentage of churn happened for people opting {}".format(variable))
print("-----------------------------------------------------------------------------------")
print(other_services_Crosstab_prop)
print("-----------------------------------------------------------------------------------")
#churn yes and service yes is checked here
#for people who have opted the services, the churning rate is not higher as expected
# **Summary:**
#
# **As the churning rate is less for yes category of the services, we consider this to be not effecting churning**
# # Contract
# In[43]:
#people with monthly contract showing high churning rate.
sns.countplot(x="Contract",data=df,hue="Churn",palette="coolwarm")
# In[44]:
#monthly charges is high for for all kind of contracts
sns.boxplot(x="Contract",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **summary:Month to month contract gives churning.The reason might be they can leave the service if they are not interested.So people thinking that in mind might have chosen monthly contract.Churning is very low for one year and two year contract even if the monthly charges are high**
# # PaperlessBilling
# In[45]:
#around 60% of the customers have chosen paperless billing
round(pd.crosstab(df.PaperlessBilling,columns="frequency")/len(df)*100,2)
# In[46]:
sns.countplot(x="PaperlessBilling",data=df,palette="coolwarm")
# In[47]:
"""
Churn rate is more for people opted paperless billing.
"""
sns.countplot(x="PaperlessBilling",hue="Churn",data=df,palette="coolwarm")
print("Among the people who chose paperless billing",round(1400/(1400+2771)*100),"% are churned")
print("Among the people who didn't choose paperless billing",round(469/(469+2403)*100),"% are only churned")
df.groupby(["PaperlessBilling","Churn"])["PaperlessBilling"].agg(["count"])
# **Summary:Paperless billing is very common for customers and people opted this are having high churning**
# # Payment Method
# In[48]:
#Checking the count of different payment methods
print(df["PaymentMethod"].value_counts())
sns.countplot(x="PaymentMethod",data=df,palette="coolwarm")
plt.tight_layout()
plt.xticks(rotation=45)
#electronic check is more used
# In[49]:
PaymentMethod_Crosstab = pd.crosstab(df.PaymentMethod, columns=df.Churn)
row_tot = PaymentMethod_Crosstab.sum(axis=1)
PaymentMethod_Crosstab_prop = round(PaymentMethod_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print(PaymentMethod_Crosstab_prop)
PaymentMethod_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#Electronic check payment method is giving more churning.
#among the people who have opted electronic check, around 45% are churned
# **Summary:Electronic check is causing more churning of customers even if it is more preferred.This might be because of loading issues due to traffic or there might be other complaints.**
# In[50]:
#Using a heatmap to find the correlation between the numerical columns
tc=df.corr()
sns.heatmap(tc,xticklabels=True,annot=True,cmap="coolwarm")
#here we see tenure and monthly charges show the highest churning
# # Summary on EDA
# Variables causing Churning:
#
# 1)tenure
#
# 2)Monthly Charges
#
# 3)Total Charges
#
# 4)Internet Service-Fibre optic service
#
# 5)Senior Citizen due to monthly charges and payment method which is electronic check
#
# 6)payment method-electronic check
#
# 7)Contract-Month to Month
#
# 8)PaperlessBilling
# # Converting Categorical columns to numerical columns
# In[51]:
#Creating dummy values for the categorical columns for visualization and modelling purpose
dummy=["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","PaperlessBilling","PaymentMethod","Churn"]
df=pd.get_dummies(df, prefix=dummy, columns=dummy,drop_first=True)#to get either one of the columns.ie, yes or no
df.head()
# In[52]:
contract = pd.get_dummies(df["Contract"])
contract[["Month-to-month"]].head()
# In[53]:
df = pd.concat([df,contract],axis=1)
# In[54]:
df.rename(columns = {'Month-to-month' : 'Month_to_month', 'PaymentMethod_Electronic check' : 'PaymentMethod_Electronic_check','InternetService_Fiber optic':'InternetService_Fiber_optic'}, inplace = True)
# In[55]:
#Splitting dataset into X using the important features
X=df[["TotalCharges","tenure","SeniorCitizen","MonthlyCharges",'Month_to_month']]
#target column(dependent coumn) is taken i.e churn/not churn
y = df["Churn_Yes"]
# In[56]:
X
# In[57]:
X.dtypes
# # Train Test Split
# In[58]:
#Splitting the whole dataset into 2:Train and Test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
# # Feature Scaling
# In[59]:
"""
Using a standard scaler to scale all columns into a small range which makes prediction more easier and
decreses the chances of the model getting biased.
"""
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train) #Fitting and transforming the X train dataset
X_test=sc.transform(X_test)#Transforming the test dataset
# # Modelling Using Classification Models and Checking Performance
# In[60]:
#Classification Model-1
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression()
#fitting the train values( both X and y )
classifier.fit(X_train,y_train)
#Predicting the y valus for the X_test
y_pred=classifier.predict(X_test)
#Performance check using Confusion matrix,accuracy score and Classification Report
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
print(confusion_matrix(y_pred,y_test))
print("\n") #correct prediction= 1425+250, wrong prediction=135+303
print(classification_report(y_pred,y_test))
print("\n") # to get the values of precision,recall,f1-score and support
print("accuracy score of Logistic Regression : ",round(accuracy_score(y_pred,y_test),2))
#Accuracy=( no:of correct predictions)/(Total no:of predictions)
# In[61]:
import pickle
# In[62]:
pkl_file = open("classifiertelcom.pkl", "wb") # create a binary file, open it and then only we can save the model
# wb -> opening file in writing mode
pickle.dump(classifier, pkl_file) # saving of the trained model into pkl file
# close the file
pkl_file.close()
# In[63]:
get_ipython().system('ls')
# In[64]:
classifier.predict([[30,10,0,40,1]])
# In[ ]: | SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
| random_line_split |
export_saved_model.py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Scripts to export saved models.
The anchor implementation is based on:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/dataloader/anchor.py
"""
import enum
import logging
from typing import Dict, Any
from absl import app
from absl import flags
from flax.training import checkpoints
import gin
import jax
import numpy as np
import tensorflow as tf
from utils import saved_model_lib
_INPUT_DIR = flags.DEFINE_string(
'input_dir', None, 'Path under which to load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
[height, width] of the input image size.The image_size should be divided
by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary contains unpack labels in different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
|
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
rng, _ = jax.random.split(rng)
pred_model_fn = model_fn(mode=ExecutionMode.EVAL)
model_outputs = pred_model_fn.apply(
variables,
**features,
mutable=False,
_do_remap=True,
rngs=generate_rng_dict(rng))
return model_outputs
return predict_step_v2
def get_fvlm_predict_fn(serving_batch_size):
"""Get predict function and input signatures for F-VLM model."""
num_classes, text_dim = load_fvlm_gin_configs()
predict_step = create_predict_step()
anchor_boxes, image_info = generate_anchors_info()
def predict_fn(params, input_dict):
input_dict['labels'] = {
'detection': {
'anchor_boxes': anchor_boxes,
'image_info': image_info,
}
}
output = predict_step(params, input_dict, jax.random.PRNGKey(0))
output = output['detection']
output.pop('rpn_score_outputs')
output.pop('rpn_box_outputs')
output.pop('class_outputs')
output.pop('box_outputs')
return output
input_signatures = {
'image':
tf.TensorSpec(
shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,
3),
dtype=tf.bfloat16,
name='image'),
'text':
tf.TensorSpec(
shape=(serving_batch_size, num_classes, text_dim),
dtype=tf.float32,
name='queries'),
}
return predict_fn, input_signatures
def restore_checkpoint(restore_dir):
"""Restore checkpoint into variables.
Args:
restore_dir: A string of path to restore checkpoint from.
Returns:
variables: A nested dictionary of restore parameters and model states.
"""
restored_train_state = checkpoints.restore_checkpoint(restore_dir, None)
variables = {'params': restored_train_state['optimizer']['target']}
model_state = restored_train_state['model_state']
variables.update(model_state)
return variables
def main(argv):
del argv
logging.info('Creating predict_fn.')
predict_fn, input_signatures = get_fvlm_predict_fn(_SERVING_BATCH_SIZE.value)
logging.info('Loading model for %s.', _INPUT_DIR.value)
predict_params = restore_checkpoint(_INPUT_DIR.value)
logging.info('Saving model for %s.', _OUTPUT_DIR.value)
saved_model_lib.convert_and_save_model(
predict_fn,
predict_params,
_OUTPUT_DIR.value,
input_signatures=[input_signatures],
polymorphic_shapes=None,
)
if __name__ == '__main__':
app.run(main)
flags.mark_flag_as_required('input_dir')
flags.mark_flag_as_required('output_dir')
| return self.unpack_labels(self.boxes, is_box=True) | identifier_body |
export_saved_model.py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Scripts to export saved models.
The anchor implementation is based on:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/dataloader/anchor.py
"""
import enum
import logging
from typing import Dict, Any
from absl import app
from absl import flags
from flax.training import checkpoints
import gin
import jax
import numpy as np
import tensorflow as tf
from utils import saved_model_lib
_INPUT_DIR = flags.DEFINE_string(
'input_dir', None, 'Path under which to load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
[height, width] of the input image size.The image_size should be divided
by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def | (self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary contains unpack labels in different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
rng, _ = jax.random.split(rng)
pred_model_fn = model_fn(mode=ExecutionMode.EVAL)
model_outputs = pred_model_fn.apply(
variables,
**features,
mutable=False,
_do_remap=True,
rngs=generate_rng_dict(rng))
return model_outputs
return predict_step_v2
def get_fvlm_predict_fn(serving_batch_size):
"""Get predict function and input signatures for F-VLM model."""
num_classes, text_dim = load_fvlm_gin_configs()
predict_step = create_predict_step()
anchor_boxes, image_info = generate_anchors_info()
def predict_fn(params, input_dict):
input_dict['labels'] = {
'detection': {
'anchor_boxes': anchor_boxes,
'image_info': image_info,
}
}
output = predict_step(params, input_dict, jax.random.PRNGKey(0))
output = output['detection']
output.pop('rpn_score_outputs')
output.pop('rpn_box_outputs')
output.pop('class_outputs')
output.pop('box_outputs')
return output
input_signatures = {
'image':
tf.TensorSpec(
shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,
3),
dtype=tf.bfloat16,
name='image'),
'text':
tf.TensorSpec(
shape=(serving_batch_size, num_classes, text_dim),
dtype=tf.float32,
name='queries'),
}
return predict_fn, input_signatures
def restore_checkpoint(restore_dir):
"""Restore checkpoint into variables.
Args:
restore_dir: A string of path to restore checkpoint from.
Returns:
variables: A nested dictionary of restore parameters and model states.
"""
restored_train_state = checkpoints.restore_checkpoint(restore_dir, None)
variables = {'params': restored_train_state['optimizer']['target']}
model_state = restored_train_state['model_state']
variables.update(model_state)
return variables
def main(argv):
del argv
logging.info('Creating predict_fn.')
predict_fn, input_signatures = get_fvlm_predict_fn(_SERVING_BATCH_SIZE.value)
logging.info('Loading model for %s.', _INPUT_DIR.value)
predict_params = restore_checkpoint(_INPUT_DIR.value)
logging.info('Saving model for %s.', _OUTPUT_DIR.value)
saved_model_lib.convert_and_save_model(
predict_fn,
predict_params,
_OUTPUT_DIR.value,
input_signatures=[input_signatures],
polymorphic_shapes=None,
)
if __name__ == '__main__':
app.run(main)
flags.mark_flag_as_required('input_dir')
flags.mark_flag_as_required('output_dir')
| unpack_labels | identifier_name |
export_saved_model.py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Scripts to export saved models.
The anchor implementation is based on:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/dataloader/anchor.py
"""
import enum
import logging
from typing import Dict, Any
from absl import app
from absl import flags
from flax.training import checkpoints
import gin
import jax
import numpy as np
import tensorflow as tf
from utils import saved_model_lib
_INPUT_DIR = flags.DEFINE_string(
'input_dir', None, 'Path under which to load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
[height, width] of the input image size.The image_size should be divided
by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
|
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary contains unpack labels in different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
rng, _ = jax.random.split(rng)
pred_model_fn = model_fn(mode=ExecutionMode.EVAL)
model_outputs = pred_model_fn.apply(
variables,
**features,
mutable=False,
_do_remap=True,
rngs=generate_rng_dict(rng))
return model_outputs
return predict_step_v2
def get_fvlm_predict_fn(serving_batch_size):
"""Get predict function and input signatures for F-VLM model."""
num_classes, text_dim = load_fvlm_gin_configs()
predict_step = create_predict_step()
anchor_boxes, image_info = generate_anchors_info()
def predict_fn(params, input_dict):
input_dict['labels'] = {
'detection': {
'anchor_boxes': anchor_boxes,
'image_info': image_info,
}
}
output = predict_step(params, input_dict, jax.random.PRNGKey(0))
output = output['detection']
output.pop('rpn_score_outputs')
output.pop('rpn_box_outputs')
output.pop('class_outputs')
output.pop('box_outputs')
return output
input_signatures = {
'image':
tf.TensorSpec(
shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,
3),
dtype=tf.bfloat16,
name='image'),
'text':
tf.TensorSpec(
shape=(serving_batch_size, num_classes, text_dim),
dtype=tf.float32,
name='queries'),
}
return predict_fn, input_signatures
def restore_checkpoint(restore_dir):
"""Restore checkpoint into variables.
Args:
restore_dir: A string of path to restore checkpoint from.
Returns:
variables: A nested dictionary of restore parameters and model states.
"""
restored_train_state = checkpoints.restore_checkpoint(restore_dir, None)
variables = {'params': restored_train_state['optimizer']['target']}
model_state = restored_train_state['model_state']
variables.update(model_state)
return variables
def main(argv):
del argv
logging.info('Creating predict_fn.')
predict_fn, input_signatures = get_fvlm_predict_fn(_SERVING_BATCH_SIZE.value)
logging.info('Loading model for %s.', _INPUT_DIR.value)
predict_params = restore_checkpoint(_INPUT_DIR.value)
logging.info('Saving model for %s.', _OUTPUT_DIR.value)
saved_model_lib.convert_and_save_model(
predict_fn,
predict_params,
_OUTPUT_DIR.value,
input_signatures=[input_signatures],
polymorphic_shapes=None,
)
if __name__ == '__main__':
app.run(main)
flags.mark_flag_as_required('input_dir')
flags.mark_flag_as_required('output_dir')
| boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l) | conditional_block |
export_saved_model.py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Scripts to export saved models.
The anchor implementation is based on:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/dataloader/anchor.py
"""
import enum
import logging
from typing import Dict, Any
from absl import app
from absl import flags
from flax.training import checkpoints
import gin
import jax
import numpy as np
import tensorflow as tf
from utils import saved_model_lib
_INPUT_DIR = flags.DEFINE_string(
'input_dir', None, 'Path under which to load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
[height, width] of the input image size.The image_size should be divided
by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary contains unpack labels in different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
rng, _ = jax.random.split(rng)
pred_model_fn = model_fn(mode=ExecutionMode.EVAL)
model_outputs = pred_model_fn.apply(
variables,
**features,
mutable=False,
_do_remap=True,
rngs=generate_rng_dict(rng))
return model_outputs
return predict_step_v2
def get_fvlm_predict_fn(serving_batch_size):
"""Get predict function and input signatures for F-VLM model."""
num_classes, text_dim = load_fvlm_gin_configs()
predict_step = create_predict_step()
anchor_boxes, image_info = generate_anchors_info()
def predict_fn(params, input_dict):
input_dict['labels'] = {
'detection': {
'anchor_boxes': anchor_boxes,
'image_info': image_info,
}
}
output = predict_step(params, input_dict, jax.random.PRNGKey(0))
output = output['detection']
output.pop('rpn_score_outputs')
output.pop('rpn_box_outputs')
output.pop('class_outputs')
output.pop('box_outputs')
return output
input_signatures = { | 3),
dtype=tf.bfloat16,
name='image'),
'text':
tf.TensorSpec(
shape=(serving_batch_size, num_classes, text_dim),
dtype=tf.float32,
name='queries'),
}
return predict_fn, input_signatures
def restore_checkpoint(restore_dir):
"""Restore checkpoint into variables.
Args:
restore_dir: A string of path to restore checkpoint from.
Returns:
variables: A nested dictionary of restore parameters and model states.
"""
restored_train_state = checkpoints.restore_checkpoint(restore_dir, None)
variables = {'params': restored_train_state['optimizer']['target']}
model_state = restored_train_state['model_state']
variables.update(model_state)
return variables
def main(argv):
del argv
logging.info('Creating predict_fn.')
predict_fn, input_signatures = get_fvlm_predict_fn(_SERVING_BATCH_SIZE.value)
logging.info('Loading model for %s.', _INPUT_DIR.value)
predict_params = restore_checkpoint(_INPUT_DIR.value)
logging.info('Saving model for %s.', _OUTPUT_DIR.value)
saved_model_lib.convert_and_save_model(
predict_fn,
predict_params,
_OUTPUT_DIR.value,
input_signatures=[input_signatures],
polymorphic_shapes=None,
)
if __name__ == '__main__':
app.run(main)
flags.mark_flag_as_required('input_dir')
flags.mark_flag_as_required('output_dir') | 'image':
tf.TensorSpec(
shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value, | random_line_split |
old_main.rs | use std::rc::Rc;
use std::slice::Iter;
use glutin_window::GlutinWindow;
use graphics::{Context, line_from_to, triangulation};
use graphics::character::CharacterCache;
use graphics::color::{BLACK, WHITE};
use graphics::Transformed;
use graphics::math::{identity, Matrix2d, Vec2d, scale, translate};
use graphics::types::Color;
use ncollide2d::bounding_volume::AABB;
use ncollide2d::partitioning::BVH;
use ncollide2d::query::visitors::PointInterferencesCollector;
use opengl_graphics::{GlGraphics, OpenGL};
use piston::event_loop::{Events, EventSettings};
use piston::input::*;
use piston::window::{AdvancedWindow, Size, Window, WindowSettings};
use rand::{Rng, thread_rng};
use rand::seq::SliceRandom;
use vecmath::*;
#[allow(unused)]
use crate::dungeon::{GridDungeon, GridDungeonGenerator, RandomRoomGridDungeonGenerator, RoomId, RoomSize};
use crate::dungeon::{BasicGridDungeon, BiasGraph, GeneratorStep, GeneratorStrategy, GridDungeonGraph, Sliding, SlidingIter};
use crate::geom::{Corners, Point, point_eq, Rect};
use crate::graph::{DungeonFloorGraph, FloorNode, FloorNodeId};
use crate::tile::{CompassDirection, TileAddress, WallType};
use graphics::ellipse::circle;
mod dungeon;
mod geom;
mod graph;
mod tile;
const CURSOR_COLOR: Color = BLACK;
const PATH_COLOR: Color = [0.0, 0.0, 0.0, 0.75];
const BACKGROUND_COLOR: Color = [0.4, 0.4, 0.4, 1.0];
const WALKABLE_ROOM_COLOR: Color = [0.1, 0.6, 1.0, 0.5];
const WALKABLE_DOOR_COLOR: Color = [0.1, 0.9, 0.2, 0.5];
const POINTED_ROOM_COLOR: Color = [1.0, 0.0, 0.0, 0.9];
const DEBUG_ROOM_LOW: Color = [0.1, 0.6, 1.0, 1.0];
const DEBUG_ROOM_HIGH: Color = WHITE;
const DEBUG_WALL_COLOR: Color = BLACK;
const WEIGHT_ROOM_LOW: Color = [0.1, 0.9, 0.2, 1.0];
const WEIGHT_ROOM_HIGH: Color = [1.0, 0.1, 0.1, 1.0];
fn main() {
let opengl = OpenGL::V3_2;
let initial_screen_size = [1024, 768];
let mut window: GlutinWindow = WindowSettings::new("Procedural Dungeon", initial_screen_size)
.graphics_api(opengl)
.exit_on_esc(false) // example code used true but I don't want that
.build()
.unwrap();
let mut events = Events::new(EventSettings::new());
let mut app = App::new(opengl, window);
while let Some(e) = events.next(&mut app.window) {
e.render(|args| {
app.render(args);
});
e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this should end the loop
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
&self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt {
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
}
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
ellipse(CURSOR_COLOR, player, c.transform, gl);
}
});
}
// updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
self.pcc.modify(|PccState { cursor_px, .. }| {
*cursor_px = cursor_screen;
});
self.pcc.dirty
}
fn regenerate(&mut self, width: i32, height: i32) {
// regenerate the "world"
self.world.regenerate(Rect::from_xywh(0, 0, width, height));
// reset any app state that depends on the previous "world"
self.nav.forget();
self.pointed_room.forget();
self.generate_requested = false;
// pick a random position for the player
let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
let mut rng = thread_rng();
graph.nodes().choose(&mut rng).map(|n| {
let point = graph.get_bounds(*n.id()).center();
[point.x, point.y]
}).clone()
});
if let Some(pos) = new_player_pos {
self.pcc.modify(|PccState { player_pos, .. }| {
*player_pos = pos;
});
}
}
}
struct | {
current: Option<FloorNodeId>,
last_pointer: Option<Point>
}
impl PointedRoom {
fn new() -> Self {
PointedRoom {
current: None,
last_pointer: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_pointer = None;
}
fn update(&mut self, pointer: Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_pointer {
Some(last) => last[0] != pointer[0] || last[1] != pointer[1],
None => true,
};
if should_update {
self.current = graph.node_at_point(&pointer).map(|n| n.id()).cloned();
self.last_pointer = Some(pointer);
}
}
}
struct PccState<'a> {
pub cursor_px: &'a mut [f64; 2],
pub screen_px: &'a mut [f64; 2],
pub player_pos: &'a mut [f64; 2],
}
struct PlayerCameraCursor {
cursor_px: [f64; 2],
cursor_pos: [f64; 2],
screen_px: [f64; 2],
player_pos: [f64; 2],
camera: Matrix2d,
camera_inv: Matrix2d,
dirty: bool,
}
impl PlayerCameraCursor {
fn new(screen_size: [u32; 2]) -> Self {
PlayerCameraCursor {
cursor_px: [0.0, 0.0],
cursor_pos: [0.0, 0.0],
screen_px: [screen_size[0] as f64, screen_size[1] as f64],
player_pos: [screen_size[0] as f64 / 2.0, screen_size[1] as f64 / 2.0],
camera: identity(),
camera_inv: identity(),
dirty: true,
}
}
fn update(&mut self) {
if self.dirty {
let zoom_factor = 4.0;
// this is some kind of voodoo...
// for one, the order of operations seems wrong to me
// for two, after translating by `-player_pos` without a scale factor,
// you have to apply the scale factor to the half_screen translation??
self.camera = identity()
.zoom(zoom_factor)
.trans_pos(vec2_neg(self.player_pos))
.trans_pos(vec2_scale(self.screen_px, 0.5 / zoom_factor));
self.camera_inv = mat2x3_inv(self.camera);
self.cursor_pos = row_mat2x3_transform_pos2(self.camera_inv, self.cursor_px);
self.dirty = false;
}
}
fn modify<F>(&mut self, f: F)
where F: FnOnce(PccState) -> ()
{
let [cx1, cy1] = self.cursor_px;
let [sx1, sy1] = self.screen_px;
let [px1, py1] = self.player_pos;
f(PccState {
cursor_px: &mut self.cursor_px,
screen_px: &mut self.screen_px,
player_pos: &mut self.player_pos,
});
let [cx2, cy2] = self.cursor_px;
let [sx2, sy2] = self.screen_px;
let [px2, py2] = self.player_pos;
if (cx1 != cx2) || (cy1 != cy2) || (sx1 != sx2) || (sy1 != sy2) || (px1 != px2) || (py1 != py2) {
self.dirty = true;
}
}
}
fn draw_horizontal_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x + 0.25, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y), pixel_pos(x + 1.0, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.25, y + 0.1), pixel_pos(x + 0.25, y - 0.1), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y + 0.1), pixel_pos(x + 0.75, y - 0.1), ctx.transform, gl);
}
fn draw_vertical_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x, y + 0.25), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y + 0.75), pixel_pos(x, y + 1.0), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x - 0.1, y + 0.25), pixel_pos(x + 0.1, y + 0.25), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x - 0.1, y + 0.75), pixel_pos(x + 0.1, y + 0.75), ctx.transform, gl);
}
fn lerp_color(from: &Color, to: &Color, ratio: f32) -> Color {
let [r1, g1, b1, a1] = from;
let [r2, g2, b2, a2] = to;
[
r1 + (r2 - r1) * ratio,
g1 + (g2 - g1) * ratio,
b1 + (b2 - b1) * ratio,
a1 + (a2 - a1) * ratio,
]
}
struct MyGameWorld {
tile_pixel_size: usize,
dungeon: Option<BasicGridDungeon>,
floor_graph: Option<DungeonFloorGraph>,
generator: GeneratorStrategy<&'static str>,
}
impl MyGameWorld {
fn new() -> Self {
let generator = GeneratorStrategy {
room_chances: vec![
(RoomSize::new(5,4), 1),
(RoomSize::new(4,4), 2),
(RoomSize::new(4,3), 3),
(RoomSize::new(4,2), 3),
(RoomSize::new(4,1), 1),
(RoomSize::new(3,2), 5),
(RoomSize::new(3,1), 2),
(RoomSize::new(2,2), 5),
(RoomSize::new(2,1), 1),
(RoomSize::new(1,1), 1)
].into_iter().flat_map(|(size_opt, chance)| {
size_opt.map(|s| (s, chance))
}).collect(),
bias_graph: BiasGraph::new(
&vec![
("start", (0.0, 0.0)),
("upper-left", (0.25, 0.75)),
("lower-right", (0.75, 0.25)),
("end", (1.0, 1.0))
],
&vec![
("start", "upper-left"),
("upper-left", "lower-right"),
("lower-right", "end"),
("start", "lower-right"),
("upper-left", "end")
]
),
steps: vec![
GeneratorStep::Branches { count: 15 },
GeneratorStep::Clusters { count: 10, iterations: 50 },
GeneratorStep::Widen { iterations: 150 },
// GeneratorStep::Branches { count: 5 },
// GeneratorStep::Clusters { count: 3, iterations: 20 },
// GeneratorStep::Widen { iterations: 20 },
]
};
MyGameWorld {
tile_pixel_size: 16,
dungeon: None,
floor_graph: None,
generator
}
}
fn regenerate(&mut self, pixel_bounds: Rect) -> () {
let grid_width = pixel_bounds.width() as usize / self.tile_pixel_size;
let grid_height = pixel_bounds.height() as usize / self.tile_pixel_size;
let dungeon = self.generator.generate(grid_width, grid_height);
self.floor_graph = Some(graph::decompose(&dungeon, self.tile_pixel_size as f64, 2.0, 6.0));
self.dungeon = Some(dungeon);
}
fn tile_pixel_size(&self) -> usize { self.tile_pixel_size }
fn dungeon(&self) -> &Option<BasicGridDungeon> { &self.dungeon }
}
| PointedRoom | identifier_name |
old_main.rs | use std::rc::Rc;
use std::slice::Iter;
use glutin_window::GlutinWindow;
use graphics::{Context, line_from_to, triangulation};
use graphics::character::CharacterCache;
use graphics::color::{BLACK, WHITE};
use graphics::Transformed;
use graphics::math::{identity, Matrix2d, Vec2d, scale, translate};
use graphics::types::Color;
use ncollide2d::bounding_volume::AABB;
use ncollide2d::partitioning::BVH;
use ncollide2d::query::visitors::PointInterferencesCollector;
use opengl_graphics::{GlGraphics, OpenGL};
use piston::event_loop::{Events, EventSettings};
use piston::input::*;
use piston::window::{AdvancedWindow, Size, Window, WindowSettings};
use rand::{Rng, thread_rng};
use rand::seq::SliceRandom;
use vecmath::*;
#[allow(unused)]
use crate::dungeon::{GridDungeon, GridDungeonGenerator, RandomRoomGridDungeonGenerator, RoomId, RoomSize};
use crate::dungeon::{BasicGridDungeon, BiasGraph, GeneratorStep, GeneratorStrategy, GridDungeonGraph, Sliding, SlidingIter};
use crate::geom::{Corners, Point, point_eq, Rect};
use crate::graph::{DungeonFloorGraph, FloorNode, FloorNodeId};
use crate::tile::{CompassDirection, TileAddress, WallType};
use graphics::ellipse::circle;
mod dungeon;
mod geom;
mod graph;
mod tile;
const CURSOR_COLOR: Color = BLACK;
const PATH_COLOR: Color = [0.0, 0.0, 0.0, 0.75];
const BACKGROUND_COLOR: Color = [0.4, 0.4, 0.4, 1.0];
const WALKABLE_ROOM_COLOR: Color = [0.1, 0.6, 1.0, 0.5];
const WALKABLE_DOOR_COLOR: Color = [0.1, 0.9, 0.2, 0.5];
const POINTED_ROOM_COLOR: Color = [1.0, 0.0, 0.0, 0.9];
const DEBUG_ROOM_LOW: Color = [0.1, 0.6, 1.0, 1.0];
const DEBUG_ROOM_HIGH: Color = WHITE;
const DEBUG_WALL_COLOR: Color = BLACK;
const WEIGHT_ROOM_LOW: Color = [0.1, 0.9, 0.2, 1.0];
const WEIGHT_ROOM_HIGH: Color = [1.0, 0.1, 0.1, 1.0];
fn main() {
let opengl = OpenGL::V3_2;
let initial_screen_size = [1024, 768];
let mut window: GlutinWindow = WindowSettings::new("Procedural Dungeon", initial_screen_size)
.graphics_api(opengl)
.exit_on_esc(false) // example code used true but I don't want that
.build()
.unwrap();
let mut events = Events::new(EventSettings::new());
let mut app = App::new(opengl, window);
while let Some(e) = events.next(&mut app.window) {
e.render(|args| {
app.render(args);
});
e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this should end the loop
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
&self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt |
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
ellipse(CURSOR_COLOR, player, c.transform, gl);
}
});
}
// updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
self.pcc.modify(|PccState { cursor_px, .. }| {
*cursor_px = cursor_screen;
});
self.pcc.dirty
}
fn regenerate(&mut self, width: i32, height: i32) {
// regenerate the "world"
self.world.regenerate(Rect::from_xywh(0, 0, width, height));
// reset any app state that depends on the previous "world"
self.nav.forget();
self.pointed_room.forget();
self.generate_requested = false;
// pick a random position for the player
let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
let mut rng = thread_rng();
graph.nodes().choose(&mut rng).map(|n| {
let point = graph.get_bounds(*n.id()).center();
[point.x, point.y]
}).clone()
});
if let Some(pos) = new_player_pos {
self.pcc.modify(|PccState { player_pos, .. }| {
*player_pos = pos;
});
}
}
}
struct PointedRoom {
current: Option<FloorNodeId>,
last_pointer: Option<Point>
}
impl PointedRoom {
fn new() -> Self {
PointedRoom {
current: None,
last_pointer: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_pointer = None;
}
fn update(&mut self, pointer: Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_pointer {
Some(last) => last[0] != pointer[0] || last[1] != pointer[1],
None => true,
};
if should_update {
self.current = graph.node_at_point(&pointer).map(|n| n.id()).cloned();
self.last_pointer = Some(pointer);
}
}
}
struct PccState<'a> {
pub cursor_px: &'a mut [f64; 2],
pub screen_px: &'a mut [f64; 2],
pub player_pos: &'a mut [f64; 2],
}
struct PlayerCameraCursor {
cursor_px: [f64; 2],
cursor_pos: [f64; 2],
screen_px: [f64; 2],
player_pos: [f64; 2],
camera: Matrix2d,
camera_inv: Matrix2d,
dirty: bool,
}
impl PlayerCameraCursor {
fn new(screen_size: [u32; 2]) -> Self {
PlayerCameraCursor {
cursor_px: [0.0, 0.0],
cursor_pos: [0.0, 0.0],
screen_px: [screen_size[0] as f64, screen_size[1] as f64],
player_pos: [screen_size[0] as f64 / 2.0, screen_size[1] as f64 / 2.0],
camera: identity(),
camera_inv: identity(),
dirty: true,
}
}
fn update(&mut self) {
if self.dirty {
let zoom_factor = 4.0;
// this is some kind of voodoo...
// for one, the order of operations seems wrong to me
// for two, after translating by `-player_pos` without a scale factor,
// you have to apply the scale factor to the half_screen translation??
self.camera = identity()
.zoom(zoom_factor)
.trans_pos(vec2_neg(self.player_pos))
.trans_pos(vec2_scale(self.screen_px, 0.5 / zoom_factor));
self.camera_inv = mat2x3_inv(self.camera);
self.cursor_pos = row_mat2x3_transform_pos2(self.camera_inv, self.cursor_px);
self.dirty = false;
}
}
fn modify<F>(&mut self, f: F)
where F: FnOnce(PccState) -> ()
{
let [cx1, cy1] = self.cursor_px;
let [sx1, sy1] = self.screen_px;
let [px1, py1] = self.player_pos;
f(PccState {
cursor_px: &mut self.cursor_px,
screen_px: &mut self.screen_px,
player_pos: &mut self.player_pos,
});
let [cx2, cy2] = self.cursor_px;
let [sx2, sy2] = self.screen_px;
let [px2, py2] = self.player_pos;
if (cx1 != cx2) || (cy1 != cy2) || (sx1 != sx2) || (sy1 != sy2) || (px1 != px2) || (py1 != py2) {
self.dirty = true;
}
}
}
fn draw_horizontal_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x + 0.25, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y), pixel_pos(x + 1.0, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.25, y + 0.1), pixel_pos(x + 0.25, y - 0.1), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y + 0.1), pixel_pos(x + 0.75, y - 0.1), ctx.transform, gl);
}
fn draw_vertical_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x, y + 0.25), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y + 0.75), pixel_pos(x, y + 1.0), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x - 0.1, y + 0.25), pixel_pos(x + 0.1, y + 0.25), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x - 0.1, y + 0.75), pixel_pos(x + 0.1, y + 0.75), ctx.transform, gl);
}
fn lerp_color(from: &Color, to: &Color, ratio: f32) -> Color {
let [r1, g1, b1, a1] = from;
let [r2, g2, b2, a2] = to;
[
r1 + (r2 - r1) * ratio,
g1 + (g2 - g1) * ratio,
b1 + (b2 - b1) * ratio,
a1 + (a2 - a1) * ratio,
]
}
struct MyGameWorld {
tile_pixel_size: usize,
dungeon: Option<BasicGridDungeon>,
floor_graph: Option<DungeonFloorGraph>,
generator: GeneratorStrategy<&'static str>,
}
impl MyGameWorld {
fn new() -> Self {
let generator = GeneratorStrategy {
room_chances: vec![
(RoomSize::new(5,4), 1),
(RoomSize::new(4,4), 2),
(RoomSize::new(4,3), 3),
(RoomSize::new(4,2), 3),
(RoomSize::new(4,1), 1),
(RoomSize::new(3,2), 5),
(RoomSize::new(3,1), 2),
(RoomSize::new(2,2), 5),
(RoomSize::new(2,1), 1),
(RoomSize::new(1,1), 1)
].into_iter().flat_map(|(size_opt, chance)| {
size_opt.map(|s| (s, chance))
}).collect(),
bias_graph: BiasGraph::new(
&vec![
("start", (0.0, 0.0)),
("upper-left", (0.25, 0.75)),
("lower-right", (0.75, 0.25)),
("end", (1.0, 1.0))
],
&vec![
("start", "upper-left"),
("upper-left", "lower-right"),
("lower-right", "end"),
("start", "lower-right"),
("upper-left", "end")
]
),
steps: vec![
GeneratorStep::Branches { count: 15 },
GeneratorStep::Clusters { count: 10, iterations: 50 },
GeneratorStep::Widen { iterations: 150 },
// GeneratorStep::Branches { count: 5 },
// GeneratorStep::Clusters { count: 3, iterations: 20 },
// GeneratorStep::Widen { iterations: 20 },
]
};
MyGameWorld {
tile_pixel_size: 16,
dungeon: None,
floor_graph: None,
generator
}
}
fn regenerate(&mut self, pixel_bounds: Rect) -> () {
let grid_width = pixel_bounds.width() as usize / self.tile_pixel_size;
let grid_height = pixel_bounds.height() as usize / self.tile_pixel_size;
let dungeon = self.generator.generate(grid_width, grid_height);
self.floor_graph = Some(graph::decompose(&dungeon, self.tile_pixel_size as f64, 2.0, 6.0));
self.dungeon = Some(dungeon);
}
fn tile_pixel_size(&self) -> usize { self.tile_pixel_size }
fn dungeon(&self) -> &Option<BasicGridDungeon> { &self.dungeon }
}
| {
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
} | conditional_block |
old_main.rs | use std::rc::Rc;
use std::slice::Iter;
use glutin_window::GlutinWindow;
use graphics::{Context, line_from_to, triangulation};
use graphics::character::CharacterCache;
use graphics::color::{BLACK, WHITE};
use graphics::Transformed;
use graphics::math::{identity, Matrix2d, Vec2d, scale, translate};
use graphics::types::Color;
use ncollide2d::bounding_volume::AABB;
use ncollide2d::partitioning::BVH;
use ncollide2d::query::visitors::PointInterferencesCollector;
use opengl_graphics::{GlGraphics, OpenGL};
use piston::event_loop::{Events, EventSettings};
use piston::input::*;
use piston::window::{AdvancedWindow, Size, Window, WindowSettings};
use rand::{Rng, thread_rng};
use rand::seq::SliceRandom;
use vecmath::*;
#[allow(unused)]
use crate::dungeon::{GridDungeon, GridDungeonGenerator, RandomRoomGridDungeonGenerator, RoomId, RoomSize};
use crate::dungeon::{BasicGridDungeon, BiasGraph, GeneratorStep, GeneratorStrategy, GridDungeonGraph, Sliding, SlidingIter};
use crate::geom::{Corners, Point, point_eq, Rect};
use crate::graph::{DungeonFloorGraph, FloorNode, FloorNodeId};
use crate::tile::{CompassDirection, TileAddress, WallType};
use graphics::ellipse::circle;
mod dungeon;
mod geom;
mod graph;
mod tile;
const CURSOR_COLOR: Color = BLACK;
const PATH_COLOR: Color = [0.0, 0.0, 0.0, 0.75];
const BACKGROUND_COLOR: Color = [0.4, 0.4, 0.4, 1.0];
const WALKABLE_ROOM_COLOR: Color = [0.1, 0.6, 1.0, 0.5];
const WALKABLE_DOOR_COLOR: Color = [0.1, 0.9, 0.2, 0.5];
const POINTED_ROOM_COLOR: Color = [1.0, 0.0, 0.0, 0.9];
const DEBUG_ROOM_LOW: Color = [0.1, 0.6, 1.0, 1.0];
const DEBUG_ROOM_HIGH: Color = WHITE;
const DEBUG_WALL_COLOR: Color = BLACK;
const WEIGHT_ROOM_LOW: Color = [0.1, 0.9, 0.2, 1.0];
const WEIGHT_ROOM_HIGH: Color = [1.0, 0.1, 0.1, 1.0];
fn main() {
let opengl = OpenGL::V3_2;
let initial_screen_size = [1024, 768];
let mut window: GlutinWindow = WindowSettings::new("Procedural Dungeon", initial_screen_size)
.graphics_api(opengl)
.exit_on_esc(false) // example code used true but I don't want that
.build()
.unwrap();
let mut events = Events::new(EventSettings::new());
let mut app = App::new(opengl, window);
while let Some(e) = events.next(&mut app.window) {
e.render(|args| { | e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this should end the loop
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
    /// Creates the app with a freshly-constructed (empty) world.
    /// `generate_requested` starts as `true` so the first `update` call
    /// generates the dungeon.
    fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
        let screen_size = window.size().into();
        App {
            gl: GlGraphics::new(opengl),
            window,
            world: MyGameWorld::new(),
            pcc: PlayerCameraCursor::new(screen_size),
            mouse_pressed: false,
            generate_requested: true,
            pointed_room: PointedRoom::new(),
            nav_requested: false,
            nav: NavController::new(),
        }
    }

    /// Per-frame simulation step: regenerate the world if requested,
    /// retarget navigation while the mouse is held, advance the player along
    /// the current path, then refresh camera/cursor state and hovered room.
    fn update(&mut self, dt: f64) {
        // if the world needs to regenerate, do it now
        if self.generate_requested {
            let Size { width, height } = self.window.size();
            self.regenerate(width as i32, height as i32);
        }
        // update the navigation target as long as the mouse is down
        if self.mouse_pressed {
            if let Some(graph) = &self.world.floor_graph {
                self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
            }
        }
        // move the player along the current navigation path (200 units/sec)
        if let Some(nav) = &mut self.nav.current {
            self.pcc.modify(|PccState { player_pos, .. }| {
                nav.advance_by(200.0 * dt, player_pos);
            });
        }
        // update the player camera/cursor if it was modified since the last update
        self.pcc.update();
        // re-check the 'pointed room' if the mouse cursor's world position has changed
        if let Some(graph) = &self.world.floor_graph {
            self.pointed_room.update(self.pcc.cursor_pos, graph);
        }
    }

    /// Draws the whole scene: dungeon tiles/walls/doors, walkable-area debug
    /// overlays, the current nav path, the cursor crosshair and the player.
    fn render(&mut self, args: &RenderArgs) {
        use graphics::*;
        let world = &self.world;
        let pcc = &self.pcc;
        let player_pos = &pcc.player_pos;
        let cursor = pcc.cursor_pos;
        let pointed_room = &self.pointed_room;
        let nav_opt = &self.nav.current;
        // BUGFIX: removed a stray `&` that borrowed (and immediately
        // discarded) the return value of `draw` — the borrow had no effect.
        self.gl.draw(args.viewport(), |_c, gl| {
            let c = _c.append_transform(pcc.camera);
            clear(BACKGROUND_COLOR, gl);
            // PRETTY room tiles + walls + doors
            if let Some(dungeon) = world.dungeon() {
                let tiles = dungeon.tiles();
                let tile_size = world.tile_pixel_size() as f64;
                // fill in a square for each room tile in the grid
                for addr in tiles.tile_addresses() {
                    if let Some((_room_id, room_weight)) = tiles[addr] {
                        // weight in [1, 2] -> debug gradient; > 2 -> white;
                        // < 1 -> weight gradient
                        let color = {
                            if room_weight >= 1.0 && room_weight <= 2.0 {
                                lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
                            } else if room_weight >= 1.0 {
                                WHITE
                            } else {
                                lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
                            }
                        };
                        let x = addr.x as f64 * tile_size;
                        let y = addr.y as f64 * tile_size;
                        let rect = [x, y, tile_size, tile_size];
                        rectangle(color, rect, c.transform, gl);
                    }
                }
                // draw an appropriate line(s) for each wall in the dungeon
                for (wall_addr, wall_type) in dungeon.walls().iter() {
                    match *wall_type {
                        WallType::Clear => (),
                        WallType::Wall => {
                            let TileAddress { x, y } = wall_addr.tile();
                            // endpoints of the wall edge, in tile-corner offsets
                            let (base_to, base_from) = match wall_addr.direction() {
                                CompassDirection::North => ((0, 1), (1, 1)),
                                CompassDirection::East => ((1, 1), (1, 0)),
                                CompassDirection::South => ((0, 0), (1, 0)),
                                CompassDirection::West => ((0, 0), (0, 1)),
                            };
                            let to_px = |(dx, dy)| {
                                [(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
                            };
                            line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
                        }
                        WallType::Door => {
                            let TileAddress { x, y } = wall_addr.tile();
                            match wall_addr.direction() {
                                CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
                                CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
                                CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
                                CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
                            }
                        }
                    }
                }
            }
            // NAVIGATION-related debug
            if let Some(floor_graph) = &world.floor_graph {
                // DEBUG: walkable areas
                for node in floor_graph.nodes().iter() {
                    let bounds = &floor_graph.get_bounds(*node.id());
                    let color = match node {
                        FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
                        FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
                    };
                    let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
                    rectangle(color, rect, c.transform, gl);
                }
                // DEBUG: cursor target walkable area
                if let Some(pointed_room) = pointed_room.current {
                    let bounds = floor_graph.get_bounds(pointed_room);
                    let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
                    rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
                }
            }
            // draw the remaining nav path: player -> next waypoint -> ... -> goal
            if let Some(nav) = nav_opt {
                let start = Some(player_pos.clone());
                let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
                for (from, to) in lines {
                    line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
                }
            }
            // DEBUG: cursor (small crosshair at the world-space cursor position)
            {
                let [cx, cy] = cursor;
                let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
                let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
                rectangle(CURSOR_COLOR, vertical, c.transform, gl);
                rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
            }
            // the player, as a small circle
            {
                let [x, y] = player_pos;
                let player = circle(*x, *y, 3.0);
                ellipse(CURSOR_COLOR, player, c.transform, gl);
            }
        });
    }

    // updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
    fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
        self.pcc.modify(|PccState { cursor_px, .. }| {
            *cursor_px = cursor_screen;
        });
        // `modify` only flags `dirty` when a value actually changed
        self.pcc.dirty
    }

    /// Regenerates the world to fill the given pixel area, resets all state
    /// derived from the old world, and respawns the player at the center of
    /// a random walkable node.
    fn regenerate(&mut self, width: i32, height: i32) {
        // regenerate the "world"
        self.world.regenerate(Rect::from_xywh(0, 0, width, height));
        // reset any app state that depends on the previous "world"
        self.nav.forget();
        self.pointed_room.forget();
        self.generate_requested = false;
        // pick a random position for the player
        // (CLEANUP: dropped a redundant `.clone()` on the already-owned Option)
        let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
            let mut rng = thread_rng();
            graph.nodes().choose(&mut rng).map(|n| {
                let point = graph.get_bounds(*n.id()).center();
                [point.x, point.y]
            })
        });
        if let Some(pos) = new_player_pos {
            self.pcc.modify(|PccState { player_pos, .. }| {
                *player_pos = pos;
            });
        }
    }
}
/// Caches which walkable floor node the mouse cursor is over, keyed by the
/// last pointer position so the graph lookup only runs when the cursor moves.
struct PointedRoom {
    // the node currently under the pointer, if any
    current: Option<FloorNodeId>,
    // world-space pointer position at the time of the last lookup
    last_pointer: Option<Point>
}
impl PointedRoom {
    /// Starts with no hovered node and no remembered pointer position.
    fn new() -> Self {
        PointedRoom {
            current: None,
            last_pointer: None,
        }
    }

    /// Clears all cached state, e.g. after the world is regenerated.
    fn forget(&mut self) {
        self.current = None;
        self.last_pointer = None;
    }

    /// Re-resolves the hovered floor node, but only when the pointer actually
    /// moved since the previous call.
    fn update(&mut self, pointer: Point, graph: &DungeonFloorGraph) {
        let moved = self
            .last_pointer
            .map_or(true, |prev| prev[0] != pointer[0] || prev[1] != pointer[1]);
        if moved {
            self.current = graph.node_at_point(&pointer).map(|n| n.id()).cloned();
            self.last_pointer = Some(pointer);
        }
    }
}
/// Mutable view of the pieces of `PlayerCameraCursor` state that callers may
/// edit inside `PlayerCameraCursor::modify`; changes are diffed afterwards to
/// decide whether the camera needs recomputation.
struct PccState<'a> {
    pub cursor_px: &'a mut [f64; 2],
    pub screen_px: &'a mut [f64; 2],
    pub player_pos: &'a mut [f64; 2],
}
/// Couples player position, screen size and cursor state with the derived
/// camera transform (and its inverse, used to map the cursor into world space).
struct PlayerCameraCursor {
    // cursor in screen pixels
    cursor_px: [f64; 2],
    // cursor in world coordinates (derived via `camera_inv`)
    cursor_pos: [f64; 2],
    // window size in pixels
    screen_px: [f64; 2],
    // player position in world coordinates
    player_pos: [f64; 2],
    camera: Matrix2d,
    camera_inv: Matrix2d,
    // set when any input field changed; cleared after `update` recomputes
    dirty: bool,
}
impl PlayerCameraCursor {
    /// Starts with the player centered on screen and a dirty camera so the
    /// first `update` computes the transforms.
    fn new(screen_size: [u32; 2]) -> Self {
        let screen_px = [screen_size[0] as f64, screen_size[1] as f64];
        PlayerCameraCursor {
            cursor_px: [0.0, 0.0],
            cursor_pos: [0.0, 0.0],
            screen_px,
            player_pos: vec2_scale(screen_px, 0.5),
            camera: identity(),
            camera_inv: identity(),
            dirty: true,
        }
    }

    /// Recomputes the camera transform, its inverse, and the world-space
    /// cursor position — but only when something changed since last time.
    fn update(&mut self) {
        if !self.dirty {
            return;
        }
        let zoom_factor = 4.0;
        // this is some kind of voodoo...
        // for one, the order of operations seems wrong to me
        // for two, after translating by `-player_pos` without a scale factor,
        // you have to apply the scale factor to the half_screen translation??
        self.camera = identity()
            .zoom(zoom_factor)
            .trans_pos(vec2_neg(self.player_pos))
            .trans_pos(vec2_scale(self.screen_px, 0.5 / zoom_factor));
        self.camera_inv = mat2x3_inv(self.camera);
        self.cursor_pos = row_mat2x3_transform_pos2(self.camera_inv, self.cursor_px);
        self.dirty = false;
    }

    /// Hands the caller mutable access to the input fields, then flags the
    /// state dirty if any of them actually changed.
    fn modify<F>(&mut self, f: F)
    where
        F: FnOnce(PccState) -> (),
    {
        let before = (self.cursor_px, self.screen_px, self.player_pos);
        f(PccState {
            cursor_px: &mut self.cursor_px,
            screen_px: &mut self.screen_px,
            player_pos: &mut self.player_pos,
        });
        let after = (self.cursor_px, self.screen_px, self.player_pos);
        if before != after {
            self.dirty = true;
        }
    }
}
/// Draws a door on a horizontal (north/south) wall edge: two wall stubs with
/// a gap between them, plus two short jamb ticks. `x`/`y` are in tile units.
fn draw_horizontal_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
    // tile-space -> pixel-space
    let px = |xt: f64, yt: f64| [xt * tile_size, yt * tile_size];
    // (from, to) endpoints of each line segment, in tile units
    let segments = [
        ((x, y), (x + 0.25, y)),
        ((x + 0.75, y), (x + 1.0, y)),
        ((x + 0.25, y + 0.1), (x + 0.25, y - 0.1)),
        ((x + 0.75, y + 0.1), (x + 0.75, y - 0.1)),
    ];
    for ((x1, y1), (x2, y2)) in segments.iter().copied() {
        graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, px(x1, y1), px(x2, y2), ctx.transform, gl);
    }
}
/// Draws a door on a vertical (east/west) wall edge: two wall stubs with a
/// gap between them, plus two short jamb ticks. `x`/`y` are in tile units.
fn draw_vertical_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
    // tile-space -> pixel-space
    let px = |xt: f64, yt: f64| [xt * tile_size, yt * tile_size];
    // (from, to) endpoints of each line segment, in tile units
    let segments = [
        ((x, y), (x, y + 0.25)),
        ((x, y + 0.75), (x, y + 1.0)),
        ((x - 0.1, y + 0.25), (x + 0.1, y + 0.25)),
        ((x - 0.1, y + 0.75), (x + 0.1, y + 0.75)),
    ];
    for ((x1, y1), (x2, y2)) in segments.iter().copied() {
        graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, px(x1, y1), px(x2, y2), ctx.transform, gl);
    }
}
/// Component-wise linear interpolation between two RGBA colors:
/// each channel is `a + (b - a) * ratio`.
fn lerp_color(from: &Color, to: &Color, ratio: f32) -> Color {
    let [r1, g1, b1, a1] = *from;
    let [r2, g2, b2, a2] = *to;
    let lerp = |a: f32, b: f32| a + (b - a) * ratio;
    [lerp(r1, r2), lerp(g1, g2), lerp(b1, b2), lerp(a1, a2)]
}
/// The generated game world: the raw dungeon grid, the derived walkable-area
/// graph used for navigation, and the generator configuration.
struct MyGameWorld {
    // pixel edge length of one grid tile
    tile_pixel_size: usize,
    // None until the first `regenerate` call
    dungeon: Option<BasicGridDungeon>,
    // walkability graph derived from `dungeon`; None until first `regenerate`
    floor_graph: Option<DungeonFloorGraph>,
    generator: GeneratorStrategy<&'static str>,
}
impl MyGameWorld {
    /// Builds an empty world plus the fixed `GeneratorStrategy` used for every
    /// regeneration: weighted room sizes, a bias graph steering the dungeon's
    /// overall flow, and the sequence of generator steps.
    fn new() -> Self {
        let generator = GeneratorStrategy {
            // (room size, relative spawn chance); `RoomSize::new` returns an
            // Option and the flat_map below silently drops invalid sizes
            room_chances: vec![
                (RoomSize::new(5,4), 1),
                (RoomSize::new(4,4), 2),
                (RoomSize::new(4,3), 3),
                (RoomSize::new(4,2), 3),
                (RoomSize::new(4,1), 1),
                (RoomSize::new(3,2), 5),
                (RoomSize::new(3,1), 2),
                (RoomSize::new(2,2), 5),
                (RoomSize::new(2,1), 1),
                (RoomSize::new(1,1), 1)
            ].into_iter().flat_map(|(size_opt, chance)| {
                size_opt.map(|s| (s, chance))
            }).collect(),
            // named waypoints (in normalized 0..1 coordinates) and the edges
            // connecting them, biasing the generator's growth direction
            bias_graph: BiasGraph::new(
                &vec![
                    ("start", (0.0, 0.0)),
                    ("upper-left", (0.25, 0.75)),
                    ("lower-right", (0.75, 0.25)),
                    ("end", (1.0, 1.0))
                ],
                &vec![
                    ("start", "upper-left"),
                    ("upper-left", "lower-right"),
                    ("lower-right", "end"),
                    ("start", "lower-right"),
                    ("upper-left", "end")
                ]
            ),
            // generation passes, applied in order
            steps: vec![
                GeneratorStep::Branches { count: 15 },
                GeneratorStep::Clusters { count: 10, iterations: 50 },
                GeneratorStep::Widen { iterations: 150 },
                // GeneratorStep::Branches { count: 5 },
                // GeneratorStep::Clusters { count: 3, iterations: 20 },
                // GeneratorStep::Widen { iterations: 20 },
            ]
        };
        MyGameWorld {
            tile_pixel_size: 16,
            dungeon: None,
            floor_graph: None,
            generator
        }
    }
    /// Generates a fresh dungeon sized to fill `pixel_bounds`, and rebuilds
    /// the walkability graph from it.
    fn regenerate(&mut self, pixel_bounds: Rect) -> () {
        let grid_width = pixel_bounds.width() as usize / self.tile_pixel_size;
        let grid_height = pixel_bounds.height() as usize / self.tile_pixel_size;
        let dungeon = self.generator.generate(grid_width, grid_height);
        self.floor_graph = Some(graph::decompose(&dungeon, self.tile_pixel_size as f64, 2.0, 6.0));
        self.dungeon = Some(dungeon);
    }
    // simple accessors
    fn tile_pixel_size(&self) -> usize { self.tile_pixel_size }
    fn dungeon(&self) -> &Option<BasicGridDungeon> { &self.dungeon }
} | app.render(args);
});
| random_line_split |
environ_config.py | # © Copyright Databand.ai, an IBM Company 2022
# << should be run before import to airflow >>
# otherwise airflow.configuration will fail
# fix AIRFLOW_HOME for all runs
import os
from configparser import ConfigParser
from contextlib import contextmanager
from typing import Optional
from dbnd._core.configuration.project_env import (
_init_windows_python_path,
_is_init_mode,
)
from dbnd._core.log import dbnd_log
from dbnd._core.log.dbnd_log import dbnd_log_init_msg
from dbnd._core.utils.basics.environ_utils import (
environ_enabled,
environ_int,
set_env_dir,
set_on,
)
from dbnd._core.utils.basics.path_utils import abs_join, relative_path
DATABAND_AIRFLOW_CONN_ID = "dbnd_config" # DBND connection ID for Airflow connections
_MARKER_FILES = ["databand.cfg", "project.cfg", "databand-system.cfg"]
PARAM_ENV_TEMPLATE = "DBND__{S}__{K}"
ENV_DBND__DISABLED = "DBND__DISABLED"
ENV_DBND__TRACKING = (
"DBND__TRACKING" # implicit DBND tracking ( on any @task/log_ call)
)
ENV_DBND__UNITTEST_MODE = "DBND__UNITTEST"
ENV_DBND_QUIET = "DBND__QUIET"
ENV_DBND_HOME = "DBND_HOME"
ENV_DBND_SYSTEM = "DBND_SYSTEM"
ENV_DBND_LIB = "DBND_LIB"
ENV_DBND_CONFIG = "DBND_CONFIG" # extra config for DBND
ENV_DBND__USER_PRE_INIT = "DBND__USER_PRE_INIT" # run on user init
ENV_DBND__NO_MODULES = (
"DBND__NO_MODULES" # do not auto-load user modules (for example: airflow)
)
ENV_DBND__NO_PLUGINS = "DBND__NO_PLUGINS" # do not auto-load user plugins (for example all dbnd-tensorflow)
ENV_DBND__NO_TABLES = "DBND__NO_TABLES" # do not print fancy tables
ENV_DBND__SHOW_STACK_ON_SIGQUIT = "DBND__SHOW_STACK_ON_SIGQUIT"
ENV_DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING = (
"DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING"
)
ENV_DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING = "DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING"
ENV_DBND_USER = "DBND_USER"
ENV_DBND_ENV = "DBND_ENV"
# DBND RUN info variables
SCHEDULED_DAG_RUN_ID_ENV = "SCHEDULED_DAG_RUN_ID"
SCHEDULED_DATE_ENV = "SCHEDULED_DATE"
SCHEDULED_JOB_UID_ENV = "SCHEDULED_JOB_UID"
DBND_ROOT_RUN_UID = "DBND_ROOT_RUN_UID"
DBND_ROOT_RUN_TRACKER_URL = "DBND_ROOT_RUN_TRACKER_URL"
DBND_PARENT_TASK_RUN_UID = "DBND_PARENT_TASK_RUN_UID"
DBND_PARENT_TASK_RUN_ATTEMPT_UID = "DBND_PARENT_TASK_RUN_ATTEMPT_UID"
DBND_RUN_SUBMIT_UID = "DBND_SUBMIT_UID"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
    # True unless databand was disabled (env var DBND__DISABLED or disable_databand()).
    return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
    """
    Whether quiet mode is active.

    Quiet mode was made for the scheduler, to silence the launcher runners.
    We don't want this flag to propagate into the actual scheduled cmd.
    """
    return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
    # Lazily builds and returns the process-wide DbndProjectConfig singleton.
    global _project_config
    if not _project_config:
        # initialize dbnd home first
        # NOTE: the global is assigned *before* _initialize_dbnd_home() runs,
        # because the home-initialization code calls get_dbnd_project_config()
        # itself — assigning first prevents infinite recursion.
        _project_config = DbndProjectConfig()
        _initialize_dbnd_home()
    return _project_config
def get_dbnd_custom_config():
    """Return the path from the optional `dbnd_custom_config` module, or ""
    when the module is missing or its lookup fails (best-effort on purpose)."""
    try:
        import dbnd_custom_config

        return dbnd_custom_config.get_config_file_path()
    except Exception:
        # the custom-config module is optional; silently fall back
        return ""
def reset_dbnd_project_config():
    # Drop the cached singleton; the next get_dbnd_project_config() call rebuilds it.
    global _project_config
    _project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
    """
    Temporarily override the tracking mode for the scope of the `with` block,
    restoring the previous mode on exit (even if the body raises).
    """
    saved_tracking = get_dbnd_project_config()._dbnd_tracking
    get_dbnd_project_config()._dbnd_tracking = tracking
    try:
        yield
    finally:
        # restore whatever mode was active before entering the block
        get_dbnd_project_config()._dbnd_tracking = saved_tracking
def try_get_script_name():
    # type: () -> Optional[str]
    # Script name injected by the launcher via DBND__SCRIPT_NAME, if any.
    return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
r |
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
    """
    Locate the project root via the optional `_databand_project` marker module.
    Returns the marker module's directory, or None if it can't be imported.
    """
    try:
        import _databand_project
    except ImportError:
        dbnd_log_init_msg("Can't import `_databand_project` marker.")
        return None
    return abs_join(_databand_project.__file__, "..")
def _process_cfg(folder):
    """Scan `folder`'s config files (tox.ini, setup.cfg) for a [databand]
    section and export any dbnd_home/dbnd_system/dbnd_config entries as
    environment directories (resolved relative to the config file).

    Returns a `(dbnd_home, config_file)` tuple: the resolved dbnd_home path
    and the config file that declared it, or `(False, None)` when no config
    file defines one.
    """
    # dbnd home is being pointed inside [databand] in 'config' files
    found_dbnd_home = False
    config_file = None
    config_files = ["tox.ini", "setup.cfg"]
    for file in config_files:
        config_path = os.path.join(folder, file)
        try:
            parser = ConfigParser()
            parser.read(config_path)
            config_root, config_name = os.path.split(config_path)
            source = os.path.basename(config_path)
            if not parser.has_section("databand"):
                continue
            for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
                # TODO: hidden magic, do we need these setters?
                if not parser.has_option("databand", config_key):
                    continue
                config_value = parser.get("databand", config_key)
                # values are relative to the config file's directory
                config_value = os.path.abspath(os.path.join(config_root, config_value))
                set_env_dir(config_key, config_value)
                dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
                # BUGFIX: `found_dbnd_home`/`config_file` were never assigned,
                # so this function always returned (False, None) and callers
                # could never detect a cfg-declared dbnd_home.
                if config_key == "dbnd_home":
                    found_dbnd_home = config_value
                    config_file = config_path
        except Exception as ex:
            print("Failed to process %s: %s" % (config_path, ex))
    return found_dbnd_home, config_file
def _has_marker_file(folder):
    """Return `(folder, marker_path)` when `folder` contains one of the dbnd
    marker files, else `(False, None)`."""
    # dbnd home is where 'marker' files are located in
    for marker in _MARKER_FILES:
        candidate = os.path.join(folder, marker)
        if os.path.exists(candidate):
            return folder, candidate
    return False, None
def __find_dbnd_home_at(folder):
    """Try to resolve dbnd home at `folder`: first via [databand] sections in
    config files, then via marker files. Returns the home path, or False."""
    for locate, kind in ((_process_cfg, "config"), (_has_marker_file, "marker")):
        dbnd_home, found_file = locate(folder)
        if dbnd_home:
            dbnd_log_init_msg(
                "Found dbnd home by at %s, by %s file: %s"
                % (dbnd_home, kind, found_file)
            )
            return dbnd_home
    dbnd_log_init_msg("dbnd home was not found at %s" % folder)
    return False
def _find_and_set_dbnd_home():
    """Resolve DBND_HOME and export it via `set_env_dir`.

    Resolution order:
      1. explicit init mode -> current working directory
      2. the `_databand_project` import marker (checking cfg/marker files in it)
      3. walking up from cwd looking for `.dbnd/databand-system.cfg`,
         config files, or marker files
      4. the user's home directory as a last resort
    Returns True once a home was set, False otherwise.
    """
    # falling back to simple version
    if _is_init_mode():
        dbnd_home = os.path.abspath(".")
        print("Initializing new dbnd environment, using %s as DBND_HOME" % dbnd_home)
        set_env_dir(ENV_DBND_HOME, dbnd_home)
        return True
    # looking for dbnd project folder to be set as home
    project_folder = _find_project_by_import()
    if project_folder:
        dbnd_log_init_msg(
            "Found project folder by import from marker %s" % project_folder
        )
        # we know about project folder, let try to find "custom" configs in it
        dbnd_home = __find_dbnd_home_at(project_folder)
        if not dbnd_home:
            # BUGFIX: the message previously interpolated `dbnd_home` (which is
            # False at this point) instead of the folder actually being used.
            dbnd_log_init_msg(
                "No markers at %s! setting dbnd_home to %s"
                % (project_folder, project_folder)
            )
            dbnd_home = project_folder
        set_env_dir(ENV_DBND_HOME, dbnd_home)
        return True
    # traversing all the way up to until finding relevant anchor files
    cur_dir = os.path.normpath(os.getcwd())
    cur_dir_split = cur_dir.split(os.sep)
    cur_dir_split_reversed = reversed(list(enumerate(cur_dir_split)))
    dbnd_log_init_msg(
        "Trying to find dbnd_home by traversing up to the root folder starting at %s"
        % cur_dir
    )
    for idx, cur_folder in cur_dir_split_reversed:
        # NOTE(review): joining on "/" assumes a POSIX-style absolute path;
        # this walk likely misbehaves on Windows drive paths — confirm.
        cur_path = os.path.join("/", *cur_dir_split[1 : (idx + 1)])
        dbnd_system_file = os.path.join(cur_path, ".dbnd", "databand-system.cfg")
        if os.path.exists(dbnd_system_file):
            set_env_dir(ENV_DBND_HOME, cur_path)
            return True
        dbnd_home = __find_dbnd_home_at(cur_path)
        if dbnd_home:
            set_env_dir(ENV_DBND_HOME, dbnd_home)
            return True
    # last chance, we couldn't find dbnd project so we'll use user's home folder
    user_home = os.path.expanduser("~")
    if user_home:
        dbnd_log_init_msg("dbnd home was not found. Using user's home: %s" % user_home)
        set_env_dir(ENV_DBND_HOME, user_home)
        return True
    return False
def _env_banner():
    """Two-line, tab-indented summary of the resolved DBND_HOME / DBND_SYSTEM
    environment variables (both must already be set)."""
    home = os.environ[ENV_DBND_HOME]
    system = os.environ[ENV_DBND_SYSTEM]
    return "\tDBND_HOME={dbnd}\n\tDBND_SYSTEM={system}".format(dbnd=home, system=system)
_DBND_ENVIRONMENT = False
def _initialize_dbnd_home():
    # One-time, process-wide environment bootstrap; repeated calls are no-ops
    # thanks to the module-level flag set immediately below.
    global _DBND_ENVIRONMENT
    if _DBND_ENVIRONMENT:
        return
    _DBND_ENVIRONMENT = True
    dbnd_log_init_msg("_initialize_dbnd_home")
    project_config = get_dbnd_project_config()
    if project_config.disabled:
        # databand is off: skip all home/airflow setup
        dbnd_log_init_msg("databand is disabled via %s" % ENV_DBND__DISABLED)
        return
    dbnd_log_init_msg("Initializing Databand Basic Environment")
    # platform-specific tweaks must run before home/airflow resolution
    _initialize_google_composer()
    _init_windows_python_path(_databand_package)
    # main logic
    __initialize_dbnd_home_environ()
    __initialize_airflow_home()
    if project_config.quiet_mode:
        # we should not print anything if we are in shell completion!
        import logging

        # CRITICAL + 1 silences every standard log level
        logging.getLogger().setLevel(logging.CRITICAL + 1)
    dbnd_log_init_msg(_env_banner())
def __initialize_dbnd_home_environ():
    # MAIN PART OF THE SCRIPT
    # Ensures DBND_HOME, DBND_SYSTEM and DBND_LIB are all present in os.environ,
    # deriving the latter two from DBND_HOME when not explicitly set.
    if ENV_DBND_HOME not in os.environ:
        _find_and_set_dbnd_home()
    if ENV_DBND_HOME not in os.environ:
        # resolution failed everywhere — this is a hard error with guidance
        raise DatabandHomeError(
            "\nDBND_HOME could not be found when searching from current directory '%s' to root folder! \n "
            "Use `export DBND__DEBUG_INIT=True` to get more debug information.\n"
            "Trying fixing that issue by:\n"
            "\t 1. Explicitly set current directory to DBND HOME via: `export DBND_HOME=ROOT_OF_YOUR_PROJECT`.\n"
            "\t 2. `cd` into your project directory.\n"
            "\t 3. Create one of the following files inside current directory: [%s].\n"
            "\t 4. Run 'dbnd project-init' in current directory."
            % (os.getcwd(), ", ".join(_MARKER_FILES))
        )
    _dbnd_home = os.environ[ENV_DBND_HOME]
    if ENV_DBND_SYSTEM not in os.environ:
        dbnd_system = os.path.join(_dbnd_home, ".dbnd")
        # backward compatibility to $DBND_HOME/dbnd folder
        if not os.path.exists(dbnd_system) and os.path.exists(
            os.path.join(_dbnd_home, "dbnd")
        ):
            dbnd_system = os.path.join(_dbnd_home, "dbnd")
        os.environ[ENV_DBND_SYSTEM] = dbnd_system
    if ENV_DBND_LIB in os.environ:
        # usually will not happen
        dbnd_log_init_msg("Using DBND Library from %s" % os.environ[ENV_DBND_LIB])
    else:
        # default: the installed dbnd package location
        os.environ[ENV_DBND_LIB] = _databand_package
def __initialize_airflow_home():
    # Points AIRFLOW_HOME at a dbnd-managed folder, unless the user set it.
    ENV_AIRFLOW_HOME = "AIRFLOW_HOME"
    if ENV_AIRFLOW_HOME in os.environ:
        # user settings - we do nothing
        dbnd_log_init_msg(
            "Found user defined AIRFLOW_HOME at %s" % os.environ[ENV_AIRFLOW_HOME]
        )
        return
    # candidate folders, checked in order
    for dbnd_airflow_home in [
        os.path.join(os.environ[ENV_DBND_SYSTEM], "airflow"),
        os.path.join(os.environ[ENV_DBND_HOME], ".airflow"),
    ]:
        if not os.path.exists(dbnd_airflow_home):
            continue
        # NOTE(review): there is no break after assignment, so if both
        # candidates exist the *last* one ($DBND_HOME/.airflow) wins —
        # confirm this is intended.
        dbnd_log_init_msg(
            "Found airflow home folder at DBND, setting AIRFLOW_HOME to %s"
            % dbnd_airflow_home
        )
        os.environ[ENV_AIRFLOW_HOME] = dbnd_airflow_home
def _initialize_google_composer():
    """Apply Google Cloud Composer defaults (detected via the
    COMPOSER_ENVIRONMENT env var); a no-op everywhere else."""
    if "COMPOSER_ENVIRONMENT" not in os.environ:
        return
    dbnd_log_init_msg("Initializing Google Composer Environment")
    # on Composer, fall back to the user's HOME as DBND_HOME
    if ENV_DBND_HOME not in os.environ:
        os.environ[ENV_DBND_HOME] = os.environ["HOME"]
    # don't fail user DAGs on tracker errors unless explicitly requested
    raise_on_error_key = "DBND__CORE__TRACKER_RAISE_ON_ERROR"
    if raise_on_error_key not in os.environ:
        os.environ[raise_on_error_key] = "false"
def dbnd_project_path(*path):
    """Module-level convenience wrapper for
    DbndProjectConfig.dbnd_project_path(): join `path` under DBND_HOME."""
    config = get_dbnd_project_config()
    return config.dbnd_project_path(*path)
| eturn os.environ.get(ENV_DBND_HOME) or os.curdir
| identifier_body |
environ_config.py | # © Copyright Databand.ai, an IBM Company 2022
# << should be run before import to airflow >>
# otherwise airflow.configuration will fail
# fix AIRFLOW_HOME for all runs
import os
from configparser import ConfigParser
from contextlib import contextmanager
from typing import Optional
from dbnd._core.configuration.project_env import (
_init_windows_python_path,
_is_init_mode,
)
from dbnd._core.log import dbnd_log
from dbnd._core.log.dbnd_log import dbnd_log_init_msg
from dbnd._core.utils.basics.environ_utils import (
environ_enabled,
environ_int,
set_env_dir,
set_on,
)
from dbnd._core.utils.basics.path_utils import abs_join, relative_path
DATABAND_AIRFLOW_CONN_ID = "dbnd_config" # DBND connection ID for Airflow connections
_MARKER_FILES = ["databand.cfg", "project.cfg", "databand-system.cfg"]
PARAM_ENV_TEMPLATE = "DBND__{S}__{K}"
ENV_DBND__DISABLED = "DBND__DISABLED"
ENV_DBND__TRACKING = (
"DBND__TRACKING" # implicit DBND tracking ( on any @task/log_ call)
)
ENV_DBND__UNITTEST_MODE = "DBND__UNITTEST"
ENV_DBND_QUIET = "DBND__QUIET"
ENV_DBND_HOME = "DBND_HOME"
ENV_DBND_SYSTEM = "DBND_SYSTEM"
ENV_DBND_LIB = "DBND_LIB"
ENV_DBND_CONFIG = "DBND_CONFIG" # extra config for DBND
ENV_DBND__USER_PRE_INIT = "DBND__USER_PRE_INIT" # run on user init
ENV_DBND__NO_MODULES = (
"DBND__NO_MODULES" # do not auto-load user modules (for example: airflow)
)
ENV_DBND__NO_PLUGINS = "DBND__NO_PLUGINS" # do not auto-load user plugins (for example all dbnd-tensorflow)
ENV_DBND__NO_TABLES = "DBND__NO_TABLES" # do not print fancy tables
ENV_DBND__SHOW_STACK_ON_SIGQUIT = "DBND__SHOW_STACK_ON_SIGQUIT"
ENV_DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING = (
"DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING"
)
ENV_DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING = "DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING"
ENV_DBND_USER = "DBND_USER"
ENV_DBND_ENV = "DBND_ENV"
# DBND RUN info variables
SCHEDULED_DAG_RUN_ID_ENV = "SCHEDULED_DAG_RUN_ID"
SCHEDULED_DATE_ENV = "SCHEDULED_DATE"
SCHEDULED_JOB_UID_ENV = "SCHEDULED_JOB_UID"
DBND_ROOT_RUN_UID = "DBND_ROOT_RUN_UID"
DBND_ROOT_RUN_TRACKER_URL = "DBND_ROOT_RUN_TRACKER_URL"
DBND_PARENT_TASK_RUN_UID = "DBND_PARENT_TASK_RUN_UID"
DBND_PARENT_TASK_RUN_ATTEMPT_UID = "DBND_PARENT_TASK_RUN_ATTEMPT_UID"
DBND_RUN_SUBMIT_UID = "DBND_SUBMIT_UID"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
    """
    very basic environment config!

    Captures all DBND__* environment switches once at construction time and
    exposes the derived flags/paths used throughout the bootstrap code.
    """

    def __init__(self):
        # IF FALSE - we will not modify decorated @task code
        self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
        # unit-test mode (also settable later via set_dbnd_unit_test_mode)
        self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
        # cap on tracked calls per decorated function
        self.max_calls_per_run = environ_int(
            DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
        )
        # true while running shell completion (_DBND_COMPLETE is set)
        self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
        # note: DBND__QUIET is *popped* so the flag doesn't propagate to
        # subprocesses (see in_quiet_mode docstring)
        self.quiet_mode = (
            os.environ.pop(ENV_DBND_QUIET, None) is not None
            or self.shell_cmd_complete_mode
        )
        # external process can create "wrapper run" (airflow scheduler)
        # a run with partial information,
        # when we have a subprocess, only nested run will have all actual details
        # so we are going to "resubmit" them
        self.resubmit_run = (
            DBND_RESUBMIT_RUN in os.environ
            and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
        )
        # auto-load switches for user modules / plugins
        self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
        self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
        self.disable_pluggy_entrypoint_loading = environ_enabled(
            ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
        )
        self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
        # tri-state: True/False when DBND__TRACKING is set, None = "decide
        # from airflow context" (see is_tracking_mode)
        self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
        # lazily-resolved airflow context cache (False = not resolved yet)
        self._airflow_context = False
        self._inline_tracking = None
        self.disable_inline = False
        self.airflow_auto_tracking = environ_enabled(
            ENV_DBND__AUTO_TRACKING, default=True
        )
        # lazily-resolved "are we inside an airflow run" cache (None = unknown)
        self._is_airflow_runtime = None

    @property
    def disabled(self):
        return self._disabled

    @disabled.setter
    def disabled(self, value):
        # NOTE(review): set_on is called unconditionally, so assigning
        # `disabled = False` still turns the DBND__DISABLED env var ON for
        # subprocesses — confirm whether that is intended.
        set_on(ENV_DBND__DISABLED)
        self._disabled = value

    def airflow_context(self):
        # Lazily resolves (and caches) the surrounding airflow task context;
        # import is deferred to avoid pulling in airflow at module load.
        if not self._airflow_context:
            from dbnd._core.tracking.airflow_dag_inplace_tracking import (
                try_get_airflow_context,
            )

            self._airflow_context = try_get_airflow_context()
        return self._airflow_context

    def is_tracking_mode(self):
        # Explicit DBND__TRACKING wins; otherwise fall back to airflow detection.
        if self.disabled:
            return False
        if self._dbnd_tracking is None:
            return self.is_in_airflow_tracking_mode()
        return self._dbnd_tracking

    def is_in_airflow_tracking_mode(self):
        # Cached: True when an airflow task context is detectable.
        if self._is_airflow_runtime is None:
            self._is_airflow_runtime = bool(self.airflow_context())
        return self._is_airflow_runtime

    def is_verbose(self):
        return dbnd_log.is_verbose()

    def dbnd_home(self):
        # DBND_HOME env var, falling back to the current directory
        return os.environ.get(ENV_DBND_HOME) or os.curdir

    def dbnd_lib_path(self, *path):
        # path inside the installed dbnd package
        return abs_join(_databand_package, *path)

    def dbnd_config_path(self, *path):
        # path inside the package's bundled `conf` directory
        return self.dbnd_lib_path("conf", *path)

    def dbnd_system_path(self, *path):
        # path under DBND_SYSTEM (falls back to dbnd_home)
        dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
        return abs_join(dbnd_system, *path)

    def dbnd_project_path(self, *path):
        # path under DBND_HOME
        return abs_join(self.dbnd_home(), *path)

    def validate_init(self):
        dbnd_log_init_msg("Successfully created dbnd project config")

    def set_is_airflow_runtime(self):
        # Force the "inside airflow" answer without probing the context.
        self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check if we can have project marker file by import it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
def _process_cfg(folder):
# dbnd home is being pointed inside [databand] in 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
continue
config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
def _ | folder):
# dbnd home is where 'marker' files are located in
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path):
return folder, file_path
return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file = _process_cfg(folder)
if dbnd_home:
dbnd_log_init_msg(
"Found dbnd home by at %s, by config file: %s" % (dbnd_home, config_file)
)
return dbnd_home
dbnd_home, marker_file = _has_marker_file(folder)
if dbnd_home:
dbnd_log_init_msg(
"Found dbnd home by at %s, by marker file: %s" % (dbnd_home, marker_file)
)
return dbnd_home
dbnd_log_init_msg("dbnd home was not found at %s" % folder)
return False
def _find_and_set_dbnd_home():
# falling back to simple version
if _is_init_mode():
dbnd_home = os.path.abspath(".")
print("Initializing new dbnd environment, using %s as DBND_HOME" % dbnd_home)
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# looking for dbnd project folder to be set as home
project_folder = _find_project_by_import()
if project_folder:
dbnd_log_init_msg(
"Found project folder by import from marker %s" % project_folder
)
# we know about project folder, let try to find "custom" configs in it
dbnd_home = __find_dbnd_home_at(project_folder)
if not dbnd_home:
dbnd_log_init_msg(
"No markers at %s! setting dbnd_home to %s" % (dbnd_home, dbnd_home)
)
dbnd_home = project_folder
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# traversing all the way up to until finding relevant anchor files
cur_dir = os.path.normpath(os.getcwd())
cur_dir_split = cur_dir.split(os.sep)
cur_dir_split_reversed = reversed(list(enumerate(cur_dir_split)))
dbnd_log_init_msg(
"Trying to find dbnd_home by traversing up to the root folder starting at %s"
% cur_dir
)
for idx, cur_folder in cur_dir_split_reversed:
cur_path = os.path.join("/", *cur_dir_split[1 : (idx + 1)])
dbnd_system_file = os.path.join(cur_path, ".dbnd", "databand-system.cfg")
if os.path.exists(dbnd_system_file):
set_env_dir(ENV_DBND_HOME, cur_path)
return True
dbnd_home = __find_dbnd_home_at(cur_path)
if dbnd_home:
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# last chance, we couldn't find dbnd project so we'll use user's home folder
user_home = os.path.expanduser("~")
if user_home:
dbnd_log_init_msg("dbnd home was not found. Using user's home: %s" % user_home)
set_env_dir(ENV_DBND_HOME, user_home)
return True
return False
def _env_banner():
return "\tDBND_HOME={dbnd}\n\tDBND_SYSTEM={system}".format(
dbnd=os.environ[ENV_DBND_HOME], system=os.environ[ENV_DBND_SYSTEM]
)
_DBND_ENVIRONMENT = False
def _initialize_dbnd_home():
global _DBND_ENVIRONMENT
if _DBND_ENVIRONMENT:
return
_DBND_ENVIRONMENT = True
dbnd_log_init_msg("_initialize_dbnd_home")
project_config = get_dbnd_project_config()
if project_config.disabled:
dbnd_log_init_msg("databand is disabled via %s" % ENV_DBND__DISABLED)
return
dbnd_log_init_msg("Initializing Databand Basic Environment")
_initialize_google_composer()
_init_windows_python_path(_databand_package)
# main logic
__initialize_dbnd_home_environ()
__initialize_airflow_home()
if project_config.quiet_mode:
# we should not print anything if we are in shell completion!
import logging
logging.getLogger().setLevel(logging.CRITICAL + 1)
dbnd_log_init_msg(_env_banner())
def __initialize_dbnd_home_environ():
# MAIN PART OF THE SCRIPT
if ENV_DBND_HOME not in os.environ:
_find_and_set_dbnd_home()
if ENV_DBND_HOME not in os.environ:
raise DatabandHomeError(
"\nDBND_HOME could not be found when searching from current directory '%s' to root folder! \n "
"Use `export DBND__DEBUG_INIT=True` to get more debug information.\n"
"Trying fixing that issue by:\n"
"\t 1. Explicitly set current directory to DBND HOME via: `export DBND_HOME=ROOT_OF_YOUR_PROJECT`.\n"
"\t 2. `cd` into your project directory.\n"
"\t 3. Create one of the following files inside current directory: [%s].\n"
"\t 4. Run 'dbnd project-init' in current directory."
% (os.getcwd(), ", ".join(_MARKER_FILES))
)
_dbnd_home = os.environ[ENV_DBND_HOME]
if ENV_DBND_SYSTEM not in os.environ:
dbnd_system = os.path.join(_dbnd_home, ".dbnd")
# backward compatibility to $DBND_HOME/dbnd folder
if not os.path.exists(dbnd_system) and os.path.exists(
os.path.join(_dbnd_home, "dbnd")
):
dbnd_system = os.path.join(_dbnd_home, "dbnd")
os.environ[ENV_DBND_SYSTEM] = dbnd_system
if ENV_DBND_LIB in os.environ:
# usually will not happen
dbnd_log_init_msg("Using DBND Library from %s" % os.environ[ENV_DBND_LIB])
else:
os.environ[ENV_DBND_LIB] = _databand_package
def __initialize_airflow_home():
ENV_AIRFLOW_HOME = "AIRFLOW_HOME"
if ENV_AIRFLOW_HOME in os.environ:
# user settings - we do nothing
dbnd_log_init_msg(
"Found user defined AIRFLOW_HOME at %s" % os.environ[ENV_AIRFLOW_HOME]
)
return
for dbnd_airflow_home in [
os.path.join(os.environ[ENV_DBND_SYSTEM], "airflow"),
os.path.join(os.environ[ENV_DBND_HOME], ".airflow"),
]:
if not os.path.exists(dbnd_airflow_home):
continue
dbnd_log_init_msg(
"Found airflow home folder at DBND, setting AIRFLOW_HOME to %s"
% dbnd_airflow_home
)
os.environ[ENV_AIRFLOW_HOME] = dbnd_airflow_home
def _initialize_google_composer():
if "COMPOSER_ENVIRONMENT" not in os.environ:
return
dbnd_log_init_msg("Initializing Google Composer Environment")
if ENV_DBND_HOME not in os.environ:
os.environ[ENV_DBND_HOME] = os.environ["HOME"]
env_tracker_raise_on_error = "DBND__CORE__TRACKER_RAISE_ON_ERROR"
if env_tracker_raise_on_error not in os.environ:
os.environ[env_tracker_raise_on_error] = "false"
def dbnd_project_path(*path):
return get_dbnd_project_config().dbnd_project_path(*path)
| has_marker_file( | identifier_name |
environ_config.py | # © Copyright Databand.ai, an IBM Company 2022
# << should be run before import to airflow >>
# otherwise airflow.configuration will fail
# fix AIRFLOW_HOME for all runs
import os
from configparser import ConfigParser
from contextlib import contextmanager
from typing import Optional
from dbnd._core.configuration.project_env import (
_init_windows_python_path,
_is_init_mode,
)
from dbnd._core.log import dbnd_log
from dbnd._core.log.dbnd_log import dbnd_log_init_msg
from dbnd._core.utils.basics.environ_utils import (
environ_enabled,
environ_int,
set_env_dir,
set_on,
)
from dbnd._core.utils.basics.path_utils import abs_join, relative_path
DATABAND_AIRFLOW_CONN_ID = "dbnd_config" # DBND connection ID for Airflow connections
_MARKER_FILES = ["databand.cfg", "project.cfg", "databand-system.cfg"]
PARAM_ENV_TEMPLATE = "DBND__{S}__{K}"
ENV_DBND__DISABLED = "DBND__DISABLED"
ENV_DBND__TRACKING = (
"DBND__TRACKING" # implicit DBND tracking ( on any @task/log_ call)
)
ENV_DBND__UNITTEST_MODE = "DBND__UNITTEST"
ENV_DBND_QUIET = "DBND__QUIET"
ENV_DBND_HOME = "DBND_HOME"
ENV_DBND_SYSTEM = "DBND_SYSTEM"
ENV_DBND_LIB = "DBND_LIB"
ENV_DBND_CONFIG = "DBND_CONFIG" # extra config for DBND
ENV_DBND__USER_PRE_INIT = "DBND__USER_PRE_INIT" # run on user init
ENV_DBND__NO_MODULES = (
"DBND__NO_MODULES" # do not auto-load user modules (for example: airflow)
)
ENV_DBND__NO_PLUGINS = "DBND__NO_PLUGINS" # do not auto-load user plugins (for example all dbnd-tensorflow)
ENV_DBND__NO_TABLES = "DBND__NO_TABLES" # do not print fancy tables
ENV_DBND__SHOW_STACK_ON_SIGQUIT = "DBND__SHOW_STACK_ON_SIGQUIT"
ENV_DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING = (
"DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING"
)
ENV_DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING = "DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING"
ENV_DBND_USER = "DBND_USER"
ENV_DBND_ENV = "DBND_ENV"
# DBND RUN info variables
SCHEDULED_DAG_RUN_ID_ENV = "SCHEDULED_DAG_RUN_ID"
SCHEDULED_DATE_ENV = "SCHEDULED_DATE"
SCHEDULED_JOB_UID_ENV = "SCHEDULED_JOB_UID"
DBND_ROOT_RUN_UID = "DBND_ROOT_RUN_UID"
DBND_ROOT_RUN_TRACKER_URL = "DBND_ROOT_RUN_TRACKER_URL"
DBND_PARENT_TASK_RUN_UID = "DBND_PARENT_TASK_RUN_UID"
DBND_PARENT_TASK_RUN_ATTEMPT_UID = "DBND_PARENT_TASK_RUN_ATTEMPT_UID"
DBND_RUN_SUBMIT_UID = "DBND_SUBMIT_UID"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
return os.environ.get(ENV_DBND_HOME) or os.curdir
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check if we can have project marker file by import it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
def _process_cfg(folder):
# dbnd home is being pointed inside [databand] in 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
continue
config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
def _has_marker_file(folder):
# dbnd home is where 'marker' files are located in
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path): | return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file = _process_cfg(folder)
if dbnd_home:
dbnd_log_init_msg(
"Found dbnd home by at %s, by config file: %s" % (dbnd_home, config_file)
)
return dbnd_home
dbnd_home, marker_file = _has_marker_file(folder)
if dbnd_home:
dbnd_log_init_msg(
"Found dbnd home by at %s, by marker file: %s" % (dbnd_home, marker_file)
)
return dbnd_home
dbnd_log_init_msg("dbnd home was not found at %s" % folder)
return False
def _find_and_set_dbnd_home():
# falling back to simple version
if _is_init_mode():
dbnd_home = os.path.abspath(".")
print("Initializing new dbnd environment, using %s as DBND_HOME" % dbnd_home)
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# looking for dbnd project folder to be set as home
project_folder = _find_project_by_import()
if project_folder:
dbnd_log_init_msg(
"Found project folder by import from marker %s" % project_folder
)
# we know about project folder, let try to find "custom" configs in it
dbnd_home = __find_dbnd_home_at(project_folder)
if not dbnd_home:
dbnd_log_init_msg(
"No markers at %s! setting dbnd_home to %s" % (dbnd_home, dbnd_home)
)
dbnd_home = project_folder
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# traversing all the way up to until finding relevant anchor files
cur_dir = os.path.normpath(os.getcwd())
cur_dir_split = cur_dir.split(os.sep)
cur_dir_split_reversed = reversed(list(enumerate(cur_dir_split)))
dbnd_log_init_msg(
"Trying to find dbnd_home by traversing up to the root folder starting at %s"
% cur_dir
)
for idx, cur_folder in cur_dir_split_reversed:
cur_path = os.path.join("/", *cur_dir_split[1 : (idx + 1)])
dbnd_system_file = os.path.join(cur_path, ".dbnd", "databand-system.cfg")
if os.path.exists(dbnd_system_file):
set_env_dir(ENV_DBND_HOME, cur_path)
return True
dbnd_home = __find_dbnd_home_at(cur_path)
if dbnd_home:
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# last chance, we couldn't find dbnd project so we'll use user's home folder
user_home = os.path.expanduser("~")
if user_home:
dbnd_log_init_msg("dbnd home was not found. Using user's home: %s" % user_home)
set_env_dir(ENV_DBND_HOME, user_home)
return True
return False
def _env_banner():
return "\tDBND_HOME={dbnd}\n\tDBND_SYSTEM={system}".format(
dbnd=os.environ[ENV_DBND_HOME], system=os.environ[ENV_DBND_SYSTEM]
)
_DBND_ENVIRONMENT = False
def _initialize_dbnd_home():
global _DBND_ENVIRONMENT
if _DBND_ENVIRONMENT:
return
_DBND_ENVIRONMENT = True
dbnd_log_init_msg("_initialize_dbnd_home")
project_config = get_dbnd_project_config()
if project_config.disabled:
dbnd_log_init_msg("databand is disabled via %s" % ENV_DBND__DISABLED)
return
dbnd_log_init_msg("Initializing Databand Basic Environment")
_initialize_google_composer()
_init_windows_python_path(_databand_package)
# main logic
__initialize_dbnd_home_environ()
__initialize_airflow_home()
if project_config.quiet_mode:
# we should not print anything if we are in shell completion!
import logging
logging.getLogger().setLevel(logging.CRITICAL + 1)
dbnd_log_init_msg(_env_banner())
def __initialize_dbnd_home_environ():
# MAIN PART OF THE SCRIPT
if ENV_DBND_HOME not in os.environ:
_find_and_set_dbnd_home()
if ENV_DBND_HOME not in os.environ:
raise DatabandHomeError(
"\nDBND_HOME could not be found when searching from current directory '%s' to root folder! \n "
"Use `export DBND__DEBUG_INIT=True` to get more debug information.\n"
"Trying fixing that issue by:\n"
"\t 1. Explicitly set current directory to DBND HOME via: `export DBND_HOME=ROOT_OF_YOUR_PROJECT`.\n"
"\t 2. `cd` into your project directory.\n"
"\t 3. Create one of the following files inside current directory: [%s].\n"
"\t 4. Run 'dbnd project-init' in current directory."
% (os.getcwd(), ", ".join(_MARKER_FILES))
)
_dbnd_home = os.environ[ENV_DBND_HOME]
if ENV_DBND_SYSTEM not in os.environ:
dbnd_system = os.path.join(_dbnd_home, ".dbnd")
# backward compatibility to $DBND_HOME/dbnd folder
if not os.path.exists(dbnd_system) and os.path.exists(
os.path.join(_dbnd_home, "dbnd")
):
dbnd_system = os.path.join(_dbnd_home, "dbnd")
os.environ[ENV_DBND_SYSTEM] = dbnd_system
if ENV_DBND_LIB in os.environ:
# usually will not happen
dbnd_log_init_msg("Using DBND Library from %s" % os.environ[ENV_DBND_LIB])
else:
os.environ[ENV_DBND_LIB] = _databand_package
def __initialize_airflow_home():
ENV_AIRFLOW_HOME = "AIRFLOW_HOME"
if ENV_AIRFLOW_HOME in os.environ:
# user settings - we do nothing
dbnd_log_init_msg(
"Found user defined AIRFLOW_HOME at %s" % os.environ[ENV_AIRFLOW_HOME]
)
return
for dbnd_airflow_home in [
os.path.join(os.environ[ENV_DBND_SYSTEM], "airflow"),
os.path.join(os.environ[ENV_DBND_HOME], ".airflow"),
]:
if not os.path.exists(dbnd_airflow_home):
continue
dbnd_log_init_msg(
"Found airflow home folder at DBND, setting AIRFLOW_HOME to %s"
% dbnd_airflow_home
)
os.environ[ENV_AIRFLOW_HOME] = dbnd_airflow_home
def _initialize_google_composer():
if "COMPOSER_ENVIRONMENT" not in os.environ:
return
dbnd_log_init_msg("Initializing Google Composer Environment")
if ENV_DBND_HOME not in os.environ:
os.environ[ENV_DBND_HOME] = os.environ["HOME"]
env_tracker_raise_on_error = "DBND__CORE__TRACKER_RAISE_ON_ERROR"
if env_tracker_raise_on_error not in os.environ:
os.environ[env_tracker_raise_on_error] = "false"
def dbnd_project_path(*path):
return get_dbnd_project_config().dbnd_project_path(*path) | return folder, file_path
| random_line_split |
environ_config.py | # © Copyright Databand.ai, an IBM Company 2022
# << should be run before import to airflow >>
# otherwise airflow.configuration will fail
# fix AIRFLOW_HOME for all runs
import os
from configparser import ConfigParser
from contextlib import contextmanager
from typing import Optional
from dbnd._core.configuration.project_env import (
_init_windows_python_path,
_is_init_mode,
)
from dbnd._core.log import dbnd_log
from dbnd._core.log.dbnd_log import dbnd_log_init_msg
from dbnd._core.utils.basics.environ_utils import (
environ_enabled,
environ_int,
set_env_dir,
set_on,
)
from dbnd._core.utils.basics.path_utils import abs_join, relative_path
DATABAND_AIRFLOW_CONN_ID = "dbnd_config" # DBND connection ID for Airflow connections
_MARKER_FILES = ["databand.cfg", "project.cfg", "databand-system.cfg"]
PARAM_ENV_TEMPLATE = "DBND__{S}__{K}"
ENV_DBND__DISABLED = "DBND__DISABLED"
ENV_DBND__TRACKING = (
"DBND__TRACKING" # implicit DBND tracking ( on any @task/log_ call)
)
ENV_DBND__UNITTEST_MODE = "DBND__UNITTEST"
ENV_DBND_QUIET = "DBND__QUIET"
ENV_DBND_HOME = "DBND_HOME"
ENV_DBND_SYSTEM = "DBND_SYSTEM"
ENV_DBND_LIB = "DBND_LIB"
ENV_DBND_CONFIG = "DBND_CONFIG" # extra config for DBND
ENV_DBND__USER_PRE_INIT = "DBND__USER_PRE_INIT" # run on user init
ENV_DBND__NO_MODULES = (
"DBND__NO_MODULES" # do not auto-load user modules (for example: airflow)
)
ENV_DBND__NO_PLUGINS = "DBND__NO_PLUGINS" # do not auto-load user plugins (for example all dbnd-tensorflow)
ENV_DBND__NO_TABLES = "DBND__NO_TABLES" # do not print fancy tables
ENV_DBND__SHOW_STACK_ON_SIGQUIT = "DBND__SHOW_STACK_ON_SIGQUIT"
ENV_DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING = (
"DBND__OVERRIDE_AIRFLOW_LOG_SYSTEM_FOR_TRACKING"
)
ENV_DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING = "DBND__DISABLE_AIRFLOW_SUBDAG_TRACKING"
ENV_DBND_USER = "DBND_USER"
ENV_DBND_ENV = "DBND_ENV"
# DBND RUN info variables
SCHEDULED_DAG_RUN_ID_ENV = "SCHEDULED_DAG_RUN_ID"
SCHEDULED_DATE_ENV = "SCHEDULED_DATE"
SCHEDULED_JOB_UID_ENV = "SCHEDULED_JOB_UID"
DBND_ROOT_RUN_UID = "DBND_ROOT_RUN_UID"
DBND_ROOT_RUN_TRACKER_URL = "DBND_ROOT_RUN_TRACKER_URL"
DBND_PARENT_TASK_RUN_UID = "DBND_PARENT_TASK_RUN_UID"
DBND_PARENT_TASK_RUN_ATTEMPT_UID = "DBND_PARENT_TASK_RUN_ATTEMPT_UID"
DBND_RUN_SUBMIT_UID = "DBND_SUBMIT_UID"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
return os.environ.get(ENV_DBND_HOME) or os.curdir
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check if we can have project marker file by import it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
def _process_cfg(folder):
# dbnd home is being pointed inside [databand] in 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
c | config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
def _has_marker_file(folder):
# dbnd home is where 'marker' files are located in
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path):
return folder, file_path
return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file = _process_cfg(folder)
if dbnd_home:
dbnd_log_init_msg(
"Found dbnd home by at %s, by config file: %s" % (dbnd_home, config_file)
)
return dbnd_home
dbnd_home, marker_file = _has_marker_file(folder)
if dbnd_home:
dbnd_log_init_msg(
"Found dbnd home by at %s, by marker file: %s" % (dbnd_home, marker_file)
)
return dbnd_home
dbnd_log_init_msg("dbnd home was not found at %s" % folder)
return False
def _find_and_set_dbnd_home():
# falling back to simple version
if _is_init_mode():
dbnd_home = os.path.abspath(".")
print("Initializing new dbnd environment, using %s as DBND_HOME" % dbnd_home)
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# looking for dbnd project folder to be set as home
project_folder = _find_project_by_import()
if project_folder:
dbnd_log_init_msg(
"Found project folder by import from marker %s" % project_folder
)
# we know about project folder, let try to find "custom" configs in it
dbnd_home = __find_dbnd_home_at(project_folder)
if not dbnd_home:
dbnd_log_init_msg(
"No markers at %s! setting dbnd_home to %s" % (dbnd_home, dbnd_home)
)
dbnd_home = project_folder
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# traversing all the way up to until finding relevant anchor files
cur_dir = os.path.normpath(os.getcwd())
cur_dir_split = cur_dir.split(os.sep)
cur_dir_split_reversed = reversed(list(enumerate(cur_dir_split)))
dbnd_log_init_msg(
"Trying to find dbnd_home by traversing up to the root folder starting at %s"
% cur_dir
)
for idx, cur_folder in cur_dir_split_reversed:
cur_path = os.path.join("/", *cur_dir_split[1 : (idx + 1)])
dbnd_system_file = os.path.join(cur_path, ".dbnd", "databand-system.cfg")
if os.path.exists(dbnd_system_file):
set_env_dir(ENV_DBND_HOME, cur_path)
return True
dbnd_home = __find_dbnd_home_at(cur_path)
if dbnd_home:
set_env_dir(ENV_DBND_HOME, dbnd_home)
return True
# last chance, we couldn't find dbnd project so we'll use user's home folder
user_home = os.path.expanduser("~")
if user_home:
dbnd_log_init_msg("dbnd home was not found. Using user's home: %s" % user_home)
set_env_dir(ENV_DBND_HOME, user_home)
return True
return False
def _env_banner():
return "\tDBND_HOME={dbnd}\n\tDBND_SYSTEM={system}".format(
dbnd=os.environ[ENV_DBND_HOME], system=os.environ[ENV_DBND_SYSTEM]
)
_DBND_ENVIRONMENT = False
def _initialize_dbnd_home():
global _DBND_ENVIRONMENT
if _DBND_ENVIRONMENT:
return
_DBND_ENVIRONMENT = True
dbnd_log_init_msg("_initialize_dbnd_home")
project_config = get_dbnd_project_config()
if project_config.disabled:
dbnd_log_init_msg("databand is disabled via %s" % ENV_DBND__DISABLED)
return
dbnd_log_init_msg("Initializing Databand Basic Environment")
_initialize_google_composer()
_init_windows_python_path(_databand_package)
# main logic
__initialize_dbnd_home_environ()
__initialize_airflow_home()
if project_config.quiet_mode:
# we should not print anything if we are in shell completion!
import logging
logging.getLogger().setLevel(logging.CRITICAL + 1)
dbnd_log_init_msg(_env_banner())
def __initialize_dbnd_home_environ():
# MAIN PART OF THE SCRIPT
if ENV_DBND_HOME not in os.environ:
_find_and_set_dbnd_home()
if ENV_DBND_HOME not in os.environ:
raise DatabandHomeError(
"\nDBND_HOME could not be found when searching from current directory '%s' to root folder! \n "
"Use `export DBND__DEBUG_INIT=True` to get more debug information.\n"
"Trying fixing that issue by:\n"
"\t 1. Explicitly set current directory to DBND HOME via: `export DBND_HOME=ROOT_OF_YOUR_PROJECT`.\n"
"\t 2. `cd` into your project directory.\n"
"\t 3. Create one of the following files inside current directory: [%s].\n"
"\t 4. Run 'dbnd project-init' in current directory."
% (os.getcwd(), ", ".join(_MARKER_FILES))
)
_dbnd_home = os.environ[ENV_DBND_HOME]
if ENV_DBND_SYSTEM not in os.environ:
dbnd_system = os.path.join(_dbnd_home, ".dbnd")
# backward compatibility to $DBND_HOME/dbnd folder
if not os.path.exists(dbnd_system) and os.path.exists(
os.path.join(_dbnd_home, "dbnd")
):
dbnd_system = os.path.join(_dbnd_home, "dbnd")
os.environ[ENV_DBND_SYSTEM] = dbnd_system
if ENV_DBND_LIB in os.environ:
# usually will not happen
dbnd_log_init_msg("Using DBND Library from %s" % os.environ[ENV_DBND_LIB])
else:
os.environ[ENV_DBND_LIB] = _databand_package
def __initialize_airflow_home():
ENV_AIRFLOW_HOME = "AIRFLOW_HOME"
if ENV_AIRFLOW_HOME in os.environ:
# user settings - we do nothing
dbnd_log_init_msg(
"Found user defined AIRFLOW_HOME at %s" % os.environ[ENV_AIRFLOW_HOME]
)
return
for dbnd_airflow_home in [
os.path.join(os.environ[ENV_DBND_SYSTEM], "airflow"),
os.path.join(os.environ[ENV_DBND_HOME], ".airflow"),
]:
if not os.path.exists(dbnd_airflow_home):
continue
dbnd_log_init_msg(
"Found airflow home folder at DBND, setting AIRFLOW_HOME to %s"
% dbnd_airflow_home
)
os.environ[ENV_AIRFLOW_HOME] = dbnd_airflow_home
def _initialize_google_composer():
if "COMPOSER_ENVIRONMENT" not in os.environ:
return
dbnd_log_init_msg("Initializing Google Composer Environment")
if ENV_DBND_HOME not in os.environ:
os.environ[ENV_DBND_HOME] = os.environ["HOME"]
env_tracker_raise_on_error = "DBND__CORE__TRACKER_RAISE_ON_ERROR"
if env_tracker_raise_on_error not in os.environ:
os.environ[env_tracker_raise_on_error] = "false"
def dbnd_project_path(*path):
return get_dbnd_project_config().dbnd_project_path(*path)
| ontinue
| conditional_block |
aarch64.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Display, Formatter};
use std::result;
use arch::aarch64::regs::Aarch64Register;
use kvm_ioctls::*;
use logger::{error, IncMetric, METRICS};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
/// Error configuring the general purpose aarch64 registers.
ConfigureRegisters(arch::aarch64::regs::Error),
/// Cannot open the kvm related file descriptor.
CreateFd(kvm_ioctls::Error),
/// Error getting the Vcpu preferred target on Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will give this away for saving restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(), | (os error 8)"
.to_string()
);
init_vcpu(&vcpu.fd, vm.fd());
let state = vcpu.save_state().expect("Cannot save state of vcpu");
assert!(!state.regs.is_empty());
vcpu.restore_state(&state)
.expect("Cannot restore state of vcpu");
let value = vcpu
.fd
.get_one_reg(0x6030_0000_0010_003E)
.expect("Cannot get sp core register");
assert!(state.regs.contains(&Aarch64Register {
id: 0x6030_0000_0010_003E,
value
}));
}
#[test]
fn test_setup_non_boot_vcpu() {
let (vm, _) = setup_vm(0x1000);
let vcpu1 = KvmVcpu::new(0, &vm).unwrap();
assert!(vcpu1.init(vm.fd()).is_ok());
let vcpu2 = KvmVcpu::new(1, &vm).unwrap();
assert!(vcpu2.init(vm.fd()).is_ok());
}
} | "Failed to restore the state of the vcpu: Failed to set register: Exec format error \ | random_line_split |
aarch64.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Display, Formatter};
use std::result;
use arch::aarch64::regs::Aarch64Register;
use kvm_ioctls::*;
use logger::{error, IncMetric, METRICS};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
/// Error configuring the general purpose aarch64 registers.
ConfigureRegisters(arch::aarch64::regs::Error),
/// Cannot open the kvm related file descriptor.
CreateFd(kvm_ioctls::Error),
/// Error getting the Vcpu preferred target on Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn | (index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will give this away for saving restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to restore the state of the vcpu: Failed to set register: Exec format error \
(os error 8)"
.to_string()
);
init_vcpu(&vcpu.fd, vm.fd());
let state = vcpu.save_state().expect("Cannot save state of vcpu");
assert!(!state.regs.is_empty());
vcpu.restore_state(&state)
.expect("Cannot restore state of vcpu");
let value = vcpu
.fd
.get_one_reg(0x6030_0000_0010_003E)
.expect("Cannot get sp core register");
assert!(state.regs.contains(&Aarch64Register {
id: 0x6030_0000_0010_003E,
value
}));
}
#[test]
fn test_setup_non_boot_vcpu() {
let (vm, _) = setup_vm(0x1000);
let vcpu1 = KvmVcpu::new(0, &vm).unwrap();
assert!(vcpu1.init(vm.fd()).is_ok());
let vcpu2 = KvmVcpu::new(1, &vm).unwrap();
assert!(vcpu2.init(vm.fd()).is_ok());
}
}
| new | identifier_name |
aarch64.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Display, Formatter};
use std::result;
use arch::aarch64::regs::Aarch64Register;
use kvm_ioctls::*;
use logger::{error, IncMetric, METRICS};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
/// Error configuring the general purpose aarch64 registers.
ConfigureRegisters(arch::aarch64::regs::Error),
/// Cannot open the kvm related file descriptor.
CreateFd(kvm_ioctls::Error),
/// Error getting the Vcpu preferred target on Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> |
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will give this away for saving restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to restore the state of the vcpu: Failed to set register: Exec format error \
(os error 8)"
.to_string()
);
init_vcpu(&vcpu.fd, vm.fd());
let state = vcpu.save_state().expect("Cannot save state of vcpu");
assert!(!state.regs.is_empty());
vcpu.restore_state(&state)
.expect("Cannot restore state of vcpu");
let value = vcpu
.fd
.get_one_reg(0x6030_0000_0010_003E)
.expect("Cannot get sp core register");
assert!(state.regs.contains(&Aarch64Register {
id: 0x6030_0000_0010_003E,
value
}));
}
#[test]
fn test_setup_non_boot_vcpu() {
let (vm, _) = setup_vm(0x1000);
let vcpu1 = KvmVcpu::new(0, &vm).unwrap();
assert!(vcpu1.init(vm.fd()).is_ok());
let vcpu2 = KvmVcpu::new(1, &vm).unwrap();
assert!(vcpu2.init(vm.fd()).is_ok());
}
}
| {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
} | identifier_body |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that are
/// expected in the inverse they're given, such as the 6567's AEC signal being turned into
/// the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment ot its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]); | assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
} | assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]); | random_line_split |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that are
/// expected in the inverse they're given, such as the 6567's AEC signal being turned into
/// the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment ot its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => |
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
}
| {} | conditional_block |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that are
/// expected in the inverse they're given, such as the 6567's AEC signal being turned into
/// the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment ot its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> |
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
}
| {
Vec::new()
} | identifier_body |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that are
/// expected in the inverse they're given, such as the 6567's AEC signal being turned into
/// the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment ot its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn | () {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
}
| input_high | identifier_name |
render.go | package mustache
import (
"fmt"
"html"
"math"
"reflect"
"strconv"
"strings"
"github.com/eriklott/mustache/internal/ast"
"github.com/eriklott/mustache/internal/parse"
)
const maxPartialDepth = 100000
// renderer represents the state of the rendering of a single template.
type renderer struct {
template *Template // the template that initiated the render
stack []reflect.Value // the context stack
depth int // the depth of executing partials
// write fields
w strings.Builder // the writer
indent string // the current indent string
indentNext bool // when true, apply indent before next write
}
// newRenderer returns a newly initialized renderer.
func (t *Template) newRenderer() *renderer {
return &renderer{template: t}
}
func (r *renderer) String() string {
return r.w.String()
}
// renderToString sub-renders a tree into a string. If an error occurs,
// rendering stops and the error is returned.
func (r *renderer) | (tree *ast.Tree) (string, error) {
subRenderer := &renderer{
template: r.template,
stack: r.stack,
depth: 0,
w: strings.Builder{},
indent: "",
indentNext: false,
}
err := subRenderer.walk(tree.Name, tree)
s := subRenderer.String()
// the subRenderer may have pushed and popped enough contexts onto the stack
// to cause the slice to allocate to a new larger underlaying array. If this
// has happened, we want to keep the pointer to that larger array to minimize
// allocations.
r.stack = subRenderer.stack
return s, err
}
// write a string to the template output.
func (r *renderer) write(s string, unescaped bool) {
if r.indentNext {
r.indentNext = false
r.w.WriteString(r.indent)
}
if !unescaped {
s = html.EscapeString(s)
}
r.w.WriteString(s)
}
// conceptually shifts a context onto the stack. Since the stack is actually in
// reverse order, the context is pushed.
func (r *renderer) push(context reflect.Value) {
r.stack = append(r.stack, context)
}
// conceptually unshifts a context onto the stack. Since the stack is actually in
// reverse order, the context is popped.
func (r *renderer) pop() reflect.Value {
if len(r.stack) == 0 {
return reflect.Value{}
}
ctx := r.stack[len(r.stack)-1]
r.stack = r.stack[:len(r.stack)-1]
return ctx
}
// render recursively walks each node of the tree, incrementally building the template
// string output.
func (r *renderer) walk(treeName string, node interface{}) error {
switch t := node.(type) {
case *ast.Tree:
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
case *ast.Text:
r.write(t.Text, true)
if t.EndOfLine {
r.indentNext = true
}
case *ast.Variable:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
s, err := r.toString(v, parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return err
}
r.write(s, t.Unescaped)
case *ast.Section:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
v, err = r.toTruthyValue(v)
if err != nil {
return err
}
isTruthy := v.IsValid()
if !t.Inverted && isTruthy {
switch v.Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < v.Len(); i++ {
r.push(v.Index(i))
for j := range t.Nodes {
err := r.walk(treeName, t.Nodes[j])
if err != nil {
return err
}
}
r.pop()
}
case reflect.Func:
s := v.Call([]reflect.Value{reflect.ValueOf(t.Text)})[0].String()
tree, err := parse.Parse("lambda", s, t.LDelim, t.RDelim)
if err != nil {
return nil
}
err = r.walk(treeName, tree)
if err != nil {
return err
}
default:
r.push(v)
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
r.pop()
}
} else if t.Inverted && !isTruthy {
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
}
case *ast.Partial:
tree, ok := r.template.treeMap[t.Key]
if !ok {
if r.template.ContextErrorsEnabled {
return fmt.Errorf("%s:%d:%d: partial not found: %s", treeName, t.Line, t.Column, t.Key)
}
return nil
}
origIndent := r.indent
r.indent += t.Indent
r.indentNext = true
r.depth++
if r.depth >= maxPartialDepth {
return fmt.Errorf("exceeded maximum partial depth: %d", maxPartialDepth)
}
err := r.walk(tree.Name, tree)
if err != nil {
return err
}
r.depth--
r.indent = origIndent
}
return nil
}
// toString transforms a reflect.Value into a string.
func (r *renderer) toString(v reflect.Value, ldelim, rdelim string) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%v", v.Complex()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Float32, reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Func:
if v.IsNil() {
return "", nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if !isArity0 {
return "", nil
}
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toString(v, ldelim, rdelim)
}
tree, err := parse.Parse("lambda", v.String(), ldelim, rdelim)
if err != nil {
return "", err
}
s, err := r.renderToString(tree)
if err != nil {
return "", err
}
return s, nil
case reflect.Ptr, reflect.Interface:
return r.toString(indirect(v), ldelim, rdelim)
case reflect.Chan:
return "", nil
case reflect.Invalid:
return "", nil
default:
return fmt.Sprintf("%v", v.Interface()), nil
}
}
// toTruthyValue returns a value when it is "truthy". If the value is
// falsey, the reflect zero value is returned.
func (r *renderer) toTruthyValue(v reflect.Value) (reflect.Value, error) {
switch v.Kind() {
case reflect.Bool:
if !v.Bool() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Uint() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Float32, reflect.Float64:
if math.Float64bits(v.Float()) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
if math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.String:
if v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Array, reflect.Slice:
if v.IsNil() || v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Func:
if v.IsNil() {
return reflect.Value{}, nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if isArity0 {
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toTruthyValue(v)
}
tree, err := parse.Parse("lambda", v.String(), parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return reflect.Value{}, nil
}
s, err := r.renderToString(tree)
if err != nil {
return reflect.Value{}, nil
}
return r.toTruthyValue(reflect.ValueOf(s))
}
isArity1 := t.NumIn() == 1 && t.In(0).Kind() == reflect.String && t.NumOut() == 1 && t.Out(0).Kind() == reflect.String
if isArity1 {
return v, nil
}
return reflect.Value{}, nil
case reflect.Ptr, reflect.Interface:
return r.toTruthyValue(indirect(v))
case reflect.Map:
if v.IsNil() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Struct:
return v, nil
case reflect.Invalid:
return reflect.Value{}, nil
default:
return reflect.Value{}, nil
}
}
// indirect returns the value that v points to, or concrete
// element underlying an interface.
func indirect(v reflect.Value) reflect.Value {
loop:
for v.IsValid() {
switch av := v; av.Kind() {
case reflect.Ptr:
v = av.Elem()
case reflect.Interface:
v = av.Elem()
default:
break loop
}
}
return v
}
// lookup a key in the context stack. If a value was not found, the reflect.Value zero
// type is returned.
func (r *renderer) lookup(name string, ln, col int, key []string) (reflect.Value, error) {
v := lookupKeysStack(key, r.stack)
if !v.IsValid() && r.template.ContextErrorsEnabled {
return v, fmt.Errorf("%s:%d:%d: cannot find value %s in context", name, ln, col, strings.Join(key, "."))
}
return v, nil
}
// lookupKeysStack obtains a value for a dotted key - eg: a.b.c . If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeysStack(key []string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
if len(key) == 0 {
return v
}
for i := range key {
if i == 0 {
v = lookupKeyStack(key[i], contexts)
continue
}
v = lookupKeyContext(key[i], v)
if !v.IsValid() {
break
}
}
return v
}
// lookupKeyStack returns a value from the first context in the stack that
// contains a value for that key. If a value was not found, the reflect.Value zero
// type is returned.
func lookupKeyStack(key string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
for i := len(contexts) - 1; i >= 0; i-- {
ctx := contexts[i]
v = lookupKeyContext(key, ctx)
if v.IsValid() {
break
}
}
return v
}
// lookup returns a value by key from the context. If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeyContext(key string, ctx reflect.Value) reflect.Value {
if key == "." {
return ctx
}
// check context for method by name
if ctx.IsValid() {
method := ctx.MethodByName(key)
if method.IsValid() {
return method
}
}
// check for fields and keys on concrete types.
switch ctx.Kind() {
case reflect.Ptr, reflect.Interface:
return lookupKeyContext(key, indirect(ctx))
case reflect.Map:
return ctx.MapIndex(reflect.ValueOf(key))
case reflect.Struct:
return ctx.FieldByName(key)
default:
return reflect.Value{}
}
}
| renderToString | identifier_name |
render.go | package mustache
import (
"fmt"
"html"
"math"
"reflect"
"strconv"
"strings"
"github.com/eriklott/mustache/internal/ast"
"github.com/eriklott/mustache/internal/parse"
)
const maxPartialDepth = 100000
// renderer represents the state of the rendering of a single template.
type renderer struct {
template *Template // the template that initiated the render
stack []reflect.Value // the context stack
depth int // the depth of executing partials
// write fields
w strings.Builder // the writer
indent string // the current indent string
indentNext bool // when true, apply indent before next write
}
// newRenderer returns a newly initialized renderer.
func (t *Template) newRenderer() *renderer {
return &renderer{template: t}
}
func (r *renderer) String() string {
return r.w.String()
}
// renderToString sub-renders a tree into a string. If an error occurs,
// rendering stops and the error is returned.
func (r *renderer) renderToString(tree *ast.Tree) (string, error) {
subRenderer := &renderer{
template: r.template,
stack: r.stack,
depth: 0,
w: strings.Builder{},
indent: "",
indentNext: false,
}
err := subRenderer.walk(tree.Name, tree)
s := subRenderer.String()
// the subRenderer may have pushed and popped enough contexts onto the stack
// to cause the slice to allocate to a new larger underlaying array. If this
// has happened, we want to keep the pointer to that larger array to minimize
// allocations.
r.stack = subRenderer.stack
return s, err
}
// write a string to the template output.
func (r *renderer) write(s string, unescaped bool) {
if r.indentNext {
r.indentNext = false
r.w.WriteString(r.indent)
}
if !unescaped {
s = html.EscapeString(s)
}
r.w.WriteString(s)
}
// conceptually shifts a context onto the stack. Since the stack is actually in
// reverse order, the context is pushed.
func (r *renderer) push(context reflect.Value) {
r.stack = append(r.stack, context)
}
// conceptually unshifts a context onto the stack. Since the stack is actually in
// reverse order, the context is popped.
func (r *renderer) pop() reflect.Value {
if len(r.stack) == 0 {
return reflect.Value{}
}
ctx := r.stack[len(r.stack)-1]
r.stack = r.stack[:len(r.stack)-1]
return ctx
}
// render recursively walks each node of the tree, incrementally building the template
// string output.
func (r *renderer) walk(treeName string, node interface{}) error {
switch t := node.(type) {
case *ast.Tree:
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
case *ast.Text:
r.write(t.Text, true)
if t.EndOfLine {
r.indentNext = true
}
case *ast.Variable:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
s, err := r.toString(v, parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return err
}
r.write(s, t.Unescaped)
case *ast.Section:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
v, err = r.toTruthyValue(v)
if err != nil {
return err
}
isTruthy := v.IsValid()
if !t.Inverted && isTruthy {
switch v.Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < v.Len(); i++ {
r.push(v.Index(i))
for j := range t.Nodes {
err := r.walk(treeName, t.Nodes[j])
if err != nil {
return err
}
}
r.pop()
}
case reflect.Func:
s := v.Call([]reflect.Value{reflect.ValueOf(t.Text)})[0].String()
tree, err := parse.Parse("lambda", s, t.LDelim, t.RDelim)
if err != nil {
return nil
}
err = r.walk(treeName, tree)
if err != nil {
return err
}
default:
r.push(v)
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
r.pop()
}
} else if t.Inverted && !isTruthy {
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
}
case *ast.Partial:
tree, ok := r.template.treeMap[t.Key]
if !ok {
if r.template.ContextErrorsEnabled {
return fmt.Errorf("%s:%d:%d: partial not found: %s", treeName, t.Line, t.Column, t.Key)
}
return nil
}
origIndent := r.indent
r.indent += t.Indent
r.indentNext = true
r.depth++
if r.depth >= maxPartialDepth {
return fmt.Errorf("exceeded maximum partial depth: %d", maxPartialDepth)
}
err := r.walk(tree.Name, tree)
if err != nil {
return err
}
r.depth--
r.indent = origIndent
}
return nil
}
// toString transforms a reflect.Value into a string.
func (r *renderer) toString(v reflect.Value, ldelim, rdelim string) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%v", v.Complex()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Float32, reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Func:
if v.IsNil() {
return "", nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if !isArity0 {
return "", nil
}
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toString(v, ldelim, rdelim)
}
tree, err := parse.Parse("lambda", v.String(), ldelim, rdelim)
if err != nil {
return "", err
}
s, err := r.renderToString(tree)
if err != nil {
return "", err
}
return s, nil
case reflect.Ptr, reflect.Interface:
return r.toString(indirect(v), ldelim, rdelim)
case reflect.Chan:
return "", nil
case reflect.Invalid:
return "", nil
default:
return fmt.Sprintf("%v", v.Interface()), nil
}
}
// toTruthyValue returns a value when it is "truthy". If the value is
// falsey, the reflect zero value is returned.
func (r *renderer) toTruthyValue(v reflect.Value) (reflect.Value, error) {
switch v.Kind() {
case reflect.Bool:
if !v.Bool() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Uint() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Float32, reflect.Float64:
if math.Float64bits(v.Float()) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
if math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.String:
if v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Array, reflect.Slice:
if v.IsNil() || v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Func:
if v.IsNil() {
return reflect.Value{}, nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if isArity0 {
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toTruthyValue(v)
}
tree, err := parse.Parse("lambda", v.String(), parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return reflect.Value{}, nil
}
s, err := r.renderToString(tree)
if err != nil {
return reflect.Value{}, nil
}
return r.toTruthyValue(reflect.ValueOf(s))
}
isArity1 := t.NumIn() == 1 && t.In(0).Kind() == reflect.String && t.NumOut() == 1 && t.Out(0).Kind() == reflect.String
if isArity1 {
return v, nil
}
return reflect.Value{}, nil
case reflect.Ptr, reflect.Interface:
return r.toTruthyValue(indirect(v))
case reflect.Map:
if v.IsNil() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Struct:
return v, nil
case reflect.Invalid:
return reflect.Value{}, nil
default:
return reflect.Value{}, nil
}
}
// indirect returns the value that v points to, or concrete
// element underlying an interface.
func indirect(v reflect.Value) reflect.Value {
loop:
for v.IsValid() {
switch av := v; av.Kind() {
case reflect.Ptr:
v = av.Elem()
case reflect.Interface:
v = av.Elem()
default:
break loop
}
}
return v
}
// lookup a key in the context stack. If a value was not found, the reflect.Value zero
// type is returned.
func (r *renderer) lookup(name string, ln, col int, key []string) (reflect.Value, error) {
v := lookupKeysStack(key, r.stack)
if !v.IsValid() && r.template.ContextErrorsEnabled {
return v, fmt.Errorf("%s:%d:%d: cannot find value %s in context", name, ln, col, strings.Join(key, "."))
}
return v, nil
}
// lookupKeysStack obtains a value for a dotted key - eg: a.b.c . If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeysStack(key []string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
if len(key) == 0 {
return v
}
for i := range key {
if i == 0 {
v = lookupKeyStack(key[i], contexts)
continue
}
v = lookupKeyContext(key[i], v)
if !v.IsValid() {
break
}
}
return v
}
// lookupKeyStack returns a value from the first context in the stack that
// contains a value for that key. If a value was not found, the reflect.Value zero
// type is returned.
func lookupKeyStack(key string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
for i := len(contexts) - 1; i >= 0; i-- {
ctx := contexts[i]
v = lookupKeyContext(key, ctx)
if v.IsValid() |
}
return v
}
// lookup returns a value by key from the context. If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeyContext(key string, ctx reflect.Value) reflect.Value {
if key == "." {
return ctx
}
// check context for method by name
if ctx.IsValid() {
method := ctx.MethodByName(key)
if method.IsValid() {
return method
}
}
// check for fields and keys on concrete types.
switch ctx.Kind() {
case reflect.Ptr, reflect.Interface:
return lookupKeyContext(key, indirect(ctx))
case reflect.Map:
return ctx.MapIndex(reflect.ValueOf(key))
case reflect.Struct:
return ctx.FieldByName(key)
default:
return reflect.Value{}
}
}
| {
break
} | conditional_block |
render.go | package mustache
import (
"fmt"
"html"
"math"
"reflect"
"strconv"
"strings"
"github.com/eriklott/mustache/internal/ast"
"github.com/eriklott/mustache/internal/parse"
)
const maxPartialDepth = 100000
// renderer represents the state of the rendering of a single template.
type renderer struct {
template *Template // the template that initiated the render
stack []reflect.Value // the context stack
depth int // the depth of executing partials
// write fields
w strings.Builder // the writer
indent string // the current indent string
indentNext bool // when true, apply indent before next write
}
// newRenderer returns a newly initialized renderer.
func (t *Template) newRenderer() *renderer {
return &renderer{template: t}
}
func (r *renderer) String() string {
return r.w.String()
}
// renderToString sub-renders a tree into a string. If an error occurs,
// rendering stops and the error is returned.
func (r *renderer) renderToString(tree *ast.Tree) (string, error) {
subRenderer := &renderer{
template: r.template,
stack: r.stack,
depth: 0,
w: strings.Builder{},
indent: "",
indentNext: false,
}
err := subRenderer.walk(tree.Name, tree)
s := subRenderer.String()
// the subRenderer may have pushed and popped enough contexts onto the stack
// to cause the slice to allocate to a new larger underlaying array. If this
// has happened, we want to keep the pointer to that larger array to minimize
// allocations.
r.stack = subRenderer.stack
return s, err
}
// write a string to the template output.
func (r *renderer) write(s string, unescaped bool) {
if r.indentNext {
r.indentNext = false
r.w.WriteString(r.indent)
}
if !unescaped {
s = html.EscapeString(s)
}
r.w.WriteString(s)
}
// conceptually shifts a context onto the stack. Since the stack is actually in
// reverse order, the context is pushed.
func (r *renderer) push(context reflect.Value) {
r.stack = append(r.stack, context)
}
// conceptually unshifts a context onto the stack. Since the stack is actually in
// reverse order, the context is popped.
func (r *renderer) pop() reflect.Value {
if len(r.stack) == 0 {
return reflect.Value{}
}
ctx := r.stack[len(r.stack)-1]
r.stack = r.stack[:len(r.stack)-1]
return ctx
}
// render recursively walks each node of the tree, incrementally building the template
// string output.
func (r *renderer) walk(treeName string, node interface{}) error {
switch t := node.(type) {
case *ast.Tree:
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
case *ast.Text:
r.write(t.Text, true)
if t.EndOfLine {
r.indentNext = true
}
case *ast.Variable:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
s, err := r.toString(v, parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return err
}
r.write(s, t.Unescaped)
case *ast.Section:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
v, err = r.toTruthyValue(v)
if err != nil {
return err
}
isTruthy := v.IsValid()
if !t.Inverted && isTruthy {
switch v.Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < v.Len(); i++ {
r.push(v.Index(i))
for j := range t.Nodes {
err := r.walk(treeName, t.Nodes[j])
if err != nil {
return err
}
}
r.pop()
}
case reflect.Func:
s := v.Call([]reflect.Value{reflect.ValueOf(t.Text)})[0].String()
tree, err := parse.Parse("lambda", s, t.LDelim, t.RDelim)
if err != nil {
return nil
}
err = r.walk(treeName, tree)
if err != nil {
return err
}
default:
r.push(v)
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
r.pop()
}
} else if t.Inverted && !isTruthy {
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
}
case *ast.Partial:
tree, ok := r.template.treeMap[t.Key]
if !ok {
if r.template.ContextErrorsEnabled {
return fmt.Errorf("%s:%d:%d: partial not found: %s", treeName, t.Line, t.Column, t.Key)
}
return nil
}
origIndent := r.indent
r.indent += t.Indent
r.indentNext = true
r.depth++
if r.depth >= maxPartialDepth {
return fmt.Errorf("exceeded maximum partial depth: %d", maxPartialDepth)
}
err := r.walk(tree.Name, tree)
if err != nil {
return err
}
r.depth--
r.indent = origIndent
}
return nil
}
// toString transforms a reflect.Value into a string.
func (r *renderer) toString(v reflect.Value, ldelim, rdelim string) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%v", v.Complex()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Float32, reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Func:
if v.IsNil() {
return "", nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if !isArity0 {
return "", nil
}
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toString(v, ldelim, rdelim)
}
tree, err := parse.Parse("lambda", v.String(), ldelim, rdelim)
if err != nil {
return "", err
}
s, err := r.renderToString(tree)
if err != nil {
return "", err
}
return s, nil
case reflect.Ptr, reflect.Interface:
return r.toString(indirect(v), ldelim, rdelim)
case reflect.Chan:
return "", nil
case reflect.Invalid:
return "", nil
default:
return fmt.Sprintf("%v", v.Interface()), nil
}
}
// toTruthyValue returns a value when it is "truthy". If the value is
// falsey, the reflect zero value is returned.
func (r *renderer) toTruthyValue(v reflect.Value) (reflect.Value, error) {
switch v.Kind() {
case reflect.Bool:
if !v.Bool() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Uint() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Float32, reflect.Float64:
if math.Float64bits(v.Float()) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
if math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.String:
if v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Array, reflect.Slice:
if v.IsNil() || v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Func:
if v.IsNil() {
return reflect.Value{}, nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if isArity0 {
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toTruthyValue(v)
}
tree, err := parse.Parse("lambda", v.String(), parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return reflect.Value{}, nil
}
s, err := r.renderToString(tree)
if err != nil {
return reflect.Value{}, nil
}
return r.toTruthyValue(reflect.ValueOf(s))
}
isArity1 := t.NumIn() == 1 && t.In(0).Kind() == reflect.String && t.NumOut() == 1 && t.Out(0).Kind() == reflect.String
if isArity1 {
return v, nil
}
return reflect.Value{}, nil
case reflect.Ptr, reflect.Interface:
return r.toTruthyValue(indirect(v))
case reflect.Map:
if v.IsNil() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Struct:
return v, nil
case reflect.Invalid:
return reflect.Value{}, nil
default:
return reflect.Value{}, nil
}
}
// indirect returns the value that v points to, or concrete
// element underlying an interface.
func indirect(v reflect.Value) reflect.Value |
// lookup a key in the context stack. If a value was not found, the reflect.Value zero
// type is returned.
func (r *renderer) lookup(name string, ln, col int, key []string) (reflect.Value, error) {
v := lookupKeysStack(key, r.stack)
if !v.IsValid() && r.template.ContextErrorsEnabled {
return v, fmt.Errorf("%s:%d:%d: cannot find value %s in context", name, ln, col, strings.Join(key, "."))
}
return v, nil
}
// lookupKeysStack obtains a value for a dotted key - eg: a.b.c . If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeysStack(key []string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
if len(key) == 0 {
return v
}
for i := range key {
if i == 0 {
v = lookupKeyStack(key[i], contexts)
continue
}
v = lookupKeyContext(key[i], v)
if !v.IsValid() {
break
}
}
return v
}
// lookupKeyStack returns a value from the first context in the stack that
// contains a value for that key. If a value was not found, the reflect.Value zero
// type is returned.
func lookupKeyStack(key string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
for i := len(contexts) - 1; i >= 0; i-- {
ctx := contexts[i]
v = lookupKeyContext(key, ctx)
if v.IsValid() {
break
}
}
return v
}
// lookup returns a value by key from the context. If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeyContext(key string, ctx reflect.Value) reflect.Value {
if key == "." {
return ctx
}
// check context for method by name
if ctx.IsValid() {
method := ctx.MethodByName(key)
if method.IsValid() {
return method
}
}
// check for fields and keys on concrete types.
switch ctx.Kind() {
case reflect.Ptr, reflect.Interface:
return lookupKeyContext(key, indirect(ctx))
case reflect.Map:
return ctx.MapIndex(reflect.ValueOf(key))
case reflect.Struct:
return ctx.FieldByName(key)
default:
return reflect.Value{}
}
}
| {
loop:
for v.IsValid() {
switch av := v; av.Kind() {
case reflect.Ptr:
v = av.Elem()
case reflect.Interface:
v = av.Elem()
default:
break loop
}
}
return v
} | identifier_body |
render.go | package mustache
import (
"fmt"
"html"
"math"
"reflect"
"strconv"
"strings"
"github.com/eriklott/mustache/internal/ast"
"github.com/eriklott/mustache/internal/parse"
)
const maxPartialDepth = 100000
// renderer represents the state of the rendering of a single template.
type renderer struct {
template *Template // the template that initiated the render
stack []reflect.Value // the context stack
depth int // the depth of executing partials
// write fields
w strings.Builder // the writer
indent string // the current indent string
indentNext bool // when true, apply indent before next write
}
// newRenderer returns a newly initialized renderer.
func (t *Template) newRenderer() *renderer {
return &renderer{template: t}
}
func (r *renderer) String() string {
return r.w.String()
}
// renderToString sub-renders a tree into a string. If an error occurs,
// rendering stops and the error is returned.
func (r *renderer) renderToString(tree *ast.Tree) (string, error) {
subRenderer := &renderer{
template: r.template,
stack: r.stack,
depth: 0,
w: strings.Builder{},
indent: "",
indentNext: false,
}
err := subRenderer.walk(tree.Name, tree)
s := subRenderer.String()
// the subRenderer may have pushed and popped enough contexts onto the stack
// to cause the slice to allocate to a new larger underlaying array. If this
// has happened, we want to keep the pointer to that larger array to minimize
// allocations.
r.stack = subRenderer.stack
return s, err
}
// write a string to the template output.
func (r *renderer) write(s string, unescaped bool) {
if r.indentNext {
r.indentNext = false
r.w.WriteString(r.indent)
}
if !unescaped {
s = html.EscapeString(s)
}
r.w.WriteString(s)
}
// conceptually shifts a context onto the stack. Since the stack is actually in
// reverse order, the context is pushed.
func (r *renderer) push(context reflect.Value) {
r.stack = append(r.stack, context)
}
// conceptually unshifts a context onto the stack. Since the stack is actually in
// reverse order, the context is popped.
func (r *renderer) pop() reflect.Value {
if len(r.stack) == 0 {
return reflect.Value{}
}
ctx := r.stack[len(r.stack)-1]
r.stack = r.stack[:len(r.stack)-1]
return ctx
}
// render recursively walks each node of the tree, incrementally building the template
// string output.
func (r *renderer) walk(treeName string, node interface{}) error {
switch t := node.(type) {
case *ast.Tree:
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
case *ast.Text:
r.write(t.Text, true)
if t.EndOfLine {
r.indentNext = true
}
case *ast.Variable:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
s, err := r.toString(v, parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return err
}
r.write(s, t.Unescaped)
case *ast.Section:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
v, err = r.toTruthyValue(v)
if err != nil {
return err
}
isTruthy := v.IsValid()
if !t.Inverted && isTruthy {
switch v.Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < v.Len(); i++ {
r.push(v.Index(i))
for j := range t.Nodes {
err := r.walk(treeName, t.Nodes[j])
if err != nil {
return err
}
}
r.pop()
}
case reflect.Func:
s := v.Call([]reflect.Value{reflect.ValueOf(t.Text)})[0].String()
tree, err := parse.Parse("lambda", s, t.LDelim, t.RDelim)
if err != nil {
return nil
}
err = r.walk(treeName, tree)
if err != nil {
return err
}
default:
r.push(v)
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
r.pop()
}
} else if t.Inverted && !isTruthy {
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
}
case *ast.Partial:
tree, ok := r.template.treeMap[t.Key]
if !ok {
if r.template.ContextErrorsEnabled {
return fmt.Errorf("%s:%d:%d: partial not found: %s", treeName, t.Line, t.Column, t.Key)
}
return nil
}
origIndent := r.indent
r.indent += t.Indent
r.indentNext = true
r.depth++
if r.depth >= maxPartialDepth {
return fmt.Errorf("exceeded maximum partial depth: %d", maxPartialDepth)
}
err := r.walk(tree.Name, tree)
if err != nil {
return err
}
r.depth--
r.indent = origIndent
}
return nil
}
// toString transforms a reflect.Value into a string.
func (r *renderer) toString(v reflect.Value, ldelim, rdelim string) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%v", v.Complex()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Float32, reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Func:
if v.IsNil() {
return "", nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if !isArity0 {
return "", nil
}
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toString(v, ldelim, rdelim)
}
tree, err := parse.Parse("lambda", v.String(), ldelim, rdelim)
if err != nil {
return "", err
}
s, err := r.renderToString(tree)
if err != nil {
return "", err
}
return s, nil
case reflect.Ptr, reflect.Interface:
return r.toString(indirect(v), ldelim, rdelim)
case reflect.Chan:
return "", nil
case reflect.Invalid:
return "", nil
default:
return fmt.Sprintf("%v", v.Interface()), nil
}
}
// toTruthyValue returns a value when it is "truthy". If the value is
// falsey, the reflect zero value is returned.
func (r *renderer) toTruthyValue(v reflect.Value) (reflect.Value, error) {
switch v.Kind() {
case reflect.Bool:
if !v.Bool() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Uint() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Float32, reflect.Float64:
if math.Float64bits(v.Float()) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
if math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.String:
if v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Array, reflect.Slice:
if v.IsNil() || v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Func:
if v.IsNil() {
return reflect.Value{}, nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if isArity0 {
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toTruthyValue(v)
}
tree, err := parse.Parse("lambda", v.String(), parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return reflect.Value{}, nil
}
s, err := r.renderToString(tree)
if err != nil {
return reflect.Value{}, nil
}
return r.toTruthyValue(reflect.ValueOf(s))
}
isArity1 := t.NumIn() == 1 && t.In(0).Kind() == reflect.String && t.NumOut() == 1 && t.Out(0).Kind() == reflect.String
if isArity1 {
return v, nil
}
return reflect.Value{}, nil
case reflect.Ptr, reflect.Interface:
return r.toTruthyValue(indirect(v))
case reflect.Map:
if v.IsNil() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Struct:
return v, nil
case reflect.Invalid:
return reflect.Value{}, nil
default:
return reflect.Value{}, nil
}
}
// indirect returns the value that v points to, or concrete
// element underlying an interface.
func indirect(v reflect.Value) reflect.Value {
loop:
for v.IsValid() {
switch av := v; av.Kind() {
case reflect.Ptr:
v = av.Elem()
case reflect.Interface:
v = av.Elem()
default:
break loop
}
}
return v
}
// lookup a key in the context stack. If a value was not found, the reflect.Value zero
// type is returned.
func (r *renderer) lookup(name string, ln, col int, key []string) (reflect.Value, error) {
v := lookupKeysStack(key, r.stack)
if !v.IsValid() && r.template.ContextErrorsEnabled {
return v, fmt.Errorf("%s:%d:%d: cannot find value %s in context", name, ln, col, strings.Join(key, "."))
}
return v, nil
}
// lookupKeysStack obtains a value for a dotted key - eg: a.b.c . If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeysStack(key []string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
if len(key) == 0 {
return v
}
for i := range key {
if i == 0 {
v = lookupKeyStack(key[i], contexts)
continue
}
v = lookupKeyContext(key[i], v)
if !v.IsValid() {
break
}
}
return v
}
// lookupKeyStack returns a value from the first context in the stack that
// contains a value for that key. If a value was not found, the reflect.Value zero
// type is returned.
func lookupKeyStack(key string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
for i := len(contexts) - 1; i >= 0; i-- {
ctx := contexts[i]
v = lookupKeyContext(key, ctx)
if v.IsValid() {
break
}
}
return v
}
// lookup returns a value by key from the context. If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeyContext(key string, ctx reflect.Value) reflect.Value {
if key == "." {
return ctx
}
// check context for method by name
if ctx.IsValid() {
method := ctx.MethodByName(key)
if method.IsValid() {
return method
}
}
// check for fields and keys on concrete types.
switch ctx.Kind() {
case reflect.Ptr, reflect.Interface: | case reflect.Map:
return ctx.MapIndex(reflect.ValueOf(key))
case reflect.Struct:
return ctx.FieldByName(key)
default:
return reflect.Value{}
}
} | return lookupKeyContext(key, indirect(ctx)) | random_line_split |
utils.go | /*
Copyright 2015-2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/validation"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/constants"
apiutils "github.com/gravitational/teleport/api/utils"
)
// WriteContextCloser provides close method with context
type WriteContextCloser interface {
Close(ctx context.Context) error
io.Writer
}
// WriteCloserWithContext converts ContextCloser to io.Closer,
// whenever new Close method will be called, the ctx will be passed to it
func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser {
return &closerWithContext{
WriteContextCloser: closer,
ctx: ctx,
}
}
type closerWithContext struct {
WriteContextCloser
ctx context.Context
}
// Close closes all resources and returns the result
func (c *closerWithContext) Close() error {
return c.WriteContextCloser.Close(c.ctx)
}
// NilCloser returns closer if it's not nil
// otherwise returns a nop closer
func NilCloser(r io.Closer) io.Closer {
if r == nil {
return &nilCloser{}
}
return r
}
type nilCloser struct {
}
func (*nilCloser) Close() error {
return nil
}
// NopWriteCloser returns a WriteCloser with a no-op Close method wrapping
// the provided Writer w
func NopWriteCloser(r io.Writer) io.WriteCloser {
return nopWriteCloser{r}
}
type nopWriteCloser struct {
io.Writer
}
func (nopWriteCloser) Close() error { return nil }
// Tracer helps to trace execution of functions
type Tracer struct {
// Started records starting time of the call
Started time.Time
// Description is arbitrary description
Description string
}
// NewTracer returns a new tracer
func NewTracer(description string) *Tracer {
return &Tracer{Started: time.Now().UTC(), Description: description}
}
// Start logs start of the trace
func (t *Tracer) Start() *Tracer {
log.Debugf("Tracer started %v.", t.Description)
return t
}
// Stop logs stop of the trace
func (t *Tracer) Stop() *Tracer {
log.Debugf("Tracer completed %v in %v.", t.Description, time.Since(t.Started))
return t
}
// ThisFunction returns calling function name
func ThisFunction() string {
var pc [32]uintptr
runtime.Callers(2, pc[:])
return runtime.FuncForPC(pc[0]).Name()
}
// SyncString is a string value
// that can be concurrently accessed
type SyncString struct {
sync.Mutex
string
}
// Value returns value of the string
func (s *SyncString) Value() string |
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes address in url to make sure
// it's clickable, e.g. it replaces "undefined" address like
// 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts string to bool, in case of the value is empty
// or unknown, defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates advertise address,
// makes sure it's not an unreachable or multicast address
// returns address split into host and port, port could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether value is "on" or "off", parameterName is passed for error
// reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Close, it sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list, it panics if the value is not there
func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list, it panics if not enough values
// were allocated
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
}
// HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage.
func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
}
// ReadHostUUID reads host UUID from the file in the data dir
func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
}
// WriteHostUUID writes host UUID into a file
func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
}
return trace.ConvertSystemError(err)
}
return nil
}
// ReadOrMakeHostUUID looks for a hostid file in the data dir. If present,
// returns the UUID from it, otherwise generates one
func ReadOrMakeHostUUID(dataDir string) (string, error) {
id, err := ReadHostUUID(dataDir)
if err == nil {
return id, nil
}
if !trace.IsNotFound(err) {
return "", trace.Wrap(err)
}
// Checking error instead of the usual uuid.New() in case uuid generation
// fails due to not enough randomness. It's been known to happen happen when
// Teleport starts very early in the node initialization cycle and /dev/urandom
// isn't ready yet.
rawID, err := uuid.NewRandom()
if err != nil {
return "", trace.BadParameter("" +
"Teleport failed to generate host UUID. " +
"This may happen if randomness source is not fully initialized when the node is starting up. " +
"Please try restarting Teleport again.")
}
id = rawID.String()
if err = WriteHostUUID(dataDir, id); err != nil {
return "", trace.Wrap(err)
}
return id, nil
}
// StringSliceSubset returns true if b is a subset of a.
func StringSliceSubset(a []string, b []string) error {
aset := make(map[string]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// UintSliceSubset returns true if b is a subset of a.
func UintSliceSubset(a []uint16, b []uint16) error {
aset := make(map[uint16]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy.
func RemoveFromSlice(slice []string, values ...string) []string {
output := make([]string, 0, len(slice))
remove := make(map[string]bool)
for _, value := range values {
remove[value] = true
}
for _, s := range slice {
_, ok := remove[s]
if ok {
continue
}
output = append(output, s)
}
return output
}
// ChooseRandomString returns a random string from the given slice.
func ChooseRandomString(slice []string) string {
switch len(slice) {
case 0:
return ""
case 1:
return slice[0]
default:
return slice[rand.Intn(len(slice))]
}
}
// CheckCertificateFormatFlag checks if the certificate format is valid.
func CheckCertificateFormatFlag(s string) (string, error) {
switch s {
case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified:
return s, nil
default:
return "", trace.BadParameter("invalid certificate format parameter: %q", s)
}
}
// AddrsFromStrings returns strings list converted to address list
func AddrsFromStrings(s apiutils.Strings, defaultPort int) ([]NetAddr, error) {
addrs := make([]NetAddr, len(s))
for i, val := range s {
addr, err := ParseHostPortAddr(val, defaultPort)
if err != nil {
return nil, trace.Wrap(err)
}
addrs[i] = *addr
}
return addrs, nil
}
// FileExists checks whether a file exists at a given path
func FileExists(fp string) bool {
_, err := os.Stat(fp)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// StoreErrorOf stores the error returned by f within *err.
func StoreErrorOf(f func() error, err *error) {
*err = trace.NewAggregate(*err, f())
}
// ReadAtMost reads up to limit bytes from r, and reports an error
// when limit bytes are read.
func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := io.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
}
// HasPrefixAny determines if any of the string values have the given prefix.
func HasPrefixAny(prefix string, values []string) bool {
for _, val := range values {
if strings.HasPrefix(val, prefix) {
return true
}
}
return false
}
// ByteCount converts a size in bytes to a human-readable string.
func ByteCount(b int64) string {
const unit = 1000
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := int64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
}
// ErrLimitReached means that the read limit is reached.
var ErrLimitReached = &trace.LimitExceededError{Message: "the read limit is reached"}
const (
// CertTeleportUser specifies teleport user
CertTeleportUser = "x-teleport-user"
// CertTeleportUserCA specifies teleport certificate authority
CertTeleportUserCA = "x-teleport-user-ca"
// CertExtensionRole specifies teleport role
CertExtensionRole = "x-teleport-role"
// CertExtensionAuthority specifies teleport authority's name
// that signed this domain
CertExtensionAuthority = "x-teleport-authority"
// HostUUIDFile is the file name where the host UUID file is stored
HostUUIDFile = "host_uuid"
// CertTeleportClusterName is a name of the teleport cluster
CertTeleportClusterName = "x-teleport-cluster-name"
// CertTeleportUserCertificate is the certificate of the authenticated in user.
CertTeleportUserCertificate = "x-teleport-certificate"
// ExtIntCertType is an internal extension used to propagate cert type.
ExtIntCertType = "certtype@teleport"
// ExtIntCertTypeHost indicates a host-type certificate.
ExtIntCertTypeHost = "host"
// ExtIntCertTypeUser indicates a user-type certificate.
ExtIntCertTypeUser = "user"
)
| {
s.Lock()
defer s.Unlock()
return s.string
} | identifier_body |
utils.go | /*
Copyright 2015-2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/validation"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/constants"
apiutils "github.com/gravitational/teleport/api/utils"
)
// WriteContextCloser provides close method with context
type WriteContextCloser interface {
Close(ctx context.Context) error
io.Writer
}
// WriteCloserWithContext converts ContextCloser to io.Closer,
// whenever new Close method will be called, the ctx will be passed to it
func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser {
return &closerWithContext{
WriteContextCloser: closer,
ctx: ctx,
}
}
type closerWithContext struct {
WriteContextCloser
ctx context.Context
}
// Close closes all resources and returns the result
func (c *closerWithContext) Close() error {
return c.WriteContextCloser.Close(c.ctx)
}
// NilCloser returns closer if it's not nil
// otherwise returns a nop closer
func NilCloser(r io.Closer) io.Closer {
if r == nil {
return &nilCloser{}
}
return r
}
type nilCloser struct {
}
func (*nilCloser) Close() error {
return nil
}
// NopWriteCloser returns a WriteCloser with a no-op Close method wrapping
// the provided Writer w
func NopWriteCloser(r io.Writer) io.WriteCloser {
return nopWriteCloser{r}
}
type nopWriteCloser struct {
io.Writer
}
func (nopWriteCloser) Close() error { return nil }
// Tracer helps to trace execution of functions
type Tracer struct {
// Started records starting time of the call
Started time.Time
// Description is arbitrary description
Description string
}
// NewTracer returns a new tracer
func NewTracer(description string) *Tracer {
return &Tracer{Started: time.Now().UTC(), Description: description}
}
// Start logs start of the trace
func (t *Tracer) Start() *Tracer {
log.Debugf("Tracer started %v.", t.Description)
return t
}
// Stop logs stop of the trace
func (t *Tracer) Stop() *Tracer {
log.Debugf("Tracer completed %v in %v.", t.Description, time.Since(t.Started))
return t
}
// ThisFunction returns calling function name
func ThisFunction() string {
var pc [32]uintptr
runtime.Callers(2, pc[:])
return runtime.FuncForPC(pc[0]).Name()
}
// SyncString is a string value
// that can be concurrently accessed
type SyncString struct {
sync.Mutex
string
}
// Value returns value of the string
func (s *SyncString) Value() string {
s.Lock()
defer s.Unlock()
return s.string
}
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes address in url to make sure
// it's clickable, e.g. it replaces "undefined" address like
// 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts string to bool, in case of the value is empty
// or unknown, defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates advertise address,
// makes sure it's not an unreachable or multicast address
// returns address split into host and port, port could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether value is "on" or "off", parameterName is passed for error
// reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Close, it sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list, it panics if the value is not there
func (p *PortList) | () string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list, it panics if not enough values
// were allocated
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
}
// HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage.
func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
}
// ReadHostUUID reads host UUID from the file in the data dir
func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
}
// WriteHostUUID writes host UUID into a file
func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
}
return trace.ConvertSystemError(err)
}
return nil
}
// ReadOrMakeHostUUID looks for a hostid file in the data dir. If present,
// returns the UUID from it, otherwise generates one
func ReadOrMakeHostUUID(dataDir string) (string, error) {
id, err := ReadHostUUID(dataDir)
if err == nil {
return id, nil
}
if !trace.IsNotFound(err) {
return "", trace.Wrap(err)
}
// Checking error instead of the usual uuid.New() in case uuid generation
// fails due to not enough randomness. It's been known to happen happen when
// Teleport starts very early in the node initialization cycle and /dev/urandom
// isn't ready yet.
rawID, err := uuid.NewRandom()
if err != nil {
return "", trace.BadParameter("" +
"Teleport failed to generate host UUID. " +
"This may happen if randomness source is not fully initialized when the node is starting up. " +
"Please try restarting Teleport again.")
}
id = rawID.String()
if err = WriteHostUUID(dataDir, id); err != nil {
return "", trace.Wrap(err)
}
return id, nil
}
// StringSliceSubset returns true if b is a subset of a.
func StringSliceSubset(a []string, b []string) error {
aset := make(map[string]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// UintSliceSubset returns true if b is a subset of a.
func UintSliceSubset(a []uint16, b []uint16) error {
aset := make(map[uint16]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy.
func RemoveFromSlice(slice []string, values ...string) []string {
output := make([]string, 0, len(slice))
remove := make(map[string]bool)
for _, value := range values {
remove[value] = true
}
for _, s := range slice {
_, ok := remove[s]
if ok {
continue
}
output = append(output, s)
}
return output
}
// ChooseRandomString returns a random string from the given slice.
func ChooseRandomString(slice []string) string {
switch len(slice) {
case 0:
return ""
case 1:
return slice[0]
default:
return slice[rand.Intn(len(slice))]
}
}
// CheckCertificateFormatFlag checks if the certificate format is valid.
func CheckCertificateFormatFlag(s string) (string, error) {
switch s {
case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified:
return s, nil
default:
return "", trace.BadParameter("invalid certificate format parameter: %q", s)
}
}
// AddrsFromStrings returns strings list converted to address list
func AddrsFromStrings(s apiutils.Strings, defaultPort int) ([]NetAddr, error) {
addrs := make([]NetAddr, len(s))
for i, val := range s {
addr, err := ParseHostPortAddr(val, defaultPort)
if err != nil {
return nil, trace.Wrap(err)
}
addrs[i] = *addr
}
return addrs, nil
}
// FileExists checks whether a file exists at a given path
func FileExists(fp string) bool {
_, err := os.Stat(fp)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// StoreErrorOf stores the error returned by f within *err.
func StoreErrorOf(f func() error, err *error) {
*err = trace.NewAggregate(*err, f())
}
// ReadAtMost reads up to limit bytes from r, and reports an error
// when limit bytes are read.
func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := io.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
}
// HasPrefixAny determines if any of the string values have the given prefix.
func HasPrefixAny(prefix string, values []string) bool {
for _, val := range values {
if strings.HasPrefix(val, prefix) {
return true
}
}
return false
}
// ByteCount converts a size in bytes to a human-readable string.
func ByteCount(b int64) string {
const unit = 1000
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := int64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
}
// ErrLimitReached means that the read limit is reached.
var ErrLimitReached = &trace.LimitExceededError{Message: "the read limit is reached"}
const (
// CertTeleportUser specifies teleport user
CertTeleportUser = "x-teleport-user"
// CertTeleportUserCA specifies teleport certificate authority
CertTeleportUserCA = "x-teleport-user-ca"
// CertExtensionRole specifies teleport role
CertExtensionRole = "x-teleport-role"
// CertExtensionAuthority specifies teleport authority's name
// that signed this domain
CertExtensionAuthority = "x-teleport-authority"
// HostUUIDFile is the file name where the host UUID file is stored
HostUUIDFile = "host_uuid"
// CertTeleportClusterName is a name of the teleport cluster
CertTeleportClusterName = "x-teleport-cluster-name"
// CertTeleportUserCertificate is the certificate of the authenticated in user.
CertTeleportUserCertificate = "x-teleport-certificate"
// ExtIntCertType is an internal extension used to propagate cert type.
ExtIntCertType = "certtype@teleport"
// ExtIntCertTypeHost indicates a host-type certificate.
ExtIntCertTypeHost = "host"
// ExtIntCertTypeUser indicates a user-type certificate.
ExtIntCertTypeUser = "user"
)
| Pop | identifier_name |
utils.go | /*
Copyright 2015-2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/validation"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/constants"
apiutils "github.com/gravitational/teleport/api/utils"
)
// WriteContextCloser provides close method with context
type WriteContextCloser interface {
Close(ctx context.Context) error
io.Writer
}
// WriteCloserWithContext converts ContextCloser to io.Closer,
// whenever new Close method will be called, the ctx will be passed to it
func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser {
return &closerWithContext{
WriteContextCloser: closer,
ctx: ctx,
}
}
type closerWithContext struct {
WriteContextCloser
ctx context.Context
}
// Close closes all resources and returns the result
func (c *closerWithContext) Close() error {
return c.WriteContextCloser.Close(c.ctx)
}
// NilCloser returns closer if it's not nil
// otherwise returns a nop closer
func NilCloser(r io.Closer) io.Closer {
if r == nil {
return &nilCloser{}
}
return r
}
type nilCloser struct {
}
func (*nilCloser) Close() error {
return nil
}
// NopWriteCloser returns a WriteCloser with a no-op Close method wrapping
// the provided Writer w
func NopWriteCloser(r io.Writer) io.WriteCloser {
return nopWriteCloser{r}
}
type nopWriteCloser struct {
io.Writer
}
func (nopWriteCloser) Close() error { return nil }
// Tracer helps to trace execution of functions
type Tracer struct {
// Started records starting time of the call
Started time.Time
// Description is arbitrary description
Description string
}
// NewTracer returns a new tracer
func NewTracer(description string) *Tracer {
return &Tracer{Started: time.Now().UTC(), Description: description}
}
// Start logs start of the trace
func (t *Tracer) Start() *Tracer {
log.Debugf("Tracer started %v.", t.Description)
return t
}
// Stop logs stop of the trace
func (t *Tracer) Stop() *Tracer {
log.Debugf("Tracer completed %v in %v.", t.Description, time.Since(t.Started))
return t
}
// ThisFunction returns calling function name
func ThisFunction() string {
var pc [32]uintptr
runtime.Callers(2, pc[:])
return runtime.FuncForPC(pc[0]).Name()
}
// SyncString is a string value
// that can be concurrently accessed
type SyncString struct {
sync.Mutex
string
}
// Value returns value of the string
func (s *SyncString) Value() string { | s.Lock()
defer s.Unlock()
return s.string
}
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes address in url to make sure
// it's clickable, e.g. it replaces "undefined" address like
// 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts string to bool, in case of the value is empty
// or unknown, defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates advertise address,
// makes sure it's not an unreachable or multicast address
// returns address split into host and port, port could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether value is "on" or "off", parameterName is passed for error
// reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Close, it sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list, it panics if the value is not there
func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list, it panics if not enough values
// were allocated
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
}
// HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage.
func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
}
// ReadHostUUID reads host UUID from the file in the data dir
func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
}
// WriteHostUUID writes host UUID into a file
func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
}
return trace.ConvertSystemError(err)
}
return nil
}
// ReadOrMakeHostUUID looks for a hostid file in the data dir. If present,
// returns the UUID from it, otherwise generates one
func ReadOrMakeHostUUID(dataDir string) (string, error) {
id, err := ReadHostUUID(dataDir)
if err == nil {
return id, nil
}
if !trace.IsNotFound(err) {
return "", trace.Wrap(err)
}
// Checking error instead of the usual uuid.New() in case uuid generation
// fails due to not enough randomness. It's been known to happen happen when
// Teleport starts very early in the node initialization cycle and /dev/urandom
// isn't ready yet.
rawID, err := uuid.NewRandom()
if err != nil {
return "", trace.BadParameter("" +
"Teleport failed to generate host UUID. " +
"This may happen if randomness source is not fully initialized when the node is starting up. " +
"Please try restarting Teleport again.")
}
id = rawID.String()
if err = WriteHostUUID(dataDir, id); err != nil {
return "", trace.Wrap(err)
}
return id, nil
}
// StringSliceSubset returns true if b is a subset of a.
func StringSliceSubset(a []string, b []string) error {
aset := make(map[string]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// UintSliceSubset returns true if b is a subset of a.
func UintSliceSubset(a []uint16, b []uint16) error {
aset := make(map[uint16]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy.
func RemoveFromSlice(slice []string, values ...string) []string {
output := make([]string, 0, len(slice))
remove := make(map[string]bool)
for _, value := range values {
remove[value] = true
}
for _, s := range slice {
_, ok := remove[s]
if ok {
continue
}
output = append(output, s)
}
return output
}
// ChooseRandomString returns a random string from the given slice.
func ChooseRandomString(slice []string) string {
switch len(slice) {
case 0:
return ""
case 1:
return slice[0]
default:
return slice[rand.Intn(len(slice))]
}
}
// CheckCertificateFormatFlag checks if the certificate format is valid.
func CheckCertificateFormatFlag(s string) (string, error) {
switch s {
case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified:
return s, nil
default:
return "", trace.BadParameter("invalid certificate format parameter: %q", s)
}
}
// AddrsFromStrings returns strings list converted to address list
func AddrsFromStrings(s apiutils.Strings, defaultPort int) ([]NetAddr, error) {
addrs := make([]NetAddr, len(s))
for i, val := range s {
addr, err := ParseHostPortAddr(val, defaultPort)
if err != nil {
return nil, trace.Wrap(err)
}
addrs[i] = *addr
}
return addrs, nil
}
// FileExists checks whether a file exists at a given path
func FileExists(fp string) bool {
_, err := os.Stat(fp)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// StoreErrorOf stores the error returned by f within *err.
func StoreErrorOf(f func() error, err *error) {
*err = trace.NewAggregate(*err, f())
}
// ReadAtMost reads up to limit bytes from r, and reports an error
// when limit bytes are read.
func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := io.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
}
// HasPrefixAny determines if any of the string values have the given prefix.
func HasPrefixAny(prefix string, values []string) bool {
for _, val := range values {
if strings.HasPrefix(val, prefix) {
return true
}
}
return false
}
// ByteCount converts a size in bytes to a human-readable string.
func ByteCount(b int64) string {
const unit = 1000
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := int64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
}
// ErrLimitReached means that the read limit is reached.
var ErrLimitReached = &trace.LimitExceededError{Message: "the read limit is reached"}
const (
// CertTeleportUser specifies teleport user
CertTeleportUser = "x-teleport-user"
// CertTeleportUserCA specifies teleport certificate authority
CertTeleportUserCA = "x-teleport-user-ca"
// CertExtensionRole specifies teleport role
CertExtensionRole = "x-teleport-role"
// CertExtensionAuthority specifies teleport authority's name
// that signed this domain
CertExtensionAuthority = "x-teleport-authority"
// HostUUIDFile is the file name where the host UUID file is stored
HostUUIDFile = "host_uuid"
// CertTeleportClusterName is a name of the teleport cluster
CertTeleportClusterName = "x-teleport-cluster-name"
// CertTeleportUserCertificate is the certificate of the authenticated in user.
CertTeleportUserCertificate = "x-teleport-certificate"
// ExtIntCertType is an internal extension used to propagate cert type.
ExtIntCertType = "certtype@teleport"
// ExtIntCertTypeHost indicates a host-type certificate.
ExtIntCertTypeHost = "host"
// ExtIntCertTypeUser indicates a user-type certificate.
ExtIntCertTypeUser = "user"
) | random_line_split | |
utils.go | /*
Copyright 2015-2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/validation"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/constants"
apiutils "github.com/gravitational/teleport/api/utils"
)
// WriteContextCloser provides close method with context
type WriteContextCloser interface {
Close(ctx context.Context) error
io.Writer
}
// WriteCloserWithContext converts ContextCloser to io.Closer,
// whenever new Close method will be called, the ctx will be passed to it
func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser {
return &closerWithContext{
WriteContextCloser: closer,
ctx: ctx,
}
}
type closerWithContext struct {
WriteContextCloser
ctx context.Context
}
// Close closes all resources and returns the result
func (c *closerWithContext) Close() error {
return c.WriteContextCloser.Close(c.ctx)
}
// NilCloser returns closer if it's not nil
// otherwise returns a nop closer
func NilCloser(r io.Closer) io.Closer {
if r == nil {
return &nilCloser{}
}
return r
}
type nilCloser struct {
}
func (*nilCloser) Close() error {
return nil
}
// NopWriteCloser returns a WriteCloser with a no-op Close method wrapping
// the provided Writer w
func NopWriteCloser(r io.Writer) io.WriteCloser {
return nopWriteCloser{r}
}
type nopWriteCloser struct {
io.Writer
}
func (nopWriteCloser) Close() error { return nil }
// Tracer helps to trace execution of functions
type Tracer struct {
// Started records starting time of the call
Started time.Time
// Description is arbitrary description
Description string
}
// NewTracer returns a new tracer
func NewTracer(description string) *Tracer {
return &Tracer{Started: time.Now().UTC(), Description: description}
}
// Start logs start of the trace
func (t *Tracer) Start() *Tracer {
log.Debugf("Tracer started %v.", t.Description)
return t
}
// Stop logs stop of the trace
func (t *Tracer) Stop() *Tracer {
log.Debugf("Tracer completed %v in %v.", t.Description, time.Since(t.Started))
return t
}
// ThisFunction returns calling function name
func ThisFunction() string {
var pc [32]uintptr
runtime.Callers(2, pc[:])
return runtime.FuncForPC(pc[0]).Name()
}
// SyncString is a string value
// that can be concurrently accessed
type SyncString struct {
sync.Mutex
string
}
// Value returns value of the string
func (s *SyncString) Value() string {
s.Lock()
defer s.Unlock()
return s.string
}
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes address in url to make sure
// it's clickable, e.g. it replaces "undefined" address like
// 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts string to bool, in case of the value is empty
// or unknown, defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates advertise address,
// makes sure it's not an unreachable or multicast address
// returns address split into host and port, port could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether value is "on" or "off", parameterName is passed for error
// reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Close, it sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list, it panics if the value is not there
func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list, it panics if not enough values
// were allocated
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
}
// HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage.
func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
}
// ReadHostUUID reads host UUID from the file in the data dir
func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
}
// WriteHostUUID writes host UUID into a file
func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission) |
return trace.ConvertSystemError(err)
}
return nil
}
// ReadOrMakeHostUUID looks for a hostid file in the data dir. If present,
// returns the UUID from it, otherwise generates one
func ReadOrMakeHostUUID(dataDir string) (string, error) {
id, err := ReadHostUUID(dataDir)
if err == nil {
return id, nil
}
if !trace.IsNotFound(err) {
return "", trace.Wrap(err)
}
// Checking error instead of the usual uuid.New() in case uuid generation
// fails due to not enough randomness. It's been known to happen happen when
// Teleport starts very early in the node initialization cycle and /dev/urandom
// isn't ready yet.
rawID, err := uuid.NewRandom()
if err != nil {
return "", trace.BadParameter("" +
"Teleport failed to generate host UUID. " +
"This may happen if randomness source is not fully initialized when the node is starting up. " +
"Please try restarting Teleport again.")
}
id = rawID.String()
if err = WriteHostUUID(dataDir, id); err != nil {
return "", trace.Wrap(err)
}
return id, nil
}
// StringSliceSubset returns true if b is a subset of a.
func StringSliceSubset(a []string, b []string) error {
aset := make(map[string]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// UintSliceSubset returns true if b is a subset of a.
func UintSliceSubset(a []uint16, b []uint16) error {
aset := make(map[uint16]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
}
// RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy.
func RemoveFromSlice(slice []string, values ...string) []string {
output := make([]string, 0, len(slice))
remove := make(map[string]bool)
for _, value := range values {
remove[value] = true
}
for _, s := range slice {
_, ok := remove[s]
if ok {
continue
}
output = append(output, s)
}
return output
}
// ChooseRandomString returns a random string from the given slice.
func ChooseRandomString(slice []string) string {
switch len(slice) {
case 0:
return ""
case 1:
return slice[0]
default:
return slice[rand.Intn(len(slice))]
}
}
// CheckCertificateFormatFlag checks if the certificate format is valid.
func CheckCertificateFormatFlag(s string) (string, error) {
switch s {
case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified:
return s, nil
default:
return "", trace.BadParameter("invalid certificate format parameter: %q", s)
}
}
// AddrsFromStrings returns strings list converted to address list
func AddrsFromStrings(s apiutils.Strings, defaultPort int) ([]NetAddr, error) {
addrs := make([]NetAddr, len(s))
for i, val := range s {
addr, err := ParseHostPortAddr(val, defaultPort)
if err != nil {
return nil, trace.Wrap(err)
}
addrs[i] = *addr
}
return addrs, nil
}
// FileExists checks whether a file exists at a given path
func FileExists(fp string) bool {
_, err := os.Stat(fp)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// StoreErrorOf stores the error returned by f within *err.
func StoreErrorOf(f func() error, err *error) {
*err = trace.NewAggregate(*err, f())
}
// ReadAtMost reads up to limit bytes from r, and reports an error
// when limit bytes are read.
func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := io.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
}
// HasPrefixAny determines if any of the string values have the given prefix.
func HasPrefixAny(prefix string, values []string) bool {
for _, val := range values {
if strings.HasPrefix(val, prefix) {
return true
}
}
return false
}
// ByteCount converts a size in bytes to a human-readable string.
func ByteCount(b int64) string {
const unit = 1000
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := int64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
}
// ErrLimitReached means that the read limit is reached.
var ErrLimitReached = &trace.LimitExceededError{Message: "the read limit is reached"}
const (
// CertTeleportUser specifies teleport user
CertTeleportUser = "x-teleport-user"
// CertTeleportUserCA specifies teleport certificate authority
CertTeleportUserCA = "x-teleport-user-ca"
// CertExtensionRole specifies teleport role
CertExtensionRole = "x-teleport-role"
// CertExtensionAuthority specifies teleport authority's name
// that signed this domain
CertExtensionAuthority = "x-teleport-authority"
// HostUUIDFile is the file name where the host UUID file is stored
HostUUIDFile = "host_uuid"
// CertTeleportClusterName is a name of the teleport cluster
CertTeleportClusterName = "x-teleport-cluster-name"
// CertTeleportUserCertificate is the certificate of the authenticated in user.
CertTeleportUserCertificate = "x-teleport-certificate"
// ExtIntCertType is an internal extension used to propagate cert type.
ExtIntCertType = "certtype@teleport"
// ExtIntCertTypeHost indicates a host-type certificate.
ExtIntCertTypeHost = "host"
// ExtIntCertTypeUser indicates a user-type certificate.
ExtIntCertTypeUser = "user"
)
| {
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
} | conditional_block |
graphics.rs | use std::collections::HashMap;
use std::fs;
use wgpu::util::DeviceExt;
use crate::texture::Texture;
pub struct Graphics {
pub surface: wgpu::Surface,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub swap_chain: wgpu::SwapChain,
pub size: (u32, u32),
pub models: HashMap<String, Mesh>,
pub textures: HashMap<String, wgpu::BindGroup>,
pub pipelines: HashMap<String, wgpu::RenderPipeline>,
pub uniforms: Uniforms,
pub uniform_buffer: wgpu::Buffer,
pub uniform_bind_group: wgpu::BindGroup,
texture_layout: wgpu::BindGroupLayout,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
position: [f32; 3],
normal: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn Desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelProperties {
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
let mut vertices = Vec::new();
let vertexA = Vertex {
position: [-0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 0.0],
};
let vertexB = Vertex {
position: [0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 0.0],
};
let vertexC = Vertex {
position: [-0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 1.0],
};
let vertexD = Vertex {
position: [0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 1.0],
};
vertices.push(vertexA);
vertices.push(vertexB);
vertices.push(vertexC);
vertices.push(vertexD);
let indices = vec!(2, 1, 0, 1, 2, 3);
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup |
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu_types::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING), // To select alpha
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
// Specify any extra gpu feature. You can get a list of features supported by your device using adapter.features(), or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
// The limits field describes the limit of certain types of resource we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
// Define and creating the swap_chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
// Defines how the swap_chains textures will be stored on the gpu
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let uniforms = Uniforms::new();
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}
],
label: Some("uniform_bind_group"),
});
let mut pipelines : HashMap::<String, wgpu::RenderPipeline> = HashMap::new();
let pipeline = new_pipeline(&device, swap_chain_descriptor.format, "sprite.vert.spv", "sprite.frag.spv", &texture_layout, &uniform_bind_group_layout, wgpu::PrimitiveTopology::TriangleList, wgpu::PolygonMode::Fill);
pipelines.insert("sprite".to_owned(), pipeline);
let mut models : HashMap::<String, Mesh> = HashMap::new();
let mut triangle_mesh = create_quad();
triangle_mesh.upload_to_gpu(&device);
models.insert("quad".to_owned(), triangle_mesh);
let mut textures : HashMap<String, wgpu::BindGroup> = HashMap::new();
textures.insert("spaceship.png".to_owned(), upload_texture_to_gpu("spaceship.png", &device, &queue, &texture_layout));
Self {
surface,
device,
queue,
swap_chain_descriptor,
swap_chain,
size,
models,
textures,
pipelines,
texture_layout,
uniforms,
uniform_buffer,
uniform_bind_group,
}
}
pub fn resize(&mut self, new_size: (u32, u32)) {
self.size = new_size;
self.swap_chain_descriptor.width = new_size.0;
self.swap_chain_descriptor.height = new_size.1;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
}
} | {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
} | identifier_body |
graphics.rs | use std::collections::HashMap;
use std::fs;
use wgpu::util::DeviceExt;
use crate::texture::Texture;
pub struct Graphics {
pub surface: wgpu::Surface,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub swap_chain: wgpu::SwapChain,
pub size: (u32, u32),
pub models: HashMap<String, Mesh>,
pub textures: HashMap<String, wgpu::BindGroup>,
pub pipelines: HashMap<String, wgpu::RenderPipeline>,
pub uniforms: Uniforms,
pub uniform_buffer: wgpu::Buffer,
pub uniform_bind_group: wgpu::BindGroup,
texture_layout: wgpu::BindGroupLayout,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
position: [f32; 3],
normal: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn Desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelProperties {
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
let mut vertices = Vec::new();
let vertexA = Vertex {
position: [-0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 0.0],
};
let vertexB = Vertex {
position: [0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 0.0],
};
let vertexC = Vertex {
position: [-0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 1.0],
};
let vertexD = Vertex {
position: [0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 1.0],
};
vertices.push(vertexA);
vertices.push(vertexB);
vertices.push(vertexC);
vertices.push(vertexD);
let indices = vec!(2, 1, 0, 1, 2, 3);
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu_types::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING), // To select alpha
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
// Specify any extra gpu feature. You can get a list of features supported by your device using adapter.features(), or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
// The limits field describes the limit of certain types of resource we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
// Define and creating the swap_chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
// Defines how the swap_chains textures will be stored on the gpu
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo, | let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let uniforms = Uniforms::new();
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}
],
label: Some("uniform_bind_group"),
});
let mut pipelines : HashMap::<String, wgpu::RenderPipeline> = HashMap::new();
let pipeline = new_pipeline(&device, swap_chain_descriptor.format, "sprite.vert.spv", "sprite.frag.spv", &texture_layout, &uniform_bind_group_layout, wgpu::PrimitiveTopology::TriangleList, wgpu::PolygonMode::Fill);
pipelines.insert("sprite".to_owned(), pipeline);
let mut models : HashMap::<String, Mesh> = HashMap::new();
let mut triangle_mesh = create_quad();
triangle_mesh.upload_to_gpu(&device);
models.insert("quad".to_owned(), triangle_mesh);
let mut textures : HashMap<String, wgpu::BindGroup> = HashMap::new();
textures.insert("spaceship.png".to_owned(), upload_texture_to_gpu("spaceship.png", &device, &queue, &texture_layout));
Self {
surface,
device,
queue,
swap_chain_descriptor,
swap_chain,
size,
models,
textures,
pipelines,
texture_layout,
uniforms,
uniform_buffer,
uniform_bind_group,
}
}
pub fn resize(&mut self, new_size: (u32, u32)) {
self.size = new_size;
self.swap_chain_descriptor.width = new_size.0;
self.swap_chain_descriptor.height = new_size.1;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
}
} | };
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
| random_line_split |
graphics.rs | use std::collections::HashMap;
use std::fs;
use wgpu::util::DeviceExt;
use crate::texture::Texture;
pub struct Graphics {
pub surface: wgpu::Surface,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub swap_chain: wgpu::SwapChain,
pub size: (u32, u32),
pub models: HashMap<String, Mesh>,
pub textures: HashMap<String, wgpu::BindGroup>,
pub pipelines: HashMap<String, wgpu::RenderPipeline>,
pub uniforms: Uniforms,
pub uniform_buffer: wgpu::Buffer,
pub uniform_bind_group: wgpu::BindGroup,
texture_layout: wgpu::BindGroupLayout,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
position: [f32; 3],
normal: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn Desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct | {
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
let mut vertices = Vec::new();
let vertexA = Vertex {
position: [-0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 0.0],
};
let vertexB = Vertex {
position: [0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 0.0],
};
let vertexC = Vertex {
position: [-0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 1.0],
};
let vertexD = Vertex {
position: [0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 1.0],
};
vertices.push(vertexA);
vertices.push(vertexB);
vertices.push(vertexC);
vertices.push(vertexD);
let indices = vec!(2, 1, 0, 1, 2, 3);
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu_types::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING), // To select alpha
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
// Specify any extra gpu feature. You can get a list of features supported by your device using adapter.features(), or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
// The limits field describes the limit of certain types of resource we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
// Define and creating the swap_chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
// Defines how the swap_chains textures will be stored on the gpu
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let uniforms = Uniforms::new();
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}
],
label: Some("uniform_bind_group"),
});
let mut pipelines : HashMap::<String, wgpu::RenderPipeline> = HashMap::new();
let pipeline = new_pipeline(&device, swap_chain_descriptor.format, "sprite.vert.spv", "sprite.frag.spv", &texture_layout, &uniform_bind_group_layout, wgpu::PrimitiveTopology::TriangleList, wgpu::PolygonMode::Fill);
pipelines.insert("sprite".to_owned(), pipeline);
let mut models : HashMap::<String, Mesh> = HashMap::new();
let mut triangle_mesh = create_quad();
triangle_mesh.upload_to_gpu(&device);
models.insert("quad".to_owned(), triangle_mesh);
let mut textures : HashMap<String, wgpu::BindGroup> = HashMap::new();
textures.insert("spaceship.png".to_owned(), upload_texture_to_gpu("spaceship.png", &device, &queue, &texture_layout));
Self {
surface,
device,
queue,
swap_chain_descriptor,
swap_chain,
size,
models,
textures,
pipelines,
texture_layout,
uniforms,
uniform_buffer,
uniform_bind_group,
}
}
pub fn resize(&mut self, new_size: (u32, u32)) {
self.size = new_size;
self.swap_chain_descriptor.width = new_size.0;
self.swap_chain_descriptor.height = new_size.1;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
}
} | ModelProperties | identifier_name |
MMM-Ilevia-Lille.js | /* Timetable for Lille local transport Module */
/* Module: MMM-Ilevia-Lille
*
* By Jérémy PALAFFRE (Jilano5)
* based on a script from normyx (https://github.com/normyx/MMM-Nantes-TAN)
* MIT Licensed.
*/
Module.register("MMM-Ilevia-Lille",{
// Define module defaults
defaults: {
updateInterval: 2 * 60 * 1000, //time in ms between pulling request for new times (update request)
initialLoadDelay: 0, // start delay seconds.
maxLettersForDestination: 12, //will limit the length of the destination string
maxLettersForStop: 12, //will limit the length of the destination string
showSecondsToNextUpdate: false, // display a countdown to the next update pull (should I wait for a refresh before going ?)
showLastUpdateTime: false, //display the time when the last pulled occured (taste & color...)
defaultIcon: 'bus',
showNumber: true, // Bus number
showIcon: true, // Bus icon in front of row
useIleviaColor: true, //Use colors from ilevia database
useColor: true,
colorCode: {
Blue: "rgb(0,121,188)",
Green: "rgb(0, 118,125)",
Yellow: "rgb(253,197,16)",
Purple: "rgb(153,51,255)",
White: "rgb(255,255,255)",
Orange: "rgb(236,114,0)"
},
size: "medium", // Text size, for example small, medium or large
stacked: true, // Show multiple buses on same row, if same route and destination
showTimeLimit: 45, // If not stacked, show time of departure instead of minutes, if more than this limit until departure.
debug: false, //console.log more things to help debugging
ileviaAPIURL: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-prochainspassages',
ileviaAPIURLColor: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-couleurslignes'
},
// Define start sequence.
start: function() {
Log.info("Starting module: " + this.name);
//Get Timezone
this.config.timezone = Intl.DateTimeFormat().resolvedOptions().timeZone
//Send data to Node JS
this.sendSocketNotification('SET_CONFIG', this.config);
//Get color for buses lines from Ilevia
this.IleviaColor = []
this.sendSocketNotification('GET_COLOR', this.config.busStations);
this.busRecords = {};
this.loaded = false;
this.updateTimer = null;
var self = this;
setInterval(function () {
self.caller = 'updateInterval';
self.updateDom();
}, 1000);
},
getTranslations: function () {
return {
en: "translations/en.json",
fr: "translations/fr.json"
};
},
getHeader: function () {
var header = this.data.header;
if (this.config.showSecondsToNextUpdate && typeof(this.config.lastUpdate) !== 'undefined') {
var timeDifference = Math.round((this.config.updateInterval - new Date() + Date.parse(this.config.lastUpdate)) / 1000);
if (timeDifference > 0) {
header += ', ' + this.translate("NEXT_UPDATE_IN") + ' ' + timeDifference + ' s';
} else {
header += ', ' + this.translate("UPDATE_REQUESTED") + ' ' + Math.abs(timeDifference) + 's ago';
}
}
if (this.config.showLastUpdateTime && typeof(this.config.lastUpdate) !== 'undefined') {
var now = new Date(this.config.lastUpdate);
header += (now ? (' @ ' + now.getHours() + ':' + (now.getMinutes() > 9 ? '' : '0') + now.getMinutes() + ':' + (now.getSeconds() > 9 ? '' : '0') + now.getSeconds()) : '');
}
return header;
},
setColor: function(element, codeColor) {
if (this.config.useColor && codeColor != null) {
var color = null;
switch(codeColor) {
case 'blue':
color = this.config.colorCode.Blue;
break;
case 'green':
color = this.config.colorCode.Green;
break;
case 'yellow':
color = this.config.colorCode.Yellow;
break;
case 'purple':
color = this.config.colorCode.Purple;
break;
case 'white':
color = this.config.colorCode.White;
break;
case 'orange':
color = this.config.colorCode.Orange;
break;
default :
}
if (color != null) {
element.style="color:"+color+";";
}
}
},
setIleviaColor: function(element, codeligne) {
if (this.config.useColor) {
var colorHEX = null;
for (var index in this.IleviaColor) {
if(this.IleviaColor[index].codeligne === codeligne){
colorHEX = '#' + this.IleviaColor[index].colorHEX
break;
}
}
if (colorHEX != null) {
element.style="color:"+colorHEX+";";
}
}
},
stackBuses: function (buses) {
stackedBuses = [];
var len = buses.length;
var previousStackvalue = '';
var stackedTimes = [];
if (len > 0) {
previousStackvalue = '' + buses[0].fields.nomstation + buses[0].fields.codeligne + buses[0].fields.sensligne;
stackedTimes.push(buses[0].fields.heureestimeedepart);
for (var i = 1; i < len; i++) {
| stackedBuses.push({
from: buses[len - 1].fields.nomstation,
number: buses[len - 1].fields.codeligne,
to: buses[len - 1].fields.sensligne,
times: stackedTimes
});
}
return stackedBuses;
},
formatBuses: function (buses) {
formatedBuses = [];
var len = buses.length;
if (len > 0) {
for (var i = 0; i < len; i++) {
formatedBuses.push({
from: buses[i].fields.nomstation,
number: buses[i].fields.codeligne,
to: buses[i].fields.sensligne,
time: buses[i].fields.heureestimeedepart
});
}
}
return formatedBuses;
},
// Override dom generator.
getDom: function() {
self = this;
var wrapper = document.createElement("table");
wrapper.className = "small";
var first = true;
if (!this.loaded) {
wrapper.innerHTML = self.translate("LOADING");
wrapper.className = "medium dimmed";
return wrapper;
}
for (var busIndex = 0; busIndex < this.config.busStations.length; busIndex++) {
var stop = this.config.busStations[busIndex];
//#region Get stop index
var stopIndex = ''
if(typeof(stop.nomstation) !== 'undefined'){
stopIndex += stop.nomstation + '_'
}
if(typeof(stop.codeligne) !== 'undefined'){
stopIndex += stop.codeligne + '_'
}
if(typeof(stop.sensligne) !== 'undefined'){
stopIndex += stop.sensligne
}
//#endregion
var comingBuses = this.busRecords[stopIndex];
if(self.config.debug){
Log.info('MMM-Ilevia-Lille Debug : comingBuses')
Log.info(comingBuses)
Log.info(self.config.debug)
}
if(typeof(comingBuses) !== 'undefined'){
comingBuses.forEach(function (bus) {
//#region Get the next passage time
var now = new Date();
var minutes = '';
if(self.config.stacked) {
if(bus.times.length > 0) {
var busTime = new Date(bus.times[0]);
minutes = Math.round((busTime - now) / 60000);
if(minutes <= 1 && minutes > 0){
minutes = self.translate("CLOSE");
}
else if (minutes <= 0){
minutes = ''
}
}
for(var i=1; i < bus.times.length; i++){
var busTime = new Date(bus.times[i]);
if(minutes == ''){
minutes += Math.round((busTime - now) / 60000);
}else{
minutes += '/ ' + Math.round((busTime - now) / 60000);
}
}
minutes += " min";
} else {
var busTime = new Date(bus.time);
minutes = Math.round((busTime - now) / 60000);
if(minutes > self.config.showTimeLimit){
minutes = busTime.getHours() + ':' + (busTime.getMinutes() < 10 ? '0' : '') + busTime.getMinutes();
}else{
minutes += " min";
}
}
//#endregion
var busWrapper = document.createElement("tr");
busWrapper.className = first ? ' border_top' : '';
first = false; // Top border only on the first row
//Color
if (self.config.useColor) {
self.setColor(busWrapper,stop.color);
}
//Ilevia Color
if (self.config.useIleviaColor) {
self.setIleviaColor(busWrapper,bus.number);
}
// Icon
if (self.config.showIcon) {
var iconWrapper = document.createElement("td");
if (stop.icon != null) {
iconWrapper.innerHTML = '<i class="fa fa-' + stop.icon + '" aria-hidden="true"></i>';
} else {
iconWrapper.innerHTML = '<i class="fa fa-' + self.config.defaultIcon + '" aria-hidden="true"></i>'; "fa fa-fw fa-"+self.config.defaultIcon;
}
iconWrapper.className = "align-right";
busWrapper.appendChild(iconWrapper);
}
// Line number
if (self.config.showNumber) {
var numberWrapper = document.createElement("td");
numberWrapper.innerHTML = bus.number;
numberWrapper.className = "align-right bold";
busWrapper.appendChild(numberWrapper);
}
// Trip
var tripWrapper = document.createElement("td");
tripWrapper.className = "align-left";
tripWrapper.innerHTML = self.capitalizeFirstLetter(bus.from.substr(0, this.config.maxLettersForStop).toLowerCase());
if (comingBuses.length>0) {
tripWrapper.innerHTML += " → " + self.capitalizeFirstLetter(bus.to.substr(0, this.config.maxLettersForDestination).toLowerCase());
}
busWrapper.appendChild(tripWrapper);
// Passage Time
var minutesWrapper = document.createElement("td");
minutesWrapper.className = "align-right bright";
minutesWrapper.innerHTML = minutes;
busWrapper.appendChild(minutesWrapper);
wrapper.appendChild(busWrapper);
});
}
}
return wrapper;
},
socketNotificationReceived: function(notification, payload) {
this.caller = notification;
switch (notification) {
case "BUS":
if (payload.id != null) {
this.busRecords[payload.id] = this.config.stacked ? this.stackBuses(payload.records) : this.formatBuses(payload.records);
this.loaded = true;
this.updateDom();
break;
} else {
Log.info(this.name + ': BUS - No payload');
}
case "UPDATE":
this.config.lastUpdate = payload.lastUpdate;
this.updateDom();
break;
case "DEBUG":
Log.info(payload);
break;
case "ILEVIA_COLOR":
this.IleviaColor.push(payload)
break;
}
},
capitalizeFirstLetter: function (str) {
var splitStr = str.toLowerCase().split(' ');
for (var i = 0; i < splitStr.length; i++) {
// You do not need to check if i is larger than splitStr length, as your for does that for you
// Assign it back to the array
splitStr[i] = splitStr[i].charAt(0).toUpperCase() + splitStr[i].substring(1);
}
// Directly return the joined string
return splitStr.join(' ');
}
});
| stackvalue = '' + buses[i].fields.nomstation + buses[i].fields.codeligne + buses[i].fields.sensligne;
if (stackvalue == previousStackvalue) {
stackedTimes.push(buses[i].fields.heureestimeedepart);
} else {
stackedBuses.push({
from: buses[i - 1].fields.nomstation,
number: buses[i - 1].fields.codeligne,
to: buses[i - 1].fields.sensligne,
times: stackedTimes
});
previousStackvalue = stackvalue;
stackedTimes = [];
stackedTimes.push(buses[i].fields.heureestimeedepart)
}
}
| conditional_block |
MMM-Ilevia-Lille.js | /* Timetable for Lille local transport Module */
/* Module: MMM-Ilevia-Lille
*
* By Jérémy PALAFFRE (Jilano5)
* based on a script from normyx (https://github.com/normyx/MMM-Nantes-TAN)
* MIT Licensed.
*/
Module.register("MMM-Ilevia-Lille",{
// Define module defaults
defaults: {
updateInterval: 2 * 60 * 1000, //time in ms between pulling request for new times (update request)
initialLoadDelay: 0, // start delay seconds.
maxLettersForDestination: 12, //will limit the length of the destination string
maxLettersForStop: 12, //will limit the length of the destination string
showSecondsToNextUpdate: false, // display a countdown to the next update pull (should I wait for a refresh before going ?)
showLastUpdateTime: false, //display the time when the last pulled occured (taste & color...)
defaultIcon: 'bus',
showNumber: true, // Bus number
showIcon: true, // Bus icon in front of row
useIleviaColor: true, //Use colors from ilevia database
useColor: true,
colorCode: {
Blue: "rgb(0,121,188)",
Green: "rgb(0, 118,125)",
Yellow: "rgb(253,197,16)",
Purple: "rgb(153,51,255)",
White: "rgb(255,255,255)",
Orange: "rgb(236,114,0)"
},
size: "medium", // Text size, for example small, medium or large
stacked: true, // Show multiple buses on same row, if same route and destination
showTimeLimit: 45, // If not stacked, show time of departure instead of minutes, if more than this limit until departure.
debug: false, //console.log more things to help debugging
ileviaAPIURL: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-prochainspassages',
ileviaAPIURLColor: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-couleurslignes'
},
// Define start sequence.
start: function() {
Log.info("Starting module: " + this.name);
//Get Timezone
this.config.timezone = Intl.DateTimeFormat().resolvedOptions().timeZone
//Send data to Node JS
this.sendSocketNotification('SET_CONFIG', this.config);
//Get color for buses lines from Ilevia
this.IleviaColor = []
this.sendSocketNotification('GET_COLOR', this.config.busStations);
this.busRecords = {};
this.loaded = false;
this.updateTimer = null;
var self = this;
setInterval(function () {
self.caller = 'updateInterval';
self.updateDom();
}, 1000);
},
getTranslations: function () {
return {
en: "translations/en.json",
fr: "translations/fr.json"
};
},
getHeader: function () {
var header = this.data.header;
if (this.config.showSecondsToNextUpdate && typeof(this.config.lastUpdate) !== 'undefined') {
var timeDifference = Math.round((this.config.updateInterval - new Date() + Date.parse(this.config.lastUpdate)) / 1000);
if (timeDifference > 0) {
header += ', ' + this.translate("NEXT_UPDATE_IN") + ' ' + timeDifference + ' s';
} else {
header += ', ' + this.translate("UPDATE_REQUESTED") + ' ' + Math.abs(timeDifference) + 's ago';
}
}
if (this.config.showLastUpdateTime && typeof(this.config.lastUpdate) !== 'undefined') {
var now = new Date(this.config.lastUpdate);
header += (now ? (' @ ' + now.getHours() + ':' + (now.getMinutes() > 9 ? '' : '0') + now.getMinutes() + ':' + (now.getSeconds() > 9 ? '' : '0') + now.getSeconds()) : '');
}
return header;
},
setColor: function(element, codeColor) {
if (this.config.useColor && codeColor != null) {
var color = null;
switch(codeColor) {
case 'blue':
color = this.config.colorCode.Blue;
break;
case 'green':
color = this.config.colorCode.Green;
break;
case 'yellow':
color = this.config.colorCode.Yellow;
break;
case 'purple':
color = this.config.colorCode.Purple;
break;
case 'white':
color = this.config.colorCode.White;
break;
case 'orange':
color = this.config.colorCode.Orange;
break;
default :
}
if (color != null) {
element.style="color:"+color+";";
}
}
},
setIleviaColor: function(element, codeligne) {
if (this.config.useColor) {
var colorHEX = null;
for (var index in this.IleviaColor) {
if(this.IleviaColor[index].codeligne === codeligne){
colorHEX = '#' + this.IleviaColor[index].colorHEX
break;
}
} | if (colorHEX != null) {
element.style="color:"+colorHEX+";";
}
}
},
stackBuses: function (buses) {
stackedBuses = [];
var len = buses.length;
var previousStackvalue = '';
var stackedTimes = [];
if (len > 0) {
previousStackvalue = '' + buses[0].fields.nomstation + buses[0].fields.codeligne + buses[0].fields.sensligne;
stackedTimes.push(buses[0].fields.heureestimeedepart);
for (var i = 1; i < len; i++) {
stackvalue = '' + buses[i].fields.nomstation + buses[i].fields.codeligne + buses[i].fields.sensligne;
if (stackvalue == previousStackvalue) {
stackedTimes.push(buses[i].fields.heureestimeedepart);
} else {
stackedBuses.push({
from: buses[i - 1].fields.nomstation,
number: buses[i - 1].fields.codeligne,
to: buses[i - 1].fields.sensligne,
times: stackedTimes
});
previousStackvalue = stackvalue;
stackedTimes = [];
stackedTimes.push(buses[i].fields.heureestimeedepart)
}
}
stackedBuses.push({
from: buses[len - 1].fields.nomstation,
number: buses[len - 1].fields.codeligne,
to: buses[len - 1].fields.sensligne,
times: stackedTimes
});
}
return stackedBuses;
},
formatBuses: function (buses) {
formatedBuses = [];
var len = buses.length;
if (len > 0) {
for (var i = 0; i < len; i++) {
formatedBuses.push({
from: buses[i].fields.nomstation,
number: buses[i].fields.codeligne,
to: buses[i].fields.sensligne,
time: buses[i].fields.heureestimeedepart
});
}
}
return formatedBuses;
},
// Override dom generator.
getDom: function() {
self = this;
var wrapper = document.createElement("table");
wrapper.className = "small";
var first = true;
if (!this.loaded) {
wrapper.innerHTML = self.translate("LOADING");
wrapper.className = "medium dimmed";
return wrapper;
}
for (var busIndex = 0; busIndex < this.config.busStations.length; busIndex++) {
var stop = this.config.busStations[busIndex];
//#region Get stop index
var stopIndex = ''
if(typeof(stop.nomstation) !== 'undefined'){
stopIndex += stop.nomstation + '_'
}
if(typeof(stop.codeligne) !== 'undefined'){
stopIndex += stop.codeligne + '_'
}
if(typeof(stop.sensligne) !== 'undefined'){
stopIndex += stop.sensligne
}
//#endregion
var comingBuses = this.busRecords[stopIndex];
if(self.config.debug){
Log.info('MMM-Ilevia-Lille Debug : comingBuses')
Log.info(comingBuses)
Log.info(self.config.debug)
}
if(typeof(comingBuses) !== 'undefined'){
comingBuses.forEach(function (bus) {
//#region Get the next passage time
var now = new Date();
var minutes = '';
if(self.config.stacked) {
if(bus.times.length > 0) {
var busTime = new Date(bus.times[0]);
minutes = Math.round((busTime - now) / 60000);
if(minutes <= 1 && minutes > 0){
minutes = self.translate("CLOSE");
}
else if (minutes <= 0){
minutes = ''
}
}
for(var i=1; i < bus.times.length; i++){
var busTime = new Date(bus.times[i]);
if(minutes == ''){
minutes += Math.round((busTime - now) / 60000);
}else{
minutes += '/ ' + Math.round((busTime - now) / 60000);
}
}
minutes += " min";
} else {
var busTime = new Date(bus.time);
minutes = Math.round((busTime - now) / 60000);
if(minutes > self.config.showTimeLimit){
minutes = busTime.getHours() + ':' + (busTime.getMinutes() < 10 ? '0' : '') + busTime.getMinutes();
}else{
minutes += " min";
}
}
//#endregion
var busWrapper = document.createElement("tr");
busWrapper.className = first ? ' border_top' : '';
first = false; // Top border only on the first row
//Color
if (self.config.useColor) {
self.setColor(busWrapper,stop.color);
}
//Ilevia Color
if (self.config.useIleviaColor) {
self.setIleviaColor(busWrapper,bus.number);
}
// Icon
if (self.config.showIcon) {
var iconWrapper = document.createElement("td");
if (stop.icon != null) {
iconWrapper.innerHTML = '<i class="fa fa-' + stop.icon + '" aria-hidden="true"></i>';
} else {
iconWrapper.innerHTML = '<i class="fa fa-' + self.config.defaultIcon + '" aria-hidden="true"></i>'; "fa fa-fw fa-"+self.config.defaultIcon;
}
iconWrapper.className = "align-right";
busWrapper.appendChild(iconWrapper);
}
// Line number
if (self.config.showNumber) {
var numberWrapper = document.createElement("td");
numberWrapper.innerHTML = bus.number;
numberWrapper.className = "align-right bold";
busWrapper.appendChild(numberWrapper);
}
// Trip
var tripWrapper = document.createElement("td");
tripWrapper.className = "align-left";
tripWrapper.innerHTML = self.capitalizeFirstLetter(bus.from.substr(0, this.config.maxLettersForStop).toLowerCase());
if (comingBuses.length>0) {
tripWrapper.innerHTML += " → " + self.capitalizeFirstLetter(bus.to.substr(0, this.config.maxLettersForDestination).toLowerCase());
}
busWrapper.appendChild(tripWrapper);
// Passage Time
var minutesWrapper = document.createElement("td");
minutesWrapper.className = "align-right bright";
minutesWrapper.innerHTML = minutes;
busWrapper.appendChild(minutesWrapper);
wrapper.appendChild(busWrapper);
});
}
}
return wrapper;
},
socketNotificationReceived: function(notification, payload) {
this.caller = notification;
switch (notification) {
case "BUS":
if (payload.id != null) {
this.busRecords[payload.id] = this.config.stacked ? this.stackBuses(payload.records) : this.formatBuses(payload.records);
this.loaded = true;
this.updateDom();
break;
} else {
Log.info(this.name + ': BUS - No payload');
}
case "UPDATE":
this.config.lastUpdate = payload.lastUpdate;
this.updateDom();
break;
case "DEBUG":
Log.info(payload);
break;
case "ILEVIA_COLOR":
this.IleviaColor.push(payload)
break;
}
},
capitalizeFirstLetter: function (str) {
var splitStr = str.toLowerCase().split(' ');
for (var i = 0; i < splitStr.length; i++) {
// You do not need to check if i is larger than splitStr length, as your for does that for you
// Assign it back to the array
splitStr[i] = splitStr[i].charAt(0).toUpperCase() + splitStr[i].substring(1);
}
// Directly return the joined string
return splitStr.join(' ');
}
}); | random_line_split | |
test_supernet.py | import argparse
import os
import sys
import shutil
import time
import random
import glob
import logging
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from resnet20_supernet import resnet20
from utils import *
'''
Namespace(affine=True,
alpha_type='sample_uniform',
arch_num=50000,
arch_start=1,
batch_size=10000,
bn_calibrate=False,
bn_calibrate_batch=10000,
bn_calibrate_batch_num=1,
convbn_type='sample_channel',
eval_json_path='files/Track1_final_archs.json',
localsep_layers=None,
localsep_portion=1.0,
mask_repeat=1,
model_path='files/supernet.th',
prob_ratio=1.0,
r=1.0,
sameshortcut=True,
save_dir='eval',
save_every=1,
save_file='eval-final',
seed=2,
track_running_stats=False,
train=False,
train_batch_size=128,
train_epochs=1,
train_lr=0.001,
train_min_lr=0,
train_momentum=0.9,
train_print_freq=100,
train_weight_decay=0.0005,
workers=4)
'''
parser = argparse.ArgumentParser(
description='Propert ResNets for CIFAR10 in pytorch')
parser.add_argument('--eval_json_path', help='json file containing archs to evaluete',
default='Track1_Submit/files/benchmark.json', type=str)
parser.add_argument('--model_path', default='Track1_Submit/train/model.th',
help='model checkpoint', type=str)
parser.add_argument('--arch_start', default=1, type=int,
metavar='N', help='the start index of eval archs')
parser.add_argument('--arch_num', default=101, type=int,
metavar='N', help='the num of eval archs')
parser.add_argument('--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--batch_size', default=512, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--affine', action='store_true', help='BN affine')
parser.add_argument('--save_dir', help='The directory used to save the trained models',
default='./checkpoints', type=str)
parser.add_argument('--save_file', help='The file used to save the result',
default='eval-1_50000', type=str)
parser.add_argument(
'--save_every', help='Saves checkpoints at every specified number of epochs', type=int, default=1)
parser.add_argument('--convbn_type',
default='sample_channel',
type=str,
help='convbn forward with different mask: mix_channel or random_mix_channel or sample_channel or sample_random_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1.,
help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
default=128, help='train epoch on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
except:
print("BN track running stats is False in pt but True in model, so here ignore it")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
new_loader.append((x.cuda(), y.cuda()))
return new_loader
def | (loader, model, lenlist, num):
model.train()
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
m.running_mean.data.fill_(0)
m.running_var.data.fill_(0)
m.num_batches_tracked.data.zero_()
m.momentum = None
for i, (input, _) in enumerate(loader):
# input = input.cuda()
if i < min(len(loader), num):
model(input, lenlist)
return model
def train(train_queue, model, lenlist, args):
model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(
), args.train_lr, momentum=args.train_momentum, weight_decay=args.train_weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.train_epochs, eta_min=args.train_min_lr)
logging.info('Train arch: {}'.format(lenlist))
for epoch in range(args.train_epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_queue):
data_time.update(time.time() - end)
target_var = target.cuda()
input_var = input.cuda()
optimizer.zero_grad() # zero gradient
output = model(input_var, lenlist) # compute output
loss = criterion(output, target_var) # compute loss
loss.backward() # compute gradient
optimizer.step() # do SGD step
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target_var.data)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.train_print_freq == 0 or i == len(train_queue) - 1:
logging.info('\tEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch, i, len(train_queue), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
lr_scheduler.step()
del criterion
del optimizer
del lr_scheduler
return model
def validate(valid_queue, model, lenlist):
"""
Run evaluation
"""
# switch to evaluate mode
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(valid_queue):
input_var = input
target_var = target
# compute output
output = model(input_var, lenlist)
output = output.float()
# measure accuracy
prec1 = accuracy(output.data, target)[0]
top1.update(prec1.item(), input.size(0))
return top1.avg
if __name__ == '__main__':
main()
| calibrate_bn | identifier_name |
test_supernet.py | import argparse
import os
import sys
import shutil
import time
import random
import glob
import logging
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from resnet20_supernet import resnet20
from utils import *
'''
Namespace(affine=True,
alpha_type='sample_uniform',
arch_num=50000,
arch_start=1,
batch_size=10000,
bn_calibrate=False,
bn_calibrate_batch=10000,
bn_calibrate_batch_num=1,
convbn_type='sample_channel',
eval_json_path='files/Track1_final_archs.json',
localsep_layers=None,
localsep_portion=1.0,
mask_repeat=1,
model_path='files/supernet.th',
prob_ratio=1.0,
r=1.0,
sameshortcut=True,
save_dir='eval',
save_every=1,
save_file='eval-final',
seed=2,
track_running_stats=False,
train=False,
train_batch_size=128,
train_epochs=1,
train_lr=0.001,
train_min_lr=0,
train_momentum=0.9,
train_print_freq=100,
train_weight_decay=0.0005,
workers=4)
'''
parser = argparse.ArgumentParser(
description='Propert ResNets for CIFAR10 in pytorch')
parser.add_argument('--eval_json_path', help='json file containing archs to evaluete',
default='Track1_Submit/files/benchmark.json', type=str)
parser.add_argument('--model_path', default='Track1_Submit/train/model.th',
help='model checkpoint', type=str)
parser.add_argument('--arch_start', default=1, type=int,
metavar='N', help='the start index of eval archs')
parser.add_argument('--arch_num', default=101, type=int,
metavar='N', help='the num of eval archs')
parser.add_argument('--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--batch_size', default=512, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--affine', action='store_true', help='BN affine')
parser.add_argument('--save_dir', help='The directory used to save the trained models',
default='./checkpoints', type=str)
parser.add_argument('--save_file', help='The file used to save the result',
default='eval-1_50000', type=str)
parser.add_argument(
'--save_every', help='Saves checkpoints at every specified number of epochs', type=int, default=1)
parser.add_argument('--convbn_type',
default='sample_channel',
type=str,
help='convbn forward with different mask: mix_channel or random_mix_channel or sample_channel or sample_random_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1.,
help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
default=128, help='train epoch on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
except:
print("BN track running stats is False in pt but True in model, so here ignore it")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
|
return new_loader
def calibrate_bn(loader, model, lenlist, num):
model.train()
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
m.running_mean.data.fill_(0)
m.running_var.data.fill_(0)
m.num_batches_tracked.data.zero_()
m.momentum = None
for i, (input, _) in enumerate(loader):
# input = input.cuda()
if i < min(len(loader), num):
model(input, lenlist)
return model
def train(train_queue, model, lenlist, args):
model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(
), args.train_lr, momentum=args.train_momentum, weight_decay=args.train_weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.train_epochs, eta_min=args.train_min_lr)
logging.info('Train arch: {}'.format(lenlist))
for epoch in range(args.train_epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_queue):
data_time.update(time.time() - end)
target_var = target.cuda()
input_var = input.cuda()
optimizer.zero_grad() # zero gradient
output = model(input_var, lenlist) # compute output
loss = criterion(output, target_var) # compute loss
loss.backward() # compute gradient
optimizer.step() # do SGD step
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target_var.data)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.train_print_freq == 0 or i == len(train_queue) - 1:
logging.info('\tEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch, i, len(train_queue), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
lr_scheduler.step()
del criterion
del optimizer
del lr_scheduler
return model
def validate(valid_queue, model, lenlist):
"""
Run evaluation
"""
# switch to evaluate mode
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(valid_queue):
input_var = input
target_var = target
# compute output
output = model(input_var, lenlist)
output = output.float()
# measure accuracy
prec1 = accuracy(output.data, target)[0]
top1.update(prec1.item(), input.size(0))
return top1.avg
if __name__ == '__main__':
main()
| new_loader.append((x.cuda(), y.cuda())) | conditional_block |
test_supernet.py | import argparse
import os
import sys
import shutil
import time
import random
import glob
import logging
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from resnet20_supernet import resnet20
from utils import *
'''
Namespace(affine=True,
alpha_type='sample_uniform',
arch_num=50000,
arch_start=1,
batch_size=10000,
bn_calibrate=False,
bn_calibrate_batch=10000,
bn_calibrate_batch_num=1,
convbn_type='sample_channel',
eval_json_path='files/Track1_final_archs.json',
localsep_layers=None,
localsep_portion=1.0,
mask_repeat=1,
model_path='files/supernet.th',
prob_ratio=1.0,
r=1.0,
sameshortcut=True,
save_dir='eval',
save_every=1,
save_file='eval-final',
seed=2,
track_running_stats=False,
train=False,
train_batch_size=128,
train_epochs=1,
train_lr=0.001,
train_min_lr=0,
train_momentum=0.9,
train_print_freq=100,
train_weight_decay=0.0005,
workers=4)
'''
parser = argparse.ArgumentParser(
description='Propert ResNets for CIFAR10 in pytorch')
parser.add_argument('--eval_json_path', help='json file containing archs to evaluete',
default='Track1_Submit/files/benchmark.json', type=str)
parser.add_argument('--model_path', default='Track1_Submit/train/model.th',
help='model checkpoint', type=str)
parser.add_argument('--arch_start', default=1, type=int,
metavar='N', help='the start index of eval archs')
parser.add_argument('--arch_num', default=101, type=int,
metavar='N', help='the num of eval archs')
parser.add_argument('--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--batch_size', default=512, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--affine', action='store_true', help='BN affine')
parser.add_argument('--save_dir', help='The directory used to save the trained models',
default='./checkpoints', type=str)
parser.add_argument('--save_file', help='The file used to save the result',
default='eval-1_50000', type=str)
parser.add_argument(
'--save_every', help='Saves checkpoints at every specified number of epochs', type=int, default=1)
parser.add_argument('--convbn_type',
default='sample_channel',
type=str,
help='convbn forward with different mask: mix_channel or random_mix_channel or sample_channel or sample_random_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1.,
help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
default=128, help='train epoch on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
except:
print("BN track running stats is False in pt but True in model, so here ignore it")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
new_loader.append((x.cuda(), y.cuda()))
return new_loader
def calibrate_bn(loader, model, lenlist, num):
model.train()
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
m.running_mean.data.fill_(0)
m.running_var.data.fill_(0)
m.num_batches_tracked.data.zero_()
m.momentum = None
for i, (input, _) in enumerate(loader):
# input = input.cuda()
if i < min(len(loader), num):
model(input, lenlist)
return model
def train(train_queue, model, lenlist, args):
model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(
), args.train_lr, momentum=args.train_momentum, weight_decay=args.train_weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.train_epochs, eta_min=args.train_min_lr)
logging.info('Train arch: {}'.format(lenlist))
for epoch in range(args.train_epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_queue):
data_time.update(time.time() - end)
target_var = target.cuda()
input_var = input.cuda()
optimizer.zero_grad() # zero gradient
output = model(input_var, lenlist) # compute output
loss = criterion(output, target_var) # compute loss
loss.backward() # compute gradient
optimizer.step() # do SGD step
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target_var.data)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.train_print_freq == 0 or i == len(train_queue) - 1:
logging.info('\tEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch, i, len(train_queue), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
lr_scheduler.step()
del criterion
del optimizer
del lr_scheduler
return model | def validate(valid_queue, model, lenlist):
"""
Run evaluation
"""
# switch to evaluate mode
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(valid_queue):
input_var = input
target_var = target
# compute output
output = model(input_var, lenlist)
output = output.float()
# measure accuracy
prec1 = accuracy(output.data, target)[0]
top1.update(prec1.item(), input.size(0))
return top1.avg
if __name__ == '__main__':
main() | random_line_split | |
test_supernet.py | import argparse
import os
import sys
import shutil
import time
import random
import glob
import logging
import copy
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from resnet20_supernet import resnet20
from utils import *
'''
Namespace(affine=True,
alpha_type='sample_uniform',
arch_num=50000,
arch_start=1,
batch_size=10000,
bn_calibrate=False,
bn_calibrate_batch=10000,
bn_calibrate_batch_num=1,
convbn_type='sample_channel',
eval_json_path='files/Track1_final_archs.json',
localsep_layers=None,
localsep_portion=1.0,
mask_repeat=1,
model_path='files/supernet.th',
prob_ratio=1.0,
r=1.0,
sameshortcut=True,
save_dir='eval',
save_every=1,
save_file='eval-final',
seed=2,
track_running_stats=False,
train=False,
train_batch_size=128,
train_epochs=1,
train_lr=0.001,
train_min_lr=0,
train_momentum=0.9,
train_print_freq=100,
train_weight_decay=0.0005,
workers=4)
'''
parser = argparse.ArgumentParser(
description='Propert ResNets for CIFAR10 in pytorch')
parser.add_argument('--eval_json_path', help='json file containing archs to evaluete',
default='Track1_Submit/files/benchmark.json', type=str)
parser.add_argument('--model_path', default='Track1_Submit/train/model.th',
help='model checkpoint', type=str)
parser.add_argument('--arch_start', default=1, type=int,
metavar='N', help='the start index of eval archs')
parser.add_argument('--arch_num', default=101, type=int,
metavar='N', help='the num of eval archs')
parser.add_argument('--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--batch_size', default=512, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--affine', action='store_true', help='BN affine')
parser.add_argument('--save_dir', help='The directory used to save the trained models',
default='./checkpoints', type=str)
parser.add_argument('--save_file', help='The file used to save the result',
default='eval-1_50000', type=str)
parser.add_argument(
'--save_every', help='Saves checkpoints at every specified number of epochs', type=int, default=1)
parser.add_argument('--convbn_type',
default='sample_channel',
type=str,
help='convbn forward with different mask: mix_channel or random_mix_channel or sample_channel or sample_random_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1.,
help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
default=128, help='train epoch on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
except:
print("BN track running stats is False in pt but True in model, so here ignore it")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
new_loader.append((x.cuda(), y.cuda()))
return new_loader
def calibrate_bn(loader, model, lenlist, num):
model.train()
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
m.running_mean.data.fill_(0)
m.running_var.data.fill_(0)
m.num_batches_tracked.data.zero_()
m.momentum = None
for i, (input, _) in enumerate(loader):
# input = input.cuda()
if i < min(len(loader), num):
model(input, lenlist)
return model
def train(train_queue, model, lenlist, args):
|
def validate(valid_queue, model, lenlist):
"""
Run evaluation
"""
# switch to evaluate mode
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(valid_queue):
input_var = input
target_var = target
# compute output
output = model(input_var, lenlist)
output = output.float()
# measure accuracy
prec1 = accuracy(output.data, target)[0]
top1.update(prec1.item(), input.size(0))
return top1.avg
if __name__ == '__main__':
main()
| model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(
), args.train_lr, momentum=args.train_momentum, weight_decay=args.train_weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.train_epochs, eta_min=args.train_min_lr)
logging.info('Train arch: {}'.format(lenlist))
for epoch in range(args.train_epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_queue):
data_time.update(time.time() - end)
target_var = target.cuda()
input_var = input.cuda()
optimizer.zero_grad() # zero gradient
output = model(input_var, lenlist) # compute output
loss = criterion(output, target_var) # compute loss
loss.backward() # compute gradient
optimizer.step() # do SGD step
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target_var.data)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.train_print_freq == 0 or i == len(train_queue) - 1:
logging.info('\tEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch, i, len(train_queue), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
lr_scheduler.step()
del criterion
del optimizer
del lr_scheduler
return model | identifier_body |
order.js | /**
* Created by cp on 2017/8/14.
*/
var pageSize = 8; //每页显示的记录条数
var curPage = 1; //显示第curPage页
var len; //总行数
var page; //总页数
var arry = {};
var saveUrl = "orderSave";
var updateUrl = "orderUpdate";
var deleteUrl = "orderDelete";
var listUrl ="orders";
$(function () {
;
//获取表头及搜索
//getTh();
//getchaxun();
getTable(new Object());
getfenye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
var temp = checkUser(add["userName"]);;
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
//保存功能
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
return;;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curPage").value);
if (npage > countpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
//表身及分页
//获取表格数据
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
| =contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0 ? len/pageSize : Math.floor(len/pageSize)+1;
//alert(page);
// countpage = allData.countpage
pagenow = allData.pagenow
//alert(pagenow);
}
});
};
//删除数据
function getdet(){
$('input[det="detlet"]').click(function(){
//var key = $('td[key="pkey"]').parent().parent().attr("idx");
var pkey = $(this).parent().siblings('td[key="pkey"]').children().val();
// console.info(pkey,clientid);
if(confirm('确定删除么')){
$.ajax({
url:deleteUrl,
data:{"pkey":pkey},
type:"POST",
// dataType:"text",
success:function(data){
if(data=="ok")
{
alert("删除成功")
location.reload();
//window.location.href="test.php";
}
else
{
alert("删除失败")
}
}
});
}
});
}
//搜索查询
function getchaxun(){
//console.info(arry);
$(".query").click(function(){
var arry = setarry();
if (arry["description"] == "" && arry["clientid"] == "") {
alert("请输入查询内容");
} else {
document.getElementById("curPage").value=1;
getagetable(arry);
console.info(arry);
//getfenye(arry);
}
});
} | var page | identifier_name |
order.js | /**
* Created by cp on 2017/8/14.
*/
var pageSize = 8; //每页显示的记录条数
var curPage = 1; //显示第curPage页
var len; //总行数
var page; //总页数
var arry = {};
var saveUrl = "orderSave";
var updateUrl = "orderUpdate";
var deleteUrl = "orderDelete";
var listUrl ="orders";
$(function () {
;
//获取表头及搜索
//getTh();
//getchaxun();
getTable(new Object());
getfenye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
var temp = checkUser(add["userName"]);;
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
//保存功能
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
return;;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
| success: function (contact) {
var page=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0 ? len/pageSize : Math.floor(len/pageSize)+1;
//alert(page);
// countpage = allData.countpage
pagenow = allData.pagenow
//alert(pagenow);
}
});
};
//删除数据
function getdet(){
$('input[det="detlet"]').click(function(){
//var key = $('td[key="pkey"]').parent().parent().attr("idx");
var pkey = $(this).parent().siblings('td[key="pkey"]').children().val();
// console.info(pkey,clientid);
if(confirm('确定删除么')){
$.ajax({
url:deleteUrl,
data:{"pkey":pkey},
type:"POST",
// dataType:"text",
success:function(data){
if(data=="ok")
{
alert("删除成功")
location.reload();
//window.location.href="test.php";
}
else
{
alert("删除失败")
}
}
});
}
});
}
//搜索查询
function getchaxun(){
//console.info(arry);
$(".query").click(function(){
var arry = setarry();
if (arry["description"] == "" && arry["clientid"] == "") {
alert("请输入查询内容");
} else {
document.getElementById("curPage").value=1;
getagetable(arry);
console.info(arry);
//getfenye(arry);
}
});
} | // // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curPage").value);
if (npage > countpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
//表身及分页
//获取表格数据
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
| identifier_body |
order.js | /**
* Created by cp on 2017/8/14.
*/
var pageSize = 8; //每页显示的记录条数
var curPage = 1; //显示第curPage页
var len; //总行数
var page; //总页数
var arry = {};
var saveUrl = "orderSave";
var updateUrl = "orderUpdate";
var deleteUrl = "orderDelete";
var listUrl ="orders";
$(function () {
;
//获取表头及搜索
//getTh();
//getchaxun();
getTable(new Object());
getfenye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
var temp = checkUser(add["userName"]);;
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
//保存功能
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
return;;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curP | untpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
//表身及分页
//获取表格数据
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
var page=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0 ? len/pageSize : Math.floor(len/pageSize)+1;
//alert(page);
// countpage = allData.countpage
pagenow = allData.pagenow
//alert(pagenow);
}
});
};
//删除数据
function getdet(){
$('input[det="detlet"]').click(function(){
//var key = $('td[key="pkey"]').parent().parent().attr("idx");
var pkey = $(this).parent().siblings('td[key="pkey"]').children().val();
// console.info(pkey,clientid);
if(confirm('确定删除么')){
$.ajax({
url:deleteUrl,
data:{"pkey":pkey},
type:"POST",
// dataType:"text",
success:function(data){
if(data=="ok")
{
alert("删除成功")
location.reload();
//window.location.href="test.php";
}
else
{
alert("删除失败")
}
}
});
}
});
}
//搜索查询
function getchaxun(){
//console.info(arry);
$(".query").click(function(){
var arry = setarry();
if (arry["description"] == "" && arry["clientid"] == "") {
alert("请输入查询内容");
} else {
document.getElementById("curPage").value=1;
getagetable(arry);
console.info(arry);
//getfenye(arry);
}
});
} | age").value);
if (npage > co | conditional_block |
order.js | /**
* Created by cp on 2017/8/14.
*/
var pageSize = 8; //每页显示的记录条数
var curPage = 1; //显示第curPage页
var len; //总行数
var page; //总页数
var arry = {};
var saveUrl = "orderSave";
var updateUrl = "orderUpdate";
var deleteUrl = "orderDelete";
var listUrl ="orders";
$(function () {
;
//获取表头及搜索
//getTh();
//getchaxun();
getTable(new Object());
getfenye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
var temp = checkUser(add["userName"]);;
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
//保存功能
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
return;;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
| //alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curPage").value);
if (npage > countpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
//表身及分页
//获取表格数据
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
var page=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0 ? len/pageSize : Math.floor(len/pageSize)+1;
//alert(page);
// countpage = allData.countpage
pagenow = allData.pagenow
//alert(pagenow);
}
});
};
//删除数据
function getdet(){
$('input[det="detlet"]').click(function(){
//var key = $('td[key="pkey"]').parent().parent().attr("idx");
var pkey = $(this).parent().siblings('td[key="pkey"]').children().val();
// console.info(pkey,clientid);
if(confirm('确定删除么')){
$.ajax({
url:deleteUrl,
data:{"pkey":pkey},
type:"POST",
// dataType:"text",
success:function(data){
if(data=="ok")
{
alert("删除成功")
location.reload();
//window.location.href="test.php";
}
else
{
alert("删除失败")
}
}
});
}
});
}
//搜索查询
function getchaxun(){
//console.info(arry);
$(".query").click(function(){
var arry = setarry();
if (arry["description"] == "" && arry["clientid"] == "") {
alert("请输入查询内容");
} else {
document.getElementById("curPage").value=1;
getagetable(arry);
console.info(arry);
//getfenye(arry);
}
});
} |
} | random_line_split |
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`] which combines a build-pattern for configuration and provides creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like  this is the generated GraphML output.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
/// Enable or disble pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
/// Each edge can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
/// Each node can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write( | .attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
} | XmlEvent::start_element("key") | random_line_split |
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`] which combines a build-pattern for configuration and provides creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like  this is the generated GraphML output.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
/// Enable or disble pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
/// Each edge can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
/// Each node can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() | else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
}
| {
"directed"
} | conditional_block |
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`] which combines a build-pattern for configuration and provides creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like  this is the generated GraphML output.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
/// Enable or disble pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
/// Each edge can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self |
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
/// Each node can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
}
| {
self.export_edges = Some(edge_weight);
self
} | identifier_body |
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`] which combines a build-pattern for configuration and provides creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like  this is the generated GraphML output.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
/// Enable or disble pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
/// Each edge can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
/// Each node can be converted into an arbitray number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn | (&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
}
| fmt | identifier_name |
main.rs | #![feature(rustc_private)]
extern crate im;
extern crate pretty;
extern crate rustc_ast;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_hir;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
mod ast_to_rustspec;
mod hir_to_rustspec;
mod name_resolution;
mod rustspec;
mod rustspec_to_coq;
mod rustspec_to_easycrypt;
mod rustspec_to_fstar;
mod typechecker;
mod util;
use itertools::Itertools;
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_errors::emitter::{ColorConfig, HumanReadableErrorType};
use rustc_errors::DiagnosticId;
use rustc_interface::{
interface::{Compiler, Config},
Queries,
};
use rustc_session::Session;
use rustc_session::{config::ErrorOutputType, search_paths::SearchPath};
use rustc_span::MultiSpan;
use serde::Deserialize;
use serde_json;
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct HacspecCallbacks {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error reading to manifest")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages | .find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}' ...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacpsec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<(), usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index = args.iter().position(|a| a == "-o");
let output_file = match output_file_index {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Optionally an input file can be passed in. This should be mostly used for
// testing.
let input_file = match args.iter().position(|a| a == "-f") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --manifest-path argument if present.
let manifest = match args.iter().position(|a| a == "--manifest-path") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --sysroot. It must be present
log::trace!("args: {:?}", args);
match args.iter().position(|a| a.starts_with("--sysroot")) {
Some(i) => {
compiler_args.push(args.remove(i));
}
None => panic!(" ⚠️ --sysroot is missing. Please report this issue."),
}
let mut callbacks = HacspecCallbacks {
output_file,
// This defaults to the default target directory.
target_directory: env::current_dir().unwrap().to_str().unwrap().to_owned()
+ "/../target/debug/deps",
};
match input_file {
Some(input_file) => {
compiler_args.push(input_file);
// If only a file is provided we add the default dependencies only.
compiler_args.extend_from_slice(&[
"--extern=abstract_integers".to_string(),
"--extern=hacspec_derive".to_string(),
"--extern=hacspec_lib".to_string(),
"--extern=secret_integers".to_string(),
]);
}
None => {
let package_name = args.pop();
log::trace!("package name to analyze: {:?}", package_name);
read_crate(manifest, package_name, &mut compiler_args, &mut callbacks);
}
}
compiler_args.push("--crate-type=lib".to_string());
compiler_args.push("--edition=2021".to_string());
log::trace!("compiler_args: {:?}", compiler_args);
let compiler = RunCompiler::new(&compiler_args, &mut callbacks);
match compiler.run() {
Ok(_) => Ok(()),
Err(_) => Err(1),
}
} | .iter() | random_line_split |
main.rs | #![feature(rustc_private)]
extern crate im;
extern crate pretty;
extern crate rustc_ast;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_hir;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
mod ast_to_rustspec;
mod hir_to_rustspec;
mod name_resolution;
mod rustspec;
mod rustspec_to_coq;
mod rustspec_to_easycrypt;
mod rustspec_to_fstar;
mod typechecker;
mod util;
use itertools::Itertools;
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_errors::emitter::{ColorConfig, HumanReadableErrorType};
use rustc_errors::DiagnosticId;
use rustc_interface::{
interface::{Compiler, Config},
Queries,
};
use rustc_session::Session;
use rustc_session::{config::ErrorOutputType, search_paths::SearchPath};
use rustc_span::MultiSpan;
use serde::Deserialize;
use serde_json;
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct | {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error reading to manifest")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
.iter()
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}' ...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacpsec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<(), usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index = args.iter().position(|a| a == "-o");
let output_file = match output_file_index {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Optionally an input file can be passed in. This should be mostly used for
// testing.
let input_file = match args.iter().position(|a| a == "-f") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --manifest-path argument if present.
let manifest = match args.iter().position(|a| a == "--manifest-path") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --sysroot. It must be present
log::trace!("args: {:?}", args);
match args.iter().position(|a| a.starts_with("--sysroot")) {
Some(i) => {
compiler_args.push(args.remove(i));
}
None => panic!(" ⚠️ --sysroot is missing. Please report this issue."),
}
let mut callbacks = HacspecCallbacks {
output_file,
// This defaults to the default target directory.
target_directory: env::current_dir().unwrap().to_str().unwrap().to_owned()
+ "/../target/debug/deps",
};
match input_file {
Some(input_file) => {
compiler_args.push(input_file);
// If only a file is provided we add the default dependencies only.
compiler_args.extend_from_slice(&[
"--extern=abstract_integers".to_string(),
"--extern=hacspec_derive".to_string(),
"--extern=hacspec_lib".to_string(),
"--extern=secret_integers".to_string(),
]);
}
None => {
let package_name = args.pop();
log::trace!("package name to analyze: {:?}", package_name);
read_crate(manifest, package_name, &mut compiler_args, &mut callbacks);
}
}
compiler_args.push("--crate-type=lib".to_string());
compiler_args.push("--edition=2021".to_string());
log::trace!("compiler_args: {:?}", compiler_args);
let compiler = RunCompiler::new(&compiler_args, &mut callbacks);
match compiler.run() {
Ok(_) => Ok(()),
Err(_) => Err(1),
}
}
| HacspecCallbacks | identifier_name |
main.rs | #![feature(rustc_private)]
extern crate im;
extern crate pretty;
extern crate rustc_ast;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_hir;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
mod ast_to_rustspec;
mod hir_to_rustspec;
mod name_resolution;
mod rustspec;
mod rustspec_to_coq;
mod rustspec_to_easycrypt;
mod rustspec_to_fstar;
mod typechecker;
mod util;
use itertools::Itertools;
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_errors::emitter::{ColorConfig, HumanReadableErrorType};
use rustc_errors::DiagnosticId;
use rustc_interface::{
interface::{Compiler, Config},
Queries,
};
use rustc_session::Session;
use rustc_session::{config::ErrorOutputType, search_paths::SearchPath};
use rustc_span::MultiSpan;
use serde::Deserialize;
use serde_json;
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct HacspecCallbacks {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) | , usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index = args.iter().position(|a| a == "-o");
let output_file = match output_file_index {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Optionally an input file can be passed in. This should be mostly used for
// testing.
let input_file = match args.iter().position(|a| a == "-f") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --manifest-path argument if present.
let manifest = match args.iter().position(|a| a == "--manifest-path") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --sysroot. It must be present
log::trace!("args: {:?}", args);
match args.iter().position(|a| a.starts_with("--sysroot")) {
Some(i) => {
compiler_args.push(args.remove(i));
}
None => panic!(" ⚠️ --sysroot is missing. Please report this issue."),
}
let mut callbacks = HacspecCallbacks {
output_file,
// This defaults to the default target directory.
target_directory: env::current_dir().unwrap().to_str().unwrap().to_owned()
+ "/../target/debug/deps",
};
match input_file {
Some(input_file) => {
compiler_args.push(input_file);
// If only a file is provided we add the default dependencies only.
compiler_args.extend_from_slice(&[
"--extern=abstract_integers".to_string(),
"--extern=hacspec_derive".to_string(),
"--extern=hacspec_lib".to_string(),
"--extern=secret_integers".to_string(),
]);
}
None => {
let package_name = args.pop();
log::trace!("package name to analyze: {:?}", package_name);
read_crate(manifest, package_name, &mut compiler_args, &mut callbacks);
}
}
compiler_args.push("--crate-type=lib".to_string());
compiler_args.push("--edition=2021".to_string());
log::trace!("compiler_args: {:?}", compiler_args);
let compiler = RunCompiler::new(&compiler_args, &mut callbacks);
match compiler.run() {
Ok(_) => Ok(()),
Err(_) => Err(1),
}
}
| {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error reading to manifest")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
.iter()
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}' ...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacpsec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<() | identifier_body |
get.go | package local_get
import (
"fmt"
"github.com/creativesoftwarefdn/weaviate/database/schema"
"github.com/creativesoftwarefdn/weaviate/database/schema/kind"
common "github.com/creativesoftwarefdn/weaviate/graphqlapi/common_resolver"
"github.com/creativesoftwarefdn/weaviate/graphqlapi/local/common_filters"
"github.com/creativesoftwarefdn/weaviate/models"
"github.com/graphql-go/graphql"
graphql_ast "github.com/graphql-go/graphql/language/ast"
"strings"
)
func Build(dbSchema *schema.Schema) (*graphql.Field, error) |
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect type of result.
return (*knownClasses)["City"]
fmt.Printf("Resolver: WHOOPTYDOO\n")
return nil
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description: "Pagination option, show the results after the first x results",
Type: graphql.Int,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- thing class (supposed to extract pagination, now return nil)\n")
filtersAndResolver := p.Source.(*filtersAndResolver)
pagination, err := common.ExtractPaginationFromArgs(p.Args)
if err != nil {
return nil, err
}
// There can only be exactly one graphql_ast.Field; it is the class name.
if len(p.Info.FieldASTs) != 1 {
panic("Only one Field expected here")
}
selectionsOfClass := p.Info.FieldASTs[0].SelectionSet
properties, err := extractProperties(selectionsOfClass)
if err != nil {
return nil, err
}
params := LocalGetClassParams{
Filters: filtersAndResolver.filters,
Kind: k,
ClassName: class.Class,
Pagination: pagination,
Properties: properties,
}
promise, err := filtersAndResolver.resolver.LocalGetClass(¶ms)
return promise, err
},
}
return &classField, nil
}
func extractProperties(selections *graphql_ast.SelectionSet) ([]SelectProperty, error) {
//debugFieldAsts(fieldASTs)
var properties []SelectProperty
for _, selection := range selections.Selections {
field := selection.(*graphql_ast.Field)
name := field.Name.Value
property := SelectProperty{Name: name}
property.IsPrimitive = (field.SelectionSet == nil)
if !property.IsPrimitive {
// We can interpret this property in different ways
for _, subSelection := range field.SelectionSet.Selections {
// Is it a field with the name __typename?
subsectionField, ok := subSelection.(*graphql_ast.Field)
if ok {
if subsectionField.Name.Value == "__typename" {
property.IncludeTypeName = true
continue
} else {
return nil, fmt.Errorf("Expected a InlineFragment, not a '%s' field ", subsectionField.Name.Value)
}
}
// Otherwise these _must_ be inline fragments
fragment, ok := subSelection.(*graphql_ast.InlineFragment)
if !ok {
return nil, fmt.Errorf("Expected a InlineFragment; you need to specify as which type you want to retrieve a reference %#v", subSelection)
}
err, className := schema.ValidateClassName(fragment.TypeCondition.Name.Value)
if err != nil {
return nil, fmt.Errorf("The inline fragment type name '%s' is not a valid class name.", fragment.TypeCondition.Name.Value)
}
subProperties, err := extractProperties(fragment.SelectionSet)
if err != nil {
return nil, err
}
property.Refs = append(property.Refs, SelectClass{
ClassName: string(className),
RefProperties: subProperties,
})
}
}
properties = append(properties, property)
}
return properties, nil
}
| {
getKinds := graphql.Fields{}
if len(dbSchema.Actions.Classes) == 0 && len(dbSchema.Things.Classes) == 0 {
return nil, fmt.Errorf("There are not any Actions or Things classes defined yet.")
}
knownClasses := map[string]*graphql.Object{}
if len(dbSchema.Actions.Classes) > 0 {
localGetActions, err := buildGetClasses(dbSchema, kind.ACTION_KIND, dbSchema.Actions, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
} | identifier_body |
get.go | package local_get
import (
"fmt"
"github.com/creativesoftwarefdn/weaviate/database/schema"
"github.com/creativesoftwarefdn/weaviate/database/schema/kind"
common "github.com/creativesoftwarefdn/weaviate/graphqlapi/common_resolver"
"github.com/creativesoftwarefdn/weaviate/graphqlapi/local/common_filters"
"github.com/creativesoftwarefdn/weaviate/models"
"github.com/graphql-go/graphql"
graphql_ast "github.com/graphql-go/graphql/language/ast"
"strings"
)
func Build(dbSchema *schema.Schema) (*graphql.Field, error) {
getKinds := graphql.Fields{}
if len(dbSchema.Actions.Classes) == 0 && len(dbSchema.Things.Classes) == 0 {
return nil, fmt.Errorf("There are not any Actions or Things classes defined yet.")
}
knownClasses := map[string]*graphql.Object{}
if len(dbSchema.Actions.Classes) > 0 {
localGetActions, err := buildGetClasses(dbSchema, kind.ACTION_KIND, dbSchema.Actions, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
} | if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect type of result.
return (*knownClasses)["City"]
fmt.Printf("Resolver: WHOOPTYDOO\n")
return nil
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description: "Pagination option, show the results after the first x results",
Type: graphql.Int,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- thing class (supposed to extract pagination, now return nil)\n")
filtersAndResolver := p.Source.(*filtersAndResolver)
pagination, err := common.ExtractPaginationFromArgs(p.Args)
if err != nil {
return nil, err
}
// There can only be exactly one graphql_ast.Field; it is the class name.
if len(p.Info.FieldASTs) != 1 {
panic("Only one Field expected here")
}
selectionsOfClass := p.Info.FieldASTs[0].SelectionSet
properties, err := extractProperties(selectionsOfClass)
if err != nil {
return nil, err
}
params := LocalGetClassParams{
Filters: filtersAndResolver.filters,
Kind: k,
ClassName: class.Class,
Pagination: pagination,
Properties: properties,
}
promise, err := filtersAndResolver.resolver.LocalGetClass(¶ms)
return promise, err
},
}
return &classField, nil
}
func extractProperties(selections *graphql_ast.SelectionSet) ([]SelectProperty, error) {
//debugFieldAsts(fieldASTs)
var properties []SelectProperty
for _, selection := range selections.Selections {
field := selection.(*graphql_ast.Field)
name := field.Name.Value
property := SelectProperty{Name: name}
property.IsPrimitive = (field.SelectionSet == nil)
if !property.IsPrimitive {
// We can interpret this property in different ways
for _, subSelection := range field.SelectionSet.Selections {
// Is it a field with the name __typename?
subsectionField, ok := subSelection.(*graphql_ast.Field)
if ok {
if subsectionField.Name.Value == "__typename" {
property.IncludeTypeName = true
continue
} else {
return nil, fmt.Errorf("Expected a InlineFragment, not a '%s' field ", subsectionField.Name.Value)
}
}
// Otherwise these _must_ be inline fragments
fragment, ok := subSelection.(*graphql_ast.InlineFragment)
if !ok {
return nil, fmt.Errorf("Expected a InlineFragment; you need to specify as which type you want to retrieve a reference %#v", subSelection)
}
err, className := schema.ValidateClassName(fragment.TypeCondition.Name.Value)
if err != nil {
return nil, fmt.Errorf("The inline fragment type name '%s' is not a valid class name.", fragment.TypeCondition.Name.Value)
}
subProperties, err := extractProperties(fragment.SelectionSet)
if err != nil {
return nil, err
}
property.Refs = append(property.Refs, SelectClass{
ClassName: string(className),
RefProperties: subProperties,
})
}
}
properties = append(properties, property)
}
return properties, nil
} |
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses) | random_line_split |
get.go | package local_get
import (
"fmt"
"github.com/creativesoftwarefdn/weaviate/database/schema"
"github.com/creativesoftwarefdn/weaviate/database/schema/kind"
common "github.com/creativesoftwarefdn/weaviate/graphqlapi/common_resolver"
"github.com/creativesoftwarefdn/weaviate/graphqlapi/local/common_filters"
"github.com/creativesoftwarefdn/weaviate/models"
"github.com/graphql-go/graphql"
graphql_ast "github.com/graphql-go/graphql/language/ast"
"strings"
)
func | (dbSchema *schema.Schema) (*graphql.Field, error) {
getKinds := graphql.Fields{}
if len(dbSchema.Actions.Classes) == 0 && len(dbSchema.Things.Classes) == 0 {
return nil, fmt.Errorf("There are not any Actions or Things classes defined yet.")
}
knownClasses := map[string]*graphql.Object{}
if len(dbSchema.Actions.Classes) > 0 {
localGetActions, err := buildGetClasses(dbSchema, kind.ACTION_KIND, dbSchema.Actions, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect type of result.
return (*knownClasses)["City"]
fmt.Printf("Resolver: WHOOPTYDOO\n")
return nil
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description: "Pagination option, show the results after the first x results",
Type: graphql.Int,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- thing class (supposed to extract pagination, now return nil)\n")
filtersAndResolver := p.Source.(*filtersAndResolver)
pagination, err := common.ExtractPaginationFromArgs(p.Args)
if err != nil {
return nil, err
}
// There can only be exactly one graphql_ast.Field; it is the class name.
if len(p.Info.FieldASTs) != 1 {
panic("Only one Field expected here")
}
selectionsOfClass := p.Info.FieldASTs[0].SelectionSet
properties, err := extractProperties(selectionsOfClass)
if err != nil {
return nil, err
}
params := LocalGetClassParams{
Filters: filtersAndResolver.filters,
Kind: k,
ClassName: class.Class,
Pagination: pagination,
Properties: properties,
}
promise, err := filtersAndResolver.resolver.LocalGetClass(¶ms)
return promise, err
},
}
return &classField, nil
}
func extractProperties(selections *graphql_ast.SelectionSet) ([]SelectProperty, error) {
//debugFieldAsts(fieldASTs)
var properties []SelectProperty
for _, selection := range selections.Selections {
field := selection.(*graphql_ast.Field)
name := field.Name.Value
property := SelectProperty{Name: name}
property.IsPrimitive = (field.SelectionSet == nil)
if !property.IsPrimitive {
// We can interpret this property in different ways
for _, subSelection := range field.SelectionSet.Selections {
// Is it a field with the name __typename?
subsectionField, ok := subSelection.(*graphql_ast.Field)
if ok {
if subsectionField.Name.Value == "__typename" {
property.IncludeTypeName = true
continue
} else {
return nil, fmt.Errorf("Expected a InlineFragment, not a '%s' field ", subsectionField.Name.Value)
}
}
// Otherwise these _must_ be inline fragments
fragment, ok := subSelection.(*graphql_ast.InlineFragment)
if !ok {
return nil, fmt.Errorf("Expected a InlineFragment; you need to specify as which type you want to retrieve a reference %#v", subSelection)
}
err, className := schema.ValidateClassName(fragment.TypeCondition.Name.Value)
if err != nil {
return nil, fmt.Errorf("The inline fragment type name '%s' is not a valid class name.", fragment.TypeCondition.Name.Value)
}
subProperties, err := extractProperties(fragment.SelectionSet)
if err != nil {
return nil, err
}
property.Refs = append(property.Refs, SelectClass{
ClassName: string(className),
RefProperties: subProperties,
})
}
}
properties = append(properties, property)
}
return properties, nil
}
| Build | identifier_name |
get.go | package local_get
import (
"fmt"
"github.com/creativesoftwarefdn/weaviate/database/schema"
"github.com/creativesoftwarefdn/weaviate/database/schema/kind"
common "github.com/creativesoftwarefdn/weaviate/graphqlapi/common_resolver"
"github.com/creativesoftwarefdn/weaviate/graphqlapi/local/common_filters"
"github.com/creativesoftwarefdn/weaviate/models"
"github.com/graphql-go/graphql"
graphql_ast "github.com/graphql-go/graphql/language/ast"
"strings"
)
func Build(dbSchema *schema.Schema) (*graphql.Field, error) {
getKinds := graphql.Fields{}
if len(dbSchema.Actions.Classes) == 0 && len(dbSchema.Things.Classes) == 0 {
return nil, fmt.Errorf("There are not any Actions or Things classes defined yet.")
}
knownClasses := map[string]*graphql.Object{}
if len(dbSchema.Actions.Classes) > 0 {
localGetActions, err := buildGetClasses(dbSchema, kind.ACTION_KIND, dbSchema.Actions, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect type of result.
return (*knownClasses)["City"]
fmt.Printf("Resolver: WHOOPTYDOO\n")
return nil
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description: "Pagination option, show the results after the first x results",
Type: graphql.Int,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- thing class (supposed to extract pagination, now return nil)\n")
filtersAndResolver := p.Source.(*filtersAndResolver)
pagination, err := common.ExtractPaginationFromArgs(p.Args)
if err != nil {
return nil, err
}
// There can only be exactly one graphql_ast.Field; it is the class name.
if len(p.Info.FieldASTs) != 1 {
panic("Only one Field expected here")
}
selectionsOfClass := p.Info.FieldASTs[0].SelectionSet
properties, err := extractProperties(selectionsOfClass)
if err != nil {
return nil, err
}
params := LocalGetClassParams{
Filters: filtersAndResolver.filters,
Kind: k,
ClassName: class.Class,
Pagination: pagination,
Properties: properties,
}
promise, err := filtersAndResolver.resolver.LocalGetClass(¶ms)
return promise, err
},
}
return &classField, nil
}
func extractProperties(selections *graphql_ast.SelectionSet) ([]SelectProperty, error) {
//debugFieldAsts(fieldASTs)
var properties []SelectProperty
for _, selection := range selections.Selections {
field := selection.(*graphql_ast.Field)
name := field.Name.Value
property := SelectProperty{Name: name}
property.IsPrimitive = (field.SelectionSet == nil)
if !property.IsPrimitive |
properties = append(properties, property)
}
return properties, nil
}
| {
// We can interpret this property in different ways
for _, subSelection := range field.SelectionSet.Selections {
// Is it a field with the name __typename?
subsectionField, ok := subSelection.(*graphql_ast.Field)
if ok {
if subsectionField.Name.Value == "__typename" {
property.IncludeTypeName = true
continue
} else {
return nil, fmt.Errorf("Expected a InlineFragment, not a '%s' field ", subsectionField.Name.Value)
}
}
// Otherwise these _must_ be inline fragments
fragment, ok := subSelection.(*graphql_ast.InlineFragment)
if !ok {
return nil, fmt.Errorf("Expected a InlineFragment; you need to specify as which type you want to retrieve a reference %#v", subSelection)
}
err, className := schema.ValidateClassName(fragment.TypeCondition.Name.Value)
if err != nil {
return nil, fmt.Errorf("The inline fragment type name '%s' is not a valid class name.", fragment.TypeCondition.Name.Value)
}
subProperties, err := extractProperties(fragment.SelectionSet)
if err != nil {
return nil, err
}
property.Refs = append(property.Refs, SelectClass{
ClassName: string(className),
RefProperties: subProperties,
})
}
} | conditional_block |
configV2.go | // Copyright 2016-2018 The grok_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2
import (
"errors"
"fmt"
"github.com/sequix/grok_exporter/template"
"gopkg.in/natefinch/lumberjack.v2"
"gopkg.in/yaml.v2"
"os"
"strings"
"time"
)
const (
defaultLogLevel = "info"
defaultLogTo = "mixed"
defaultPositionsFile = "/tmp/position.json"
defaultPositionSyncIntervcal = 500 * time.Millisecond
defaultPollInterval = 500 * time.Millisecond
defaultRetentionCheckInterval = 60 * time.Second
inputTypeStdin = "stdin"
inputTypeFile = "file"
inputTypeWebhook = "webhook"
)
func Unmarshal(config []byte) (*Config, error) {
cfg := &Config{}
err := yaml.Unmarshal(config, cfg)
if err != nil {
return nil, fmt.Errorf("invalid configuration: %v. make sure to use 'single quotes' around strings with special characters (like match patterns or label templates), and make sure to use '-' only for lists (metrics) but not for maps (labels).", err.Error())
}
err = AddDefaultsAndValidate(cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
type Config struct {
Global GlobalConfig `yaml:",omitempty"`
Input InputConfig `yaml:",omitempty"`
Grok GrokConfig `yaml:",omitempty"`
Metrics MetricsConfig `yaml:",omitempty"`
Server ServerConfig `yaml:",omitempty"`
LogRotate lumberjack.Logger `yaml:"log_rotate,omitempty"`
}
type GlobalConfig struct {
ConfigVersion int `yaml:"config_version,omitempty"`
LogLevel string `yaml:"log_level,omitempty"`
LogTo string `yaml:"log_to,omitempty"`
RetentionCheckInterval time.Duration `yaml:"retention_check_interval,omitempty"` // implicitly parsed with time.ParseDuration()
}
type InputConfig struct {
CollectMode string `yaml:"collectMode,omitempty"`
Type string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
PositionFile string `yaml:"position_file,omitempty"`
SyncInterval time.Duration `yaml:"position_sync_interval,omitempty"`
PollInterval time.Duration `yaml:"poll_interval,omitempty"`
MaxLinesInBuffer int `yaml:"max_lines_in_buffer,omitempty"`
MaxLineSize int `yaml:"max_line_size,omitempty"`
MaxLinesRatePerFile uint16 `yaml:"max_lines_rate_per_file,omitempty"`
IdleTimeout time.Duration `yaml:"idle_timeout,omitempty"`
WebhookPath string `yaml:"webhook_path,omitempty"`
WebhookFormat string `yaml:"webhook_format,omitempty"`
WebhookJsonSelector string `yaml:"webhook_json_selector,omitempty"`
WebhookTextBulkSeparator string `yaml:"webhook_text_bulk_separator,omitempty"`
}
type GrokConfig struct {
PatternsDir string `yaml:"patterns_dir,omitempty"`
AdditionalPatterns []string `yaml:"additional_patterns,omitempty"`
}
type MetricConfig struct {
Type string `yaml:",omitempty"`
Name string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
Help string `yaml:",omitempty"`
Match string `yaml:",omitempty"`
Retention time.Duration `yaml:",omitempty"` // implicitly parsed with time.ParseDuration()
Value string `yaml:",omitempty"`
Cumulative bool `yaml:",omitempty"`
Buckets []float64 `yaml:",flow,omitempty"`
Quantiles map[float64]float64 `yaml:",flow,omitempty"`
Labels map[string]string `yaml:",omitempty"`
LabelTemplates []template.Template `yaml:"-"` // parsed version of Labels, will not be serialized to yaml.
ValueTemplate template.Template `yaml:"-"` // parsed version of Value, will not be serialized to yaml.
DeleteMatch string `yaml:"delete_match,omitempty"`
DeleteLabels map[string]string `yaml:"delete_labels,omitempty"` // TODO: Make sure that DeleteMatch is not nil if DeleteLabels are used.
DeleteLabelTemplates []template.Template `yaml:"-"` // parsed version of DeleteLabels, will not be serialized to yaml.
}
type MetricsConfig []MetricConfig
type ServerConfig struct {
Protocol string `yaml:",omitempty"`
Host string `yaml:",omitempty"`
Port int `yaml:",omitempty"`
Path string `yaml:",omitempty"`
Cert string `yaml:",omitempty"`
Key string `yaml:",omitempty"`
}
func (cfg *Config) LoadEnvironments() {
path := cfg.Input.Path
for i := range path {
path[i] = os.ExpandEnv(path[i])
}
excludes := cfg.Input.Excludes
for i := range excludes {
excludes[i] = os.ExpandEnv(excludes[i])
}
for i := range cfg.Metrics {
m := &cfg.Metrics[i]
for j, p := range m.Path {
m.Path[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) validate() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type {
case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false
case "histogram":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, true, false
case "summary":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, false, true
default:
return fmt.Errorf("Invalid 'metrics.type': '%v'. We currently only support 'counter' and 'gauge'.", c.Type)
}
switch {
case hasValue && len(c.Value) == 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' must not be empty for %v metrics.", c.Type)
case !hasValue && len(c.Value) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' cannot be used for %v metrics.", c.Type)
case !cumulativeAllowed && c.Cumulative:
return fmt.Errorf("Invalid metric configuration: 'metrics.cumulative' cannot be used for %v metrics.", c.Type)
case !bucketsAllowed && len(c.Buckets) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
case !quantilesAllowed && len(c.Quantiles) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
}
if len(c.DeleteMatch) > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_match' is only supported for metrics with labels.")
}
if len(c.DeleteMatch) == 0 && len(c.DeleteLabelTemplates) > 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_labels' can only be used when 'metrics.delete_match' is present.")
}
if c.Retention > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.retention' is only supported for metrics with labels.")
}
for _, deleteLabelTemplate := range c.DeleteLabelTemplates {
found := false
for _, labelTemplate := range c.LabelTemplates |
if !found {
return fmt.Errorf("Invalid metric configuration: '%v' cannot be used as a delete_label, because the metric does not have a label named '%v'.", deleteLabelTemplate.Name(), deleteLabelTemplate.Name())
}
}
// InitTemplates() validates that labels/delete_labels/value are present as grok_fields in the grok pattern.
return nil
}
func (c *ServerConfig) validate() error {
switch {
case c.Protocol != "https" && c.Protocol != "http":
return fmt.Errorf("Invalid 'server.protocol': '%v'. Expecting 'http' or 'https'.", c.Protocol)
case c.Port <= 0:
return fmt.Errorf("Invalid 'server.port': '%v'.", c.Port)
case !strings.HasPrefix(c.Path, "/"):
return fmt.Errorf("Invalid server configuration: 'server.path' must start with '/'.")
case c.Protocol == "https":
if c.Cert != "" && c.Key == "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' must not be specified without 'server.key'")
}
if c.Cert == "" && c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.key' must not be specified without 'server.cert'")
}
case c.Protocol == "http":
if c.Cert != "" || c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' and 'server.key' can only be configured for protocol 'https'.")
}
}
return nil
}
// Made this public so it can be called when converting config v1 to config v2.
func AddDefaultsAndValidate(cfg *Config) error {
var err error
cfg.addDefaults()
for i := range []MetricConfig(cfg.Metrics) {
err = cfg.Metrics[i].InitTemplates()
if err != nil {
return err
}
}
return cfg.validate()
}
// Made this public so MetricConfig can be initialized in tests.
func (metric *MetricConfig) InitTemplates() error {
var (
err error
tmplt template.Template
msg = "invalid configuration: failed to read metric %v: error parsing %v template: %v: " +
"don't forget to put a . (dot) in front of grok fields, otherwise it will be interpreted as a function."
)
for _, t := range []struct {
src map[string]string // label / template string as read from the config file
dest *[]template.Template // parsed template used internally in grok_exporter
}{
{
src: metric.Labels,
dest: &(metric.LabelTemplates),
},
{
src: metric.DeleteLabels,
dest: &(metric.DeleteLabelTemplates),
},
} {
*t.dest = make([]template.Template, 0, len(t.src))
for name, templateString := range t.src {
tmplt, err = template.New(name, templateString)
if err != nil {
return fmt.Errorf(msg, fmt.Sprintf("label %v", metric.Name), name, err.Error())
}
*t.dest = append(*t.dest, tmplt)
}
}
if len(metric.Value) > 0 {
metric.ValueTemplate, err = template.New("__value__", metric.Value)
if err != nil {
return fmt.Errorf(msg, "value", metric.Name, err.Error())
}
}
return nil
}
// YAML representation, does not include default values.
func (cfg *Config) String() string {
stripped := cfg.copy()
if stripped.Global.RetentionCheckInterval == defaultRetentionCheckInterval {
stripped.Global.RetentionCheckInterval = 0
}
if stripped.Server.Path == "/metrics" {
stripped.Server.Path = ""
}
return stripped.marshalToString()
}
func (cfg *Config) copy() *Config {
result, _ := Unmarshal([]byte(cfg.marshalToString()))
return result
}
func (cfg *Config) marshalToString() string {
out, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Sprintf("ERROR: Failed to marshal config: %v", err.Error())
}
result := string(out)
// Pretend fail_on_missing_logfile is a boolean, remove quotes
result = strings.Replace(result, "fail_on_missing_logfile: \"false\"", "fail_on_missing_logfile: false", -1)
result = strings.Replace(result, "fail_on_missing_logfile: \"true\"", "fail_on_missing_logfile: true", -1)
return result
}
| {
if deleteLabelTemplate.Name() == labelTemplate.Name() {
found = true
break
}
} | conditional_block |
configV2.go | // Copyright 2016-2018 The grok_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2
import (
"errors"
"fmt"
"github.com/sequix/grok_exporter/template"
"gopkg.in/natefinch/lumberjack.v2"
"gopkg.in/yaml.v2"
"os"
"strings"
"time"
)
const (
defaultLogLevel = "info"
defaultLogTo = "mixed"
defaultPositionsFile = "/tmp/position.json"
defaultPositionSyncIntervcal = 500 * time.Millisecond
defaultPollInterval = 500 * time.Millisecond
defaultRetentionCheckInterval = 60 * time.Second
inputTypeStdin = "stdin"
inputTypeFile = "file"
inputTypeWebhook = "webhook"
)
func Unmarshal(config []byte) (*Config, error) {
cfg := &Config{}
err := yaml.Unmarshal(config, cfg)
if err != nil {
return nil, fmt.Errorf("invalid configuration: %v. make sure to use 'single quotes' around strings with special characters (like match patterns or label templates), and make sure to use '-' only for lists (metrics) but not for maps (labels).", err.Error())
}
err = AddDefaultsAndValidate(cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
type Config struct {
Global GlobalConfig `yaml:",omitempty"`
Input InputConfig `yaml:",omitempty"`
Grok GrokConfig `yaml:",omitempty"`
Metrics MetricsConfig `yaml:",omitempty"`
Server ServerConfig `yaml:",omitempty"`
LogRotate lumberjack.Logger `yaml:"log_rotate,omitempty"`
}
type GlobalConfig struct {
ConfigVersion int `yaml:"config_version,omitempty"`
LogLevel string `yaml:"log_level,omitempty"`
LogTo string `yaml:"log_to,omitempty"`
RetentionCheckInterval time.Duration `yaml:"retention_check_interval,omitempty"` // implicitly parsed with time.ParseDuration()
}
type InputConfig struct {
CollectMode string `yaml:"collectMode,omitempty"`
Type string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
PositionFile string `yaml:"position_file,omitempty"`
SyncInterval time.Duration `yaml:"position_sync_interval,omitempty"`
PollInterval time.Duration `yaml:"poll_interval,omitempty"`
MaxLinesInBuffer int `yaml:"max_lines_in_buffer,omitempty"`
MaxLineSize int `yaml:"max_line_size,omitempty"`
MaxLinesRatePerFile uint16 `yaml:"max_lines_rate_per_file,omitempty"`
IdleTimeout time.Duration `yaml:"idle_timeout,omitempty"`
WebhookPath string `yaml:"webhook_path,omitempty"`
WebhookFormat string `yaml:"webhook_format,omitempty"`
WebhookJsonSelector string `yaml:"webhook_json_selector,omitempty"`
WebhookTextBulkSeparator string `yaml:"webhook_text_bulk_separator,omitempty"`
}
type GrokConfig struct {
PatternsDir string `yaml:"patterns_dir,omitempty"`
AdditionalPatterns []string `yaml:"additional_patterns,omitempty"`
}
type MetricConfig struct {
Type string `yaml:",omitempty"`
Name string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
Help string `yaml:",omitempty"`
Match string `yaml:",omitempty"`
Retention time.Duration `yaml:",omitempty"` // implicitly parsed with time.ParseDuration()
Value string `yaml:",omitempty"`
Cumulative bool `yaml:",omitempty"`
Buckets []float64 `yaml:",flow,omitempty"`
Quantiles map[float64]float64 `yaml:",flow,omitempty"`
Labels map[string]string `yaml:",omitempty"`
LabelTemplates []template.Template `yaml:"-"` // parsed version of Labels, will not be serialized to yaml.
ValueTemplate template.Template `yaml:"-"` // parsed version of Value, will not be serialized to yaml.
DeleteMatch string `yaml:"delete_match,omitempty"`
DeleteLabels map[string]string `yaml:"delete_labels,omitempty"` // TODO: Make sure that DeleteMatch is not nil if DeleteLabels are used.
DeleteLabelTemplates []template.Template `yaml:"-"` // parsed version of DeleteLabels, will not be serialized to yaml.
}
type MetricsConfig []MetricConfig
type ServerConfig struct {
Protocol string `yaml:",omitempty"`
Host string `yaml:",omitempty"`
Port int `yaml:",omitempty"`
Path string `yaml:",omitempty"`
Cert string `yaml:",omitempty"`
Key string `yaml:",omitempty"`
}
func (cfg *Config) LoadEnvironments() {
path := cfg.Input.Path
for i := range path {
path[i] = os.ExpandEnv(path[i])
}
excludes := cfg.Input.Excludes
for i := range excludes {
excludes[i] = os.ExpandEnv(excludes[i])
}
for i := range cfg.Metrics {
m := &cfg.Metrics[i]
for j, p := range m.Path {
m.Path[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) validate() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type {
case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false
case "histogram":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, true, false
case "summary":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, false, true
default:
return fmt.Errorf("Invalid 'metrics.type': '%v'. We currently only support 'counter' and 'gauge'.", c.Type)
}
switch {
case hasValue && len(c.Value) == 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' must not be empty for %v metrics.", c.Type)
case !hasValue && len(c.Value) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' cannot be used for %v metrics.", c.Type)
case !cumulativeAllowed && c.Cumulative:
return fmt.Errorf("Invalid metric configuration: 'metrics.cumulative' cannot be used for %v metrics.", c.Type)
case !bucketsAllowed && len(c.Buckets) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
case !quantilesAllowed && len(c.Quantiles) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
}
if len(c.DeleteMatch) > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_match' is only supported for metrics with labels.")
}
if len(c.DeleteMatch) == 0 && len(c.DeleteLabelTemplates) > 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_labels' can only be used when 'metrics.delete_match' is present.")
}
if c.Retention > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.retention' is only supported for metrics with labels.")
}
for _, deleteLabelTemplate := range c.DeleteLabelTemplates {
found := false
for _, labelTemplate := range c.LabelTemplates {
if deleteLabelTemplate.Name() == labelTemplate.Name() {
found = true
break
}
}
if !found {
return fmt.Errorf("Invalid metric configuration: '%v' cannot be used as a delete_label, because the metric does not have a label named '%v'.", deleteLabelTemplate.Name(), deleteLabelTemplate.Name())
}
}
// InitTemplates() validates that labels/delete_labels/value are present as grok_fields in the grok pattern.
return nil
}
func (c *ServerConfig) validate() error {
switch {
case c.Protocol != "https" && c.Protocol != "http":
return fmt.Errorf("Invalid 'server.protocol': '%v'. Expecting 'http' or 'https'.", c.Protocol)
case c.Port <= 0:
return fmt.Errorf("Invalid 'server.port': '%v'.", c.Port)
case !strings.HasPrefix(c.Path, "/"):
return fmt.Errorf("Invalid server configuration: 'server.path' must start with '/'.")
case c.Protocol == "https":
if c.Cert != "" && c.Key == "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' must not be specified without 'server.key'")
}
if c.Cert == "" && c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.key' must not be specified without 'server.cert'")
}
case c.Protocol == "http":
if c.Cert != "" || c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' and 'server.key' can only be configured for protocol 'https'.")
}
}
return nil
}
// Made this public so it can be called when converting config v1 to config v2.
func AddDefaultsAndValidate(cfg *Config) error {
var err error
cfg.addDefaults()
for i := range []MetricConfig(cfg.Metrics) {
err = cfg.Metrics[i].InitTemplates()
if err != nil {
return err
}
}
return cfg.validate()
}
// Made this public so MetricConfig can be initialized in tests.
func (metric *MetricConfig) InitTemplates() error {
var (
err error
tmplt template.Template
msg = "invalid configuration: failed to read metric %v: error parsing %v template: %v: " +
"don't forget to put a . (dot) in front of grok fields, otherwise it will be interpreted as a function."
)
for _, t := range []struct {
src map[string]string // label / template string as read from the config file
dest *[]template.Template // parsed template used internally in grok_exporter
}{
{
src: metric.Labels,
dest: &(metric.LabelTemplates),
},
{
src: metric.DeleteLabels,
dest: &(metric.DeleteLabelTemplates),
},
} {
*t.dest = make([]template.Template, 0, len(t.src))
for name, templateString := range t.src {
tmplt, err = template.New(name, templateString)
if err != nil {
return fmt.Errorf(msg, fmt.Sprintf("label %v", metric.Name), name, err.Error())
}
*t.dest = append(*t.dest, tmplt)
}
}
if len(metric.Value) > 0 {
metric.ValueTemplate, err = template.New("__value__", metric.Value)
if err != nil {
return fmt.Errorf(msg, "value", metric.Name, err.Error())
}
}
return nil
}
// YAML representation, does not include default values.
func (cfg *Config) String() string {
stripped := cfg.copy()
if stripped.Global.RetentionCheckInterval == defaultRetentionCheckInterval {
stripped.Global.RetentionCheckInterval = 0
}
if stripped.Server.Path == "/metrics" {
stripped.Server.Path = ""
}
return stripped.marshalToString()
}
func (cfg *Config) copy() *Config |
func (cfg *Config) marshalToString() string {
out, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Sprintf("ERROR: Failed to marshal config: %v", err.Error())
}
result := string(out)
// Pretend fail_on_missing_logfile is a boolean, remove quotes
result = strings.Replace(result, "fail_on_missing_logfile: \"false\"", "fail_on_missing_logfile: false", -1)
result = strings.Replace(result, "fail_on_missing_logfile: \"true\"", "fail_on_missing_logfile: true", -1)
return result
}
| {
result, _ := Unmarshal([]byte(cfg.marshalToString()))
return result
} | identifier_body |
configV2.go | // Copyright 2016-2018 The grok_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2
import (
"errors"
"fmt"
"github.com/sequix/grok_exporter/template"
"gopkg.in/natefinch/lumberjack.v2"
"gopkg.in/yaml.v2"
"os"
"strings"
"time"
)
const (
defaultLogLevel = "info"
defaultLogTo = "mixed"
defaultPositionsFile = "/tmp/position.json"
defaultPositionSyncIntervcal = 500 * time.Millisecond
defaultPollInterval = 500 * time.Millisecond
defaultRetentionCheckInterval = 60 * time.Second
inputTypeStdin = "stdin"
inputTypeFile = "file"
inputTypeWebhook = "webhook"
)
func Unmarshal(config []byte) (*Config, error) {
cfg := &Config{}
err := yaml.Unmarshal(config, cfg)
if err != nil {
return nil, fmt.Errorf("invalid configuration: %v. make sure to use 'single quotes' around strings with special characters (like match patterns or label templates), and make sure to use '-' only for lists (metrics) but not for maps (labels).", err.Error())
}
err = AddDefaultsAndValidate(cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
type Config struct {
Global GlobalConfig `yaml:",omitempty"`
Input InputConfig `yaml:",omitempty"`
Grok GrokConfig `yaml:",omitempty"`
Metrics MetricsConfig `yaml:",omitempty"`
Server ServerConfig `yaml:",omitempty"`
LogRotate lumberjack.Logger `yaml:"log_rotate,omitempty"`
}
type GlobalConfig struct {
ConfigVersion int `yaml:"config_version,omitempty"`
LogLevel string `yaml:"log_level,omitempty"`
LogTo string `yaml:"log_to,omitempty"`
RetentionCheckInterval time.Duration `yaml:"retention_check_interval,omitempty"` // implicitly parsed with time.ParseDuration()
}
type InputConfig struct {
CollectMode string `yaml:"collectMode,omitempty"`
Type string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
PositionFile string `yaml:"position_file,omitempty"`
SyncInterval time.Duration `yaml:"position_sync_interval,omitempty"`
PollInterval time.Duration `yaml:"poll_interval,omitempty"`
MaxLinesInBuffer int `yaml:"max_lines_in_buffer,omitempty"`
MaxLineSize int `yaml:"max_line_size,omitempty"`
MaxLinesRatePerFile uint16 `yaml:"max_lines_rate_per_file,omitempty"`
IdleTimeout time.Duration `yaml:"idle_timeout,omitempty"`
WebhookPath string `yaml:"webhook_path,omitempty"`
WebhookFormat string `yaml:"webhook_format,omitempty"`
WebhookJsonSelector string `yaml:"webhook_json_selector,omitempty"`
WebhookTextBulkSeparator string `yaml:"webhook_text_bulk_separator,omitempty"`
}
type GrokConfig struct {
PatternsDir string `yaml:"patterns_dir,omitempty"`
AdditionalPatterns []string `yaml:"additional_patterns,omitempty"`
}
type MetricConfig struct {
Type string `yaml:",omitempty"`
Name string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
Help string `yaml:",omitempty"`
Match string `yaml:",omitempty"`
Retention time.Duration `yaml:",omitempty"` // implicitly parsed with time.ParseDuration()
Value string `yaml:",omitempty"`
Cumulative bool `yaml:",omitempty"`
Buckets []float64 `yaml:",flow,omitempty"`
Quantiles map[float64]float64 `yaml:",flow,omitempty"`
Labels map[string]string `yaml:",omitempty"`
LabelTemplates []template.Template `yaml:"-"` // parsed version of Labels, will not be serialized to yaml.
ValueTemplate template.Template `yaml:"-"` // parsed version of Value, will not be serialized to yaml.
DeleteMatch string `yaml:"delete_match,omitempty"`
DeleteLabels map[string]string `yaml:"delete_labels,omitempty"` // TODO: Make sure that DeleteMatch is not nil if DeleteLabels are used.
DeleteLabelTemplates []template.Template `yaml:"-"` // parsed version of DeleteLabels, will not be serialized to yaml.
}
type MetricsConfig []MetricConfig
type ServerConfig struct {
Protocol string `yaml:",omitempty"`
Host string `yaml:",omitempty"`
Port int `yaml:",omitempty"`
Path string `yaml:",omitempty"`
Cert string `yaml:",omitempty"`
Key string `yaml:",omitempty"`
}
func (cfg *Config) LoadEnvironments() {
path := cfg.Input.Path
for i := range path {
path[i] = os.ExpandEnv(path[i])
}
excludes := cfg.Input.Excludes
for i := range excludes {
excludes[i] = os.ExpandEnv(excludes[i])
}
for i := range cfg.Metrics {
m := &cfg.Metrics[i]
for j, p := range m.Path {
m.Path[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) validate() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type { | case "histogram":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, true, false
case "summary":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, false, true
default:
return fmt.Errorf("Invalid 'metrics.type': '%v'. We currently only support 'counter' and 'gauge'.", c.Type)
}
switch {
case hasValue && len(c.Value) == 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' must not be empty for %v metrics.", c.Type)
case !hasValue && len(c.Value) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' cannot be used for %v metrics.", c.Type)
case !cumulativeAllowed && c.Cumulative:
return fmt.Errorf("Invalid metric configuration: 'metrics.cumulative' cannot be used for %v metrics.", c.Type)
case !bucketsAllowed && len(c.Buckets) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
case !quantilesAllowed && len(c.Quantiles) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
}
if len(c.DeleteMatch) > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_match' is only supported for metrics with labels.")
}
if len(c.DeleteMatch) == 0 && len(c.DeleteLabelTemplates) > 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_labels' can only be used when 'metrics.delete_match' is present.")
}
if c.Retention > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.retention' is only supported for metrics with labels.")
}
for _, deleteLabelTemplate := range c.DeleteLabelTemplates {
found := false
for _, labelTemplate := range c.LabelTemplates {
if deleteLabelTemplate.Name() == labelTemplate.Name() {
found = true
break
}
}
if !found {
return fmt.Errorf("Invalid metric configuration: '%v' cannot be used as a delete_label, because the metric does not have a label named '%v'.", deleteLabelTemplate.Name(), deleteLabelTemplate.Name())
}
}
// InitTemplates() validates that labels/delete_labels/value are present as grok_fields in the grok pattern.
return nil
}
func (c *ServerConfig) validate() error {
switch {
case c.Protocol != "https" && c.Protocol != "http":
return fmt.Errorf("Invalid 'server.protocol': '%v'. Expecting 'http' or 'https'.", c.Protocol)
case c.Port <= 0:
return fmt.Errorf("Invalid 'server.port': '%v'.", c.Port)
case !strings.HasPrefix(c.Path, "/"):
return fmt.Errorf("Invalid server configuration: 'server.path' must start with '/'.")
case c.Protocol == "https":
if c.Cert != "" && c.Key == "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' must not be specified without 'server.key'")
}
if c.Cert == "" && c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.key' must not be specified without 'server.cert'")
}
case c.Protocol == "http":
if c.Cert != "" || c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' and 'server.key' can only be configured for protocol 'https'.")
}
}
return nil
}
// Made this public so it can be called when converting config v1 to config v2.
func AddDefaultsAndValidate(cfg *Config) error {
var err error
cfg.addDefaults()
for i := range []MetricConfig(cfg.Metrics) {
err = cfg.Metrics[i].InitTemplates()
if err != nil {
return err
}
}
return cfg.validate()
}
// Made this public so MetricConfig can be initialized in tests.
func (metric *MetricConfig) InitTemplates() error {
var (
err error
tmplt template.Template
msg = "invalid configuration: failed to read metric %v: error parsing %v template: %v: " +
"don't forget to put a . (dot) in front of grok fields, otherwise it will be interpreted as a function."
)
for _, t := range []struct {
src map[string]string // label / template string as read from the config file
dest *[]template.Template // parsed template used internally in grok_exporter
}{
{
src: metric.Labels,
dest: &(metric.LabelTemplates),
},
{
src: metric.DeleteLabels,
dest: &(metric.DeleteLabelTemplates),
},
} {
*t.dest = make([]template.Template, 0, len(t.src))
for name, templateString := range t.src {
tmplt, err = template.New(name, templateString)
if err != nil {
return fmt.Errorf(msg, fmt.Sprintf("label %v", metric.Name), name, err.Error())
}
*t.dest = append(*t.dest, tmplt)
}
}
if len(metric.Value) > 0 {
metric.ValueTemplate, err = template.New("__value__", metric.Value)
if err != nil {
return fmt.Errorf(msg, "value", metric.Name, err.Error())
}
}
return nil
}
// YAML representation, does not include default values.
func (cfg *Config) String() string {
stripped := cfg.copy()
if stripped.Global.RetentionCheckInterval == defaultRetentionCheckInterval {
stripped.Global.RetentionCheckInterval = 0
}
if stripped.Server.Path == "/metrics" {
stripped.Server.Path = ""
}
return stripped.marshalToString()
}
func (cfg *Config) copy() *Config {
result, _ := Unmarshal([]byte(cfg.marshalToString()))
return result
}
func (cfg *Config) marshalToString() string {
out, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Sprintf("ERROR: Failed to marshal config: %v", err.Error())
}
result := string(out)
// Pretend fail_on_missing_logfile is a boolean, remove quotes
result = strings.Replace(result, "fail_on_missing_logfile: \"false\"", "fail_on_missing_logfile: false", -1)
result = strings.Replace(result, "fail_on_missing_logfile: \"true\"", "fail_on_missing_logfile: true", -1)
return result
} | case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false | random_line_split |
configV2.go | // Copyright 2016-2018 The grok_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2
import (
"errors"
"fmt"
"github.com/sequix/grok_exporter/template"
"gopkg.in/natefinch/lumberjack.v2"
"gopkg.in/yaml.v2"
"os"
"strings"
"time"
)
const (
defaultLogLevel = "info"
defaultLogTo = "mixed"
defaultPositionsFile = "/tmp/position.json"
defaultPositionSyncIntervcal = 500 * time.Millisecond
defaultPollInterval = 500 * time.Millisecond
defaultRetentionCheckInterval = 60 * time.Second
inputTypeStdin = "stdin"
inputTypeFile = "file"
inputTypeWebhook = "webhook"
)
func Unmarshal(config []byte) (*Config, error) {
cfg := &Config{}
err := yaml.Unmarshal(config, cfg)
if err != nil {
return nil, fmt.Errorf("invalid configuration: %v. make sure to use 'single quotes' around strings with special characters (like match patterns or label templates), and make sure to use '-' only for lists (metrics) but not for maps (labels).", err.Error())
}
err = AddDefaultsAndValidate(cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
type Config struct {
Global GlobalConfig `yaml:",omitempty"`
Input InputConfig `yaml:",omitempty"`
Grok GrokConfig `yaml:",omitempty"`
Metrics MetricsConfig `yaml:",omitempty"`
Server ServerConfig `yaml:",omitempty"`
LogRotate lumberjack.Logger `yaml:"log_rotate,omitempty"`
}
type GlobalConfig struct {
ConfigVersion int `yaml:"config_version,omitempty"`
LogLevel string `yaml:"log_level,omitempty"`
LogTo string `yaml:"log_to,omitempty"`
RetentionCheckInterval time.Duration `yaml:"retention_check_interval,omitempty"` // implicitly parsed with time.ParseDuration()
}
type InputConfig struct {
CollectMode string `yaml:"collectMode,omitempty"`
Type string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
PositionFile string `yaml:"position_file,omitempty"`
SyncInterval time.Duration `yaml:"position_sync_interval,omitempty"`
PollInterval time.Duration `yaml:"poll_interval,omitempty"`
MaxLinesInBuffer int `yaml:"max_lines_in_buffer,omitempty"`
MaxLineSize int `yaml:"max_line_size,omitempty"`
MaxLinesRatePerFile uint16 `yaml:"max_lines_rate_per_file,omitempty"`
IdleTimeout time.Duration `yaml:"idle_timeout,omitempty"`
WebhookPath string `yaml:"webhook_path,omitempty"`
WebhookFormat string `yaml:"webhook_format,omitempty"`
WebhookJsonSelector string `yaml:"webhook_json_selector,omitempty"`
WebhookTextBulkSeparator string `yaml:"webhook_text_bulk_separator,omitempty"`
}
type GrokConfig struct {
PatternsDir string `yaml:"patterns_dir,omitempty"`
AdditionalPatterns []string `yaml:"additional_patterns,omitempty"`
}
type MetricConfig struct {
Type string `yaml:",omitempty"`
Name string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
Help string `yaml:",omitempty"`
Match string `yaml:",omitempty"`
Retention time.Duration `yaml:",omitempty"` // implicitly parsed with time.ParseDuration()
Value string `yaml:",omitempty"`
Cumulative bool `yaml:",omitempty"`
Buckets []float64 `yaml:",flow,omitempty"`
Quantiles map[float64]float64 `yaml:",flow,omitempty"`
Labels map[string]string `yaml:",omitempty"`
LabelTemplates []template.Template `yaml:"-"` // parsed version of Labels, will not be serialized to yaml.
ValueTemplate template.Template `yaml:"-"` // parsed version of Value, will not be serialized to yaml.
DeleteMatch string `yaml:"delete_match,omitempty"`
DeleteLabels map[string]string `yaml:"delete_labels,omitempty"` // TODO: Make sure that DeleteMatch is not nil if DeleteLabels are used.
DeleteLabelTemplates []template.Template `yaml:"-"` // parsed version of DeleteLabels, will not be serialized to yaml.
}
type MetricsConfig []MetricConfig
type ServerConfig struct {
Protocol string `yaml:",omitempty"`
Host string `yaml:",omitempty"`
Port int `yaml:",omitempty"`
Path string `yaml:",omitempty"`
Cert string `yaml:",omitempty"`
Key string `yaml:",omitempty"`
}
func (cfg *Config) LoadEnvironments() {
path := cfg.Input.Path
for i := range path {
path[i] = os.ExpandEnv(path[i])
}
excludes := cfg.Input.Excludes
for i := range excludes {
excludes[i] = os.ExpandEnv(excludes[i])
}
for i := range cfg.Metrics {
m := &cfg.Metrics[i]
for j, p := range m.Path {
m.Path[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) | () error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type {
case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false
case "histogram":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, true, false
case "summary":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, false, true
default:
return fmt.Errorf("Invalid 'metrics.type': '%v'. We currently only support 'counter' and 'gauge'.", c.Type)
}
switch {
case hasValue && len(c.Value) == 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' must not be empty for %v metrics.", c.Type)
case !hasValue && len(c.Value) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' cannot be used for %v metrics.", c.Type)
case !cumulativeAllowed && c.Cumulative:
return fmt.Errorf("Invalid metric configuration: 'metrics.cumulative' cannot be used for %v metrics.", c.Type)
case !bucketsAllowed && len(c.Buckets) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
case !quantilesAllowed && len(c.Quantiles) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
}
if len(c.DeleteMatch) > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_match' is only supported for metrics with labels.")
}
if len(c.DeleteMatch) == 0 && len(c.DeleteLabelTemplates) > 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_labels' can only be used when 'metrics.delete_match' is present.")
}
if c.Retention > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.retention' is only supported for metrics with labels.")
}
for _, deleteLabelTemplate := range c.DeleteLabelTemplates {
found := false
for _, labelTemplate := range c.LabelTemplates {
if deleteLabelTemplate.Name() == labelTemplate.Name() {
found = true
break
}
}
if !found {
return fmt.Errorf("Invalid metric configuration: '%v' cannot be used as a delete_label, because the metric does not have a label named '%v'.", deleteLabelTemplate.Name(), deleteLabelTemplate.Name())
}
}
// InitTemplates() validates that labels/delete_labels/value are present as grok_fields in the grok pattern.
return nil
}
func (c *ServerConfig) validate() error {
switch {
case c.Protocol != "https" && c.Protocol != "http":
return fmt.Errorf("Invalid 'server.protocol': '%v'. Expecting 'http' or 'https'.", c.Protocol)
case c.Port <= 0:
return fmt.Errorf("Invalid 'server.port': '%v'.", c.Port)
case !strings.HasPrefix(c.Path, "/"):
return fmt.Errorf("Invalid server configuration: 'server.path' must start with '/'.")
case c.Protocol == "https":
if c.Cert != "" && c.Key == "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' must not be specified without 'server.key'")
}
if c.Cert == "" && c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.key' must not be specified without 'server.cert'")
}
case c.Protocol == "http":
if c.Cert != "" || c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' and 'server.key' can only be configured for protocol 'https'.")
}
}
return nil
}
// Made this public so it can be called when converting config v1 to config v2.
func AddDefaultsAndValidate(cfg *Config) error {
var err error
cfg.addDefaults()
for i := range []MetricConfig(cfg.Metrics) {
err = cfg.Metrics[i].InitTemplates()
if err != nil {
return err
}
}
return cfg.validate()
}
// Made this public so MetricConfig can be initialized in tests.
func (metric *MetricConfig) InitTemplates() error {
var (
err error
tmplt template.Template
msg = "invalid configuration: failed to read metric %v: error parsing %v template: %v: " +
"don't forget to put a . (dot) in front of grok fields, otherwise it will be interpreted as a function."
)
for _, t := range []struct {
src map[string]string // label / template string as read from the config file
dest *[]template.Template // parsed template used internally in grok_exporter
}{
{
src: metric.Labels,
dest: &(metric.LabelTemplates),
},
{
src: metric.DeleteLabels,
dest: &(metric.DeleteLabelTemplates),
},
} {
*t.dest = make([]template.Template, 0, len(t.src))
for name, templateString := range t.src {
tmplt, err = template.New(name, templateString)
if err != nil {
return fmt.Errorf(msg, fmt.Sprintf("label %v", metric.Name), name, err.Error())
}
*t.dest = append(*t.dest, tmplt)
}
}
if len(metric.Value) > 0 {
metric.ValueTemplate, err = template.New("__value__", metric.Value)
if err != nil {
return fmt.Errorf(msg, "value", metric.Name, err.Error())
}
}
return nil
}
// YAML representation, does not include default values.
func (cfg *Config) String() string {
stripped := cfg.copy()
if stripped.Global.RetentionCheckInterval == defaultRetentionCheckInterval {
stripped.Global.RetentionCheckInterval = 0
}
if stripped.Server.Path == "/metrics" {
stripped.Server.Path = ""
}
return stripped.marshalToString()
}
func (cfg *Config) copy() *Config {
result, _ := Unmarshal([]byte(cfg.marshalToString()))
return result
}
func (cfg *Config) marshalToString() string {
out, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Sprintf("ERROR: Failed to marshal config: %v", err.Error())
}
result := string(out)
// Pretend fail_on_missing_logfile is a boolean, remove quotes
result = strings.Replace(result, "fail_on_missing_logfile: \"false\"", "fail_on_missing_logfile: false", -1)
result = strings.Replace(result, "fail_on_missing_logfile: \"true\"", "fail_on_missing_logfile: true", -1)
return result
}
| validate | identifier_name |
jQuery-Validate-Extend.js | /**日期比较
支持两个日期之间的比较(>,<,=),支持直接传入固定日期进行对比
param:string || {type:"<",format:"ymd",object:null} || json
*/
jQuery.validator.addMethod("compareDate", function (value, element, param) {
var elseDate = $.type(param) == "object" ? param.object : param;
if (!value) return true;
var _type = param.type == "=" ? "==" : param.type || "<";
var _format = param.format || "ymd";
var _thisDate = global.Fn.formatDate(value + ":" + _format, "yyyy-MM-dd");
if (/\d{2,4}([\s\-\/]{1})\d{1,2}\1\d{1,4}/.test(elseDate)) {
var _elseDate = new Date(elseDate);
} else if (elseDate.constructor == Date) {
_elseDate = elseDate;
} else {
var $elseDate = $(elseDate);
if (!$elseDate.val()) return true;
if (!$elseDate[0]) return;
var _elseDate = global.Fn.formatDate($elseDate.val() + ":" + _format, "yyyy-MM-dd");
//为另一个对象添加验证规则
if (!$elseDate.rules().compareDate) {
var _elseRule = {};
if (_type.match("==")) {
_elseRule.type = "=";
} else if (_type.match(">")) {
_elseRule.type = _type.replace(">", "<");
} else if (_type.match("<")) {
_elseRule.type = _type.replace("<", ">");
} else {
_elseRule.type = _type
}
_elseRule.object = "#" + element.id;
$elseDate.rules("add", { compareDate: _elseRule });
}
}
var result = eval("" + Date.parse(_thisDate) + _type + Date.parse(_elseDate));
return result;
});
//all cache false;
jQuery.validator.addMethod("htmltag", function (value, element, parm) {
var htmltag1 = /<(\/\s*)?((\w+:)?\w+)(\w+(\s*=\s*((["'])(\\["'tbnr]|[^\7])*?\7|\w+)|.{0})|\s)*?(\/\s*)?>/ig;
return this.optional(element) || !htmltag1.test(value);
}, "Not allowed to enter the HTML tag.");
//验证两个文本框不能同时为空
jQuery.validator.addMethod("bothEmpty", function (value, element, parm) {
if (value == '' && $("#" + parm).val() == '') return false;
else
return true;
}, "PaymentTerm1 and PaymentTerm2 can not both be empty.");
//验证值范围,自动去掉非数值字符"."除外 如2,010,000.00自动验证 2010000.00
jQuery.validator.addMethod("range", function (value, element, parm) {
var reg = /[^\d+(.)]/g;
var value = value.replace(reg, "");
return (value < parm[0] || value > parm[1]) ? false : true;
}, "This Field value should be between {0} - {1} .");
//验证日期小于指定范围内
jQuery.validator.addMethod("compareRangeDate", function (value, element, parm) {
var startDate = jQuery("#" + parm).val();
if (!$.trim(startDate) == "") {
var result = startDate.split("-");
//alert(Date.parse(result[0]));
var startDate = result[1] + "/12/31";
//alert(startDate);
var date1 = new Date(Date.parse(startDate.replace("-", "/")));
var date2 = new Date(Date.parse(value.replace("-", "/")));
return date1 >= date2;
} else {
return true;
}
}, "Date is invalid!!");
//验证日期大于指定范围内
jQuery.validator.addMethod("compareRangeDateToDate", function (value, element, parm) {
var startDate = jQuery("#" + parm).val();
if (!$.trim(startDate) == "") {
var result = startDate.split("-");
//alert(Date.parse(result[0]));
var startDate = result[0] + "/01/01";
//alert(startDate);
var date1 = new Date(Date.parse(startDate.replace("-", "/")));
var date2 = new Date(Date.parse(value.replace("-", "/")));
return date1 <= date2;
} else {
return true;
}
}, "Date is invalid!!");
/**验证指定元素不能同时为空:支持同时验证多个元素
param:string || array
Create by Aaron [20140318]
*/
jQuery.validator.addMethod("cantempty", function (value, element, param) {
if ($.type(param) != "array") param = [param];
var eleresult = false;
//复制数组
var newElements = global.Fn.copy(param);
for (var i = 0; i < param.length; i++) {
if ($(param[i]).val()) {
eleresult = true;
break;
}
if (!$(param[i]).rules().cantempty) {
newElements.splice(i, 1, "#" + element.id);
$(param[i]).rules("add", { cantempty: newElements });
//eleresult = $(param[i]).valid();
//break;
}
}
var result = (value || eleresult) ? true : false;
return result;
}, "This fields can not be empty.");
/* 判断负数 */
jQuery.validator.addMethod("negativeCheck", function (value, element, param) {
if (!isNaN(value))
return parseFloat(value) >= 0;
}, "Please enter a number greater than 0.");
/**日期有效性验证
日期是否在某个时间段
param:number || string || array
Create by Aaron [20140319]
*/
jQuery.validator.addMethod("inDate", function (value, element, param) {
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array") {
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate) {
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return _thisDate <= _inDate;
}, "This date is not in {0}");
/** check from and to should both have value in group
*/
jQuery.validator.addMethod("groupDateRequired", function (value, element, param)
{
var groupArray = param.split(',');
$.each(groupArray, function (i, o)
{
groupArray[i] = ('input[name=' + o + ']');
})
if (!value)
{
if (!$(groupArray[0]).val())
{
return !!($(groupArray[1]).val() && $(groupArray[2]).val());
}
else
{
return false;
}
}
if (value)
{
if (!$(groupArray[0]).val())
{
return false;
}
else
{
return !($(groupArray[1]).val() || $(groupArray[2]).val());
}
}
return true;
}, "From and To must be both have value!");
/** inFinlYear - Check whether is in the finance year
param:number || string || array
if(array)
{
[finlYearFromDate,finlYearToDate]
}
if(string)
{
if(element is FromDate)
[finlYearFromDate]
if(element is ToDate)
[finlYearToDate]
}
*/
jQuery.validator.addMethod("inFinlYear", function (value, element, param)
{
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array")
{
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate)
{
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return element.attributes.name.value.indexOf('To') > -1 ? (_thisDate <= _inDate) : (_inDate <= _thisDate);
}, "This date is not in {0}");
/**当控件为一定值时,指定字段必填
param:[{value:array,element:array}]
Create by Aaron [20140313]
*/
jQuery.validator.addMethod("dorequired", function (value, element, param) {
if ($.type(param) != "array") param = [param];
for (var i = 0; i < param.length; i++) {
var obj = param[i];
if (!obj.element || obj.element.length < 1) continue;
var _value = (obj.value && $.type(obj.value) != "array") ? [obj.value] : obj.value;
var _ele = $.type(obj.element) == "array" ? obj.element : obj.element.split(",");
for (var j = 0; j < _ele.length; j++) {
var $self = $(_ele[j]);
if (!$self.rules().required && (!obj.value || _value.length < 1 || $.inArray(value, _value) != -1)) {
$self.rules("add", { required: true });
return $self.valid();
} else if ($self.rules().required && $.inArray(value, _value) == -1) {
$self.removeClass("required").rules("remove", "required");
return $self.valid();
}
}
}
return true;
}, "");
/**格式化金额格式10,000,000.00
param:boolean
*/
jQuery.validator.addMethod("amount", function (value, element, param) {
var amountReg = /^[1-9](?:\d*,\d{3})*(?:(\.\d+|$))/;
if (param == true) return amountReg.test(value);
return true;
}, "Please enter a valid Amount");
/**惟一性验证,返回消息格式<XXX> is duplicated!
param:与remote一致
Create By Gary[20140327]
*/
jQuery.validator.addMethod("duplicatedRemote", function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
var validator = this;
if (previous.old != value) {
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
type: "post",
data: data,
success: function (response) {
//var tempResponse = response;
//if (tempResponse.result != undefined) {
// response = tempResponse.result;
//}
//if (tempResponse.code != undefined) {
// validator.settings.messages[element.name].remote = "<" + tempResponse.code + ">is duplicated!";
//} else {
// validator.settings.messages[element.name].remote =previous.originalMessage;
//}
var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted;
validator.prepareElement(element);
validator.formSubmitted = submitted;
validator.successList.push(element);
validator.showErrors();
} else {
var errors = {};
var message = value + " is duplicated!"; //response || validator.defaultMessage(element, "remote");
errors[element.name] = previous.message = message;// $.isFunction(message) ? message(value) :
validator.showErrors(errors);
}
previous.valid = valid;
validator.stopRequest(element, valid);
}
}, param));
return "pending";
} else if (this.pending[element.name]) {
return "pending";
}
if (previous.valid == true) {
return previous.valid;
} else {
var errors = {};
errors[element.name] = previous.message;
validator.showErrors(errors);
return "pending";
}
}, "This Field is duplicated!");
/*
设置显示duplication时,是显示对象的Text值还是Value值,例如DropDown控件
param.objID -> $('#'+objID) //如果param.objID没给值,默认用element.id
param.objType -> ['text'|'value'] 'text': get obj.text(), 'value': get obj.val() //如果param.objType没给值,默认用value
示例:
param.objID: "popAgentID option:selected", //这是用在DropDown控件的
param.objType:'text' //param.objType选text,显示duplication信息时,就取DropDown控件所选的item的text值
*/
jQuery.validator.addMethod("duplicatedRemoteCustomized", function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
var validator = this;
if (previous.old != value) {
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
type: "post",
data: data,
success: function (response) { | validator.prepareElement(element);
validator.formSubmitted = submitted;
validator.successList.push(element);
validator.showErrors();
} else {
var errors = {};
if (!param.objID)
param.objID = element.id;
var objName = param.objType == 'text' ? $('#' + param.objID).text() : param.objType == 'value' ? $('#' + param.objID).val() : value;
var message = objName + " is duplicated!";
errors[element.name] = previous.message = message;
validator.showErrors(errors);
}
previous.valid = valid;
validator.stopRequest(element, valid);
}
}, param));
return "pending";
} else if (this.pending[element.name]) {
return "pending";
}
if (previous.valid == true) {
return previous.valid;
} else {
var errors = {};
errors[element.name] = previous.message;
validator.showErrors(errors);
return "pending";
}
}, "This Field is duplicated!");
/**
* 验证手机号格式
*/
jQuery.validator.addMethod("isMobile", function(value, element) {
var length = value.length;
var mobile = /^(((13[0-9]{1})|(15[0-9]{1})|(18[0-9]{1}))+\d{8})$/g;
return this.optional(element) || (length == 11 && mobile.test(value));
}, "请正确填写您的手机号码");
jQuery.extend(jQuery.validator.messages, {
required: "该字段必填",
remote: "请修正该字段",
email: "请输入正确格式的电子邮件",
url: "请输入合法的网址",
date: "请输入合法的日期",
dateISO: "请输入合法的日期 (ISO).",
number: "请输入合法的数字",
digits: "只能输入整数",
creditcard: "请输入合法的信用卡号",
equalTo: "请再次输入相同的值",
accept: "请输入拥有合法后缀名的字符串",
maxlength: jQuery.validator.format("请输入一个 长度最多是 {0} 的字符串"),
minlength: jQuery.validator.format("请输入一个 长度最少是 {0} 的字符串"),
rangelength: jQuery.validator.format("请输入 一个长度介于 {0} 和 {1} 之间的字符串"),
range: jQuery.validator.format("请输入一个介于 {0} 和 {1} 之间的值"),
max: jQuery.validator.format("请输入一个最大为{0} 的值"),
min: jQuery.validator.format("请输入一个最小为{0} 的值"),
compareDate: jQuery.validator.format("选择的日期范围有误")
}); | var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted; | random_line_split |
jQuery-Validate-Extend.js | /**日期比较
支持两个日期之间的比较(>,<,=),支持直接传入固定日期进行对比
param:string || {type:"<",format:"ymd",object:null} || json
*/
jQuery.validator.addMethod("compareDate", function (value, element, param) {
var elseDate = $.type(param) == "object" ? param.object : param;
if (!value) return true;
var _type = param.type == "=" ? "==" : param.type || "<";
var _format = param.format || "ymd";
var _thisDate = global.Fn.formatDate(value + ":" + _format, "yyyy-MM-dd");
if (/\d{2,4}([\s\-\/]{1})\d{1,2}\1\d{1,4}/.test(elseDate)) {
var _elseDate = new Date(elseDate);
} else if (elseDate.constructor == Date) {
_elseDate = elseDate;
} else {
var $elseDate = $(elseDate);
if (!$elseDate.val()) return true;
if (!$elseDate[0]) return;
var _elseDate = global.Fn.formatDate($elseDate.val() + ":" + _format, "yyyy-MM-dd");
//为另一个对象添加验证规则
if (!$elseDate.rules().compareDate) {
var _elseRule = {};
if (_type.match("==")) {
_elseRule.type = "=";
} else if (_type.match(">")) {
| ;
} else if (_type.match("<")) {
_elseRule.type = _type.replace("<", ">");
} else {
_elseRule.type = _type
}
_elseRule.object = "#" + element.id;
$elseDate.rules("add", { compareDate: _elseRule });
}
}
var result = eval("" + Date.parse(_thisDate) + _type + Date.parse(_elseDate));
return result;
});
//all cache false;
jQuery.validator.addMethod("htmltag", function (value, element, parm) {
var htmltag1 = /<(\/\s*)?((\w+:)?\w+)(\w+(\s*=\s*((["'])(\\["'tbnr]|[^\7])*?\7|\w+)|.{0})|\s)*?(\/\s*)?>/ig;
return this.optional(element) || !htmltag1.test(value);
}, "Not allowed to enter the HTML tag.");
//验证两个文本框不能同时为空
jQuery.validator.addMethod("bothEmpty", function (value, element, parm) {
if (value == '' && $("#" + parm).val() == '') return false;
else
return true;
}, "PaymentTerm1 and PaymentTerm2 can not both be empty.");
//验证值范围,自动去掉非数值字符"."除外 如2,010,000.00自动验证 2010000.00
jQuery.validator.addMethod("range", function (value, element, parm) {
var reg = /[^\d+(.)]/g;
var value = value.replace(reg, "");
return (value < parm[0] || value > parm[1]) ? false : true;
}, "This Field value should be between {0} - {1} .");
//验证日期小于指定范围内
jQuery.validator.addMethod("compareRangeDate", function (value, element, parm) {
var startDate = jQuery("#" + parm).val();
if (!$.trim(startDate) == "") {
var result = startDate.split("-");
//alert(Date.parse(result[0]));
var startDate = result[1] + "/12/31";
//alert(startDate);
var date1 = new Date(Date.parse(startDate.replace("-", "/")));
var date2 = new Date(Date.parse(value.replace("-", "/")));
return date1 >= date2;
} else {
return true;
}
}, "Date is invalid!!");
//验证日期大于指定范围内
jQuery.validator.addMethod("compareRangeDateToDate", function (value, element, parm) {
var startDate = jQuery("#" + parm).val();
if (!$.trim(startDate) == "") {
var result = startDate.split("-");
//alert(Date.parse(result[0]));
var startDate = result[0] + "/01/01";
//alert(startDate);
var date1 = new Date(Date.parse(startDate.replace("-", "/")));
var date2 = new Date(Date.parse(value.replace("-", "/")));
return date1 <= date2;
} else {
return true;
}
}, "Date is invalid!!");
/**验证指定元素不能同时为空:支持同时验证多个元素
param:string || array
Create by Aaron [20140318]
*/
jQuery.validator.addMethod("cantempty", function (value, element, param) {
if ($.type(param) != "array") param = [param];
var eleresult = false;
//复制数组
var newElements = global.Fn.copy(param);
for (var i = 0; i < param.length; i++) {
if ($(param[i]).val()) {
eleresult = true;
break;
}
if (!$(param[i]).rules().cantempty) {
newElements.splice(i, 1, "#" + element.id);
$(param[i]).rules("add", { cantempty: newElements });
//eleresult = $(param[i]).valid();
//break;
}
}
var result = (value || eleresult) ? true : false;
return result;
}, "This fields can not be empty.");
/* 判断负数 */
jQuery.validator.addMethod("negativeCheck", function (value, element, param) {
if (!isNaN(value))
return parseFloat(value) >= 0;
}, "Please enter a number greater than 0.");
/**日期有效性验证
日期是否在某个时间段
param:number || string || array
Create by Aaron [20140319]
*/
jQuery.validator.addMethod("inDate", function (value, element, param) {
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array") {
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate) {
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return _thisDate <= _inDate;
}, "This date is not in {0}");
/** check from and to should both have value in group
*/
jQuery.validator.addMethod("groupDateRequired", function (value, element, param)
{
var groupArray = param.split(',');
$.each(groupArray, function (i, o)
{
groupArray[i] = ('input[name=' + o + ']');
})
if (!value)
{
if (!$(groupArray[0]).val())
{
return !!($(groupArray[1]).val() && $(groupArray[2]).val());
}
else
{
return false;
}
}
if (value)
{
if (!$(groupArray[0]).val())
{
return false;
}
else
{
return !($(groupArray[1]).val() || $(groupArray[2]).val());
}
}
return true;
}, "From and To must be both have value!");
/** inFinlYear - Check whether is in the finance year
param:number || string || array
if(array)
{
[finlYearFromDate,finlYearToDate]
}
if(string)
{
if(element is FromDate)
[finlYearFromDate]
if(element is ToDate)
[finlYearToDate]
}
*/
jQuery.validator.addMethod("inFinlYear", function (value, element, param)
{
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array")
{
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate)
{
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return element.attributes.name.value.indexOf('To') > -1 ? (_thisDate <= _inDate) : (_inDate <= _thisDate);
}, "This date is not in {0}");
/**当控件为一定值时,指定字段必填
param:[{value:array,element:array}]
Create by Aaron [20140313]
*/
jQuery.validator.addMethod("dorequired", function (value, element, param) {
if ($.type(param) != "array") param = [param];
for (var i = 0; i < param.length; i++) {
var obj = param[i];
if (!obj.element || obj.element.length < 1) continue;
var _value = (obj.value && $.type(obj.value) != "array") ? [obj.value] : obj.value;
var _ele = $.type(obj.element) == "array" ? obj.element : obj.element.split(",");
for (var j = 0; j < _ele.length; j++) {
var $self = $(_ele[j]);
if (!$self.rules().required && (!obj.value || _value.length < 1 || $.inArray(value, _value) != -1)) {
$self.rules("add", { required: true });
return $self.valid();
} else if ($self.rules().required && $.inArray(value, _value) == -1) {
$self.removeClass("required").rules("remove", "required");
return $self.valid();
}
}
}
return true;
}, "");
/**格式化金额格式10,000,000.00
param:boolean
*/
jQuery.validator.addMethod("amount", function (value, element, param) {
var amountReg = /^[1-9](?:\d*,\d{3})*(?:(\.\d+|$))/;
if (param == true) return amountReg.test(value);
return true;
}, "Please enter a valid Amount");
/**惟一性验证,返回消息格式<XXX> is duplicated!
param:与remote一致
Create By Gary[20140327]
*/
jQuery.validator.addMethod("duplicatedRemote", function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
var validator = this;
if (previous.old != value) {
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
type: "post",
data: data,
success: function (response) {
//var tempResponse = response;
//if (tempResponse.result != undefined) {
// response = tempResponse.result;
//}
//if (tempResponse.code != undefined) {
// validator.settings.messages[element.name].remote = "<" + tempResponse.code + ">is duplicated!";
//} else {
// validator.settings.messages[element.name].remote =previous.originalMessage;
//}
var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted;
validator.prepareElement(element);
validator.formSubmitted = submitted;
validator.successList.push(element);
validator.showErrors();
} else {
var errors = {};
var message = value + " is duplicated!"; //response || validator.defaultMessage(element, "remote");
errors[element.name] = previous.message = message;// $.isFunction(message) ? message(value) :
validator.showErrors(errors);
}
previous.valid = valid;
validator.stopRequest(element, valid);
}
}, param));
return "pending";
} else if (this.pending[element.name]) {
return "pending";
}
if (previous.valid == true) {
return previous.valid;
} else {
var errors = {};
errors[element.name] = previous.message;
validator.showErrors(errors);
return "pending";
}
}, "This Field is duplicated!");
/*
设置显示duplication时,是显示对象的Text值还是Value值,例如DropDown控件
param.objID -> $('#'+objID) //如果param.objID没给值,默认用element.id
param.objType -> ['text'|'value'] 'text': get obj.text(), 'value': get obj.val() //如果param.objType没给值,默认用value
示例:
param.objID: "popAgentID option:selected", //这是用在DropDown控件的
param.objType:'text' //param.objType选text,显示duplication信息时,就取DropDown控件所选的item的text值
*/
jQuery.validator.addMethod("duplicatedRemoteCustomized", function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
var validator = this;
if (previous.old != value) {
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
type: "post",
data: data,
success: function (response) {
var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted;
validator.prepareElement(element);
validator.formSubmitted = submitted;
validator.successList.push(element);
validator.showErrors();
} else {
var errors = {};
if (!param.objID)
param.objID = element.id;
var objName = param.objType == 'text' ? $('#' + param.objID).text() : param.objType == 'value' ? $('#' + param.objID).val() : value;
var message = objName + " is duplicated!";
errors[element.name] = previous.message = message;
validator.showErrors(errors);
}
previous.valid = valid;
validator.stopRequest(element, valid);
}
}, param));
return "pending";
} else if (this.pending[element.name]) {
return "pending";
}
if (previous.valid == true) {
return previous.valid;
} else {
var errors = {};
errors[element.name] = previous.message;
validator.showErrors(errors);
return "pending";
}
}, "This Field is duplicated!");
/**
* 验证手机号格式
*/
jQuery.validator.addMethod("isMobile", function(value, element) {
var length = value.length;
var mobile = /^(((13[0-9]{1})|(15[0-9]{1})|(18[0-9]{1}))+\d{8})$/g;
return this.optional(element) || (length == 11 && mobile.test(value));
}, "请正确填写您的手机号码");
jQuery.extend(jQuery.validator.messages, {
required: "该字段必填",
remote: "请修正该字段",
email: "请输入正确格式的电子邮件",
url: "请输入合法的网址",
date: "请输入合法的日期",
dateISO: "请输入合法的日期 (ISO).",
number: "请输入合法的数字",
digits: "只能输入整数",
creditcard: "请输入合法的信用卡号",
equalTo: "请再次输入相同的值",
accept: "请输入拥有合法后缀名的字符串",
maxlength: jQuery.validator.format("请输入一个 长度最多是 {0} 的字符串"),
minlength: jQuery.validator.format("请输入一个 长度最少是 {0} 的字符串"),
rangelength: jQuery.validator.format("请输入 一个长度介于 {0} 和 {1} 之间的字符串"),
range: jQuery.validator.format("请输入一个介于 {0} 和 {1} 之间的值"),
max: jQuery.validator.format("请输入一个最大为{0} 的值"),
min: jQuery.validator.format("请输入一个最小为{0} 的值"),
compareDate: jQuery.validator.format("选择的日期范围有误")
}); | _elseRule.type = _type.replace(">", "<") | conditional_block |
inputprocessor.py | """
Skimmer for ParticleNet tagger inputs.
Author(s): Cristina Mantilla Suarez, Raghav Kansal
"""
import os
import pathlib
import warnings
from typing import Dict
import awkward as ak
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import uproot
from coffea.analysis_tools import PackedSelection
from coffea.nanoevents.methods import candidate
from coffea.processor import ProcessorABC, dict_accumulator
from .get_tagger_inputs import get_lep_features, get_met_features
# from .run_tagger_inference import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
return self._accumulator
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def ak_to_pandas(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
|
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
# for now do something like this to dump the parquets in root
# OUTPATH = "../datafiles/ntuples/"
# for sample in samples:
# print(sample)
# for file in os.listdir(f"{OUTPATH}/{sample}/train/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/train/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/train/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# for file in os.listdir(f"{OUTPATH}/{sample}/test/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/test/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/test/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# print("--------------------------")
return {}
def postprocess(self, accumulator):
pass
| if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key] | conditional_block |
inputprocessor.py | """
Skimmer for ParticleNet tagger inputs.
Author(s): Cristina Mantilla Suarez, Raghav Kansal
"""
import os
import pathlib
import warnings
from typing import Dict
import awkward as ak
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import uproot
from coffea.analysis_tools import PackedSelection
from coffea.nanoevents.methods import candidate
from coffea.processor import ProcessorABC, dict_accumulator
from .get_tagger_inputs import get_lep_features, get_met_features
# from .run_tagger_inference import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
|
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def ak_to_pandas(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
# for now do something like this to dump the parquets in root
# OUTPATH = "../datafiles/ntuples/"
# for sample in samples:
# print(sample)
# for file in os.listdir(f"{OUTPATH}/{sample}/train/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/train/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/train/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# for file in os.listdir(f"{OUTPATH}/{sample}/test/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/test/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/test/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# print("--------------------------")
return {}
def postprocess(self, accumulator):
pass
| return self._accumulator | identifier_body |
inputprocessor.py | """
Skimmer for ParticleNet tagger inputs.
Author(s): Cristina Mantilla Suarez, Raghav Kansal
"""
import os
import pathlib
import warnings
from typing import Dict
import awkward as ak
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import uproot
from coffea.analysis_tools import PackedSelection
from coffea.nanoevents.methods import candidate
from coffea.processor import ProcessorABC, dict_accumulator
from .get_tagger_inputs import get_lep_features, get_met_features
# from .run_tagger_inference import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
return self._accumulator
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def | (self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
# for now do something like this to dump the parquets in root
# OUTPATH = "../datafiles/ntuples/"
# for sample in samples:
# print(sample)
# for file in os.listdir(f"{OUTPATH}/{sample}/train/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/train/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/train/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# for file in os.listdir(f"{OUTPATH}/{sample}/test/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/test/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/test/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# print("--------------------------")
return {}
def postprocess(self, accumulator):
pass
| ak_to_pandas | identifier_name |
inputprocessor.py | """
Skimmer for ParticleNet tagger inputs.
Author(s): Cristina Mantilla Suarez, Raghav Kansal
"""
import os
import pathlib
import warnings
from typing import Dict
import awkward as ak
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import uproot
from coffea.analysis_tools import PackedSelection
from coffea.nanoevents.methods import candidate
from coffea.processor import ProcessorABC, dict_accumulator
from .get_tagger_inputs import get_lep_features, get_met_features
# from .run_tagger_inference import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
return self._accumulator
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def ak_to_pandas(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label] |
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
# for now do something like this to dump the parquets in root
# OUTPATH = "../datafiles/ntuples/"
# for sample in samples:
# print(sample)
# for file in os.listdir(f"{OUTPATH}/{sample}/train/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/train/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/train/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# for file in os.listdir(f"{OUTPATH}/{sample}/test/"):
# if "parquet" not in file:
# continue
# d = pd.read_parquet(f"{OUTPATH}/{sample}/test/{file}")
# with uproot.recreate(f"{OUTPATH}/{sample}/test/out.root", compression=uproot.LZ4(4)) as rfile:
# rfile["Events"] = ak.Array(d.to_dict(orient="list", index=True))
# rfile["Events"].show()
# print("--------------------------")
return {}
def postprocess(self, accumulator):
pass | candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep]) | random_line_split |
npm-utils.js | /**
* @module system-npm/utils
*
* Helpers that are used by npm-extension and the npm plugin.
* This should be kept small and not have helpers exclusive to npm.
* However, it can have all npm-extension helpers.
*/
// A regex to test if a moduleName is npm-like.
var slice = Array.prototype.slice;
var npmModuleRegEx = /.+@.+\..+\..+#.+/;
var conditionalModuleRegEx = /#\{[^\}]+\}|#\?.+$/;
var gitUrlEx = /(git|http(s?)):\/\//;
var supportsSet = typeof Set === "function";
var utils = {
extend: function(d, s, deep, set){
var val;
if(deep) {
if(!set) {
if(supportsSet) | else {
set = [];
}
}
if(supportsSet) {
if(set.has(s)) {
return s;
} else {
set.add(s);
}
} else {
if(set.indexOf(s) !== -1) {
return s;
} else {
set.push(s);
}
}
}
for(var prop in s) {
val = s[prop];
if(deep) {
if(utils.isArray(val)) {
d[prop] = slice.call(val);
} else if(utils.isPlainObject(val)) {
d[prop] = utils.extend({}, val, deep, set);
} else {
d[prop] = s[prop];
}
} else {
d[prop] = s[prop];
}
}
return d;
},
map: function(arr, fn){
var i = 0, len = arr.length, out = [];
for(; i < len; i++) {
out.push(fn.call(arr, arr[i]));
}
return out;
},
filter: function(arr, fn){
var i = 0, len = arr.length, out = [], res;
for(; i < len; i++) {
res = fn.call(arr, arr[i]);
if(res) {
out.push(arr[i]);
}
}
return out;
},
forEach: function(arr, fn) {
var i = 0, len = arr.length;
for(; i < len; i++) {
fn.call(arr, arr[i], i);
}
},
isObject: function(obj){
return typeof obj === "object";
},
isPlainObject: function(obj){
// A plain object has a proto that is the Object
return utils.isObject(obj) && (!obj || obj.__proto__ === Object.prototype);
},
isArray: Array.isArray || function(arr){
return Object.prototype.toString.call(arr) === "[object Array]";
},
isEnv: function(name) {
return this.isEnv ? this.isEnv(name) : this.env === name;
},
isGitUrl: function(str) {
return gitUrlEx.test(str);
},
warnOnce: function(msg){
var w = this._warnings = this._warnings || {};
if(w[msg]) return;
w[msg] = true;
this.warn(msg);
},
warn: function(msg){
if(typeof steal !== "undefined" && typeof console !== "undefined" && console.warn) {
steal.done().then(function(){
if(steal.dev && steal.dev.warn){
steal.dev.warn(msg)
} else if(console.warn) {
console.warn("steal.js WARNING: "+msg);
} else {
console.log(msg);
}
});
}
},
relativeURI: function(baseURL, url) {
return typeof steal !== "undefined" ? steal.relativeURI(baseURL, url) : url;
},
moduleName: {
/**
* @function moduleName.create
* Converts a parsed module name to a string
*
* @param {system-npm/parsed_npm} descriptor
*/
create: function (descriptor, standard) {
if(standard) {
return descriptor.moduleName;
} else {
if(descriptor === "@empty") {
return descriptor;
}
var modulePath;
if(descriptor.modulePath) {
modulePath = descriptor.modulePath.substr(0,2) === "./" ? descriptor.modulePath.substr(2) : descriptor.modulePath;
}
return descriptor.packageName
+ (descriptor.version ? '@' + descriptor.version : '')
+ (modulePath ? '#' + modulePath : '')
+ (descriptor.plugin ? descriptor.plugin : '');
}
},
/**
* @function moduleName.isNpm
* Determines whether a moduleName is npm-like.
* @return {Boolean}
*/
isNpm: function(moduleName){
return npmModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isConditional
* Determines whether a moduleName includes a condition.
* @return {Boolean}
*/
isConditional: function(moduleName){
return conditionalModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isFullyConvertedModuleName
* Determines whether a moduleName is a fully npm name, not npm-like
* With a parsed module name we can make sure there is a package name,
* package version, and module path.
*/
isFullyConvertedNpm: function(parsedModuleName){
return !!(parsedModuleName.packageName &&
parsedModuleName.version && parsedModuleName.modulePath);
},
/**
* @function moduleName.isScoped
* Determines whether a moduleName is from a scoped package.
* @return {Boolean}
*/
isScoped: function(moduleName){
return moduleName[0] === "@";
},
/**
* @function moduleName.parse
* Breaks a string moduleName into parts.
* packageName@version!plugin#modulePath
* "./lib/bfs"
*
* @return {system-npm/parsed_npm}
*/
parse: function (moduleName, currentPackageName, global) {
var pluginParts = moduleName.split('!');
var modulePathParts = pluginParts[0].split("#");
var versionParts = modulePathParts[0].split("@");
// it could be something like `@empty`
if(!modulePathParts[1] && !versionParts[0]) {
versionParts = ["@"+versionParts[1]];
}
// it could be a scope package
if(versionParts.length === 3 && utils.moduleName.isScoped(moduleName)) {
versionParts.splice(0, 1);
versionParts[0] = "@"+versionParts[0];
}
var packageName,
modulePath;
// if the module name is relative
// use the currentPackageName
if (currentPackageName && utils.path.isRelative(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0];
// if the module name starts with the ~ (tilde) operator
// use the currentPackageName
} else if (currentPackageName && utils.path.startsWithTildeSlash(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0].split("/").slice(1).join("/");
} else {
if(modulePathParts[1]) { // foo@1.2#./path
packageName = versionParts[0];
modulePath = modulePathParts[1];
} else {
// test/abc
var folderParts = versionParts[0].split("/");
// Detect scoped packages
if(folderParts.length && folderParts[0][0] === "@") {
packageName = folderParts.splice(0, 2).join("/");
} else {
packageName = folderParts.shift();
}
modulePath = folderParts.join("/");
}
}
modulePath = utils.path.removeJS(modulePath);
return {
plugin: pluginParts.length === 2 ? "!"+pluginParts[1] : undefined,
version: versionParts[1],
modulePath: modulePath,
packageName: packageName,
moduleName: moduleName,
isGlobal: global
};
},
/**
* @function moduleName.parseFromPackage
*
* Given the package that loads the dependency, the dependency name,
* and the moduleName of what loaded the package, return
* a [system-npm/parsed_npm].
*
* @param {Loader} loader
* @param {NpmPackage} refPkg The package `name` is a dependency of.
* @param {moduleName} name
* @param {moduleName} parentName
* @return {system-npm/parsed_npm}
*
*/
parseFromPackage: function(loader, refPkg, name, parentName) {
// Get the name of the
var packageName = utils.pkg.name(refPkg),
parsedModuleName = utils.moduleName.parse(name, packageName),
isRelative = utils.path.isRelative(parsedModuleName.modulePath);
if(isRelative && !parentName) {
throw new Error("Cannot resolve a relative module identifier " +
"with no parent module:", name);
}
// If the module needs to be loaded relative.
if(isRelative) {
// get the location of the parent
var parentParsed = utils.moduleName.parse(parentName, packageName);
// If the parentModule and the currentModule are from the same parent
if( parentParsed.packageName === parsedModuleName.packageName && parentParsed.modulePath ) {
var makePathRelative = true;
if(name === "../" || name === "./" || name === "..") {
var relativePath = utils.path.relativeTo(
parentParsed.modulePath, name);
var isInRoot = utils.path.isPackageRootDir(relativePath);
if(isInRoot) {
parsedModuleName.modulePath = utils.pkg.main(refPkg);
makePathRelative = false;
} else {
parsedModuleName.modulePath = name +
(utils.path.endsWithSlash(name) ? "" : "/") +
"index";
}
}
if(makePathRelative) {
// Make the path relative to the parentName's path.
parsedModuleName.modulePath = utils.path.makeRelative(
utils.path.joinURIs(parentParsed.modulePath,
parsedModuleName.modulePath)
);
}
}
}
// we have the moduleName without the version
// we check this against various configs
var mapName = utils.moduleName.create(parsedModuleName),
refSteal = utils.pkg.config(refPkg),
mappedName;
// The refPkg might have a browser [https://github.com/substack/node-browserify#browser-field] mapping.
// Perform that mapping here.
if(refPkg.browser && (typeof refPkg.browser !== "string") &&
(mapName in refPkg.browser) &&
(!refSteal || !refSteal.ignoreBrowser)) {
mappedName = refPkg.browser[mapName] === false ?
"@empty" : refPkg.browser[mapName];
}
// globalBrowser looks like: {moduleName: aliasName, pgk: aliasingPkg}
var global = loader && loader.globalBrowser &&
loader.globalBrowser[mapName];
if(global) {
mappedName = global.moduleName === false ? "@empty" :
global.moduleName;
}
if(mappedName) {
return utils.moduleName.parse(mappedName, packageName, !!global);
} else {
return parsedModuleName;
}
},
nameAndVersion: function(parsedModuleName){
return parsedModuleName.packageName + "@" + parsedModuleName.version;
}
},
pkg: {
/**
* Returns a package's name. The system config allows one to set this to
* something else.
* @return {String}
*/
name: function(pkg){
var steal = utils.pkg.config(pkg);
return (steal && steal.name) || pkg.name;
},
main: function(pkg) {
var main;
var steal = utils.pkg.config(pkg);
if(steal && steal.main) {
main = steal.main;
} else if(typeof pkg.browser === "string") {
if(utils.path.endsWithSlash(pkg.browser)) {
main = pkg.browser + "index";
} else {
main = pkg.browser;
}
} else if(typeof pkg.jam === "object" && pkg.jam.main) {
main = pkg.jam.main;
} else if(pkg.main) {
main = pkg.main;
} else {
main = "index";
}
return utils.path.removeJS(
utils.path.removeDotSlash(main)
);
},
rootDir: function(pkg, isRoot) {
var root = isRoot ?
utils.path.removePackage( pkg.fileUrl ) :
utils.path.pkgDir(pkg.fileUrl);
var lib = utils.pkg.directoriesLib(pkg);
if(lib) {
root = utils.path.joinURIs(utils.path.addEndingSlash(root), lib);
}
return root;
},
/**
* @function pkg.isRoot
* Determines whether a module is the loader's root module.
* @return {Boolean}
*/
isRoot: function(loader, pkg) {
var root = utils.pkg.getDefault(loader);
return pkg.name === root.name && pkg.version === root.version;
},
getDefault: function(loader) {
return loader.npmPaths.__default;
},
/**
* Returns packageData given a module's name or module's address.
*
* Given a moduleName, it tries to return the package it belongs to.
* If a moduleName isn't provided, but a moduleA
*
* @param {Loader} loader
* @param {String} [moduleName]
* @param {String} [moduleAddress]
* @return {NpmPackage|undefined}
*/
findByModuleNameOrAddress: function(loader, moduleName, moduleAddress) {
if(loader.npm) {
if(moduleName) {
var parsed = utils.moduleName.parse(moduleName);
if(parsed.version && parsed.packageName) {
var name = parsed.packageName+"@"+parsed.version;
if(name in loader.npm) {
return loader.npm[name];
}
}
}
if(moduleAddress) {
// Remove the baseURL so that folderAddress only detects
// node_modules that are within the baseURL. Otherwise
// you cannot load a project that is itself within
// node_modules
var startingAddress = utils.relativeURI(loader.baseURL,
moduleAddress);
var packageFolder = utils.pkg.folderAddress(startingAddress);
return packageFolder ? loader.npmPaths[packageFolder] : utils.pkg.getDefault(loader);
} else {
return utils.pkg.getDefault(loader);
}
}
},
folderAddress: function (address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
/**
* Finds a dependency by its saved resolutions. This will only be called
* after we've first successful found a package the "hard way" by doing
* semver matching.
*/
findDep: function(loader, refPkg, name){
if(loader.npm && refPkg && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + refPkg.resolutions[name];
var pkg = loader.npm[nameAndVersion];
return pkg;
}
},
/**
* Walks up npmPaths looking for a [name]/package.json. Returns
* the package data it finds.
*
* @param {Loader} loader
* @param {NpmPackage} refPackage
* @param {packgeName} name the package name we are looking for.
*
* @return {undefined|NpmPackage}
*/
findDepWalking: function (loader, refPackage, name) {
if(loader.npm && refPackage && !utils.path.startsWithDotSlash(name)) {
// Todo .. first part of name
var curPackage = utils.path.depPackageDir(refPackage.fileUrl, name);
while(curPackage) {
var pkg = loader.npmPaths[curPackage];
if(pkg) {
return pkg;
}
var parentAddress = utils.path.parentNodeModuleAddress(curPackage);
if(!parentAddress) {
return;
}
curPackage = parentAddress+"/"+name;
}
}
},
findByName: function(loader, name) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
return loader.npm[name];
}
},
findByNameAndVersion: function(loader, name, version) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + version;
return loader.npm[nameAndVersion];
}
},
findByUrl: function(loader, url) {
if(loader.npm) {
url = utils.pkg.folderAddress(url);
return loader.npmPaths[url];
}
},
directoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
var lib = steal && steal.directories && steal.directories.lib;
var ignores = [".", "/"], ignore;
if(!lib) return undefined;
while(!!(ignore = ignores.shift())) {
if(lib[0] === ignore) {
lib = lib.substr(1);
}
}
return lib;
},
hasDirectoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
return steal && steal.directories && !!steal.directories.lib;
},
findPackageInfo: function(context, pkg){
var pkgInfo = context.pkgInfo;
if(pkgInfo) {
var out;
utils.forEach(pkgInfo, function(p){
if(pkg.name === p.name && pkg.version === p.version) {
out = p;
}
});
return out;
}
},
saveResolution: function(context, refPkg, pkg){
var npmPkg = utils.pkg.findPackageInfo(context, refPkg);
npmPkg.resolutions[pkg.name] = refPkg.resolutions[pkg.name] =
pkg.version;
},
config: function(pkg){
return pkg.steal || pkg.system;
}
},
path: {
makeRelative: function(path){
if( utils.path.isRelative(path) && path.substr(0,1) !== "/" ) {
return path;
} else {
return "./"+path;
}
},
removeJS: function(path) {
return path.replace(/\.js(!|$)/,function(whole, part){return part;});
},
removePackage: function (path){
return path.replace(/\/package\.json.*/,"");
},
addJS: function(path){
// Don't add `.js` for types that need to work without an extension.
if(/\.js(on)?$/.test(path)) {
return path;
} else {
return path+".js";
}
},
isRelative: function(path) {
return path.substr(0,1) === ".";
},
startsWithTildeSlash: function( path ) {
return path.substr(0,2) === "~/";
},
joinURIs: function(base, href) {
function removeDotSegments(input) {
var output = [];
input.replace(/^(\.\.?(\/|$))+/, '')
.replace(/\/(\.(\/|$))+/g, '/')
.replace(/\/\.\.$/, '/../')
.replace(/\/?[^\/]*/g, function (p) {
if (p === '/..') {
output.pop();
} else {
output.push(p);
}
});
return output.join('').replace(/^\//, input.charAt(0) === '/' ? '/' : '');
}
href = parseURI(href || '');
base = parseURI(base || '');
return !href || !base ? null : (href.protocol || base.protocol) +
(href.protocol || href.authority ? href.authority : base.authority) +
removeDotSegments(href.protocol || href.authority || href.pathname.charAt(0) === '/' ? href.pathname : (href.pathname ? ((base.authority && !base.pathname ? '/' : '') + base.pathname.slice(0, base.pathname.lastIndexOf('/') + 1) + href.pathname) : base.pathname)) +
(href.protocol || href.authority || href.pathname ? href.search : (href.search || base.search)) +
href.hash;
},
startsWithDotSlash: function( path ) {
return path.substr(0,2) === "./";
},
removeDotSlash: function(path) {
return utils.path.startsWithDotSlash(path) ?
path.substr(2) :
path;
},
endsWithSlash: function(path){
return path[path.length -1] === "/";
},
addEndingSlash: function(path){
return utils.path.endsWithSlash(path) ? path : path+"/";
},
// Returns a package.json path one node_modules folder deeper than the
// parentPackageAddress
depPackage: function (parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return (packageFolderName ? packageFolderName+"/" : "")+"node_modules/" + childName + "/package.json";
},
peerPackage: function(parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return packageFolderName.substr(0, packageFolderName.lastIndexOf("/"))
+ "/" + childName + "/package.json";
},
// returns the package directory one level deeper.
depPackageDir: function(parentPackageAddress, childName){
return utils.path.depPackage(parentPackageAddress, childName).replace(/\/package\.json.*/,"");
},
peerNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules);
if(nodeModulesIndex >= 0) {
return address.substr(0, nodeModulesIndex+nodeModules.length - 1 );
}
},
// /node_modules/a/node_modules/b/node_modules/c -> /node_modules/a/node_modules/
parentNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
prevModulesIndex = address.lastIndexOf(nodeModules, nodeModulesIndex-1);
if(prevModulesIndex >= 0) {
return address.substr(0, prevModulesIndex+nodeModules.length - 1 );
}
},
pkgDir: function(address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
// Scoped packages
if(address[nodeModulesIndex+nodeModules.length] === "@") {
nextSlash = address.indexOf("/", nextSlash+1);
}
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
basename: function(address){
var parts = address.split("/");
return parts[parts.length - 1];
},
relativeTo: function(modulePath, rel) {
var parts = modulePath.split("/");
var idx = 1;
while(rel[idx] === ".") {
parts.pop();
idx++;
}
return parts.join("/");
},
isPackageRootDir: function(pth) {
return pth.indexOf("/") === -1;
}
},
json: {
/**
* if a jsonOptions transformer is provided (by the System.config)
* use it for all json files, package.json's are also included
* @param loader
* @param load
* @param data
* @returns data
*/
transform: function(loader, load, data) {
// harmonize steal config
data.steal = utils.pkg.config(data);
var fn = loader.jsonOptions && loader.jsonOptions.transform;
if(!fn) return data;
return fn.call(loader, load, data);
}
},
includeInBuild: true
};
function parseURI(url) {
var m = String(url).replace(/^\s+|\s+$/g, '').match(/^([^:\/?#]+:)?(\/\/(?:[^:@]*(?::[^:@]*)?@)?(([^:\/?#]*)(?::(\d*))?))?([^?#]*)(\?[^#]*)?(#[\s\S]*)?/);
// authority = '//' + user + ':' + pass '@' + hostname + ':' port
return (m ? {
href : m[0] || '',
protocol : m[1] || '',
authority: m[2] || '',
host : m[3] || '',
hostname : m[4] || '',
port : m[5] || '',
pathname : m[6] || '',
search : m[7] || '',
hash : m[8] || ''
} : null);
}
module.exports = utils;
| {
set = new Set();
} | conditional_block |
npm-utils.js | /**
* @module system-npm/utils
*
* Helpers that are used by npm-extension and the npm plugin.
* This should be kept small and not have helpers exclusive to npm.
* However, it can have all npm-extension helpers.
*/
// A regex to test if a moduleName is npm-like.
var slice = Array.prototype.slice;
var npmModuleRegEx = /.+@.+\..+\..+#.+/;
var conditionalModuleRegEx = /#\{[^\}]+\}|#\?.+$/;
var gitUrlEx = /(git|http(s?)):\/\//;
var supportsSet = typeof Set === "function";
var utils = {
extend: function(d, s, deep, set){
var val;
if(deep) {
if(!set) {
if(supportsSet) {
set = new Set();
} else {
set = [];
}
}
if(supportsSet) {
if(set.has(s)) {
return s;
} else {
set.add(s);
}
} else {
if(set.indexOf(s) !== -1) {
return s;
} else {
set.push(s);
}
}
}
for(var prop in s) {
val = s[prop];
if(deep) {
if(utils.isArray(val)) {
d[prop] = slice.call(val);
} else if(utils.isPlainObject(val)) {
d[prop] = utils.extend({}, val, deep, set);
} else {
d[prop] = s[prop];
}
} else {
d[prop] = s[prop];
}
}
return d;
},
map: function(arr, fn){
var i = 0, len = arr.length, out = [];
for(; i < len; i++) {
out.push(fn.call(arr, arr[i]));
}
return out;
},
filter: function(arr, fn){
var i = 0, len = arr.length, out = [], res;
for(; i < len; i++) {
res = fn.call(arr, arr[i]);
if(res) {
out.push(arr[i]);
}
}
return out;
},
forEach: function(arr, fn) {
var i = 0, len = arr.length;
for(; i < len; i++) {
fn.call(arr, arr[i], i);
}
},
isObject: function(obj){
return typeof obj === "object";
},
isPlainObject: function(obj){
// A plain object has a proto that is the Object
return utils.isObject(obj) && (!obj || obj.__proto__ === Object.prototype);
},
isArray: Array.isArray || function(arr){
return Object.prototype.toString.call(arr) === "[object Array]";
},
isEnv: function(name) {
return this.isEnv ? this.isEnv(name) : this.env === name;
},
isGitUrl: function(str) {
return gitUrlEx.test(str);
},
warnOnce: function(msg){
var w = this._warnings = this._warnings || {};
if(w[msg]) return;
w[msg] = true;
this.warn(msg);
},
warn: function(msg){
if(typeof steal !== "undefined" && typeof console !== "undefined" && console.warn) {
steal.done().then(function(){
if(steal.dev && steal.dev.warn){
steal.dev.warn(msg)
} else if(console.warn) {
console.warn("steal.js WARNING: "+msg);
} else {
console.log(msg);
}
});
}
},
relativeURI: function(baseURL, url) {
return typeof steal !== "undefined" ? steal.relativeURI(baseURL, url) : url;
},
moduleName: {
/**
* @function moduleName.create
* Converts a parsed module name to a string
*
* @param {system-npm/parsed_npm} descriptor
*/
create: function (descriptor, standard) {
if(standard) {
return descriptor.moduleName;
} else {
if(descriptor === "@empty") {
return descriptor;
}
var modulePath;
if(descriptor.modulePath) {
modulePath = descriptor.modulePath.substr(0,2) === "./" ? descriptor.modulePath.substr(2) : descriptor.modulePath;
}
return descriptor.packageName
+ (descriptor.version ? '@' + descriptor.version : '')
+ (modulePath ? '#' + modulePath : '')
+ (descriptor.plugin ? descriptor.plugin : '');
}
},
/**
* @function moduleName.isNpm
* Determines whether a moduleName is npm-like.
* @return {Boolean}
*/
isNpm: function(moduleName){
return npmModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isConditional
* Determines whether a moduleName includes a condition.
* @return {Boolean}
*/
isConditional: function(moduleName){
return conditionalModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isFullyConvertedModuleName
* Determines whether a moduleName is a fully npm name, not npm-like
* With a parsed module name we can make sure there is a package name,
* package version, and module path.
*/
isFullyConvertedNpm: function(parsedModuleName){
return !!(parsedModuleName.packageName &&
parsedModuleName.version && parsedModuleName.modulePath);
},
/**
* @function moduleName.isScoped
* Determines whether a moduleName is from a scoped package.
* @return {Boolean}
*/
isScoped: function(moduleName){
return moduleName[0] === "@";
},
/**
* @function moduleName.parse
* Breaks a string moduleName into parts.
* packageName@version!plugin#modulePath
* "./lib/bfs"
*
* @return {system-npm/parsed_npm}
*/
parse: function (moduleName, currentPackageName, global) {
var pluginParts = moduleName.split('!');
var modulePathParts = pluginParts[0].split("#");
var versionParts = modulePathParts[0].split("@");
// it could be something like `@empty`
if(!modulePathParts[1] && !versionParts[0]) {
versionParts = ["@"+versionParts[1]];
}
// it could be a scope package
if(versionParts.length === 3 && utils.moduleName.isScoped(moduleName)) {
versionParts.splice(0, 1);
versionParts[0] = "@"+versionParts[0];
}
var packageName,
modulePath;
// if the module name is relative
// use the currentPackageName
if (currentPackageName && utils.path.isRelative(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0];
// if the module name starts with the ~ (tilde) operator
// use the currentPackageName
} else if (currentPackageName && utils.path.startsWithTildeSlash(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0].split("/").slice(1).join("/");
} else {
if(modulePathParts[1]) { // foo@1.2#./path
packageName = versionParts[0];
modulePath = modulePathParts[1];
} else {
// test/abc
var folderParts = versionParts[0].split("/");
// Detect scoped packages
if(folderParts.length && folderParts[0][0] === "@") {
packageName = folderParts.splice(0, 2).join("/");
} else {
packageName = folderParts.shift();
}
modulePath = folderParts.join("/");
}
}
modulePath = utils.path.removeJS(modulePath);
return {
plugin: pluginParts.length === 2 ? "!"+pluginParts[1] : undefined,
version: versionParts[1],
modulePath: modulePath,
packageName: packageName,
moduleName: moduleName,
isGlobal: global
};
},
/**
* @function moduleName.parseFromPackage
*
* Given the package that loads the dependency, the dependency name,
* and the moduleName of what loaded the package, return
* a [system-npm/parsed_npm].
*
* @param {Loader} loader
* @param {NpmPackage} refPkg The package `name` is a dependency of.
* @param {moduleName} name
* @param {moduleName} parentName
* @return {system-npm/parsed_npm}
*
*/
parseFromPackage: function(loader, refPkg, name, parentName) {
// Get the name of the
var packageName = utils.pkg.name(refPkg),
parsedModuleName = utils.moduleName.parse(name, packageName),
isRelative = utils.path.isRelative(parsedModuleName.modulePath);
if(isRelative && !parentName) {
throw new Error("Cannot resolve a relative module identifier " +
"with no parent module:", name);
}
// If the module needs to be loaded relative.
if(isRelative) {
// get the location of the parent
var parentParsed = utils.moduleName.parse(parentName, packageName);
// If the parentModule and the currentModule are from the same parent
if( parentParsed.packageName === parsedModuleName.packageName && parentParsed.modulePath ) {
var makePathRelative = true;
if(name === "../" || name === "./" || name === "..") {
var relativePath = utils.path.relativeTo(
parentParsed.modulePath, name);
var isInRoot = utils.path.isPackageRootDir(relativePath);
if(isInRoot) {
parsedModuleName.modulePath = utils.pkg.main(refPkg);
makePathRelative = false;
} else {
parsedModuleName.modulePath = name +
(utils.path.endsWithSlash(name) ? "" : "/") +
"index";
}
}
if(makePathRelative) {
// Make the path relative to the parentName's path.
parsedModuleName.modulePath = utils.path.makeRelative(
utils.path.joinURIs(parentParsed.modulePath,
parsedModuleName.modulePath)
);
}
}
}
// we have the moduleName without the version
// we check this against various configs
var mapName = utils.moduleName.create(parsedModuleName),
refSteal = utils.pkg.config(refPkg),
mappedName;
// The refPkg might have a browser [https://github.com/substack/node-browserify#browser-field] mapping.
// Perform that mapping here.
if(refPkg.browser && (typeof refPkg.browser !== "string") &&
(mapName in refPkg.browser) &&
(!refSteal || !refSteal.ignoreBrowser)) {
mappedName = refPkg.browser[mapName] === false ?
"@empty" : refPkg.browser[mapName];
}
// globalBrowser looks like: {moduleName: aliasName, pgk: aliasingPkg}
var global = loader && loader.globalBrowser &&
loader.globalBrowser[mapName];
if(global) {
mappedName = global.moduleName === false ? "@empty" :
global.moduleName;
}
if(mappedName) {
return utils.moduleName.parse(mappedName, packageName, !!global);
} else {
return parsedModuleName;
}
},
nameAndVersion: function(parsedModuleName){
return parsedModuleName.packageName + "@" + parsedModuleName.version;
}
},
pkg: {
/**
* Returns a package's name. The system config allows one to set this to
* something else.
* @return {String}
*/
name: function(pkg){
var steal = utils.pkg.config(pkg);
return (steal && steal.name) || pkg.name;
},
main: function(pkg) {
var main;
var steal = utils.pkg.config(pkg);
if(steal && steal.main) {
main = steal.main;
} else if(typeof pkg.browser === "string") {
if(utils.path.endsWithSlash(pkg.browser)) {
main = pkg.browser + "index";
} else {
main = pkg.browser;
}
} else if(typeof pkg.jam === "object" && pkg.jam.main) {
main = pkg.jam.main;
} else if(pkg.main) {
main = pkg.main;
} else {
main = "index";
}
return utils.path.removeJS(
utils.path.removeDotSlash(main)
);
},
rootDir: function(pkg, isRoot) {
var root = isRoot ?
utils.path.removePackage( pkg.fileUrl ) :
utils.path.pkgDir(pkg.fileUrl);
var lib = utils.pkg.directoriesLib(pkg);
if(lib) {
root = utils.path.joinURIs(utils.path.addEndingSlash(root), lib);
}
return root;
},
/**
* @function pkg.isRoot
* Determines whether a module is the loader's root module.
* @return {Boolean}
*/
isRoot: function(loader, pkg) {
var root = utils.pkg.getDefault(loader);
return pkg.name === root.name && pkg.version === root.version;
},
getDefault: function(loader) {
return loader.npmPaths.__default;
},
/**
* Returns packageData given a module's name or module's address.
*
* Given a moduleName, it tries to return the package it belongs to.
* If a moduleName isn't provided, but a moduleA
*
* @param {Loader} loader
* @param {String} [moduleName]
* @param {String} [moduleAddress]
* @return {NpmPackage|undefined}
*/
findByModuleNameOrAddress: function(loader, moduleName, moduleAddress) {
if(loader.npm) {
if(moduleName) {
var parsed = utils.moduleName.parse(moduleName);
if(parsed.version && parsed.packageName) {
var name = parsed.packageName+"@"+parsed.version;
if(name in loader.npm) {
return loader.npm[name];
}
}
}
if(moduleAddress) {
// Remove the baseURL so that folderAddress only detects
// node_modules that are within the baseURL. Otherwise
// you cannot load a project that is itself within
// node_modules
var startingAddress = utils.relativeURI(loader.baseURL,
moduleAddress);
var packageFolder = utils.pkg.folderAddress(startingAddress);
return packageFolder ? loader.npmPaths[packageFolder] : utils.pkg.getDefault(loader);
} else {
return utils.pkg.getDefault(loader);
}
}
},
folderAddress: function (address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
/**
* Finds a dependency by its saved resolutions. This will only be called
* after we've first successful found a package the "hard way" by doing
* semver matching.
*/
findDep: function(loader, refPkg, name){
if(loader.npm && refPkg && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + refPkg.resolutions[name];
var pkg = loader.npm[nameAndVersion];
return pkg;
}
},
/**
* Walks up npmPaths looking for a [name]/package.json. Returns
* the package data it finds.
*
* @param {Loader} loader
* @param {NpmPackage} refPackage
* @param {packgeName} name the package name we are looking for.
*
* @return {undefined|NpmPackage}
*/
findDepWalking: function (loader, refPackage, name) {
if(loader.npm && refPackage && !utils.path.startsWithDotSlash(name)) {
// Todo .. first part of name
var curPackage = utils.path.depPackageDir(refPackage.fileUrl, name);
while(curPackage) {
var pkg = loader.npmPaths[curPackage];
if(pkg) {
return pkg;
}
var parentAddress = utils.path.parentNodeModuleAddress(curPackage);
if(!parentAddress) {
return;
}
curPackage = parentAddress+"/"+name;
}
}
},
findByName: function(loader, name) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
return loader.npm[name];
}
},
findByNameAndVersion: function(loader, name, version) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + version;
return loader.npm[nameAndVersion];
}
},
findByUrl: function(loader, url) {
if(loader.npm) {
url = utils.pkg.folderAddress(url);
return loader.npmPaths[url];
}
},
directoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
var lib = steal && steal.directories && steal.directories.lib;
var ignores = [".", "/"], ignore;
if(!lib) return undefined;
while(!!(ignore = ignores.shift())) {
if(lib[0] === ignore) {
lib = lib.substr(1);
}
}
return lib;
},
hasDirectoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
return steal && steal.directories && !!steal.directories.lib;
},
findPackageInfo: function(context, pkg){
var pkgInfo = context.pkgInfo;
if(pkgInfo) {
var out;
utils.forEach(pkgInfo, function(p){
if(pkg.name === p.name && pkg.version === p.version) {
out = p;
}
});
return out;
}
},
saveResolution: function(context, refPkg, pkg){
var npmPkg = utils.pkg.findPackageInfo(context, refPkg);
npmPkg.resolutions[pkg.name] = refPkg.resolutions[pkg.name] =
pkg.version;
},
config: function(pkg){
return pkg.steal || pkg.system;
}
},
path: {
makeRelative: function(path){
if( utils.path.isRelative(path) && path.substr(0,1) !== "/" ) {
return path;
} else {
return "./"+path;
}
},
removeJS: function(path) {
return path.replace(/\.js(!|$)/,function(whole, part){return part;});
},
removePackage: function (path){
return path.replace(/\/package\.json.*/,"");
},
addJS: function(path){
// Don't add `.js` for types that need to work without an extension.
if(/\.js(on)?$/.test(path)) {
return path;
} else {
return path+".js";
}
},
isRelative: function(path) {
return path.substr(0,1) === ".";
},
startsWithTildeSlash: function( path ) {
return path.substr(0,2) === "~/";
},
joinURIs: function(base, href) {
function removeDotSegments(input) {
var output = [];
input.replace(/^(\.\.?(\/|$))+/, '')
.replace(/\/(\.(\/|$))+/g, '/')
.replace(/\/\.\.$/, '/../')
.replace(/\/?[^\/]*/g, function (p) {
if (p === '/..') {
output.pop();
} else {
output.push(p);
}
});
return output.join('').replace(/^\//, input.charAt(0) === '/' ? '/' : '');
}
href = parseURI(href || '');
base = parseURI(base || '');
return !href || !base ? null : (href.protocol || base.protocol) +
(href.protocol || href.authority ? href.authority : base.authority) +
removeDotSegments(href.protocol || href.authority || href.pathname.charAt(0) === '/' ? href.pathname : (href.pathname ? ((base.authority && !base.pathname ? '/' : '') + base.pathname.slice(0, base.pathname.lastIndexOf('/') + 1) + href.pathname) : base.pathname)) +
(href.protocol || href.authority || href.pathname ? href.search : (href.search || base.search)) +
href.hash;
},
startsWithDotSlash: function( path ) {
return path.substr(0,2) === "./";
},
removeDotSlash: function(path) {
return utils.path.startsWithDotSlash(path) ?
path.substr(2) :
path;
},
endsWithSlash: function(path){
return path[path.length -1] === "/";
},
addEndingSlash: function(path){
return utils.path.endsWithSlash(path) ? path : path+"/";
},
// Returns a package.json path one node_modules folder deeper than the
// parentPackageAddress
depPackage: function (parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return (packageFolderName ? packageFolderName+"/" : "")+"node_modules/" + childName + "/package.json";
},
peerPackage: function(parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return packageFolderName.substr(0, packageFolderName.lastIndexOf("/"))
+ "/" + childName + "/package.json";
},
// returns the package directory one level deeper.
depPackageDir: function(parentPackageAddress, childName){
return utils.path.depPackage(parentPackageAddress, childName).replace(/\/package\.json.*/,"");
},
peerNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules);
if(nodeModulesIndex >= 0) {
return address.substr(0, nodeModulesIndex+nodeModules.length - 1 );
}
},
// /node_modules/a/node_modules/b/node_modules/c -> /node_modules/a/node_modules/
parentNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
prevModulesIndex = address.lastIndexOf(nodeModules, nodeModulesIndex-1);
if(prevModulesIndex >= 0) {
return address.substr(0, prevModulesIndex+nodeModules.length - 1 );
}
},
pkgDir: function(address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
// Scoped packages
if(address[nodeModulesIndex+nodeModules.length] === "@") {
nextSlash = address.indexOf("/", nextSlash+1);
}
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
basename: function(address){
var parts = address.split("/");
return parts[parts.length - 1];
},
relativeTo: function(modulePath, rel) {
var parts = modulePath.split("/");
var idx = 1;
while(rel[idx] === ".") {
parts.pop();
idx++;
}
return parts.join("/");
},
isPackageRootDir: function(pth) {
return pth.indexOf("/") === -1;
}
},
json: {
/**
* if a jsonOptions transformer is provided (by the System.config)
* use it for all json files, package.json's are also included
* @param loader
* @param load
* @param data
* @returns data
*/
transform: function(loader, load, data) {
// harmonize steal config
data.steal = utils.pkg.config(data);
var fn = loader.jsonOptions && loader.jsonOptions.transform;
if(!fn) return data;
return fn.call(loader, load, data);
}
},
includeInBuild: true
};
function parseURI(url) |
module.exports = utils;
| {
var m = String(url).replace(/^\s+|\s+$/g, '').match(/^([^:\/?#]+:)?(\/\/(?:[^:@]*(?::[^:@]*)?@)?(([^:\/?#]*)(?::(\d*))?))?([^?#]*)(\?[^#]*)?(#[\s\S]*)?/);
// authority = '//' + user + ':' + pass '@' + hostname + ':' port
return (m ? {
href : m[0] || '',
protocol : m[1] || '',
authority: m[2] || '',
host : m[3] || '',
hostname : m[4] || '',
port : m[5] || '',
pathname : m[6] || '',
search : m[7] || '',
hash : m[8] || ''
} : null);
} | identifier_body |
npm-utils.js | /**
* @module system-npm/utils
*
* Helpers that are used by npm-extension and the npm plugin.
* This should be kept small and not have helpers exclusive to npm.
* However, it can have all npm-extension helpers.
*/
// A regex to test if a moduleName is npm-like.
var slice = Array.prototype.slice;
var npmModuleRegEx = /.+@.+\..+\..+#.+/;
var conditionalModuleRegEx = /#\{[^\}]+\}|#\?.+$/;
var gitUrlEx = /(git|http(s?)):\/\//;
var supportsSet = typeof Set === "function";
var utils = {
extend: function(d, s, deep, set){
var val;
if(deep) {
if(!set) {
if(supportsSet) {
set = new Set();
} else {
set = [];
}
}
if(supportsSet) {
if(set.has(s)) {
return s;
} else {
set.add(s);
}
} else {
if(set.indexOf(s) !== -1) {
return s;
} else {
set.push(s);
}
}
}
for(var prop in s) {
val = s[prop];
if(deep) {
if(utils.isArray(val)) {
d[prop] = slice.call(val);
} else if(utils.isPlainObject(val)) {
d[prop] = utils.extend({}, val, deep, set);
} else {
d[prop] = s[prop];
}
} else {
d[prop] = s[prop];
}
}
return d;
},
map: function(arr, fn){
var i = 0, len = arr.length, out = [];
for(; i < len; i++) {
out.push(fn.call(arr, arr[i]));
}
return out;
},
filter: function(arr, fn){
var i = 0, len = arr.length, out = [], res;
for(; i < len; i++) {
res = fn.call(arr, arr[i]);
if(res) {
out.push(arr[i]);
}
}
return out;
},
forEach: function(arr, fn) {
var i = 0, len = arr.length;
for(; i < len; i++) {
fn.call(arr, arr[i], i);
}
},
isObject: function(obj){
return typeof obj === "object";
},
isPlainObject: function(obj){
// A plain object has a proto that is the Object
return utils.isObject(obj) && (!obj || obj.__proto__ === Object.prototype);
},
isArray: Array.isArray || function(arr){
return Object.prototype.toString.call(arr) === "[object Array]";
},
isEnv: function(name) {
return this.isEnv ? this.isEnv(name) : this.env === name;
},
isGitUrl: function(str) {
return gitUrlEx.test(str);
},
warnOnce: function(msg){
var w = this._warnings = this._warnings || {};
if(w[msg]) return;
w[msg] = true;
this.warn(msg);
},
warn: function(msg){
if(typeof steal !== "undefined" && typeof console !== "undefined" && console.warn) {
steal.done().then(function(){
if(steal.dev && steal.dev.warn){
steal.dev.warn(msg)
} else if(console.warn) {
console.warn("steal.js WARNING: "+msg);
} else {
console.log(msg);
}
});
}
},
relativeURI: function(baseURL, url) {
return typeof steal !== "undefined" ? steal.relativeURI(baseURL, url) : url;
},
moduleName: {
/**
* @function moduleName.create
* Converts a parsed module name to a string
*
* @param {system-npm/parsed_npm} descriptor
*/
create: function (descriptor, standard) {
if(standard) {
return descriptor.moduleName;
} else {
if(descriptor === "@empty") {
return descriptor;
}
var modulePath;
if(descriptor.modulePath) {
modulePath = descriptor.modulePath.substr(0,2) === "./" ? descriptor.modulePath.substr(2) : descriptor.modulePath;
}
return descriptor.packageName
+ (descriptor.version ? '@' + descriptor.version : '')
+ (modulePath ? '#' + modulePath : '')
+ (descriptor.plugin ? descriptor.plugin : '');
}
},
/**
* @function moduleName.isNpm
* Determines whether a moduleName is npm-like.
* @return {Boolean}
*/
isNpm: function(moduleName){
return npmModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isConditional
* Determines whether a moduleName includes a condition.
* @return {Boolean}
*/
isConditional: function(moduleName){
return conditionalModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isFullyConvertedModuleName
* Determines whether a moduleName is a fully npm name, not npm-like
* With a parsed module name we can make sure there is a package name,
* package version, and module path.
*/
isFullyConvertedNpm: function(parsedModuleName){
return !!(parsedModuleName.packageName &&
parsedModuleName.version && parsedModuleName.modulePath);
},
/**
* @function moduleName.isScoped
* Determines whether a moduleName is from a scoped package.
* @return {Boolean}
*/
isScoped: function(moduleName){
return moduleName[0] === "@";
},
/**
* @function moduleName.parse
* Breaks a string moduleName into parts.
* packageName@version!plugin#modulePath
* "./lib/bfs"
*
* @return {system-npm/parsed_npm}
*/
parse: function (moduleName, currentPackageName, global) {
var pluginParts = moduleName.split('!');
var modulePathParts = pluginParts[0].split("#");
var versionParts = modulePathParts[0].split("@");
// it could be something like `@empty`
if(!modulePathParts[1] && !versionParts[0]) {
versionParts = ["@"+versionParts[1]];
}
// it could be a scope package
if(versionParts.length === 3 && utils.moduleName.isScoped(moduleName)) {
versionParts.splice(0, 1);
versionParts[0] = "@"+versionParts[0];
}
var packageName,
modulePath; |
// if the module name is relative
// use the currentPackageName
if (currentPackageName && utils.path.isRelative(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0];
// if the module name starts with the ~ (tilde) operator
// use the currentPackageName
} else if (currentPackageName && utils.path.startsWithTildeSlash(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0].split("/").slice(1).join("/");
} else {
if(modulePathParts[1]) { // foo@1.2#./path
packageName = versionParts[0];
modulePath = modulePathParts[1];
} else {
// test/abc
var folderParts = versionParts[0].split("/");
// Detect scoped packages
if(folderParts.length && folderParts[0][0] === "@") {
packageName = folderParts.splice(0, 2).join("/");
} else {
packageName = folderParts.shift();
}
modulePath = folderParts.join("/");
}
}
modulePath = utils.path.removeJS(modulePath);
return {
plugin: pluginParts.length === 2 ? "!"+pluginParts[1] : undefined,
version: versionParts[1],
modulePath: modulePath,
packageName: packageName,
moduleName: moduleName,
isGlobal: global
};
},
/**
* @function moduleName.parseFromPackage
*
* Given the package that loads the dependency, the dependency name,
* and the moduleName of what loaded the package, return
* a [system-npm/parsed_npm].
*
* @param {Loader} loader
* @param {NpmPackage} refPkg The package `name` is a dependency of.
* @param {moduleName} name
* @param {moduleName} parentName
* @return {system-npm/parsed_npm}
*
*/
parseFromPackage: function(loader, refPkg, name, parentName) {
// Get the name of the
var packageName = utils.pkg.name(refPkg),
parsedModuleName = utils.moduleName.parse(name, packageName),
isRelative = utils.path.isRelative(parsedModuleName.modulePath);
if(isRelative && !parentName) {
throw new Error("Cannot resolve a relative module identifier " +
"with no parent module:", name);
}
// If the module needs to be loaded relative.
if(isRelative) {
// get the location of the parent
var parentParsed = utils.moduleName.parse(parentName, packageName);
// If the parentModule and the currentModule are from the same parent
if( parentParsed.packageName === parsedModuleName.packageName && parentParsed.modulePath ) {
var makePathRelative = true;
if(name === "../" || name === "./" || name === "..") {
var relativePath = utils.path.relativeTo(
parentParsed.modulePath, name);
var isInRoot = utils.path.isPackageRootDir(relativePath);
if(isInRoot) {
parsedModuleName.modulePath = utils.pkg.main(refPkg);
makePathRelative = false;
} else {
parsedModuleName.modulePath = name +
(utils.path.endsWithSlash(name) ? "" : "/") +
"index";
}
}
if(makePathRelative) {
// Make the path relative to the parentName's path.
parsedModuleName.modulePath = utils.path.makeRelative(
utils.path.joinURIs(parentParsed.modulePath,
parsedModuleName.modulePath)
);
}
}
}
// we have the moduleName without the version
// we check this against various configs
var mapName = utils.moduleName.create(parsedModuleName),
refSteal = utils.pkg.config(refPkg),
mappedName;
// The refPkg might have a browser [https://github.com/substack/node-browserify#browser-field] mapping.
// Perform that mapping here.
if(refPkg.browser && (typeof refPkg.browser !== "string") &&
(mapName in refPkg.browser) &&
(!refSteal || !refSteal.ignoreBrowser)) {
mappedName = refPkg.browser[mapName] === false ?
"@empty" : refPkg.browser[mapName];
}
// globalBrowser looks like: {moduleName: aliasName, pgk: aliasingPkg}
var global = loader && loader.globalBrowser &&
loader.globalBrowser[mapName];
if(global) {
mappedName = global.moduleName === false ? "@empty" :
global.moduleName;
}
if(mappedName) {
return utils.moduleName.parse(mappedName, packageName, !!global);
} else {
return parsedModuleName;
}
},
nameAndVersion: function(parsedModuleName){
return parsedModuleName.packageName + "@" + parsedModuleName.version;
}
},
pkg: {
/**
* Returns a package's name. The system config allows one to set this to
* something else.
* @return {String}
*/
name: function(pkg){
var steal = utils.pkg.config(pkg);
return (steal && steal.name) || pkg.name;
},
main: function(pkg) {
var main;
var steal = utils.pkg.config(pkg);
if(steal && steal.main) {
main = steal.main;
} else if(typeof pkg.browser === "string") {
if(utils.path.endsWithSlash(pkg.browser)) {
main = pkg.browser + "index";
} else {
main = pkg.browser;
}
} else if(typeof pkg.jam === "object" && pkg.jam.main) {
main = pkg.jam.main;
} else if(pkg.main) {
main = pkg.main;
} else {
main = "index";
}
return utils.path.removeJS(
utils.path.removeDotSlash(main)
);
},
rootDir: function(pkg, isRoot) {
var root = isRoot ?
utils.path.removePackage( pkg.fileUrl ) :
utils.path.pkgDir(pkg.fileUrl);
var lib = utils.pkg.directoriesLib(pkg);
if(lib) {
root = utils.path.joinURIs(utils.path.addEndingSlash(root), lib);
}
return root;
},
/**
* @function pkg.isRoot
* Determines whether a module is the loader's root module.
* @return {Boolean}
*/
isRoot: function(loader, pkg) {
var root = utils.pkg.getDefault(loader);
return pkg.name === root.name && pkg.version === root.version;
},
getDefault: function(loader) {
return loader.npmPaths.__default;
},
/**
* Returns packageData given a module's name or module's address.
*
* Given a moduleName, it tries to return the package it belongs to.
* If a moduleName isn't provided, but a moduleA
*
* @param {Loader} loader
* @param {String} [moduleName]
* @param {String} [moduleAddress]
* @return {NpmPackage|undefined}
*/
findByModuleNameOrAddress: function(loader, moduleName, moduleAddress) {
if(loader.npm) {
if(moduleName) {
var parsed = utils.moduleName.parse(moduleName);
if(parsed.version && parsed.packageName) {
var name = parsed.packageName+"@"+parsed.version;
if(name in loader.npm) {
return loader.npm[name];
}
}
}
if(moduleAddress) {
// Remove the baseURL so that folderAddress only detects
// node_modules that are within the baseURL. Otherwise
// you cannot load a project that is itself within
// node_modules
var startingAddress = utils.relativeURI(loader.baseURL,
moduleAddress);
var packageFolder = utils.pkg.folderAddress(startingAddress);
return packageFolder ? loader.npmPaths[packageFolder] : utils.pkg.getDefault(loader);
} else {
return utils.pkg.getDefault(loader);
}
}
},
folderAddress: function (address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
/**
* Finds a dependency by its saved resolutions. This will only be called
* after we've first successful found a package the "hard way" by doing
* semver matching.
*/
findDep: function(loader, refPkg, name){
if(loader.npm && refPkg && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + refPkg.resolutions[name];
var pkg = loader.npm[nameAndVersion];
return pkg;
}
},
/**
* Walks up npmPaths looking for a [name]/package.json. Returns
* the package data it finds.
*
* @param {Loader} loader
* @param {NpmPackage} refPackage
* @param {packgeName} name the package name we are looking for.
*
* @return {undefined|NpmPackage}
*/
findDepWalking: function (loader, refPackage, name) {
if(loader.npm && refPackage && !utils.path.startsWithDotSlash(name)) {
// Todo .. first part of name
var curPackage = utils.path.depPackageDir(refPackage.fileUrl, name);
while(curPackage) {
var pkg = loader.npmPaths[curPackage];
if(pkg) {
return pkg;
}
var parentAddress = utils.path.parentNodeModuleAddress(curPackage);
if(!parentAddress) {
return;
}
curPackage = parentAddress+"/"+name;
}
}
},
findByName: function(loader, name) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
return loader.npm[name];
}
},
findByNameAndVersion: function(loader, name, version) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + version;
return loader.npm[nameAndVersion];
}
},
findByUrl: function(loader, url) {
if(loader.npm) {
url = utils.pkg.folderAddress(url);
return loader.npmPaths[url];
}
},
directoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
var lib = steal && steal.directories && steal.directories.lib;
var ignores = [".", "/"], ignore;
if(!lib) return undefined;
while(!!(ignore = ignores.shift())) {
if(lib[0] === ignore) {
lib = lib.substr(1);
}
}
return lib;
},
hasDirectoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
return steal && steal.directories && !!steal.directories.lib;
},
findPackageInfo: function(context, pkg){
var pkgInfo = context.pkgInfo;
if(pkgInfo) {
var out;
utils.forEach(pkgInfo, function(p){
if(pkg.name === p.name && pkg.version === p.version) {
out = p;
}
});
return out;
}
},
saveResolution: function(context, refPkg, pkg){
var npmPkg = utils.pkg.findPackageInfo(context, refPkg);
npmPkg.resolutions[pkg.name] = refPkg.resolutions[pkg.name] =
pkg.version;
},
config: function(pkg){
return pkg.steal || pkg.system;
}
},
path: {
makeRelative: function(path){
if( utils.path.isRelative(path) && path.substr(0,1) !== "/" ) {
return path;
} else {
return "./"+path;
}
},
removeJS: function(path) {
return path.replace(/\.js(!|$)/,function(whole, part){return part;});
},
removePackage: function (path){
return path.replace(/\/package\.json.*/,"");
},
addJS: function(path){
// Don't add `.js` for types that need to work without an extension.
if(/\.js(on)?$/.test(path)) {
return path;
} else {
return path+".js";
}
},
isRelative: function(path) {
return path.substr(0,1) === ".";
},
startsWithTildeSlash: function( path ) {
return path.substr(0,2) === "~/";
},
joinURIs: function(base, href) {
function removeDotSegments(input) {
var output = [];
input.replace(/^(\.\.?(\/|$))+/, '')
.replace(/\/(\.(\/|$))+/g, '/')
.replace(/\/\.\.$/, '/../')
.replace(/\/?[^\/]*/g, function (p) {
if (p === '/..') {
output.pop();
} else {
output.push(p);
}
});
return output.join('').replace(/^\//, input.charAt(0) === '/' ? '/' : '');
}
href = parseURI(href || '');
base = parseURI(base || '');
return !href || !base ? null : (href.protocol || base.protocol) +
(href.protocol || href.authority ? href.authority : base.authority) +
removeDotSegments(href.protocol || href.authority || href.pathname.charAt(0) === '/' ? href.pathname : (href.pathname ? ((base.authority && !base.pathname ? '/' : '') + base.pathname.slice(0, base.pathname.lastIndexOf('/') + 1) + href.pathname) : base.pathname)) +
(href.protocol || href.authority || href.pathname ? href.search : (href.search || base.search)) +
href.hash;
},
startsWithDotSlash: function( path ) {
return path.substr(0,2) === "./";
},
removeDotSlash: function(path) {
return utils.path.startsWithDotSlash(path) ?
path.substr(2) :
path;
},
endsWithSlash: function(path){
return path[path.length -1] === "/";
},
addEndingSlash: function(path){
return utils.path.endsWithSlash(path) ? path : path+"/";
},
// Returns a package.json path one node_modules folder deeper than the
// parentPackageAddress
depPackage: function (parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return (packageFolderName ? packageFolderName+"/" : "")+"node_modules/" + childName + "/package.json";
},
peerPackage: function(parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return packageFolderName.substr(0, packageFolderName.lastIndexOf("/"))
+ "/" + childName + "/package.json";
},
// returns the package directory one level deeper.
depPackageDir: function(parentPackageAddress, childName){
return utils.path.depPackage(parentPackageAddress, childName).replace(/\/package\.json.*/,"");
},
peerNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules);
if(nodeModulesIndex >= 0) {
return address.substr(0, nodeModulesIndex+nodeModules.length - 1 );
}
},
// /node_modules/a/node_modules/b/node_modules/c -> /node_modules/a/node_modules/
parentNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
prevModulesIndex = address.lastIndexOf(nodeModules, nodeModulesIndex-1);
if(prevModulesIndex >= 0) {
return address.substr(0, prevModulesIndex+nodeModules.length - 1 );
}
},
pkgDir: function(address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
// Scoped packages
if(address[nodeModulesIndex+nodeModules.length] === "@") {
nextSlash = address.indexOf("/", nextSlash+1);
}
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
basename: function(address){
var parts = address.split("/");
return parts[parts.length - 1];
},
relativeTo: function(modulePath, rel) {
var parts = modulePath.split("/");
var idx = 1;
while(rel[idx] === ".") {
parts.pop();
idx++;
}
return parts.join("/");
},
isPackageRootDir: function(pth) {
return pth.indexOf("/") === -1;
}
},
json: {
/**
* if a jsonOptions transformer is provided (by the System.config)
* use it for all json files, package.json's are also included
* @param loader
* @param load
* @param data
* @returns data
*/
transform: function(loader, load, data) {
// harmonize steal config
data.steal = utils.pkg.config(data);
var fn = loader.jsonOptions && loader.jsonOptions.transform;
if(!fn) return data;
return fn.call(loader, load, data);
}
},
includeInBuild: true
};
function parseURI(url) {
var m = String(url).replace(/^\s+|\s+$/g, '').match(/^([^:\/?#]+:)?(\/\/(?:[^:@]*(?::[^:@]*)?@)?(([^:\/?#]*)(?::(\d*))?))?([^?#]*)(\?[^#]*)?(#[\s\S]*)?/);
// authority = '//' + user + ':' + pass '@' + hostname + ':' port
return (m ? {
href : m[0] || '',
protocol : m[1] || '',
authority: m[2] || '',
host : m[3] || '',
hostname : m[4] || '',
port : m[5] || '',
pathname : m[6] || '',
search : m[7] || '',
hash : m[8] || ''
} : null);
}
module.exports = utils; | random_line_split | |
npm-utils.js | /**
* @module system-npm/utils
*
* Helpers that are used by npm-extension and the npm plugin.
* This should be kept small and not have helpers exclusive to npm.
* However, it can have all npm-extension helpers.
*/
// A regex to test if a moduleName is npm-like.
var slice = Array.prototype.slice;
var npmModuleRegEx = /.+@.+\..+\..+#.+/;
var conditionalModuleRegEx = /#\{[^\}]+\}|#\?.+$/;
var gitUrlEx = /(git|http(s?)):\/\//;
var supportsSet = typeof Set === "function";
var utils = {
extend: function(d, s, deep, set){
var val;
if(deep) {
if(!set) {
if(supportsSet) {
set = new Set();
} else {
set = [];
}
}
if(supportsSet) {
if(set.has(s)) {
return s;
} else {
set.add(s);
}
} else {
if(set.indexOf(s) !== -1) {
return s;
} else {
set.push(s);
}
}
}
for(var prop in s) {
val = s[prop];
if(deep) {
if(utils.isArray(val)) {
d[prop] = slice.call(val);
} else if(utils.isPlainObject(val)) {
d[prop] = utils.extend({}, val, deep, set);
} else {
d[prop] = s[prop];
}
} else {
d[prop] = s[prop];
}
}
return d;
},
map: function(arr, fn){
var i = 0, len = arr.length, out = [];
for(; i < len; i++) {
out.push(fn.call(arr, arr[i]));
}
return out;
},
filter: function(arr, fn){
var i = 0, len = arr.length, out = [], res;
for(; i < len; i++) {
res = fn.call(arr, arr[i]);
if(res) {
out.push(arr[i]);
}
}
return out;
},
forEach: function(arr, fn) {
var i = 0, len = arr.length;
for(; i < len; i++) {
fn.call(arr, arr[i], i);
}
},
isObject: function(obj){
return typeof obj === "object";
},
isPlainObject: function(obj){
// A plain object has a proto that is the Object
return utils.isObject(obj) && (!obj || obj.__proto__ === Object.prototype);
},
isArray: Array.isArray || function(arr){
return Object.prototype.toString.call(arr) === "[object Array]";
},
isEnv: function(name) {
return this.isEnv ? this.isEnv(name) : this.env === name;
},
isGitUrl: function(str) {
return gitUrlEx.test(str);
},
warnOnce: function(msg){
var w = this._warnings = this._warnings || {};
if(w[msg]) return;
w[msg] = true;
this.warn(msg);
},
warn: function(msg){
if(typeof steal !== "undefined" && typeof console !== "undefined" && console.warn) {
steal.done().then(function(){
if(steal.dev && steal.dev.warn){
steal.dev.warn(msg)
} else if(console.warn) {
console.warn("steal.js WARNING: "+msg);
} else {
console.log(msg);
}
});
}
},
relativeURI: function(baseURL, url) {
return typeof steal !== "undefined" ? steal.relativeURI(baseURL, url) : url;
},
moduleName: {
/**
* @function moduleName.create
* Converts a parsed module name to a string
*
* @param {system-npm/parsed_npm} descriptor
*/
create: function (descriptor, standard) {
if(standard) {
return descriptor.moduleName;
} else {
if(descriptor === "@empty") {
return descriptor;
}
var modulePath;
if(descriptor.modulePath) {
modulePath = descriptor.modulePath.substr(0,2) === "./" ? descriptor.modulePath.substr(2) : descriptor.modulePath;
}
return descriptor.packageName
+ (descriptor.version ? '@' + descriptor.version : '')
+ (modulePath ? '#' + modulePath : '')
+ (descriptor.plugin ? descriptor.plugin : '');
}
},
/**
* @function moduleName.isNpm
* Determines whether a moduleName is npm-like.
* @return {Boolean}
*/
isNpm: function(moduleName){
return npmModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isConditional
* Determines whether a moduleName includes a condition.
* @return {Boolean}
*/
isConditional: function(moduleName){
return conditionalModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isFullyConvertedModuleName
* Determines whether a moduleName is a fully npm name, not npm-like
* With a parsed module name we can make sure there is a package name,
* package version, and module path.
*/
isFullyConvertedNpm: function(parsedModuleName){
return !!(parsedModuleName.packageName &&
parsedModuleName.version && parsedModuleName.modulePath);
},
/**
* @function moduleName.isScoped
* Determines whether a moduleName is from a scoped package.
* @return {Boolean}
*/
isScoped: function(moduleName){
return moduleName[0] === "@";
},
/**
* @function moduleName.parse
* Breaks a string moduleName into parts.
* packageName@version!plugin#modulePath
* "./lib/bfs"
*
* @return {system-npm/parsed_npm}
*/
parse: function (moduleName, currentPackageName, global) {
var pluginParts = moduleName.split('!');
var modulePathParts = pluginParts[0].split("#");
var versionParts = modulePathParts[0].split("@");
// it could be something like `@empty`
if(!modulePathParts[1] && !versionParts[0]) {
versionParts = ["@"+versionParts[1]];
}
// it could be a scope package
if(versionParts.length === 3 && utils.moduleName.isScoped(moduleName)) {
versionParts.splice(0, 1);
versionParts[0] = "@"+versionParts[0];
}
var packageName,
modulePath;
// if the module name is relative
// use the currentPackageName
if (currentPackageName && utils.path.isRelative(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0];
// if the module name starts with the ~ (tilde) operator
// use the currentPackageName
} else if (currentPackageName && utils.path.startsWithTildeSlash(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0].split("/").slice(1).join("/");
} else {
if(modulePathParts[1]) { // foo@1.2#./path
packageName = versionParts[0];
modulePath = modulePathParts[1];
} else {
// test/abc
var folderParts = versionParts[0].split("/");
// Detect scoped packages
if(folderParts.length && folderParts[0][0] === "@") {
packageName = folderParts.splice(0, 2).join("/");
} else {
packageName = folderParts.shift();
}
modulePath = folderParts.join("/");
}
}
modulePath = utils.path.removeJS(modulePath);
return {
plugin: pluginParts.length === 2 ? "!"+pluginParts[1] : undefined,
version: versionParts[1],
modulePath: modulePath,
packageName: packageName,
moduleName: moduleName,
isGlobal: global
};
},
/**
* @function moduleName.parseFromPackage
*
* Given the package that loads the dependency, the dependency name,
* and the moduleName of what loaded the package, return
* a [system-npm/parsed_npm].
*
* @param {Loader} loader
* @param {NpmPackage} refPkg The package `name` is a dependency of.
* @param {moduleName} name
* @param {moduleName} parentName
* @return {system-npm/parsed_npm}
*
*/
parseFromPackage: function(loader, refPkg, name, parentName) {
// Get the name of the
var packageName = utils.pkg.name(refPkg),
parsedModuleName = utils.moduleName.parse(name, packageName),
isRelative = utils.path.isRelative(parsedModuleName.modulePath);
if(isRelative && !parentName) {
throw new Error("Cannot resolve a relative module identifier " +
"with no parent module:", name);
}
// If the module needs to be loaded relative.
if(isRelative) {
// get the location of the parent
var parentParsed = utils.moduleName.parse(parentName, packageName);
// If the parentModule and the currentModule are from the same parent
if( parentParsed.packageName === parsedModuleName.packageName && parentParsed.modulePath ) {
var makePathRelative = true;
if(name === "../" || name === "./" || name === "..") {
var relativePath = utils.path.relativeTo(
parentParsed.modulePath, name);
var isInRoot = utils.path.isPackageRootDir(relativePath);
if(isInRoot) {
parsedModuleName.modulePath = utils.pkg.main(refPkg);
makePathRelative = false;
} else {
parsedModuleName.modulePath = name +
(utils.path.endsWithSlash(name) ? "" : "/") +
"index";
}
}
if(makePathRelative) {
// Make the path relative to the parentName's path.
parsedModuleName.modulePath = utils.path.makeRelative(
utils.path.joinURIs(parentParsed.modulePath,
parsedModuleName.modulePath)
);
}
}
}
// we have the moduleName without the version
// we check this against various configs
var mapName = utils.moduleName.create(parsedModuleName),
refSteal = utils.pkg.config(refPkg),
mappedName;
// The refPkg might have a browser [https://github.com/substack/node-browserify#browser-field] mapping.
// Perform that mapping here.
if(refPkg.browser && (typeof refPkg.browser !== "string") &&
(mapName in refPkg.browser) &&
(!refSteal || !refSteal.ignoreBrowser)) {
mappedName = refPkg.browser[mapName] === false ?
"@empty" : refPkg.browser[mapName];
}
// globalBrowser looks like: {moduleName: aliasName, pgk: aliasingPkg}
var global = loader && loader.globalBrowser &&
loader.globalBrowser[mapName];
if(global) {
mappedName = global.moduleName === false ? "@empty" :
global.moduleName;
}
if(mappedName) {
return utils.moduleName.parse(mappedName, packageName, !!global);
} else {
return parsedModuleName;
}
},
nameAndVersion: function(parsedModuleName){
return parsedModuleName.packageName + "@" + parsedModuleName.version;
}
},
pkg: {
/**
* Returns a package's name. The system config allows one to set this to
* something else.
* @return {String}
*/
name: function(pkg){
var steal = utils.pkg.config(pkg);
return (steal && steal.name) || pkg.name;
},
main: function(pkg) {
var main;
var steal = utils.pkg.config(pkg);
if(steal && steal.main) {
main = steal.main;
} else if(typeof pkg.browser === "string") {
if(utils.path.endsWithSlash(pkg.browser)) {
main = pkg.browser + "index";
} else {
main = pkg.browser;
}
} else if(typeof pkg.jam === "object" && pkg.jam.main) {
main = pkg.jam.main;
} else if(pkg.main) {
main = pkg.main;
} else {
main = "index";
}
return utils.path.removeJS(
utils.path.removeDotSlash(main)
);
},
rootDir: function(pkg, isRoot) {
var root = isRoot ?
utils.path.removePackage( pkg.fileUrl ) :
utils.path.pkgDir(pkg.fileUrl);
var lib = utils.pkg.directoriesLib(pkg);
if(lib) {
root = utils.path.joinURIs(utils.path.addEndingSlash(root), lib);
}
return root;
},
/**
* @function pkg.isRoot
* Determines whether a module is the loader's root module.
* @return {Boolean}
*/
isRoot: function(loader, pkg) {
var root = utils.pkg.getDefault(loader);
return pkg.name === root.name && pkg.version === root.version;
},
getDefault: function(loader) {
return loader.npmPaths.__default;
},
/**
* Returns packageData given a module's name or module's address.
*
* Given a moduleName, it tries to return the package it belongs to.
* If a moduleName isn't provided, but a moduleA
*
* @param {Loader} loader
* @param {String} [moduleName]
* @param {String} [moduleAddress]
* @return {NpmPackage|undefined}
*/
findByModuleNameOrAddress: function(loader, moduleName, moduleAddress) {
if(loader.npm) {
if(moduleName) {
var parsed = utils.moduleName.parse(moduleName);
if(parsed.version && parsed.packageName) {
var name = parsed.packageName+"@"+parsed.version;
if(name in loader.npm) {
return loader.npm[name];
}
}
}
if(moduleAddress) {
// Remove the baseURL so that folderAddress only detects
// node_modules that are within the baseURL. Otherwise
// you cannot load a project that is itself within
// node_modules
var startingAddress = utils.relativeURI(loader.baseURL,
moduleAddress);
var packageFolder = utils.pkg.folderAddress(startingAddress);
return packageFolder ? loader.npmPaths[packageFolder] : utils.pkg.getDefault(loader);
} else {
return utils.pkg.getDefault(loader);
}
}
},
folderAddress: function (address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
/**
* Finds a dependency by its saved resolutions. This will only be called
* after we've first successful found a package the "hard way" by doing
* semver matching.
*/
findDep: function(loader, refPkg, name){
if(loader.npm && refPkg && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + refPkg.resolutions[name];
var pkg = loader.npm[nameAndVersion];
return pkg;
}
},
/**
* Walks up npmPaths looking for a [name]/package.json. Returns
* the package data it finds.
*
* @param {Loader} loader
* @param {NpmPackage} refPackage
* @param {packgeName} name the package name we are looking for.
*
* @return {undefined|NpmPackage}
*/
findDepWalking: function (loader, refPackage, name) {
if(loader.npm && refPackage && !utils.path.startsWithDotSlash(name)) {
// Todo .. first part of name
var curPackage = utils.path.depPackageDir(refPackage.fileUrl, name);
while(curPackage) {
var pkg = loader.npmPaths[curPackage];
if(pkg) {
return pkg;
}
var parentAddress = utils.path.parentNodeModuleAddress(curPackage);
if(!parentAddress) {
return;
}
curPackage = parentAddress+"/"+name;
}
}
},
findByName: function(loader, name) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
return loader.npm[name];
}
},
findByNameAndVersion: function(loader, name, version) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + version;
return loader.npm[nameAndVersion];
}
},
findByUrl: function(loader, url) {
if(loader.npm) {
url = utils.pkg.folderAddress(url);
return loader.npmPaths[url];
}
},
directoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
var lib = steal && steal.directories && steal.directories.lib;
var ignores = [".", "/"], ignore;
if(!lib) return undefined;
while(!!(ignore = ignores.shift())) {
if(lib[0] === ignore) {
lib = lib.substr(1);
}
}
return lib;
},
hasDirectoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
return steal && steal.directories && !!steal.directories.lib;
},
findPackageInfo: function(context, pkg){
var pkgInfo = context.pkgInfo;
if(pkgInfo) {
var out;
utils.forEach(pkgInfo, function(p){
if(pkg.name === p.name && pkg.version === p.version) {
out = p;
}
});
return out;
}
},
saveResolution: function(context, refPkg, pkg){
var npmPkg = utils.pkg.findPackageInfo(context, refPkg);
npmPkg.resolutions[pkg.name] = refPkg.resolutions[pkg.name] =
pkg.version;
},
config: function(pkg){
return pkg.steal || pkg.system;
}
},
path: {
makeRelative: function(path){
if( utils.path.isRelative(path) && path.substr(0,1) !== "/" ) {
return path;
} else {
return "./"+path;
}
},
removeJS: function(path) {
return path.replace(/\.js(!|$)/,function(whole, part){return part;});
},
removePackage: function (path){
return path.replace(/\/package\.json.*/,"");
},
addJS: function(path){
// Don't add `.js` for types that need to work without an extension.
if(/\.js(on)?$/.test(path)) {
return path;
} else {
return path+".js";
}
},
isRelative: function(path) {
return path.substr(0,1) === ".";
},
startsWithTildeSlash: function( path ) {
return path.substr(0,2) === "~/";
},
joinURIs: function(base, href) {
function | (input) {
var output = [];
input.replace(/^(\.\.?(\/|$))+/, '')
.replace(/\/(\.(\/|$))+/g, '/')
.replace(/\/\.\.$/, '/../')
.replace(/\/?[^\/]*/g, function (p) {
if (p === '/..') {
output.pop();
} else {
output.push(p);
}
});
return output.join('').replace(/^\//, input.charAt(0) === '/' ? '/' : '');
}
href = parseURI(href || '');
base = parseURI(base || '');
return !href || !base ? null : (href.protocol || base.protocol) +
(href.protocol || href.authority ? href.authority : base.authority) +
removeDotSegments(href.protocol || href.authority || href.pathname.charAt(0) === '/' ? href.pathname : (href.pathname ? ((base.authority && !base.pathname ? '/' : '') + base.pathname.slice(0, base.pathname.lastIndexOf('/') + 1) + href.pathname) : base.pathname)) +
(href.protocol || href.authority || href.pathname ? href.search : (href.search || base.search)) +
href.hash;
},
startsWithDotSlash: function( path ) {
return path.substr(0,2) === "./";
},
removeDotSlash: function(path) {
return utils.path.startsWithDotSlash(path) ?
path.substr(2) :
path;
},
endsWithSlash: function(path){
return path[path.length -1] === "/";
},
addEndingSlash: function(path){
return utils.path.endsWithSlash(path) ? path : path+"/";
},
// Returns a package.json path one node_modules folder deeper than the
// parentPackageAddress
depPackage: function (parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return (packageFolderName ? packageFolderName+"/" : "")+"node_modules/" + childName + "/package.json";
},
peerPackage: function(parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return packageFolderName.substr(0, packageFolderName.lastIndexOf("/"))
+ "/" + childName + "/package.json";
},
// returns the package directory one level deeper.
depPackageDir: function(parentPackageAddress, childName){
return utils.path.depPackage(parentPackageAddress, childName).replace(/\/package\.json.*/,"");
},
peerNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules);
if(nodeModulesIndex >= 0) {
return address.substr(0, nodeModulesIndex+nodeModules.length - 1 );
}
},
// /node_modules/a/node_modules/b/node_modules/c -> /node_modules/a/node_modules/
parentNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
prevModulesIndex = address.lastIndexOf(nodeModules, nodeModulesIndex-1);
if(prevModulesIndex >= 0) {
return address.substr(0, prevModulesIndex+nodeModules.length - 1 );
}
},
pkgDir: function(address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
// Scoped packages
if(address[nodeModulesIndex+nodeModules.length] === "@") {
nextSlash = address.indexOf("/", nextSlash+1);
}
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
basename: function(address){
var parts = address.split("/");
return parts[parts.length - 1];
},
relativeTo: function(modulePath, rel) {
var parts = modulePath.split("/");
var idx = 1;
while(rel[idx] === ".") {
parts.pop();
idx++;
}
return parts.join("/");
},
isPackageRootDir: function(pth) {
return pth.indexOf("/") === -1;
}
},
json: {
/**
* if a jsonOptions transformer is provided (by the System.config)
* use it for all json files, package.json's are also included
* @param loader
* @param load
* @param data
* @returns data
*/
transform: function(loader, load, data) {
// harmonize steal config
data.steal = utils.pkg.config(data);
var fn = loader.jsonOptions && loader.jsonOptions.transform;
if(!fn) return data;
return fn.call(loader, load, data);
}
},
includeInBuild: true
};
// Split a URL string into its URI components (protocol, authority, host,
// hostname, port, pathname, search, hash). Missing pieces come back as "".
// Returns null only if the regex fails to match at all.
function parseURI(url) {
	// Trim surrounding whitespace before matching.
	var trimmed = String(url).replace(/^\s+|\s+$/g, '');
	var match = trimmed.match(/^([^:\/?#]+:)?(\/\/(?:[^:@]*(?::[^:@]*)?@)?(([^:\/?#]*)(?::(\d*))?))?([^?#]*)(\?[^#]*)?(#[\s\S]*)?/);
	// authority = '//' + user + ':' + pass '@' + hostname + ':' port
	if (!match) {
		return null;
	}
	return {
		href     : match[0] || '',
		protocol : match[1] || '',
		authority: match[2] || '',
		host     : match[3] || '',
		hostname : match[4] || '',
		port     : match[5] || '',
		pathname : match[6] || '',
		search   : match[7] || '',
		hash     : match[8] || ''
	};
}
module.exports = utils;
| removeDotSegments | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.