file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
has_loc.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::borrow::Cow;
use proc_macro2::Ident;
use proc_macro2::Span;
use proc_macro2::TokenStream;
use quote::quote;
use quote::ToTokens;
use syn::spanned::Spanned;
use syn::Attribute;
use syn::Data;
use syn::DataEnum;
use syn::DataStruct;
use syn::DeriveInput;
use syn::Error;
use syn::Lit;
use syn::Meta;
use syn::NestedMeta;
use syn::Result;
use syn::Variant;
use crate::simple_type::SimpleType;
use crate::util::InterestingFields;
/// Builds a HasLoc impl.
///
/// The build rules are as follows:
/// - For a struct it just looks for a field with a type of LocId.
/// - For an enum it does a match on each variant.
/// - For either tuple variants or struct variants it looks for a field with a
/// type of LocId.
/// - For a tuple variant with a single non-LocId type and calls `.loc_id()`
/// on that field.
/// - Otherwise you can specify `#[has_loc(n)]` where `n` is the index of the
/// field to call `.loc_id()` on. `#[has_loc(n)]` can also be used on the
/// whole enum to provide a default index.
///
pub(crate) fn build_has_loc(input: TokenStream) -> Result<TokenStream> {
let input = syn::parse2::<DeriveInput>(input)?;
match &input.data {
Data::Enum(data) => build_has_loc_enum(&input, data),
Data::Struct(data) => build_has_loc_struct(&input, data),
Data::Union(_) => Err(Error::new(input.span(), "Union not handled")),
}
}
fn field_might_contain_buried_loc_id(ty: &SimpleType<'_>) -> bool {
if let Some(ident) = ty.get_ident() {
!(ident == "BlockId"
|| ident == "ClassId"
|| ident == "ConstId"
|| ident == "ValueId"
|| ident == "LocalId"
|| ident == "MethodId"
|| ident == "ParamId"
|| ident == "VarId"
|| ident == "usize"
|| ident == "u32")
} else {
true
}
}
fn build_has_loc_struct(input: &DeriveInput, data: &DataStruct) -> Result<TokenStream> {
// struct Foo {
// ...
// loc: LocId,
// }
let struct_name = &input.ident;
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let loc_field = if let Some(f) = default_select_field {
match f.kind {
FieldKind::Named(name) => {
let name = name.to_string();
let field = data
.fields
.iter()
.find(|field| field.ident.as_ref().map_or(false, |id| id == &name))
.ok_or_else(|| Error::new(input.span(), format!("Field '{name}' not found")))?
.ident
.as_ref()
.unwrap();
quote!(#field.loc_id())
}
FieldKind::None => todo!(),
FieldKind::Numbered(_) => todo!(),
}
} else {
let field = data
.fields
.iter()
.enumerate()
.map(|(i, field)| (i, field, SimpleType::from_type(&field.ty)))
.find(|(_, _, ty)| ty.is_based_on("LocId"));
let (idx, field, _) =
field.ok_or_else(|| Error::new(input.span(), "No field with type LocId found"))?;
if let Some(ident) = field.ident.as_ref() {
ident.to_token_stream()
} else {
syn::Index::from(idx).to_token_stream()
}
};
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #struct_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
self.#loc_field
}
});
Ok(output)
}
fn get_select_field<'a>(
variant: &'a Variant,
default_select_field: &Option<Field<'a>>,
) -> Result<Option<Field<'a>>> {
if let Some(f) = handle_has_loc_attr(&variant.attrs)? {
return Ok(Some(f));
}
if let Some(f) = default_select_field.as_ref() {
return Ok(Some(f.clone()));
}
let mut interesting_fields = InterestingFields::None;
for (idx, field) in variant.fields.iter().enumerate() {
let ty = SimpleType::from_type(&field.ty);
if ty.is_based_on("LocId") {
let kind = if let Some(ident) = field.ident.as_ref() {
// Bar { .., loc: LocId }
FieldKind::Named(Cow::Borrowed(ident))
} else {
// Bar(.., LocId)
FieldKind::Numbered(idx)
};
return Ok(Some(Field { kind, ty }));
} else if field_might_contain_buried_loc_id(&ty) {
// Report the type as 'unknown' because it's not a type that's
// related to LocId.
interesting_fields.add(idx, field.ident.as_ref(), SimpleType::Unknown);
}
}
match interesting_fields {
InterestingFields::None => {
let kind = FieldKind::None;
let ty = SimpleType::Unknown;
Ok(Some(Field { kind, ty }))
}
InterestingFields::One(idx, ident, ty) => {
// There's only a single field that could possibly contain a buried
// LocId.
let kind = ident.map_or_else(
|| FieldKind::Numbered(idx),
|id| FieldKind::Named(Cow::Borrowed(id)),
);
Ok(Some(Field { kind, ty }))
}
InterestingFields::Many => Ok(None),
}
}
fn build_has_loc_enum(input: &DeriveInput, data: &DataEnum) -> Result<TokenStream> {
// enum Foo {
// Bar(.., LocId),
// Baz { .., loc: LocId },
// }
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let enum_name = &input.ident;
let mut variants: Vec<TokenStream> = Vec::new();
for variant in data.variants.iter() {
let select_field = get_select_field(variant, &default_select_field)?;
if let Some(select_field) = select_field {
push_handler(&mut variants, enum_name, variant, select_field);
} else {
return Err(Error::new(
variant.span(),
format!("LocId field not found in variant {}", variant.ident,),
));
}
}
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #enum_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
match self {
#(#variants),*
}
}
});
Ok(output)
}
#[derive(Clone)]
struct Field<'a> {
kind: FieldKind<'a>,
ty: SimpleType<'a>,
}
#[derive(Clone)]
enum FieldKind<'a> {
Named(Cow<'a, Ident>),
None,
Numbered(usize),
}
fn push_handler(
variants: &mut Vec<TokenStream>,
enum_name: &Ident,
variant: &Variant,
field: Field<'_>,
) {
let variant_name = &variant.ident;
let reference = match (&field.kind, &field.ty) {
(FieldKind::None, _) => quote!(LocId::NONE),
(_, SimpleType::Unknown) => quote!(f.loc_id()),
(_, SimpleType::Unit(_)) => quote!(*f),
(_, SimpleType::Array(_))
| (_, SimpleType::BoxedSlice(_))
| (_, SimpleType::RefSlice(_))
| (_, SimpleType::Slice(_)) => {
todo!("Unhandled type: {:?}", field.ty)
}
};
let params = match field.kind {
FieldKind::Named(id) => {
quote!( { #id: f, .. })
}
FieldKind::None => match &variant.fields {
syn::Fields::Named(_) => quote!({ .. }),
syn::Fields::Unnamed(_) => quote!((..)),
syn::Fields::Unit => TokenStream::default(),
},
FieldKind::Numbered(idx) => {
let mut fields = Vec::new();
for (field_idx, _) in variant.fields.iter().enumerate() {
if field_idx == idx {
fields.push(quote!(f));
} else {
fields.push(quote!(_));
}
}
quote!((#(#fields),*))
}
};
variants.push(quote!(#enum_name::#variant_name #params => #reference));
}
fn handle_has_loc_attr(attrs: &[Attribute]) -> Result<Option<Field<'_>>> {
for attr in attrs {
if attr.path.is_ident("has_loc") {
let meta = attr.parse_meta()?;
match meta {
Meta::Path(path) => {
return Err(Error::new(path.span(), "Arguments expected"));
}
Meta::List(list) => {
// has_loc(A, B, C)
if list.nested.len() != 1 {
return Err(Error::new(list.span(), "Only one argument expected"));
}
match &list.nested[0] {
NestedMeta::Lit(Lit::Int(i)) => {
return Ok(Some(Field {
kind: FieldKind::Numbered(i.base10_parse()?),
ty: SimpleType::Unknown, | }));
}
NestedMeta::Lit(Lit::Str(n)) => {
return Ok(Some(Field {
kind: FieldKind::Named(Cow::Owned(Ident::new(
&n.value(),
Span::call_site(),
))),
ty: SimpleType::Unknown,
}));
}
NestedMeta::Meta(Meta::Path(meta)) if meta.is_ident("none") => {
return Ok(Some(Field {
kind: FieldKind::None,
ty: SimpleType::Unknown,
}));
}
i => {
todo!("Unhandled: {:?}", i);
}
}
}
Meta::NameValue(_list) => {
todo!();
}
}
}
}
Ok(None)
} | random_line_split | |
has_loc.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::borrow::Cow;
use proc_macro2::Ident;
use proc_macro2::Span;
use proc_macro2::TokenStream;
use quote::quote;
use quote::ToTokens;
use syn::spanned::Spanned;
use syn::Attribute;
use syn::Data;
use syn::DataEnum;
use syn::DataStruct;
use syn::DeriveInput;
use syn::Error;
use syn::Lit;
use syn::Meta;
use syn::NestedMeta;
use syn::Result;
use syn::Variant;
use crate::simple_type::SimpleType;
use crate::util::InterestingFields;
/// Builds a HasLoc impl.
///
/// The build rules are as follows:
/// - For a struct it just looks for a field with a type of LocId.
/// - For an enum it does a match on each variant.
/// - For either tuple variants or struct variants it looks for a field with a
/// type of LocId.
/// - For a tuple variant with a single non-LocId type and calls `.loc_id()`
/// on that field.
/// - Otherwise you can specify `#[has_loc(n)]` where `n` is the index of the
/// field to call `.loc_id()` on. `#[has_loc(n)]` can also be used on the
/// whole enum to provide a default index.
///
pub(crate) fn build_has_loc(input: TokenStream) -> Result<TokenStream> {
let input = syn::parse2::<DeriveInput>(input)?;
match &input.data {
Data::Enum(data) => build_has_loc_enum(&input, data),
Data::Struct(data) => build_has_loc_struct(&input, data),
Data::Union(_) => Err(Error::new(input.span(), "Union not handled")),
}
}
fn field_might_contain_buried_loc_id(ty: &SimpleType<'_>) -> bool {
if let Some(ident) = ty.get_ident() {
!(ident == "BlockId"
|| ident == "ClassId"
|| ident == "ConstId"
|| ident == "ValueId"
|| ident == "LocalId"
|| ident == "MethodId"
|| ident == "ParamId"
|| ident == "VarId"
|| ident == "usize"
|| ident == "u32")
} else {
true
}
}
fn build_has_loc_struct(input: &DeriveInput, data: &DataStruct) -> Result<TokenStream> |
fn get_select_field<'a>(
variant: &'a Variant,
default_select_field: &Option<Field<'a>>,
) -> Result<Option<Field<'a>>> {
if let Some(f) = handle_has_loc_attr(&variant.attrs)? {
return Ok(Some(f));
}
if let Some(f) = default_select_field.as_ref() {
return Ok(Some(f.clone()));
}
let mut interesting_fields = InterestingFields::None;
for (idx, field) in variant.fields.iter().enumerate() {
let ty = SimpleType::from_type(&field.ty);
if ty.is_based_on("LocId") {
let kind = if let Some(ident) = field.ident.as_ref() {
// Bar { .., loc: LocId }
FieldKind::Named(Cow::Borrowed(ident))
} else {
// Bar(.., LocId)
FieldKind::Numbered(idx)
};
return Ok(Some(Field { kind, ty }));
} else if field_might_contain_buried_loc_id(&ty) {
// Report the type as 'unknown' because it's not a type that's
// related to LocId.
interesting_fields.add(idx, field.ident.as_ref(), SimpleType::Unknown);
}
}
match interesting_fields {
InterestingFields::None => {
let kind = FieldKind::None;
let ty = SimpleType::Unknown;
Ok(Some(Field { kind, ty }))
}
InterestingFields::One(idx, ident, ty) => {
// There's only a single field that could possibly contain a buried
// LocId.
let kind = ident.map_or_else(
|| FieldKind::Numbered(idx),
|id| FieldKind::Named(Cow::Borrowed(id)),
);
Ok(Some(Field { kind, ty }))
}
InterestingFields::Many => Ok(None),
}
}
fn build_has_loc_enum(input: &DeriveInput, data: &DataEnum) -> Result<TokenStream> {
// enum Foo {
// Bar(.., LocId),
// Baz { .., loc: LocId },
// }
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let enum_name = &input.ident;
let mut variants: Vec<TokenStream> = Vec::new();
for variant in data.variants.iter() {
let select_field = get_select_field(variant, &default_select_field)?;
if let Some(select_field) = select_field {
push_handler(&mut variants, enum_name, variant, select_field);
} else {
return Err(Error::new(
variant.span(),
format!("LocId field not found in variant {}", variant.ident,),
));
}
}
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #enum_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
match self {
#(#variants),*
}
}
});
Ok(output)
}
#[derive(Clone)]
struct Field<'a> {
kind: FieldKind<'a>,
ty: SimpleType<'a>,
}
#[derive(Clone)]
enum FieldKind<'a> {
Named(Cow<'a, Ident>),
None,
Numbered(usize),
}
fn push_handler(
variants: &mut Vec<TokenStream>,
enum_name: &Ident,
variant: &Variant,
field: Field<'_>,
) {
let variant_name = &variant.ident;
let reference = match (&field.kind, &field.ty) {
(FieldKind::None, _) => quote!(LocId::NONE),
(_, SimpleType::Unknown) => quote!(f.loc_id()),
(_, SimpleType::Unit(_)) => quote!(*f),
(_, SimpleType::Array(_))
| (_, SimpleType::BoxedSlice(_))
| (_, SimpleType::RefSlice(_))
| (_, SimpleType::Slice(_)) => {
todo!("Unhandled type: {:?}", field.ty)
}
};
let params = match field.kind {
FieldKind::Named(id) => {
quote!( { #id: f, .. })
}
FieldKind::None => match &variant.fields {
syn::Fields::Named(_) => quote!({ .. }),
syn::Fields::Unnamed(_) => quote!((..)),
syn::Fields::Unit => TokenStream::default(),
},
FieldKind::Numbered(idx) => {
let mut fields = Vec::new();
for (field_idx, _) in variant.fields.iter().enumerate() {
if field_idx == idx {
fields.push(quote!(f));
} else {
fields.push(quote!(_));
}
}
quote!((#(#fields),*))
}
};
variants.push(quote!(#enum_name::#variant_name #params => #reference));
}
fn handle_has_loc_attr(attrs: &[Attribute]) -> Result<Option<Field<'_>>> {
for attr in attrs {
if attr.path.is_ident("has_loc") {
let meta = attr.parse_meta()?;
match meta {
Meta::Path(path) => {
return Err(Error::new(path.span(), "Arguments expected"));
}
Meta::List(list) => {
// has_loc(A, B, C)
if list.nested.len() != 1 {
return Err(Error::new(list.span(), "Only one argument expected"));
}
match &list.nested[0] {
NestedMeta::Lit(Lit::Int(i)) => {
return Ok(Some(Field {
kind: FieldKind::Numbered(i.base10_parse()?),
ty: SimpleType::Unknown,
}));
}
NestedMeta::Lit(Lit::Str(n)) => {
return Ok(Some(Field {
kind: FieldKind::Named(Cow::Owned(Ident::new(
&n.value(),
Span::call_site(),
))),
ty: SimpleType::Unknown,
}));
}
NestedMeta::Meta(Meta::Path(meta)) if meta.is_ident("none") => {
return Ok(Some(Field {
kind: FieldKind::None,
ty: SimpleType::Unknown,
}));
}
i => {
todo!("Unhandled: {:?}", i);
}
}
}
Meta::NameValue(_list) => {
todo!();
}
}
}
}
Ok(None)
}
| {
// struct Foo {
// ...
// loc: LocId,
// }
let struct_name = &input.ident;
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let loc_field = if let Some(f) = default_select_field {
match f.kind {
FieldKind::Named(name) => {
let name = name.to_string();
let field = data
.fields
.iter()
.find(|field| field.ident.as_ref().map_or(false, |id| id == &name))
.ok_or_else(|| Error::new(input.span(), format!("Field '{name}' not found")))?
.ident
.as_ref()
.unwrap();
quote!(#field.loc_id())
}
FieldKind::None => todo!(),
FieldKind::Numbered(_) => todo!(),
}
} else {
let field = data
.fields
.iter()
.enumerate()
.map(|(i, field)| (i, field, SimpleType::from_type(&field.ty)))
.find(|(_, _, ty)| ty.is_based_on("LocId"));
let (idx, field, _) =
field.ok_or_else(|| Error::new(input.span(), "No field with type LocId found"))?;
if let Some(ident) = field.ident.as_ref() {
ident.to_token_stream()
} else {
syn::Index::from(idx).to_token_stream()
}
};
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #struct_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
self.#loc_field
}
});
Ok(output)
} | identifier_body |
reader_test.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"google.golang.org/api/option"
)
const readData = "0123456789"
func TestRangeReader(t *testing.T) {
ctx := context.Background()
hc, close := newTestServer(handleRangeRead)
defer close()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for _, test := range []struct {
offset, length int64
want string
}{
{0, -1, readData},
{0, 10, readData},
{0, 5, readData[:5]},
{1, 3, readData[1:4]},
{6, -1, readData[6:]},
{4, 20, readData[4:]},
{-20, -1, readData},
{-6, -1, readData[4:]},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
gotb, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("%d/%d: got %q, want %q", test.offset, test.length, got, test.want)
}
}
}, option.WithHTTPClient(hc))
}
func handleRangeRead(w http.ResponseWriter, r *http.Request) {
rh := strings.TrimSpace(r.Header.Get("Range"))
data := readData
var from, to int
if rh == "" {
from = 0
to = len(data)
} else {
// assume "bytes=N-", "bytes=-N" or "bytes=N-M"
var err error
i := strings.IndexRune(rh, '=')
j := strings.IndexRune(rh, '-')
hasPositiveStartOffset := i+1 != j
if hasPositiveStartOffset { // The case of "bytes=N-"
from, err = strconv.Atoi(rh[i+1 : j])
} else { // The case of "bytes=-N"
from, err = strconv.Atoi(rh[i+1:])
from += len(data)
if from < 0 {
from = 0
}
}
if err != nil {
w.WriteHeader(500)
return
}
to = len(data)
if hasPositiveStartOffset && j+1 < len(rh) { // The case of "bytes=N-M"
to, err = strconv.Atoi(rh[j+1:])
if err != nil {
w.WriteHeader(500)
return
}
to++ // Range header is inclusive, Go slice is exclusive
}
if from >= len(data) && to != from {
w.WriteHeader(416)
return
}
if from > len(data) {
from = len(data)
}
if to > len(data) {
to = len(data)
}
}
data = data[from:to]
if data != readData {
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, to-1, len(readData)))
w.WriteHeader(http.StatusPartialContent)
}
if _, err := w.Write([]byte(data)); err != nil {
panic(err)
}
}
type http2Error string
func (h http2Error) Error() string {
return string(h)
}
// TestRangeReaderRetry tests Reader resumption logic. It ensures that offset
// and seen bytes are handled correctly so that data is not corrupted.
// This tests only works for the HTTP Reader.
// TODO: Design a similar test for gRPC.
func TestRangeReaderRetry(t *testing.T) {
internalErr := http2Error("blah blah INTERNAL_ERROR")
goawayErr := http2Error("http2: server sent GOAWAY and closed the connection; LastStreamID=15, ErrCode=NO_ERROR, debug=\"load_shed\"")
readBytes := []byte(readData)
hc, close := newTestServer(handleRangeRead)
defer close()
ctx := context.Background()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for i, test := range []struct {
offset, length int64
bodies []fakeReadCloser
want string
}{
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{10}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{5, 2}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{5}, err: internalErr},
{data: readBytes[5:], counts: []int{1, 3}, err: goawayErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: goawayErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 3,
bodies: []fakeReadCloser{
{data: readBytes[1:], counts: []int{1}, err: internalErr},
{data: readBytes[2:], counts: []int{2}, err: io.EOF},
},
want: readData[1:4],
},
{
offset: 4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[4:], counts: []int{1}, err: internalErr},
{data: readBytes[5:], counts: []int{4}, err: internalErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData[4:],
},
{
offset: -4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[6:], counts: []int{1}, err: internalErr},
{data: readBytes[7:], counts: []int{3}, err: io.EOF},
},
want: readData[6:],
},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
b := 0
r.reader = &httpReader{
body: &test.bodies[0],
reopen: func(int64) (*http.Response, error) {
b++
return &http.Response{Body: &test.bodies[b]}, nil
},
}
buf := make([]byte, len(readData)/2)
var gotb []byte
for |
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("#%d: got %q, want %q", i, got, test.want)
}
if r.Attrs.Size != int64(len(readData)) {
t.Errorf("#%d: got Attrs.Size=%q, want %q", i, r.Attrs.Size, len(readData))
}
wantOffset := test.offset
if wantOffset < 0 {
wantOffset += int64(len(readData))
if wantOffset < 0 {
wantOffset = 0
}
}
if got := r.Attrs.StartOffset; got != wantOffset {
t.Errorf("#%d: got Attrs.Offset=%q, want %q", i, got, wantOffset)
}
}
r, err := obj.NewRangeReader(ctx, -100, 10)
if err == nil {
t.Fatal("Expected a non-nil error with negative offset and positive length")
} else if want := "storage: invalid offset"; !strings.HasPrefix(err.Error(), want) {
t.Errorf("Error mismatch\nGot: %q\nWant prefix: %q\n", err.Error(), want)
}
if r != nil {
t.Errorf("Expected nil reader")
}
}, option.WithHTTPClient(hc))
}
type fakeReadCloser struct {
data []byte
counts []int // how much of data to deliver on each read
err error // error to return with last count
d int // current position in data
c int // current position in counts
}
func (f *fakeReadCloser) Close() error {
return nil
}
func (f *fakeReadCloser) Read(buf []byte) (int, error) {
i := f.c
n := 0
if i < len(f.counts) {
n = f.counts[i]
}
var err error
if i >= len(f.counts)-1 {
err = f.err
}
copy(buf, f.data[f.d:f.d+n])
if len(buf) < n {
n = len(buf)
f.counts[i] -= n
err = nil
} else {
f.c++
}
f.d += n
return n, err
}
func TestFakeReadCloser(t *testing.T) {
e := errors.New("")
f := &fakeReadCloser{
data: []byte(readData),
counts: []int{1, 2, 3},
err: e,
}
wants := []string{"0", "12", "345"}
buf := make([]byte, 10)
for i := 0; i < 3; i++ {
n, err := f.Read(buf)
if got, want := n, f.counts[i]; got != want {
t.Fatalf("i=%d: got %d, want %d", i, got, want)
}
var wantErr error
if i == 2 {
wantErr = e
}
if err != wantErr {
t.Fatalf("i=%d: got error %v, want %v", i, err, wantErr)
}
if got, want := string(buf[:n]), wants[i]; got != want {
t.Fatalf("i=%d: got %q, want %q", i, got, want)
}
}
}
func TestContentEncodingGzipWithReader(t *testing.T) {
bucketName := "my-bucket"
objectName := "gzip-test"
getAttrsURL := fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", bucketName, objectName)
downloadObjectXMLurl := fmt.Sprintf("/%s/%s", bucketName, objectName)
downloadObjectJSONurl := fmt.Sprintf("/b/%s/o/%s?alt=media&prettyPrint=false&projection=full", bucketName, objectName)
original := bytes.Repeat([]byte("a"), 4<<10)
mockGCS := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.String() {
case getAttrsURL:
fmt.Fprintf(w, `{
"bucket": "bucket", "name": "name", "contentEncoding": "gzip",
"contentLength": 43,
"contentType": "text/plain","timeCreated": "2020-04-10T16:08:58-07:00",
"updated": "2020-04-14T16:08:58-07:00"
}`)
return
case downloadObjectXMLurl, downloadObjectJSONurl:
// Serve back the file.
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Etag", `"c50e3e41c9bc9df34e84c94ce073f928"`)
w.Header().Set("X-Goog-Generation", "1587012235914578")
w.Header().Set("X-Goog-MetaGeneration", "2")
w.Header().Set("X-Goog-Stored-Content-Encoding", "gzip")
w.Header().Set("vary", "Accept-Encoding")
w.Header().Set("x-goog-stored-content-length", "43")
w.Header().Set("x-goog-hash", "crc32c=pYIWwQ==")
w.Header().Set("x-goog-hash", "md5=xQ4+Qcm8nfNOhMlM4HP5KA==")
w.Header().Set("x-goog-storage-class", "STANDARD")
gz := gzip.NewWriter(w)
gz.Write(original)
gz.Close()
default:
fmt.Fprintf(w, "unrecognized URL %s", r.URL)
}
}))
mockGCS.EnableHTTP2 = true
mockGCS.StartTLS()
defer mockGCS.Close()
ctx := context.Background()
hc := mockGCS.Client()
ux, _ := url.Parse(mockGCS.URL)
hc.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true
wrt := &alwaysToTargetURLRoundTripper{
destURL: ux,
hc: hc,
}
whc := &http.Client{Transport: wrt}
// 2. Different flavours of the read should all return the body.
readerCreators := []struct {
name string
create func(ctx context.Context, obj *ObjectHandle) (*Reader, error)
}{
{
"NewReader", func(cxt context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewReader(ctx)
},
},
{
"NewRangeReader(0, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 0, -1)
},
},
{
"NewRangeReader(1kB, 2kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 1<<10, 2<<10)
},
},
{
"NewRangeReader(2kB, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, -1)
},
},
{
"NewRangeReader(2kB, 3kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, 3<<10)
},
},
}
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
for _, tt := range readerCreators {
t.Run(tt.name, func(t *testing.T) {
obj := c.Bucket(bucketName).Object(objectName)
_, err := obj.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
rd, err := tt.create(ctx, obj)
if err != nil {
t.Fatal(err)
}
defer rd.Close()
got, err := ioutil.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
if g, w := got, original; !bytes.Equal(g, w) {
t.Fatalf("Response mismatch\nGot:\n%q\n\nWant:\n%q", g, w)
}
})
}
}, option.WithEndpoint(mockGCS.URL), option.WithoutAuthentication(), option.WithHTTPClient(whc))
}
// alwaysToTargetURLRoundTripper ensures that every single request
// is routed to a target destination. Some requests within the storage
// client by-pass using the provided HTTP client, hence this enforcemenet.
type alwaysToTargetURLRoundTripper struct {
destURL *url.URL
hc *http.Client
}
func (adrt *alwaysToTargetURLRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.URL.Host = adrt.destURL.Host
// Cloud Storage has full control over the response headers for their
// HTTP server but unfortunately we don't, so we have to prune
// the Range header to mimick GCS ignoring Range header:
// https://cloud.google.com/storage/docs/transcoding#range
delete(req.Header, "Range")
return adrt.hc.Do(req)
}
// multiTransportTest initializes fresh clients for each transport, then runs
// given testing function using each transport-specific client, supplying the
// test function with the sub-test instance, the context it was given, the name
// of an existing bucket to use, a bucket name to use for bucket creation, and
// the client to use.
func multiReaderTest(ctx context.Context, t *testing.T, test func(*testing.T, *Client), opts ...option.ClientOption) {
jsonOpts := append(opts, WithJSONReads())
xmlOpts := append(opts, WithXMLReads())
jsonClient, err := NewClient(ctx, jsonOpts...)
if err != nil {
t.Fatal(err)
}
xmlClient, err := NewClient(ctx, xmlOpts...)
if err != nil {
t.Fatal(err)
}
clients := map[string]*Client{
"xmlReads": xmlClient,
"jsonReads": jsonClient,
}
for transport, client := range clients {
t.Run(transport, func(t *testing.T) {
defer client.Close()
test(t, client)
})
}
}
| {
n, err := r.Read(buf)
gotb = append(gotb, buf[:n]...)
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
} | conditional_block |
reader_test.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing" |
"google.golang.org/api/option"
)
const readData = "0123456789"
func TestRangeReader(t *testing.T) {
ctx := context.Background()
hc, close := newTestServer(handleRangeRead)
defer close()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for _, test := range []struct {
offset, length int64
want string
}{
{0, -1, readData},
{0, 10, readData},
{0, 5, readData[:5]},
{1, 3, readData[1:4]},
{6, -1, readData[6:]},
{4, 20, readData[4:]},
{-20, -1, readData},
{-6, -1, readData[4:]},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
gotb, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("%d/%d: got %q, want %q", test.offset, test.length, got, test.want)
}
}
}, option.WithHTTPClient(hc))
}
func handleRangeRead(w http.ResponseWriter, r *http.Request) {
rh := strings.TrimSpace(r.Header.Get("Range"))
data := readData
var from, to int
if rh == "" {
from = 0
to = len(data)
} else {
// assume "bytes=N-", "bytes=-N" or "bytes=N-M"
var err error
i := strings.IndexRune(rh, '=')
j := strings.IndexRune(rh, '-')
hasPositiveStartOffset := i+1 != j
if hasPositiveStartOffset { // The case of "bytes=N-"
from, err = strconv.Atoi(rh[i+1 : j])
} else { // The case of "bytes=-N"
from, err = strconv.Atoi(rh[i+1:])
from += len(data)
if from < 0 {
from = 0
}
}
if err != nil {
w.WriteHeader(500)
return
}
to = len(data)
if hasPositiveStartOffset && j+1 < len(rh) { // The case of "bytes=N-M"
to, err = strconv.Atoi(rh[j+1:])
if err != nil {
w.WriteHeader(500)
return
}
to++ // Range header is inclusive, Go slice is exclusive
}
if from >= len(data) && to != from {
w.WriteHeader(416)
return
}
if from > len(data) {
from = len(data)
}
if to > len(data) {
to = len(data)
}
}
data = data[from:to]
if data != readData {
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, to-1, len(readData)))
w.WriteHeader(http.StatusPartialContent)
}
if _, err := w.Write([]byte(data)); err != nil {
panic(err)
}
}
type http2Error string
func (h http2Error) Error() string {
return string(h)
}
// TestRangeReaderRetry tests Reader resumption logic. It ensures that offset
// and seen bytes are handled correctly so that data is not corrupted.
// This tests only works for the HTTP Reader.
// TODO: Design a similar test for gRPC.
func TestRangeReaderRetry(t *testing.T) {
internalErr := http2Error("blah blah INTERNAL_ERROR")
goawayErr := http2Error("http2: server sent GOAWAY and closed the connection; LastStreamID=15, ErrCode=NO_ERROR, debug=\"load_shed\"")
readBytes := []byte(readData)
hc, close := newTestServer(handleRangeRead)
defer close()
ctx := context.Background()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for i, test := range []struct {
offset, length int64
bodies []fakeReadCloser
want string
}{
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{10}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{5, 2}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{5}, err: internalErr},
{data: readBytes[5:], counts: []int{1, 3}, err: goawayErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: goawayErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 3,
bodies: []fakeReadCloser{
{data: readBytes[1:], counts: []int{1}, err: internalErr},
{data: readBytes[2:], counts: []int{2}, err: io.EOF},
},
want: readData[1:4],
},
{
offset: 4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[4:], counts: []int{1}, err: internalErr},
{data: readBytes[5:], counts: []int{4}, err: internalErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData[4:],
},
{
offset: -4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[6:], counts: []int{1}, err: internalErr},
{data: readBytes[7:], counts: []int{3}, err: io.EOF},
},
want: readData[6:],
},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
b := 0
r.reader = &httpReader{
body: &test.bodies[0],
reopen: func(int64) (*http.Response, error) {
b++
return &http.Response{Body: &test.bodies[b]}, nil
},
}
buf := make([]byte, len(readData)/2)
var gotb []byte
for {
n, err := r.Read(buf)
gotb = append(gotb, buf[:n]...)
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
}
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("#%d: got %q, want %q", i, got, test.want)
}
if r.Attrs.Size != int64(len(readData)) {
t.Errorf("#%d: got Attrs.Size=%q, want %q", i, r.Attrs.Size, len(readData))
}
wantOffset := test.offset
if wantOffset < 0 {
wantOffset += int64(len(readData))
if wantOffset < 0 {
wantOffset = 0
}
}
if got := r.Attrs.StartOffset; got != wantOffset {
t.Errorf("#%d: got Attrs.Offset=%q, want %q", i, got, wantOffset)
}
}
r, err := obj.NewRangeReader(ctx, -100, 10)
if err == nil {
t.Fatal("Expected a non-nil error with negative offset and positive length")
} else if want := "storage: invalid offset"; !strings.HasPrefix(err.Error(), want) {
t.Errorf("Error mismatch\nGot: %q\nWant prefix: %q\n", err.Error(), want)
}
if r != nil {
t.Errorf("Expected nil reader")
}
}, option.WithHTTPClient(hc))
}
type fakeReadCloser struct {
data []byte
counts []int // how much of data to deliver on each read
err error // error to return with last count
d int // current position in data
c int // current position in counts
}
func (f *fakeReadCloser) Close() error {
return nil
}
func (f *fakeReadCloser) Read(buf []byte) (int, error) {
i := f.c
n := 0
if i < len(f.counts) {
n = f.counts[i]
}
var err error
if i >= len(f.counts)-1 {
err = f.err
}
copy(buf, f.data[f.d:f.d+n])
if len(buf) < n {
n = len(buf)
f.counts[i] -= n
err = nil
} else {
f.c++
}
f.d += n
return n, err
}
func TestFakeReadCloser(t *testing.T) {
e := errors.New("")
f := &fakeReadCloser{
data: []byte(readData),
counts: []int{1, 2, 3},
err: e,
}
wants := []string{"0", "12", "345"}
buf := make([]byte, 10)
for i := 0; i < 3; i++ {
n, err := f.Read(buf)
if got, want := n, f.counts[i]; got != want {
t.Fatalf("i=%d: got %d, want %d", i, got, want)
}
var wantErr error
if i == 2 {
wantErr = e
}
if err != wantErr {
t.Fatalf("i=%d: got error %v, want %v", i, err, wantErr)
}
if got, want := string(buf[:n]), wants[i]; got != want {
t.Fatalf("i=%d: got %q, want %q", i, got, want)
}
}
}
func TestContentEncodingGzipWithReader(t *testing.T) {
bucketName := "my-bucket"
objectName := "gzip-test"
getAttrsURL := fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", bucketName, objectName)
downloadObjectXMLurl := fmt.Sprintf("/%s/%s", bucketName, objectName)
downloadObjectJSONurl := fmt.Sprintf("/b/%s/o/%s?alt=media&prettyPrint=false&projection=full", bucketName, objectName)
original := bytes.Repeat([]byte("a"), 4<<10)
mockGCS := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.String() {
case getAttrsURL:
fmt.Fprintf(w, `{
"bucket": "bucket", "name": "name", "contentEncoding": "gzip",
"contentLength": 43,
"contentType": "text/plain","timeCreated": "2020-04-10T16:08:58-07:00",
"updated": "2020-04-14T16:08:58-07:00"
}`)
return
case downloadObjectXMLurl, downloadObjectJSONurl:
// Serve back the file.
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Etag", `"c50e3e41c9bc9df34e84c94ce073f928"`)
w.Header().Set("X-Goog-Generation", "1587012235914578")
w.Header().Set("X-Goog-MetaGeneration", "2")
w.Header().Set("X-Goog-Stored-Content-Encoding", "gzip")
w.Header().Set("vary", "Accept-Encoding")
w.Header().Set("x-goog-stored-content-length", "43")
w.Header().Set("x-goog-hash", "crc32c=pYIWwQ==")
w.Header().Set("x-goog-hash", "md5=xQ4+Qcm8nfNOhMlM4HP5KA==")
w.Header().Set("x-goog-storage-class", "STANDARD")
gz := gzip.NewWriter(w)
gz.Write(original)
gz.Close()
default:
fmt.Fprintf(w, "unrecognized URL %s", r.URL)
}
}))
mockGCS.EnableHTTP2 = true
mockGCS.StartTLS()
defer mockGCS.Close()
ctx := context.Background()
hc := mockGCS.Client()
ux, _ := url.Parse(mockGCS.URL)
hc.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true
wrt := &alwaysToTargetURLRoundTripper{
destURL: ux,
hc: hc,
}
whc := &http.Client{Transport: wrt}
// 2. Different flavours of the read should all return the body.
readerCreators := []struct {
name string
create func(ctx context.Context, obj *ObjectHandle) (*Reader, error)
}{
{
"NewReader", func(cxt context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewReader(ctx)
},
},
{
"NewRangeReader(0, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 0, -1)
},
},
{
"NewRangeReader(1kB, 2kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 1<<10, 2<<10)
},
},
{
"NewRangeReader(2kB, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, -1)
},
},
{
"NewRangeReader(2kB, 3kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, 3<<10)
},
},
}
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
for _, tt := range readerCreators {
t.Run(tt.name, func(t *testing.T) {
obj := c.Bucket(bucketName).Object(objectName)
_, err := obj.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
rd, err := tt.create(ctx, obj)
if err != nil {
t.Fatal(err)
}
defer rd.Close()
got, err := ioutil.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
if g, w := got, original; !bytes.Equal(g, w) {
t.Fatalf("Response mismatch\nGot:\n%q\n\nWant:\n%q", g, w)
}
})
}
}, option.WithEndpoint(mockGCS.URL), option.WithoutAuthentication(), option.WithHTTPClient(whc))
}
// alwaysToTargetURLRoundTripper ensures that every single request
// is routed to a target destination. Some requests within the storage
// client by-pass using the provided HTTP client, hence this enforcemenet.
type alwaysToTargetURLRoundTripper struct {
destURL *url.URL
hc *http.Client
}
func (adrt *alwaysToTargetURLRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.URL.Host = adrt.destURL.Host
// Cloud Storage has full control over the response headers for their
// HTTP server but unfortunately we don't, so we have to prune
// the Range header to mimick GCS ignoring Range header:
// https://cloud.google.com/storage/docs/transcoding#range
delete(req.Header, "Range")
return adrt.hc.Do(req)
}
// multiTransportTest initializes fresh clients for each transport, then runs
// given testing function using each transport-specific client, supplying the
// test function with the sub-test instance, the context it was given, the name
// of an existing bucket to use, a bucket name to use for bucket creation, and
// the client to use.
func multiReaderTest(ctx context.Context, t *testing.T, test func(*testing.T, *Client), opts ...option.ClientOption) {
jsonOpts := append(opts, WithJSONReads())
xmlOpts := append(opts, WithXMLReads())
jsonClient, err := NewClient(ctx, jsonOpts...)
if err != nil {
t.Fatal(err)
}
xmlClient, err := NewClient(ctx, xmlOpts...)
if err != nil {
t.Fatal(err)
}
clients := map[string]*Client{
"xmlReads": xmlClient,
"jsonReads": jsonClient,
}
for transport, client := range clients {
t.Run(transport, func(t *testing.T) {
defer client.Close()
test(t, client)
})
}
} | random_line_split | |
reader_test.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"google.golang.org/api/option"
)
const readData = "0123456789"
func TestRangeReader(t *testing.T) {
ctx := context.Background()
hc, close := newTestServer(handleRangeRead)
defer close()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for _, test := range []struct {
offset, length int64
want string
}{
{0, -1, readData},
{0, 10, readData},
{0, 5, readData[:5]},
{1, 3, readData[1:4]},
{6, -1, readData[6:]},
{4, 20, readData[4:]},
{-20, -1, readData},
{-6, -1, readData[4:]},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
gotb, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("%d/%d: got %q, want %q", test.offset, test.length, got, test.want)
}
}
}, option.WithHTTPClient(hc))
}
func handleRangeRead(w http.ResponseWriter, r *http.Request) {
rh := strings.TrimSpace(r.Header.Get("Range"))
data := readData
var from, to int
if rh == "" {
from = 0
to = len(data)
} else {
// assume "bytes=N-", "bytes=-N" or "bytes=N-M"
var err error
i := strings.IndexRune(rh, '=')
j := strings.IndexRune(rh, '-')
hasPositiveStartOffset := i+1 != j
if hasPositiveStartOffset { // The case of "bytes=N-"
from, err = strconv.Atoi(rh[i+1 : j])
} else { // The case of "bytes=-N"
from, err = strconv.Atoi(rh[i+1:])
from += len(data)
if from < 0 {
from = 0
}
}
if err != nil {
w.WriteHeader(500)
return
}
to = len(data)
if hasPositiveStartOffset && j+1 < len(rh) { // The case of "bytes=N-M"
to, err = strconv.Atoi(rh[j+1:])
if err != nil {
w.WriteHeader(500)
return
}
to++ // Range header is inclusive, Go slice is exclusive
}
if from >= len(data) && to != from {
w.WriteHeader(416)
return
}
if from > len(data) {
from = len(data)
}
if to > len(data) {
to = len(data)
}
}
data = data[from:to]
if data != readData {
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, to-1, len(readData)))
w.WriteHeader(http.StatusPartialContent)
}
if _, err := w.Write([]byte(data)); err != nil {
panic(err)
}
}
type http2Error string
func (h http2Error) Error() string {
return string(h)
}
// TestRangeReaderRetry tests Reader resumption logic. It ensures that offset
// and seen bytes are handled correctly so that data is not corrupted.
// This tests only works for the HTTP Reader.
// TODO: Design a similar test for gRPC.
func TestRangeReaderRetry(t *testing.T) {
internalErr := http2Error("blah blah INTERNAL_ERROR")
goawayErr := http2Error("http2: server sent GOAWAY and closed the connection; LastStreamID=15, ErrCode=NO_ERROR, debug=\"load_shed\"")
readBytes := []byte(readData)
hc, close := newTestServer(handleRangeRead)
defer close()
ctx := context.Background()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for i, test := range []struct {
offset, length int64
bodies []fakeReadCloser
want string
}{
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{10}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{5, 2}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{5}, err: internalErr},
{data: readBytes[5:], counts: []int{1, 3}, err: goawayErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: goawayErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 3,
bodies: []fakeReadCloser{
{data: readBytes[1:], counts: []int{1}, err: internalErr},
{data: readBytes[2:], counts: []int{2}, err: io.EOF},
},
want: readData[1:4],
},
{
offset: 4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[4:], counts: []int{1}, err: internalErr},
{data: readBytes[5:], counts: []int{4}, err: internalErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData[4:],
},
{
offset: -4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[6:], counts: []int{1}, err: internalErr},
{data: readBytes[7:], counts: []int{3}, err: io.EOF},
},
want: readData[6:],
},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
b := 0
r.reader = &httpReader{
body: &test.bodies[0],
reopen: func(int64) (*http.Response, error) {
b++
return &http.Response{Body: &test.bodies[b]}, nil
},
}
buf := make([]byte, len(readData)/2)
var gotb []byte
for {
n, err := r.Read(buf)
gotb = append(gotb, buf[:n]...)
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
}
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("#%d: got %q, want %q", i, got, test.want)
}
if r.Attrs.Size != int64(len(readData)) {
t.Errorf("#%d: got Attrs.Size=%q, want %q", i, r.Attrs.Size, len(readData))
}
wantOffset := test.offset
if wantOffset < 0 {
wantOffset += int64(len(readData))
if wantOffset < 0 {
wantOffset = 0
}
}
if got := r.Attrs.StartOffset; got != wantOffset {
t.Errorf("#%d: got Attrs.Offset=%q, want %q", i, got, wantOffset)
}
}
r, err := obj.NewRangeReader(ctx, -100, 10)
if err == nil {
t.Fatal("Expected a non-nil error with negative offset and positive length")
} else if want := "storage: invalid offset"; !strings.HasPrefix(err.Error(), want) {
t.Errorf("Error mismatch\nGot: %q\nWant prefix: %q\n", err.Error(), want)
}
if r != nil {
t.Errorf("Expected nil reader")
}
}, option.WithHTTPClient(hc))
}
type fakeReadCloser struct {
data []byte
counts []int // how much of data to deliver on each read
err error // error to return with last count
d int // current position in data
c int // current position in counts
}
func (f *fakeReadCloser) Close() error |
func (f *fakeReadCloser) Read(buf []byte) (int, error) {
i := f.c
n := 0
if i < len(f.counts) {
n = f.counts[i]
}
var err error
if i >= len(f.counts)-1 {
err = f.err
}
copy(buf, f.data[f.d:f.d+n])
if len(buf) < n {
n = len(buf)
f.counts[i] -= n
err = nil
} else {
f.c++
}
f.d += n
return n, err
}
func TestFakeReadCloser(t *testing.T) {
e := errors.New("")
f := &fakeReadCloser{
data: []byte(readData),
counts: []int{1, 2, 3},
err: e,
}
wants := []string{"0", "12", "345"}
buf := make([]byte, 10)
for i := 0; i < 3; i++ {
n, err := f.Read(buf)
if got, want := n, f.counts[i]; got != want {
t.Fatalf("i=%d: got %d, want %d", i, got, want)
}
var wantErr error
if i == 2 {
wantErr = e
}
if err != wantErr {
t.Fatalf("i=%d: got error %v, want %v", i, err, wantErr)
}
if got, want := string(buf[:n]), wants[i]; got != want {
t.Fatalf("i=%d: got %q, want %q", i, got, want)
}
}
}
func TestContentEncodingGzipWithReader(t *testing.T) {
bucketName := "my-bucket"
objectName := "gzip-test"
getAttrsURL := fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", bucketName, objectName)
downloadObjectXMLurl := fmt.Sprintf("/%s/%s", bucketName, objectName)
downloadObjectJSONurl := fmt.Sprintf("/b/%s/o/%s?alt=media&prettyPrint=false&projection=full", bucketName, objectName)
original := bytes.Repeat([]byte("a"), 4<<10)
mockGCS := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.String() {
case getAttrsURL:
fmt.Fprintf(w, `{
"bucket": "bucket", "name": "name", "contentEncoding": "gzip",
"contentLength": 43,
"contentType": "text/plain","timeCreated": "2020-04-10T16:08:58-07:00",
"updated": "2020-04-14T16:08:58-07:00"
}`)
return
case downloadObjectXMLurl, downloadObjectJSONurl:
// Serve back the file.
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Etag", `"c50e3e41c9bc9df34e84c94ce073f928"`)
w.Header().Set("X-Goog-Generation", "1587012235914578")
w.Header().Set("X-Goog-MetaGeneration", "2")
w.Header().Set("X-Goog-Stored-Content-Encoding", "gzip")
w.Header().Set("vary", "Accept-Encoding")
w.Header().Set("x-goog-stored-content-length", "43")
w.Header().Set("x-goog-hash", "crc32c=pYIWwQ==")
w.Header().Set("x-goog-hash", "md5=xQ4+Qcm8nfNOhMlM4HP5KA==")
w.Header().Set("x-goog-storage-class", "STANDARD")
gz := gzip.NewWriter(w)
gz.Write(original)
gz.Close()
default:
fmt.Fprintf(w, "unrecognized URL %s", r.URL)
}
}))
mockGCS.EnableHTTP2 = true
mockGCS.StartTLS()
defer mockGCS.Close()
ctx := context.Background()
hc := mockGCS.Client()
ux, _ := url.Parse(mockGCS.URL)
hc.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true
wrt := &alwaysToTargetURLRoundTripper{
destURL: ux,
hc: hc,
}
whc := &http.Client{Transport: wrt}
// 2. Different flavours of the read should all return the body.
readerCreators := []struct {
name string
create func(ctx context.Context, obj *ObjectHandle) (*Reader, error)
}{
{
"NewReader", func(cxt context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewReader(ctx)
},
},
{
"NewRangeReader(0, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 0, -1)
},
},
{
"NewRangeReader(1kB, 2kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 1<<10, 2<<10)
},
},
{
"NewRangeReader(2kB, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, -1)
},
},
{
"NewRangeReader(2kB, 3kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, 3<<10)
},
},
}
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
for _, tt := range readerCreators {
t.Run(tt.name, func(t *testing.T) {
obj := c.Bucket(bucketName).Object(objectName)
_, err := obj.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
rd, err := tt.create(ctx, obj)
if err != nil {
t.Fatal(err)
}
defer rd.Close()
got, err := ioutil.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
if g, w := got, original; !bytes.Equal(g, w) {
t.Fatalf("Response mismatch\nGot:\n%q\n\nWant:\n%q", g, w)
}
})
}
}, option.WithEndpoint(mockGCS.URL), option.WithoutAuthentication(), option.WithHTTPClient(whc))
}
// alwaysToTargetURLRoundTripper ensures that every single request
// is routed to a target destination. Some requests within the storage
// client by-pass using the provided HTTP client, hence this enforcemenet.
type alwaysToTargetURLRoundTripper struct {
destURL *url.URL
hc *http.Client
}
func (adrt *alwaysToTargetURLRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.URL.Host = adrt.destURL.Host
// Cloud Storage has full control over the response headers for their
// HTTP server but unfortunately we don't, so we have to prune
// the Range header to mimick GCS ignoring Range header:
// https://cloud.google.com/storage/docs/transcoding#range
delete(req.Header, "Range")
return adrt.hc.Do(req)
}
// multiTransportTest initializes fresh clients for each transport, then runs
// given testing function using each transport-specific client, supplying the
// test function with the sub-test instance, the context it was given, the name
// of an existing bucket to use, a bucket name to use for bucket creation, and
// the client to use.
func multiReaderTest(ctx context.Context, t *testing.T, test func(*testing.T, *Client), opts ...option.ClientOption) {
jsonOpts := append(opts, WithJSONReads())
xmlOpts := append(opts, WithXMLReads())
jsonClient, err := NewClient(ctx, jsonOpts...)
if err != nil {
t.Fatal(err)
}
xmlClient, err := NewClient(ctx, xmlOpts...)
if err != nil {
t.Fatal(err)
}
clients := map[string]*Client{
"xmlReads": xmlClient,
"jsonReads": jsonClient,
}
for transport, client := range clients {
t.Run(transport, func(t *testing.T) {
defer client.Close()
test(t, client)
})
}
}
| {
return nil
} | identifier_body |
reader_test.go | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"google.golang.org/api/option"
)
const readData = "0123456789"
func TestRangeReader(t *testing.T) {
ctx := context.Background()
hc, close := newTestServer(handleRangeRead)
defer close()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for _, test := range []struct {
offset, length int64
want string
}{
{0, -1, readData},
{0, 10, readData},
{0, 5, readData[:5]},
{1, 3, readData[1:4]},
{6, -1, readData[6:]},
{4, 20, readData[4:]},
{-20, -1, readData},
{-6, -1, readData[4:]},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
gotb, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("%d/%d: %v", test.offset, test.length, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("%d/%d: got %q, want %q", test.offset, test.length, got, test.want)
}
}
}, option.WithHTTPClient(hc))
}
func handleRangeRead(w http.ResponseWriter, r *http.Request) {
rh := strings.TrimSpace(r.Header.Get("Range"))
data := readData
var from, to int
if rh == "" {
from = 0
to = len(data)
} else {
// assume "bytes=N-", "bytes=-N" or "bytes=N-M"
var err error
i := strings.IndexRune(rh, '=')
j := strings.IndexRune(rh, '-')
hasPositiveStartOffset := i+1 != j
if hasPositiveStartOffset { // The case of "bytes=N-"
from, err = strconv.Atoi(rh[i+1 : j])
} else { // The case of "bytes=-N"
from, err = strconv.Atoi(rh[i+1:])
from += len(data)
if from < 0 {
from = 0
}
}
if err != nil {
w.WriteHeader(500)
return
}
to = len(data)
if hasPositiveStartOffset && j+1 < len(rh) { // The case of "bytes=N-M"
to, err = strconv.Atoi(rh[j+1:])
if err != nil {
w.WriteHeader(500)
return
}
to++ // Range header is inclusive, Go slice is exclusive
}
if from >= len(data) && to != from {
w.WriteHeader(416)
return
}
if from > len(data) {
from = len(data)
}
if to > len(data) {
to = len(data)
}
}
data = data[from:to]
if data != readData {
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, to-1, len(readData)))
w.WriteHeader(http.StatusPartialContent)
}
if _, err := w.Write([]byte(data)); err != nil {
panic(err)
}
}
type http2Error string
func (h http2Error) Error() string {
return string(h)
}
// TestRangeReaderRetry tests Reader resumption logic. It ensures that offset
// and seen bytes are handled correctly so that data is not corrupted.
// This tests only works for the HTTP Reader.
// TODO: Design a similar test for gRPC.
func TestRangeReaderRetry(t *testing.T) {
internalErr := http2Error("blah blah INTERNAL_ERROR")
goawayErr := http2Error("http2: server sent GOAWAY and closed the connection; LastStreamID=15, ErrCode=NO_ERROR, debug=\"load_shed\"")
readBytes := []byte(readData)
hc, close := newTestServer(handleRangeRead)
defer close()
ctx := context.Background()
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
obj := c.Bucket("b").Object("o")
for i, test := range []struct {
offset, length int64
bodies []fakeReadCloser
want string
}{
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{10}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{5, 2}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{5}, err: internalErr},
{data: readBytes[5:], counts: []int{1, 3}, err: goawayErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData,
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 0,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: goawayErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 5,
bodies: []fakeReadCloser{
{data: readBytes, counts: []int{3}, err: internalErr},
{data: readBytes[3:], counts: []int{2}, err: io.EOF},
},
want: readData[:5],
},
{
offset: 1,
length: 3,
bodies: []fakeReadCloser{
{data: readBytes[1:], counts: []int{1}, err: internalErr},
{data: readBytes[2:], counts: []int{2}, err: io.EOF},
},
want: readData[1:4],
},
{
offset: 4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[4:], counts: []int{1}, err: internalErr},
{data: readBytes[5:], counts: []int{4}, err: internalErr},
{data: readBytes[9:], counts: []int{1}, err: io.EOF},
},
want: readData[4:],
},
{
offset: -4,
length: -1,
bodies: []fakeReadCloser{
{data: readBytes[6:], counts: []int{1}, err: internalErr},
{data: readBytes[7:], counts: []int{3}, err: io.EOF},
},
want: readData[6:],
},
} {
r, err := obj.NewRangeReader(ctx, test.offset, test.length)
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
b := 0
r.reader = &httpReader{
body: &test.bodies[0],
reopen: func(int64) (*http.Response, error) {
b++
return &http.Response{Body: &test.bodies[b]}, nil
},
}
buf := make([]byte, len(readData)/2)
var gotb []byte
for {
n, err := r.Read(buf)
gotb = append(gotb, buf[:n]...)
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
}
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if got := string(gotb); got != test.want {
t.Errorf("#%d: got %q, want %q", i, got, test.want)
}
if r.Attrs.Size != int64(len(readData)) {
t.Errorf("#%d: got Attrs.Size=%q, want %q", i, r.Attrs.Size, len(readData))
}
wantOffset := test.offset
if wantOffset < 0 {
wantOffset += int64(len(readData))
if wantOffset < 0 {
wantOffset = 0
}
}
if got := r.Attrs.StartOffset; got != wantOffset {
t.Errorf("#%d: got Attrs.Offset=%q, want %q", i, got, wantOffset)
}
}
r, err := obj.NewRangeReader(ctx, -100, 10)
if err == nil {
t.Fatal("Expected a non-nil error with negative offset and positive length")
} else if want := "storage: invalid offset"; !strings.HasPrefix(err.Error(), want) {
t.Errorf("Error mismatch\nGot: %q\nWant prefix: %q\n", err.Error(), want)
}
if r != nil {
t.Errorf("Expected nil reader")
}
}, option.WithHTTPClient(hc))
}
type fakeReadCloser struct {
data []byte
counts []int // how much of data to deliver on each read
err error // error to return with last count
d int // current position in data
c int // current position in counts
}
func (f *fakeReadCloser) Close() error {
return nil
}
func (f *fakeReadCloser) Read(buf []byte) (int, error) {
i := f.c
n := 0
if i < len(f.counts) {
n = f.counts[i]
}
var err error
if i >= len(f.counts)-1 {
err = f.err
}
copy(buf, f.data[f.d:f.d+n])
if len(buf) < n {
n = len(buf)
f.counts[i] -= n
err = nil
} else {
f.c++
}
f.d += n
return n, err
}
func | (t *testing.T) {
e := errors.New("")
f := &fakeReadCloser{
data: []byte(readData),
counts: []int{1, 2, 3},
err: e,
}
wants := []string{"0", "12", "345"}
buf := make([]byte, 10)
for i := 0; i < 3; i++ {
n, err := f.Read(buf)
if got, want := n, f.counts[i]; got != want {
t.Fatalf("i=%d: got %d, want %d", i, got, want)
}
var wantErr error
if i == 2 {
wantErr = e
}
if err != wantErr {
t.Fatalf("i=%d: got error %v, want %v", i, err, wantErr)
}
if got, want := string(buf[:n]), wants[i]; got != want {
t.Fatalf("i=%d: got %q, want %q", i, got, want)
}
}
}
func TestContentEncodingGzipWithReader(t *testing.T) {
bucketName := "my-bucket"
objectName := "gzip-test"
getAttrsURL := fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", bucketName, objectName)
downloadObjectXMLurl := fmt.Sprintf("/%s/%s", bucketName, objectName)
downloadObjectJSONurl := fmt.Sprintf("/b/%s/o/%s?alt=media&prettyPrint=false&projection=full", bucketName, objectName)
original := bytes.Repeat([]byte("a"), 4<<10)
mockGCS := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.String() {
case getAttrsURL:
fmt.Fprintf(w, `{
"bucket": "bucket", "name": "name", "contentEncoding": "gzip",
"contentLength": 43,
"contentType": "text/plain","timeCreated": "2020-04-10T16:08:58-07:00",
"updated": "2020-04-14T16:08:58-07:00"
}`)
return
case downloadObjectXMLurl, downloadObjectJSONurl:
// Serve back the file.
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Content-Encoding", "gzip")
w.Header().Set("Etag", `"c50e3e41c9bc9df34e84c94ce073f928"`)
w.Header().Set("X-Goog-Generation", "1587012235914578")
w.Header().Set("X-Goog-MetaGeneration", "2")
w.Header().Set("X-Goog-Stored-Content-Encoding", "gzip")
w.Header().Set("vary", "Accept-Encoding")
w.Header().Set("x-goog-stored-content-length", "43")
w.Header().Set("x-goog-hash", "crc32c=pYIWwQ==")
w.Header().Set("x-goog-hash", "md5=xQ4+Qcm8nfNOhMlM4HP5KA==")
w.Header().Set("x-goog-storage-class", "STANDARD")
gz := gzip.NewWriter(w)
gz.Write(original)
gz.Close()
default:
fmt.Fprintf(w, "unrecognized URL %s", r.URL)
}
}))
mockGCS.EnableHTTP2 = true
mockGCS.StartTLS()
defer mockGCS.Close()
ctx := context.Background()
hc := mockGCS.Client()
ux, _ := url.Parse(mockGCS.URL)
hc.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true
wrt := &alwaysToTargetURLRoundTripper{
destURL: ux,
hc: hc,
}
whc := &http.Client{Transport: wrt}
// 2. Different flavours of the read should all return the body.
readerCreators := []struct {
name string
create func(ctx context.Context, obj *ObjectHandle) (*Reader, error)
}{
{
"NewReader", func(cxt context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewReader(ctx)
},
},
{
"NewRangeReader(0, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 0, -1)
},
},
{
"NewRangeReader(1kB, 2kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 1<<10, 2<<10)
},
},
{
"NewRangeReader(2kB, -1)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, -1)
},
},
{
"NewRangeReader(2kB, 3kB)",
func(ctx context.Context, obj *ObjectHandle) (*Reader, error) {
return obj.NewRangeReader(ctx, 2<<10, 3<<10)
},
},
}
multiReaderTest(ctx, t, func(t *testing.T, c *Client) {
for _, tt := range readerCreators {
t.Run(tt.name, func(t *testing.T) {
obj := c.Bucket(bucketName).Object(objectName)
_, err := obj.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
rd, err := tt.create(ctx, obj)
if err != nil {
t.Fatal(err)
}
defer rd.Close()
got, err := ioutil.ReadAll(rd)
if err != nil {
t.Fatal(err)
}
if g, w := got, original; !bytes.Equal(g, w) {
t.Fatalf("Response mismatch\nGot:\n%q\n\nWant:\n%q", g, w)
}
})
}
}, option.WithEndpoint(mockGCS.URL), option.WithoutAuthentication(), option.WithHTTPClient(whc))
}
// alwaysToTargetURLRoundTripper ensures that every single request
// is routed to a target destination. Some requests within the storage
// client by-pass using the provided HTTP client, hence this enforcemenet.
type alwaysToTargetURLRoundTripper struct {
destURL *url.URL
hc *http.Client
}
func (adrt *alwaysToTargetURLRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.URL.Host = adrt.destURL.Host
// Cloud Storage has full control over the response headers for their
// HTTP server but unfortunately we don't, so we have to prune
// the Range header to mimick GCS ignoring Range header:
// https://cloud.google.com/storage/docs/transcoding#range
delete(req.Header, "Range")
return adrt.hc.Do(req)
}
// multiTransportTest initializes fresh clients for each transport, then runs
// given testing function using each transport-specific client, supplying the
// test function with the sub-test instance, the context it was given, the name
// of an existing bucket to use, a bucket name to use for bucket creation, and
// the client to use.
func multiReaderTest(ctx context.Context, t *testing.T, test func(*testing.T, *Client), opts ...option.ClientOption) {
jsonOpts := append(opts, WithJSONReads())
xmlOpts := append(opts, WithXMLReads())
jsonClient, err := NewClient(ctx, jsonOpts...)
if err != nil {
t.Fatal(err)
}
xmlClient, err := NewClient(ctx, xmlOpts...)
if err != nil {
t.Fatal(err)
}
clients := map[string]*Client{
"xmlReads": xmlClient,
"jsonReads": jsonClient,
}
for transport, client := range clients {
t.Run(transport, func(t *testing.T) {
defer client.Close()
test(t, client)
})
}
}
| TestFakeReadCloser | identifier_name |
moment-ext.ts | import * as moment from 'moment'
import * as $ from 'jquery'
import { isNativeDate } from './util'
/*
GENERAL NOTE on moments throughout the *entire rest* of the codebase:
All moments are assumed to be ambiguously-zoned unless otherwise noted,
with the NOTABLE EXCEOPTION of start/end dates that live on *Event Objects*.
Ambiguously-TIMED moments are assumed to be ambiguously-zoned by nature.
*/
declare module 'moment' {
interface Moment {
hasTime(): boolean
time(): moment.Duration
stripZone()
stripTime()
}
}
let ambigDateOfMonthRegex = /^\s*\d{4}-\d\d$/
let ambigTimeOrZoneRegex =
/^\s*\d{4}-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d+)?)?)?)?)?$/
let newMomentProto: any = moment.fn // where we will attach our new methods
let oldMomentProto = $.extend({}, newMomentProto) // copy of original moment methods
// tell momentjs to transfer these properties upon clone
let momentProperties = (moment as any).momentProperties
momentProperties.push('_fullCalendar')
momentProperties.push('_ambigTime')
momentProperties.push('_ambigZone')
/*
Call this if you want Moment's original format method to be used
*/
function oldMomentFormat(mom, formatStr?) {
return oldMomentProto.format.call(mom, formatStr) // oldMomentProto defined in moment-ext.js
}
export { newMomentProto, oldMomentProto, oldMomentFormat }
// Creating
// -------------------------------------------------------------------------------------------------
// Creates a new moment, similar to the vanilla moment(...) constructor, but with
// extra features (ambiguous time, enhanced formatting). When given an existing moment,
// it will function as a clone (and retain the zone of the moment). Anything else will
// result in a moment in the local zone.
const momentExt: any = function() {
return makeMoment(arguments)
}
export default momentExt
// Sames as momentExt, but forces the resulting moment to be in the UTC timezone.
momentExt.utc = function() {
let mom = makeMoment(arguments, true)
// Force it into UTC because makeMoment doesn't guarantee it
// (if given a pre-existing moment for example)
if (mom.hasTime()) { // don't give ambiguously-timed moments a UTC zone
mom.utc()
}
return mom
}
// Same as momentExt, but when given an ISO8601 string, the timezone offset is preserved.
// ISO8601 strings with no timezone offset will become ambiguously zoned.
momentExt.parseZone = function() {
return makeMoment(arguments, true, true)
}
// Builds an enhanced moment from args. When given an existing moment, it clones. When given a
// native Date, or called with no arguments (the current time), the resulting moment will be local.
// Anything else needs to be "parsed" (a string or an array), and will be affected by:
// parseAsUTC - if there is no zone information, should we parse the input in UTC?
// parseZone - if there is zone information, should we force the zone of the moment?
function makeMoment(args, parseAsUTC= false, parseZone= false) {
let input = args[0]
let isSingleString = args.length === 1 && typeof input === 'string'
let isAmbigTime
let isAmbigZone
let ambigMatch
let mom
if (moment.isMoment(input) || isNativeDate(input) || input === undefined) {
mom = moment.apply(null, args)
} else { // "parsing" is required
isAmbigTime = false
isAmbigZone = false
if (isSingleString) {
if (ambigDateOfMonthRegex.test(input)) {
// accept strings like '2014-05', but convert to the first of the month
input += '-01'
args = [ input ] // for when we pass it on to moment's constructor
isAmbigTime = true
isAmbigZone = true
} else if ((ambigMatch = ambigTimeOrZoneRegex.exec(input))) {
isAmbigTime = !ambigMatch[5] // no time part?
isAmbigZone = true
}
} else if ($.isArray(input)) {
// arrays have no timezone information, so assume ambiguous zone
isAmbigZone = true
}
// otherwise, probably a string with a format
if (parseAsUTC || isAmbigTime) {
mom = moment.utc.apply(moment, args)
} else {
mom = moment.apply(null, args)
}
if (isAmbigTime) {
mom._ambigTime = true
mom._ambigZone = true // ambiguous time always means ambiguous zone
} else if (parseZone) { // let's record the inputted zone somehow
if (isAmbigZone) {
mom._ambigZone = true
} else if (isSingleString) {
mom.utcOffset(input) // if not a valid zone, will assign UTC
}
}
}
mom._fullCalendar = true // flag for extended functionality
return mom
}
// Week Number
// -------------------------------------------------------------------------------------------------
// Returns the week number, considering the locale's custom week number calcuation
// `weeks` is an alias for `week`
newMomentProto.week = newMomentProto.weeks = function(input) {
let weekCalc = this._locale._fullCalendar_weekCalc
if (input == null && typeof weekCalc === 'function') { // custom function only works for getter
return weekCalc(this)
} else if (weekCalc === 'ISO') {
return oldMomentProto.isoWeek.apply(this, arguments) // ISO getter/setter
}
return oldMomentProto.week.apply(this, arguments) // local getter/setter
}
// Time-of-day
// -------------------------------------------------------------------------------------------------
// GETTER
// Returns a Duration with the hours/minutes/seconds/ms values of the moment.
// If the moment has an ambiguous time, a duration of 00:00 will be returned.
//
// SETTER
// You can supply a Duration, a Moment, or a Duration-like argument.
// When setting the time, and the moment has an ambiguous time, it then becomes unambiguous.
newMomentProto.time = function(time) {
// Fallback to the original method (if there is one) if this moment wasn't created via FullCalendar.
// `time` is a generic enough method name where this precaution is necessary to avoid collisions w/ other plugins.
if (!this._fullCalendar) {
return oldMomentProto.time.apply(this, arguments)
}
if (time == null) { // getter
return moment.duration({
hours: this.hours(),
minutes: this.minutes(),
seconds: this.seconds(),
milliseconds: this.milliseconds()
})
} else { // setter
this._ambigTime = false // mark that the moment now has a time
if (!moment.isDuration(time) && !moment.isMoment(time)) {
time = moment.duration(time)
}
// The day value should cause overflow (so 24 hours becomes 00:00:00 of next day).
// Only for Duration times, not Moment times.
let dayHours = 0
if (moment.isDuration(time)) {
dayHours = Math.floor(time.asDays()) * 24
}
// We need to set the individual fields.
// Can't use startOf('day') then add duration. In case of DST at start of day.
return this.hours(dayHours + time.hours())
.minutes(time.minutes())
.seconds(time.seconds())
.milliseconds(time.milliseconds())
}
}
// Converts the moment to UTC, stripping out its time-of-day and timezone offset,
// but preserving its YMD. A moment with a stripped time will display no time
// nor timezone offset when .format() is called.
newMomentProto.stripTime = function() {
if (!this._ambigTime) {
this.utc(true) // keepLocalTime=true (for keeping *date* value)
// set time to zero
this.set({
hours: 0, | seconds: 0,
ms: 0
})
// Mark the time as ambiguous. This needs to happen after the .utc() call, which might call .utcOffset(),
// which clears all ambig flags.
this._ambigTime = true
this._ambigZone = true // if ambiguous time, also ambiguous timezone offset
}
return this // for chaining
}
// Returns if the moment has a non-ambiguous time (boolean)
newMomentProto.hasTime = function() {
return !this._ambigTime
}
// Timezone
// -------------------------------------------------------------------------------------------------
// Converts the moment to UTC, stripping out its timezone offset, but preserving its
// YMD and time-of-day. A moment with a stripped timezone offset will display no
// timezone offset when .format() is called.
newMomentProto.stripZone = function() {
let wasAmbigTime
if (!this._ambigZone) {
wasAmbigTime = this._ambigTime
this.utc(true) // keepLocalTime=true (for keeping date and time values)
// the above call to .utc()/.utcOffset() unfortunately might clear the ambig flags, so restore
this._ambigTime = wasAmbigTime || false
// Mark the zone as ambiguous. This needs to happen after the .utc() call, which might call .utcOffset(),
// which clears the ambig flags.
this._ambigZone = true
}
return this // for chaining
}
// Returns of the moment has a non-ambiguous timezone offset (boolean)
newMomentProto.hasZone = function() {
return !this._ambigZone
}
// implicitly marks a zone
newMomentProto.local = function(keepLocalTime) {
// for when converting from ambiguously-zoned to local,
// keep the time values when converting from UTC -> local
oldMomentProto.local.call(this, this._ambigZone || keepLocalTime)
// ensure non-ambiguous
// this probably already happened via local() -> utcOffset(), but don't rely on Moment's internals
this._ambigTime = false
this._ambigZone = false
return this // for chaining
}
// implicitly marks a zone
newMomentProto.utc = function(keepLocalTime) {
oldMomentProto.utc.call(this, keepLocalTime)
// ensure non-ambiguous
// this probably already happened via utc() -> utcOffset(), but don't rely on Moment's internals
this._ambigTime = false
this._ambigZone = false
return this
}
// implicitly marks a zone (will probably get called upon .utc() and .local())
newMomentProto.utcOffset = function(tzo) {
if (tzo != null) { // setter
// these assignments needs to happen before the original zone method is called.
// I forget why, something to do with a browser crash.
this._ambigTime = false
this._ambigZone = false
}
return oldMomentProto.utcOffset.apply(this, arguments)
} | minutes: 0, | random_line_split |
moment-ext.ts | import * as moment from 'moment'
import * as $ from 'jquery'
import { isNativeDate } from './util'
/*
GENERAL NOTE on moments throughout the *entire rest* of the codebase:
All moments are assumed to be ambiguously-zoned unless otherwise noted,
with the NOTABLE EXCEOPTION of start/end dates that live on *Event Objects*.
Ambiguously-TIMED moments are assumed to be ambiguously-zoned by nature.
*/
declare module 'moment' {
interface Moment {
hasTime(): boolean
time(): moment.Duration
stripZone()
stripTime()
}
}
let ambigDateOfMonthRegex = /^\s*\d{4}-\d\d$/
let ambigTimeOrZoneRegex =
/^\s*\d{4}-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d+)?)?)?)?)?$/
let newMomentProto: any = moment.fn // where we will attach our new methods
let oldMomentProto = $.extend({}, newMomentProto) // copy of original moment methods
// tell momentjs to transfer these properties upon clone
let momentProperties = (moment as any).momentProperties
momentProperties.push('_fullCalendar')
momentProperties.push('_ambigTime')
momentProperties.push('_ambigZone')
/*
Call this if you want Moment's original format method to be used
*/
function | (mom, formatStr?) {
return oldMomentProto.format.call(mom, formatStr) // oldMomentProto defined in moment-ext.js
}
export { newMomentProto, oldMomentProto, oldMomentFormat }
// Creating
// -------------------------------------------------------------------------------------------------
// Creates a new moment, similar to the vanilla moment(...) constructor, but with
// extra features (ambiguous time, enhanced formatting). When given an existing moment,
// it will function as a clone (and retain the zone of the moment). Anything else will
// result in a moment in the local zone.
const momentExt: any = function() {
return makeMoment(arguments)
}
export default momentExt
// Sames as momentExt, but forces the resulting moment to be in the UTC timezone.
momentExt.utc = function() {
let mom = makeMoment(arguments, true)
// Force it into UTC because makeMoment doesn't guarantee it
// (if given a pre-existing moment for example)
if (mom.hasTime()) { // don't give ambiguously-timed moments a UTC zone
mom.utc()
}
return mom
}
// Same as momentExt, but when given an ISO8601 string, the timezone offset is preserved.
// ISO8601 strings with no timezone offset will become ambiguously zoned.
momentExt.parseZone = function() {
return makeMoment(arguments, true, true)
}
// Builds an enhanced moment from args. When given an existing moment, it clones. When given a
// native Date, or called with no arguments (the current time), the resulting moment will be local.
// Anything else needs to be "parsed" (a string or an array), and will be affected by:
// parseAsUTC - if there is no zone information, should we parse the input in UTC?
// parseZone - if there is zone information, should we force the zone of the moment?
function makeMoment(args, parseAsUTC= false, parseZone= false) {
let input = args[0]
let isSingleString = args.length === 1 && typeof input === 'string'
let isAmbigTime
let isAmbigZone
let ambigMatch
let mom
if (moment.isMoment(input) || isNativeDate(input) || input === undefined) {
mom = moment.apply(null, args)
} else { // "parsing" is required
isAmbigTime = false
isAmbigZone = false
if (isSingleString) {
if (ambigDateOfMonthRegex.test(input)) {
// accept strings like '2014-05', but convert to the first of the month
input += '-01'
args = [ input ] // for when we pass it on to moment's constructor
isAmbigTime = true
isAmbigZone = true
} else if ((ambigMatch = ambigTimeOrZoneRegex.exec(input))) {
isAmbigTime = !ambigMatch[5] // no time part?
isAmbigZone = true
}
} else if ($.isArray(input)) {
// arrays have no timezone information, so assume ambiguous zone
isAmbigZone = true
}
// otherwise, probably a string with a format
if (parseAsUTC || isAmbigTime) {
mom = moment.utc.apply(moment, args)
} else {
mom = moment.apply(null, args)
}
if (isAmbigTime) {
mom._ambigTime = true
mom._ambigZone = true // ambiguous time always means ambiguous zone
} else if (parseZone) { // let's record the inputted zone somehow
if (isAmbigZone) {
mom._ambigZone = true
} else if (isSingleString) {
mom.utcOffset(input) // if not a valid zone, will assign UTC
}
}
}
mom._fullCalendar = true // flag for extended functionality
return mom
}
// Week Number
// -------------------------------------------------------------------------------------------------
// Returns the week number, considering the locale's custom week number calcuation
// `weeks` is an alias for `week`
newMomentProto.week = newMomentProto.weeks = function(input) {
let weekCalc = this._locale._fullCalendar_weekCalc
if (input == null && typeof weekCalc === 'function') { // custom function only works for getter
return weekCalc(this)
} else if (weekCalc === 'ISO') {
return oldMomentProto.isoWeek.apply(this, arguments) // ISO getter/setter
}
return oldMomentProto.week.apply(this, arguments) // local getter/setter
}
// Time-of-day
// -------------------------------------------------------------------------------------------------
// GETTER
// Returns a Duration with the hours/minutes/seconds/ms values of the moment.
// If the moment has an ambiguous time, a duration of 00:00 will be returned.
//
// SETTER
// You can supply a Duration, a Moment, or a Duration-like argument.
// When setting the time, and the moment has an ambiguous time, it then becomes unambiguous.
newMomentProto.time = function(time) {
// Fallback to the original method (if there is one) if this moment wasn't created via FullCalendar.
// `time` is a generic enough method name where this precaution is necessary to avoid collisions w/ other plugins.
if (!this._fullCalendar) {
return oldMomentProto.time.apply(this, arguments)
}
if (time == null) { // getter
return moment.duration({
hours: this.hours(),
minutes: this.minutes(),
seconds: this.seconds(),
milliseconds: this.milliseconds()
})
} else { // setter
this._ambigTime = false // mark that the moment now has a time
if (!moment.isDuration(time) && !moment.isMoment(time)) {
time = moment.duration(time)
}
// The day value should cause overflow (so 24 hours becomes 00:00:00 of next day).
// Only for Duration times, not Moment times.
let dayHours = 0
if (moment.isDuration(time)) {
dayHours = Math.floor(time.asDays()) * 24
}
// We need to set the individual fields.
// Can't use startOf('day') then add duration. In case of DST at start of day.
return this.hours(dayHours + time.hours())
.minutes(time.minutes())
.seconds(time.seconds())
.milliseconds(time.milliseconds())
}
}
// Converts the moment to UTC, stripping out its time-of-day and timezone offset,
// but preserving its YMD. A moment with a stripped time will display no time
// nor timezone offset when .format() is called.
newMomentProto.stripTime = function() {
if (!this._ambigTime) {
this.utc(true) // keepLocalTime=true (for keeping *date* value)
// set time to zero
this.set({
hours: 0,
minutes: 0,
seconds: 0,
ms: 0
})
// Mark the time as ambiguous. This needs to happen after the .utc() call, which might call .utcOffset(),
// which clears all ambig flags.
this._ambigTime = true
this._ambigZone = true // if ambiguous time, also ambiguous timezone offset
}
return this // for chaining
}
// Returns if the moment has a non-ambiguous time (boolean)
newMomentProto.hasTime = function() {
return !this._ambigTime
}
// Timezone
// -------------------------------------------------------------------------------------------------
// Converts the moment to UTC, stripping out its timezone offset, but preserving its
// YMD and time-of-day. A moment with a stripped timezone offset will display no
// timezone offset when .format() is called.
newMomentProto.stripZone = function() {
let wasAmbigTime
if (!this._ambigZone) {
wasAmbigTime = this._ambigTime
this.utc(true) // keepLocalTime=true (for keeping date and time values)
// the above call to .utc()/.utcOffset() unfortunately might clear the ambig flags, so restore
this._ambigTime = wasAmbigTime || false
// Mark the zone as ambiguous. This needs to happen after the .utc() call, which might call .utcOffset(),
// which clears the ambig flags.
this._ambigZone = true
}
return this // for chaining
}
// Returns of the moment has a non-ambiguous timezone offset (boolean)
newMomentProto.hasZone = function() {
return !this._ambigZone
}
// implicitly marks a zone
newMomentProto.local = function(keepLocalTime) {
// for when converting from ambiguously-zoned to local,
// keep the time values when converting from UTC -> local
oldMomentProto.local.call(this, this._ambigZone || keepLocalTime)
// ensure non-ambiguous
// this probably already happened via local() -> utcOffset(), but don't rely on Moment's internals
this._ambigTime = false
this._ambigZone = false
return this // for chaining
}
// implicitly marks a zone
newMomentProto.utc = function(keepLocalTime) {
oldMomentProto.utc.call(this, keepLocalTime)
// ensure non-ambiguous
// this probably already happened via utc() -> utcOffset(), but don't rely on Moment's internals
this._ambigTime = false
this._ambigZone = false
return this
}
// implicitly marks a zone (will probably get called upon .utc() and .local())
newMomentProto.utcOffset = function(tzo) {
if (tzo != null) { // setter
// these assignments needs to happen before the original zone method is called.
// I forget why, something to do with a browser crash.
this._ambigTime = false
this._ambigZone = false
}
return oldMomentProto.utcOffset.apply(this, arguments)
}
| oldMomentFormat | identifier_name |
moment-ext.ts | import * as moment from 'moment'
import * as $ from 'jquery'
import { isNativeDate } from './util'
/*
GENERAL NOTE on moments throughout the *entire rest* of the codebase:
All moments are assumed to be ambiguously-zoned unless otherwise noted,
with the NOTABLE EXCEOPTION of start/end dates that live on *Event Objects*.
Ambiguously-TIMED moments are assumed to be ambiguously-zoned by nature.
*/
declare module 'moment' {
interface Moment {
hasTime(): boolean
time(): moment.Duration
stripZone()
stripTime()
}
}
let ambigDateOfMonthRegex = /^\s*\d{4}-\d\d$/
let ambigTimeOrZoneRegex =
/^\s*\d{4}-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d+)?)?)?)?)?$/
let newMomentProto: any = moment.fn // where we will attach our new methods
let oldMomentProto = $.extend({}, newMomentProto) // copy of original moment methods
// tell momentjs to transfer these properties upon clone
let momentProperties = (moment as any).momentProperties
momentProperties.push('_fullCalendar')
momentProperties.push('_ambigTime')
momentProperties.push('_ambigZone')
/*
Call this if you want Moment's original format method to be used
*/
function oldMomentFormat(mom, formatStr?) {
return oldMomentProto.format.call(mom, formatStr) // oldMomentProto defined in moment-ext.js
}
export { newMomentProto, oldMomentProto, oldMomentFormat }
// Creating
// -------------------------------------------------------------------------------------------------
// Creates a new moment, similar to the vanilla moment(...) constructor, but with
// extra features (ambiguous time, enhanced formatting). When given an existing moment,
// it will function as a clone (and retain the zone of the moment). Anything else will
// result in a moment in the local zone.
const momentExt: any = function() {
return makeMoment(arguments)
}
export default momentExt
// Sames as momentExt, but forces the resulting moment to be in the UTC timezone.
momentExt.utc = function() {
let mom = makeMoment(arguments, true)
// Force it into UTC because makeMoment doesn't guarantee it
// (if given a pre-existing moment for example)
if (mom.hasTime()) { // don't give ambiguously-timed moments a UTC zone
mom.utc()
}
return mom
}
// Same as momentExt, but when given an ISO8601 string, the timezone offset is preserved.
// ISO8601 strings with no timezone offset will become ambiguously zoned.
momentExt.parseZone = function() {
return makeMoment(arguments, true, true)
}
// Builds an enhanced moment from args. When given an existing moment, it clones. When given a
// native Date, or called with no arguments (the current time), the resulting moment will be local.
// Anything else needs to be "parsed" (a string or an array), and will be affected by:
// parseAsUTC - if there is no zone information, should we parse the input in UTC?
// parseZone - if there is zone information, should we force the zone of the moment?
function makeMoment(args, parseAsUTC= false, parseZone= false) |
// Week Number
// -------------------------------------------------------------------------------------------------
// Returns the week number, considering the locale's custom week number calcuation
// `weeks` is an alias for `week`
newMomentProto.week = newMomentProto.weeks = function(input) {
let weekCalc = this._locale._fullCalendar_weekCalc
if (input == null && typeof weekCalc === 'function') { // custom function only works for getter
return weekCalc(this)
} else if (weekCalc === 'ISO') {
return oldMomentProto.isoWeek.apply(this, arguments) // ISO getter/setter
}
return oldMomentProto.week.apply(this, arguments) // local getter/setter
}
// Time-of-day
// -------------------------------------------------------------------------------------------------
// GETTER
// Returns a Duration with the hours/minutes/seconds/ms values of the moment.
// If the moment has an ambiguous time, a duration of 00:00 will be returned.
//
// SETTER
// You can supply a Duration, a Moment, or a Duration-like argument.
// When setting the time, and the moment has an ambiguous time, it then becomes unambiguous.
newMomentProto.time = function(time) {
// Fallback to the original method (if there is one) if this moment wasn't created via FullCalendar.
// `time` is a generic enough method name where this precaution is necessary to avoid collisions w/ other plugins.
if (!this._fullCalendar) {
return oldMomentProto.time.apply(this, arguments)
}
if (time == null) { // getter
return moment.duration({
hours: this.hours(),
minutes: this.minutes(),
seconds: this.seconds(),
milliseconds: this.milliseconds()
})
} else { // setter
this._ambigTime = false // mark that the moment now has a time
if (!moment.isDuration(time) && !moment.isMoment(time)) {
time = moment.duration(time)
}
// The day value should cause overflow (so 24 hours becomes 00:00:00 of next day).
// Only for Duration times, not Moment times.
let dayHours = 0
if (moment.isDuration(time)) {
dayHours = Math.floor(time.asDays()) * 24
}
// We need to set the individual fields.
// Can't use startOf('day') then add duration. In case of DST at start of day.
return this.hours(dayHours + time.hours())
.minutes(time.minutes())
.seconds(time.seconds())
.milliseconds(time.milliseconds())
}
}
// Converts the moment to UTC, stripping out its time-of-day and timezone offset,
// but preserving its YMD. A moment with a stripped time will display no time
// nor timezone offset when .format() is called.
newMomentProto.stripTime = function() {
if (!this._ambigTime) {
this.utc(true) // keepLocalTime=true (for keeping *date* value)
// set time to zero
this.set({
hours: 0,
minutes: 0,
seconds: 0,
ms: 0
})
// Mark the time as ambiguous. This needs to happen after the .utc() call, which might call .utcOffset(),
// which clears all ambig flags.
this._ambigTime = true
this._ambigZone = true // if ambiguous time, also ambiguous timezone offset
}
return this // for chaining
}
// Returns if the moment has a non-ambiguous time (boolean)
newMomentProto.hasTime = function() {
return !this._ambigTime
}
// Timezone
// -------------------------------------------------------------------------------------------------
// Converts the moment to UTC, stripping out its timezone offset, but preserving its
// YMD and time-of-day. A moment with a stripped timezone offset will display no
// timezone offset when .format() is called.
newMomentProto.stripZone = function() {
let wasAmbigTime
if (!this._ambigZone) {
wasAmbigTime = this._ambigTime
this.utc(true) // keepLocalTime=true (for keeping date and time values)
// the above call to .utc()/.utcOffset() unfortunately might clear the ambig flags, so restore
this._ambigTime = wasAmbigTime || false
// Mark the zone as ambiguous. This needs to happen after the .utc() call, which might call .utcOffset(),
// which clears the ambig flags.
this._ambigZone = true
}
return this // for chaining
}
// Returns of the moment has a non-ambiguous timezone offset (boolean)
newMomentProto.hasZone = function() {
return !this._ambigZone
}
// implicitly marks a zone
newMomentProto.local = function(keepLocalTime) {
// for when converting from ambiguously-zoned to local,
// keep the time values when converting from UTC -> local
oldMomentProto.local.call(this, this._ambigZone || keepLocalTime)
// ensure non-ambiguous
// this probably already happened via local() -> utcOffset(), but don't rely on Moment's internals
this._ambigTime = false
this._ambigZone = false
return this // for chaining
}
// implicitly marks a zone
newMomentProto.utc = function(keepLocalTime) {
oldMomentProto.utc.call(this, keepLocalTime)
// ensure non-ambiguous
// this probably already happened via utc() -> utcOffset(), but don't rely on Moment's internals
this._ambigTime = false
this._ambigZone = false
return this
}
// implicitly marks a zone (will probably get called upon .utc() and .local())
newMomentProto.utcOffset = function(tzo) {
if (tzo != null) { // setter
// these assignments needs to happen before the original zone method is called.
// I forget why, something to do with a browser crash.
this._ambigTime = false
this._ambigZone = false
}
return oldMomentProto.utcOffset.apply(this, arguments)
}
| {
let input = args[0]
let isSingleString = args.length === 1 && typeof input === 'string'
let isAmbigTime
let isAmbigZone
let ambigMatch
let mom
if (moment.isMoment(input) || isNativeDate(input) || input === undefined) {
mom = moment.apply(null, args)
} else { // "parsing" is required
isAmbigTime = false
isAmbigZone = false
if (isSingleString) {
if (ambigDateOfMonthRegex.test(input)) {
// accept strings like '2014-05', but convert to the first of the month
input += '-01'
args = [ input ] // for when we pass it on to moment's constructor
isAmbigTime = true
isAmbigZone = true
} else if ((ambigMatch = ambigTimeOrZoneRegex.exec(input))) {
isAmbigTime = !ambigMatch[5] // no time part?
isAmbigZone = true
}
} else if ($.isArray(input)) {
// arrays have no timezone information, so assume ambiguous zone
isAmbigZone = true
}
// otherwise, probably a string with a format
if (parseAsUTC || isAmbigTime) {
mom = moment.utc.apply(moment, args)
} else {
mom = moment.apply(null, args)
}
if (isAmbigTime) {
mom._ambigTime = true
mom._ambigZone = true // ambiguous time always means ambiguous zone
} else if (parseZone) { // let's record the inputted zone somehow
if (isAmbigZone) {
mom._ambigZone = true
} else if (isSingleString) {
mom.utcOffset(input) // if not a valid zone, will assign UTC
}
}
}
mom._fullCalendar = true // flag for extended functionality
return mom
} | identifier_body |
qwe58.py | #from __future__ import print_function
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import sys
from cython_bbox import bbox_overlaps
from projection import ground_point_to_bird_view_proj
from projection import bird_view_proj_to_ground_point as bv2gp
#import projection.brid_view_proj_to_ground_point as bv2gp
import os
from scipy.fftpack import fft,ifft
import threading
from scipy.optimize import curve_fit
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/caffe/python')
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/lib')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/caffe/python')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/lib')
import caffe
from utils.timer import Timer
from utils.blob import im_list_to_blob
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from fast_rcnn.nms_wrapper import nms
# hah
import json
from pre import Filter
CLASSES = ('__background__','vehicle'
)
FRAME_TIME = 0.05023106289
Y_THRESHOLD = 0.35 #0.2
def _get_image_blob(im):
im_orig = im.astype(np.float32, copy=True)
im_orig -= np.array([[[102.9801, 115.9465, 122.7717]]])
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
#print im_size_min, im_size_max
processed_ims = []
im_scale_factors = []
for target_size in (900,): #scale
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
#print '1',im_scale
if np.round(im_scale * im_size_max) > 1300:
im_scale = float(1300) / float(im_size_max)
#print '2',im_scale
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
#print '#',im.shape, im_scale
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im, rois):
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def smooth_curve(x_result,N):
plen=N
head = [x_result[0] for z in range(plen)]
tail = [x_result[-1] for z in range(plen)]
pad_result= head+x_result+tail
xm_result=[]
for i in range(len(x_result)):
xm_result.append(np.mean(pad_result[i:i+plen+1]))
xm_result.reverse()
head = [xm_result[0] for z in range(plen)]
tail = [xm_result[-1] for z in range(plen)]
pad_result= head+xm_result+tail
hx_result=[]
for i in range(len(x_result)):
hx_result.append(np.mean(pad_result[i:i+plen+1]))
hx_result.reverse()
return hx_result
def fitfun(x,a,b):
return a/x+b
def im_detect(net, im, boxes=None):
blobs, im_scales = _get_blobs(im, boxes)
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
#print blobs['data'].shape
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
scores = blobs_out['cls_prob']
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#print 'debug--- boxes.size()', pred_boxes.shape
return scores, pred_boxes
num=0
def vis_detections(im, class_name, dets, thresh=0.8):
"""Draw detected bounding boxes."""
global num
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
frame = im
im = im[:, :, (2, 1, 0)]
#fig, ax = plt.subplots(figsize=(12, 12))
#ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
print(num)
cv2.imwrite('./'+str(num)+".jpg", frame)
def run_video(model, deploy, vp,outfile,timefile,nb):
tot=0
with open("/data/mcdc_data/valid/camera_parameter.json") as f:
cam_param = json.load(f)
cam_param["camera_height"] *=1
#with open('/data/mcdc_data/valid/valid_video_00_time.txt') as f:
with open(timefile) as f: # TODO: modify it: video_00
time_stamp = [line[:-1] for line in f]
time_stamp = map(eval, time_stamp)
caffe.set_mode_gpu()
net = caffe.Net(deploy, model, caffe.TEST)
cap = cv2.VideoCapture(vp)
#fps = round(cap.get(cv2.CAP_PROP_FPS))
success, image = cap.read()
#video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_id = 0
stamp_id = 0
x_result = []
arealist=[]
h_param=[]
w_param=[]
while (success):
tot += 1
timer = Timer()
timer.tic()
#if frame_id > 20: break
#xmin_crop, xmax_crop, ymin_crop, ymax_crop = 300, 1300, 400, 1200
#croped_image=image[400:1200,300:1300,:]
scores, boxes = im_detect(net, image)
#boxes[: , 4] += xmin_crop
#boxes[: , 5] += ymin_crop
#print '----debug-----box', boxes[:4, :]
#exit(0)
timer.toc()
print (str(nb)+'Detection took {:.3f}s for ''{:d} object proposals').format(timer.total_time, boxes.shape[0])
CONF_THRESH = 0.80
NMS_THRESH = 0.3
min_y = 0
min_x = 51.0
#Max = 0
#arealist=[]
maxb = [0,0,10,10] | for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1
cls_boxes = boxes[:, 4:8] #boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
cls_dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(cls_dets, NMS_THRESH)
# add vote
dets_NMSed = cls_dets[keep, :]
BBOX_VOTE_FLAG=True
if BBOX_VOTE_FLAG:
dets = bbox_vote(dets_NMSed, cls_dets)
else:
dets = dets_NMSed
#vis_detections_video(im, cls, dets, thresh=CONF_THRESH)
################################
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) == 0:
continue
#print(inds)
for i in inds:
#area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
bbox = dets[i, :4]
#bbox[0] += 300
#bbox[1] += 400
#bbox[2] += 300
#bbox[3] += 400
x, y = ground_point_to_bird_view_proj((bbox[2] + bbox[0])/2, bbox[3], cam_param)
#print(bbox)
if -cam_param['cam_to_left'] - Y_THRESHOLD < y and y < cam_param['cam_to_right'] + Y_THRESHOLD and x < min_x:
#Max = area
min_x, min_y = x, y
maxb = dets[i, :4] # max area bbox
#score = dets[i, :4]
#arealist+=[{"xmin":float(bbox[0]), "ymin":float(bbox[1]), "xmax": float(bbox[2]),"ymax":float(bbox[3])}]
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
#h_param.append(maxb[2]-maxb[0])
#cv2.rectangle(image, (bbox))
arealist+=[{"xmin":float(maxb[0]), "ymin":float(maxb[1]), "xmax": float(maxb[2]),"ymax":float(maxb[3])}]
w_param.append(maxb[2]-maxb[0])
h_param.append(maxb[3]-maxb[1])
cv2.rectangle(image, (maxb[0], maxb[1]), (maxb[2], maxb[3]), (0, 255, 0), 2)
#cv2.imwrite('output/'+str(tot)+".jpg", image)
#for x in range(5, 50, 3):
# for y in range(-3, 4):
# u, v = bv2gp(x, y, cam_param)
# cv2.putText(image, str(x) + "," + str(y), (u, v), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 2)
# cv2.circle(image, (u, v), 3, (0, (x + y + 103) % 2 * 255, (x + y + 102) % 2 * 255), -1)
#cv2.imwrite('output/'+ 'qwe_' +str(nb-1) + '_' + str(tot)+".jpg", image)
x_result.append(min_x)
#videoWriter.write(image)
##############################
frame_id += 1
stamp_id += 1
success, image = cap.read()
vx_result = []
for i in range(1,len(h_param)-1,2):
h_param[i]=(float(h_param[i+1])+float(h_param[i+1]))/2
w_param[i]=(float(w_param[i+1])+float(w_param[i+1]))/2
lineparam=[ 4.29128904e+03, -1.99320880e+00] # new 05
lineparam= [ 4.29169018e+03, -2.00891006e+00]
fitd_w=[fitfun(i,lineparam[0],lineparam[1]) for i in w_param]
lineparam=[ 3.33988934e+03, -2.55092719e+00] # new 05
lineparam=[ 3.33810684e+03, -2.61628836e+00]
fitd_h=[fitfun(i,lineparam[0],lineparam[1]) for i in h_param]
fitd=[0.2*fitd_h[i]+0.8*fitd_w[i] for i in range(len(fitd_w))]
fitd_m=smooth_curve(list(fitd),10)
xm_result=fitd_m[:]
if np.mean(fitd)>20:
fitd_m=smooth_curve(list(fitd),30)
else:
fitd_m=smooth_curve(list(fitd),10)
if np.mean(fitd)>20:
fitds=smooth_curve(list(fitd),40)
else:
fitds=smooth_curve(list(fitd),15)
x_result=fitds[:]
fix_frame_time = False
if fix_frame_time:
FRAME_TIME = (time_stamp[-1] - time_stamp[0]) / (len(time_stamp) - 1)
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += [(x1 - x2) / (2 * FRAME_TIME)]
else:
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += ([(x1 - x2) / (time_stamp[fid+1] - time_stamp[fid - 1])])
# vx_result = vx_result + [vx_result[-1]]
vx_result = [vx_result[0]]+vx_result + [vx_result[-1]]
vx_result=smooth_curve(list(vx_result),10)
frame_data = []
for fid, (vx, x) in enumerate(zip(vx_result, xm_result)):
#frame_data += [{"vx": 0, "x": x, "fid": fid}]
frame_data += [{"vx": vx, "x": x, "fid": fid}]
result = {
"frame_data": frame_data,
"end_frame": frame_id,
"start_frame": 0,
"track_id": None,
"arealist": arealist,
}
outjson=os.path.join("/home/m13/test_pre/",outfile)
with open(outjson, "w") as output_file:
json.dump(result, output_file)
#videoWriter.release()
def bbox_vote(dets_NMS, dets_all, thresh=0.8):
dets_voted = np.zeros_like(dets_NMS) # Empty matrix with the same shape and type
_overlaps = bbox_overlaps(
np.ascontiguousarray(dets_NMS[:, 0:4], dtype=np.float),
np.ascontiguousarray(dets_all[:, 0:4], dtype=np.float))
# for each survived box
for i, det in enumerate(dets_NMS):
dets_overlapped = dets_all[np.where(_overlaps[i, :] >= thresh)[0]]
assert(len(dets_overlapped) > 0)
boxes = dets_overlapped[:, 0:4]
scores = dets_overlapped[:, 4]
out_box = np.dot(scores, boxes)
dets_voted[i][0:4] = out_box / sum(scores) # Weighted bounding boxes
dets_voted[i][4] = det[4] # Keep the original score
# Weighted scores (if enabled)
BBOX_VOTE_N_WEIGHTED_SCORE=1
BBOX_VOTE_WEIGHT_EMPTY=0.5
if BBOX_VOTE_N_WEIGHTED_SCORE > 1:
n_agreement = BBOX_VOTE_N_WEIGHTED_SCORE
w_empty = BBOX_VOTE_WEIGHT_EMPTY
n_detected = len(scores)
if n_detected >= n_agreement:
top_scores = -np.sort(-scores)[:n_agreement]
new_score = np.average(top_scores)
else:
new_score = np.average(scores) * (n_detected * 1.0 + (n_agreement - n_detected) * w_empty) / n_agreement
dets_voted[i][4] = min(new_score, dets_voted[i][4])
return dets_voted
def action(model, deploy,infile,outfile,timefile, nnn):
run_video(model, deploy,infile,outfile,timefile, nnn)
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
threads = []
nnn = 0
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
nnn += 1
t = threading.Thread(target = action, args = (model, deploy,infile,outfile,timefile, nnn))
threads.append(t)
#run_video(model, deploy,infile,outfile,timefile)
for t in threads:
#t.setDaemon(True)
t.start()
while True:
if (len(threading.enumerate()) < 3):
break
for t in threads:
t.join()
timer_tot.toc()
print(timer_tot.total_time)
'''
tot = 0
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
run_video(model, deploy,infile,outfile,timefile,11)
timer_tot.toc()
print(timer_tot.total_time)
#vidcap = cv2.VideoCapture('/data/mcdc_data/valid/valid_video_00.avi')
#success,image = vidcap.read()
#count = 0
#while success:
# success,image = vidcap.read()
''' | random_line_split | |
qwe58.py | #from __future__ import print_function
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import sys
from cython_bbox import bbox_overlaps
from projection import ground_point_to_bird_view_proj
from projection import bird_view_proj_to_ground_point as bv2gp
#import projection.brid_view_proj_to_ground_point as bv2gp
import os
from scipy.fftpack import fft,ifft
import threading
from scipy.optimize import curve_fit
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/caffe/python')
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/lib')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/caffe/python')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/lib')
import caffe
from utils.timer import Timer
from utils.blob import im_list_to_blob
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from fast_rcnn.nms_wrapper import nms
# hah
import json
from pre import Filter
CLASSES = ('__background__','vehicle'
)
FRAME_TIME = 0.05023106289
Y_THRESHOLD = 0.35 #0.2
def _get_image_blob(im):
im_orig = im.astype(np.float32, copy=True)
im_orig -= np.array([[[102.9801, 115.9465, 122.7717]]])
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
#print im_size_min, im_size_max
processed_ims = []
im_scale_factors = []
for target_size in (900,): #scale
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
#print '1',im_scale
if np.round(im_scale * im_size_max) > 1300:
im_scale = float(1300) / float(im_size_max)
#print '2',im_scale
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
#print '#',im.shape, im_scale
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im, rois):
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def smooth_curve(x_result,N):
plen=N
head = [x_result[0] for z in range(plen)]
tail = [x_result[-1] for z in range(plen)]
pad_result= head+x_result+tail
xm_result=[]
for i in range(len(x_result)):
xm_result.append(np.mean(pad_result[i:i+plen+1]))
xm_result.reverse()
head = [xm_result[0] for z in range(plen)]
tail = [xm_result[-1] for z in range(plen)]
pad_result= head+xm_result+tail
hx_result=[]
for i in range(len(x_result)):
hx_result.append(np.mean(pad_result[i:i+plen+1]))
hx_result.reverse()
return hx_result
def fitfun(x,a,b):
return a/x+b
def im_detect(net, im, boxes=None):
blobs, im_scales = _get_blobs(im, boxes)
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
#print blobs['data'].shape
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
scores = blobs_out['cls_prob']
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#print 'debug--- boxes.size()', pred_boxes.shape
return scores, pred_boxes
num=0
def vis_detections(im, class_name, dets, thresh=0.8):
"""Draw detected bounding boxes."""
global num
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
frame = im
im = im[:, :, (2, 1, 0)]
#fig, ax = plt.subplots(figsize=(12, 12))
#ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
print(num)
cv2.imwrite('./'+str(num)+".jpg", frame)
def run_video(model, deploy, vp,outfile,timefile,nb):
tot=0
with open("/data/mcdc_data/valid/camera_parameter.json") as f:
cam_param = json.load(f)
cam_param["camera_height"] *=1
#with open('/data/mcdc_data/valid/valid_video_00_time.txt') as f:
with open(timefile) as f: # TODO: modify it: video_00
time_stamp = [line[:-1] for line in f]
time_stamp = map(eval, time_stamp)
caffe.set_mode_gpu()
net = caffe.Net(deploy, model, caffe.TEST)
cap = cv2.VideoCapture(vp)
#fps = round(cap.get(cv2.CAP_PROP_FPS))
success, image = cap.read()
#video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_id = 0
stamp_id = 0
x_result = []
arealist=[]
h_param=[]
w_param=[]
while (success):
tot += 1
timer = Timer()
timer.tic()
#if frame_id > 20: break
#xmin_crop, xmax_crop, ymin_crop, ymax_crop = 300, 1300, 400, 1200
#croped_image=image[400:1200,300:1300,:]
scores, boxes = im_detect(net, image)
#boxes[: , 4] += xmin_crop
#boxes[: , 5] += ymin_crop
#print '----debug-----box', boxes[:4, :]
#exit(0)
timer.toc()
print (str(nb)+'Detection took {:.3f}s for ''{:d} object proposals').format(timer.total_time, boxes.shape[0])
CONF_THRESH = 0.80
NMS_THRESH = 0.3
min_y = 0
min_x = 51.0
#Max = 0
#arealist=[]
maxb = [0,0,10,10]
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1
cls_boxes = boxes[:, 4:8] #boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
cls_dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(cls_dets, NMS_THRESH)
# add vote
dets_NMSed = cls_dets[keep, :]
BBOX_VOTE_FLAG=True
if BBOX_VOTE_FLAG:
dets = bbox_vote(dets_NMSed, cls_dets)
else:
dets = dets_NMSed
#vis_detections_video(im, cls, dets, thresh=CONF_THRESH)
################################
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) == 0:
continue
#print(inds)
for i in inds:
#area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
bbox = dets[i, :4]
#bbox[0] += 300
#bbox[1] += 400
#bbox[2] += 300
#bbox[3] += 400
x, y = ground_point_to_bird_view_proj((bbox[2] + bbox[0])/2, bbox[3], cam_param)
#print(bbox)
if -cam_param['cam_to_left'] - Y_THRESHOLD < y and y < cam_param['cam_to_right'] + Y_THRESHOLD and x < min_x:
#Max = area
min_x, min_y = x, y
maxb = dets[i, :4] # max area bbox
#score = dets[i, :4]
#arealist+=[{"xmin":float(bbox[0]), "ymin":float(bbox[1]), "xmax": float(bbox[2]),"ymax":float(bbox[3])}]
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
#h_param.append(maxb[2]-maxb[0])
#cv2.rectangle(image, (bbox))
arealist+=[{"xmin":float(maxb[0]), "ymin":float(maxb[1]), "xmax": float(maxb[2]),"ymax":float(maxb[3])}]
w_param.append(maxb[2]-maxb[0])
h_param.append(maxb[3]-maxb[1])
cv2.rectangle(image, (maxb[0], maxb[1]), (maxb[2], maxb[3]), (0, 255, 0), 2)
#cv2.imwrite('output/'+str(tot)+".jpg", image)
#for x in range(5, 50, 3):
# for y in range(-3, 4):
# u, v = bv2gp(x, y, cam_param)
# cv2.putText(image, str(x) + "," + str(y), (u, v), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 2)
# cv2.circle(image, (u, v), 3, (0, (x + y + 103) % 2 * 255, (x + y + 102) % 2 * 255), -1)
#cv2.imwrite('output/'+ 'qwe_' +str(nb-1) + '_' + str(tot)+".jpg", image)
x_result.append(min_x)
#videoWriter.write(image)
##############################
frame_id += 1
stamp_id += 1
success, image = cap.read()
vx_result = []
for i in range(1,len(h_param)-1,2):
h_param[i]=(float(h_param[i+1])+float(h_param[i+1]))/2
w_param[i]=(float(w_param[i+1])+float(w_param[i+1]))/2
lineparam=[ 4.29128904e+03, -1.99320880e+00] # new 05
lineparam= [ 4.29169018e+03, -2.00891006e+00]
fitd_w=[fitfun(i,lineparam[0],lineparam[1]) for i in w_param]
lineparam=[ 3.33988934e+03, -2.55092719e+00] # new 05
lineparam=[ 3.33810684e+03, -2.61628836e+00]
fitd_h=[fitfun(i,lineparam[0],lineparam[1]) for i in h_param]
fitd=[0.2*fitd_h[i]+0.8*fitd_w[i] for i in range(len(fitd_w))]
fitd_m=smooth_curve(list(fitd),10)
xm_result=fitd_m[:]
if np.mean(fitd)>20:
fitd_m=smooth_curve(list(fitd),30)
else:
fitd_m=smooth_curve(list(fitd),10)
if np.mean(fitd)>20:
fitds=smooth_curve(list(fitd),40)
else:
fitds=smooth_curve(list(fitd),15)
x_result=fitds[:]
fix_frame_time = False
if fix_frame_time:
FRAME_TIME = (time_stamp[-1] - time_stamp[0]) / (len(time_stamp) - 1)
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += [(x1 - x2) / (2 * FRAME_TIME)]
else:
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += ([(x1 - x2) / (time_stamp[fid+1] - time_stamp[fid - 1])])
# vx_result = vx_result + [vx_result[-1]]
vx_result = [vx_result[0]]+vx_result + [vx_result[-1]]
vx_result=smooth_curve(list(vx_result),10)
frame_data = []
for fid, (vx, x) in enumerate(zip(vx_result, xm_result)):
#frame_data += [{"vx": 0, "x": x, "fid": fid}]
frame_data += [{"vx": vx, "x": x, "fid": fid}]
result = {
"frame_data": frame_data,
"end_frame": frame_id,
"start_frame": 0,
"track_id": None,
"arealist": arealist,
}
outjson=os.path.join("/home/m13/test_pre/",outfile)
with open(outjson, "w") as output_file:
json.dump(result, output_file)
#videoWriter.release()
def bbox_vote(dets_NMS, dets_all, thresh=0.8):
dets_voted = np.zeros_like(dets_NMS) # Empty matrix with the same shape and type
_overlaps = bbox_overlaps(
np.ascontiguousarray(dets_NMS[:, 0:4], dtype=np.float),
np.ascontiguousarray(dets_all[:, 0:4], dtype=np.float))
# for each survived box
for i, det in enumerate(dets_NMS):
dets_overlapped = dets_all[np.where(_overlaps[i, :] >= thresh)[0]]
assert(len(dets_overlapped) > 0)
boxes = dets_overlapped[:, 0:4]
scores = dets_overlapped[:, 4]
out_box = np.dot(scores, boxes)
dets_voted[i][0:4] = out_box / sum(scores) # Weighted bounding boxes
dets_voted[i][4] = det[4] # Keep the original score
# Weighted scores (if enabled)
BBOX_VOTE_N_WEIGHTED_SCORE=1
BBOX_VOTE_WEIGHT_EMPTY=0.5
if BBOX_VOTE_N_WEIGHTED_SCORE > 1:
n_agreement = BBOX_VOTE_N_WEIGHTED_SCORE
w_empty = BBOX_VOTE_WEIGHT_EMPTY
n_detected = len(scores)
if n_detected >= n_agreement:
top_scores = -np.sort(-scores)[:n_agreement]
new_score = np.average(top_scores)
else:
new_score = np.average(scores) * (n_detected * 1.0 + (n_agreement - n_detected) * w_empty) / n_agreement
dets_voted[i][4] = min(new_score, dets_voted[i][4])
return dets_voted
def action(model, deploy,infile,outfile,timefile, nnn):
run_video(model, deploy,infile,outfile,timefile, nnn)
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
threads = []
nnn = 0
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
cont | outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
nnn += 1
t = threading.Thread(target = action, args = (model, deploy,infile,outfile,timefile, nnn))
threads.append(t)
#run_video(model, deploy,infile,outfile,timefile)
for t in threads:
#t.setDaemon(True)
t.start()
while True:
if (len(threading.enumerate()) < 3):
break
for t in threads:
t.join()
timer_tot.toc()
print(timer_tot.total_time)
'''
tot = 0
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
run_video(model, deploy,infile,outfile,timefile,11)
timer_tot.toc()
print(timer_tot.total_time)
#vidcap = cv2.VideoCapture('/data/mcdc_data/valid/valid_video_00.avi')
#success,image = vidcap.read()
#count = 0
#while success:
# success,image = vidcap.read()
'''
| inue
| conditional_block |
qwe58.py | #from __future__ import print_function
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import sys
from cython_bbox import bbox_overlaps
from projection import ground_point_to_bird_view_proj
from projection import bird_view_proj_to_ground_point as bv2gp
#import projection.brid_view_proj_to_ground_point as bv2gp
import os
from scipy.fftpack import fft,ifft
import threading
from scipy.optimize import curve_fit
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/caffe/python')
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/lib')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/caffe/python')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/lib')
import caffe
from utils.timer import Timer
from utils.blob import im_list_to_blob
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from fast_rcnn.nms_wrapper import nms
# hah
import json
from pre import Filter
CLASSES = ('__background__','vehicle'
)
FRAME_TIME = 0.05023106289
Y_THRESHOLD = 0.35 #0.2
def _get_image_blob(im):
im_orig = im.astype(np.float32, copy=True)
im_orig -= np.array([[[102.9801, 115.9465, 122.7717]]])
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
#print im_size_min, im_size_max
processed_ims = []
im_scale_factors = []
for target_size in (900,): #scale
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
#print '1',im_scale
if np.round(im_scale * im_size_max) > 1300:
im_scale = float(1300) / float(im_size_max)
#print '2',im_scale
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
#print '#',im.shape, im_scale
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im, rois):
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def smooth_curve(x_result,N):
plen=N
head = [x_result[0] for z in range(plen)]
tail = [x_result[-1] for z in range(plen)]
pad_result= head+x_result+tail
xm_result=[]
for i in range(len(x_result)):
xm_result.append(np.mean(pad_result[i:i+plen+1]))
xm_result.reverse()
head = [xm_result[0] for z in range(plen)]
tail = [xm_result[-1] for z in range(plen)]
pad_result= head+xm_result+tail
hx_result=[]
for i in range(len(x_result)):
hx_result.append(np.mean(pad_result[i:i+plen+1]))
hx_result.reverse()
return hx_result
def fitfun(x,a,b):
return a/x+b
def im_detect(net, im, boxes=None):
blobs, im_scales = _get_blobs(im, boxes)
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
#print blobs['data'].shape
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
scores = blobs_out['cls_prob']
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#print 'debug--- boxes.size()', pred_boxes.shape
return scores, pred_boxes
num=0
def vis_detections(im, class_name, dets, thresh=0.8):
"""Draw detected bounding boxes."""
global num
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
frame = im
im = im[:, :, (2, 1, 0)]
#fig, ax = plt.subplots(figsize=(12, 12))
#ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
print(num)
cv2.imwrite('./'+str(num)+".jpg", frame)
def run_video(model, deploy, vp,outfile,timefile,nb):
tot=0
with open("/data/mcdc_data/valid/camera_parameter.json") as f:
cam_param = json.load(f)
cam_param["camera_height"] *=1
#with open('/data/mcdc_data/valid/valid_video_00_time.txt') as f:
with open(timefile) as f: # TODO: modify it: video_00
time_stamp = [line[:-1] for line in f]
time_stamp = map(eval, time_stamp)
caffe.set_mode_gpu()
net = caffe.Net(deploy, model, caffe.TEST)
cap = cv2.VideoCapture(vp)
#fps = round(cap.get(cv2.CAP_PROP_FPS))
success, image = cap.read()
#video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_id = 0
stamp_id = 0
x_result = []
arealist=[]
h_param=[]
w_param=[]
while (success):
tot += 1
timer = Timer()
timer.tic()
#if frame_id > 20: break
#xmin_crop, xmax_crop, ymin_crop, ymax_crop = 300, 1300, 400, 1200
#croped_image=image[400:1200,300:1300,:]
scores, boxes = im_detect(net, image)
#boxes[: , 4] += xmin_crop
#boxes[: , 5] += ymin_crop
#print '----debug-----box', boxes[:4, :]
#exit(0)
timer.toc()
print (str(nb)+'Detection took {:.3f}s for ''{:d} object proposals').format(timer.total_time, boxes.shape[0])
CONF_THRESH = 0.80
NMS_THRESH = 0.3
min_y = 0
min_x = 51.0
#Max = 0
#arealist=[]
maxb = [0,0,10,10]
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1
cls_boxes = boxes[:, 4:8] #boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
cls_dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(cls_dets, NMS_THRESH)
# add vote
dets_NMSed = cls_dets[keep, :]
BBOX_VOTE_FLAG=True
if BBOX_VOTE_FLAG:
dets = bbox_vote(dets_NMSed, cls_dets)
else:
dets = dets_NMSed
#vis_detections_video(im, cls, dets, thresh=CONF_THRESH)
################################
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) == 0:
continue
#print(inds)
for i in inds:
#area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
bbox = dets[i, :4]
#bbox[0] += 300
#bbox[1] += 400
#bbox[2] += 300
#bbox[3] += 400
x, y = ground_point_to_bird_view_proj((bbox[2] + bbox[0])/2, bbox[3], cam_param)
#print(bbox)
if -cam_param['cam_to_left'] - Y_THRESHOLD < y and y < cam_param['cam_to_right'] + Y_THRESHOLD and x < min_x:
#Max = area
min_x, min_y = x, y
maxb = dets[i, :4] # max area bbox
#score = dets[i, :4]
#arealist+=[{"xmin":float(bbox[0]), "ymin":float(bbox[1]), "xmax": float(bbox[2]),"ymax":float(bbox[3])}]
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
#h_param.append(maxb[2]-maxb[0])
#cv2.rectangle(image, (bbox))
arealist+=[{"xmin":float(maxb[0]), "ymin":float(maxb[1]), "xmax": float(maxb[2]),"ymax":float(maxb[3])}]
w_param.append(maxb[2]-maxb[0])
h_param.append(maxb[3]-maxb[1])
cv2.rectangle(image, (maxb[0], maxb[1]), (maxb[2], maxb[3]), (0, 255, 0), 2)
#cv2.imwrite('output/'+str(tot)+".jpg", image)
#for x in range(5, 50, 3):
# for y in range(-3, 4):
# u, v = bv2gp(x, y, cam_param)
# cv2.putText(image, str(x) + "," + str(y), (u, v), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 2)
# cv2.circle(image, (u, v), 3, (0, (x + y + 103) % 2 * 255, (x + y + 102) % 2 * 255), -1)
#cv2.imwrite('output/'+ 'qwe_' +str(nb-1) + '_' + str(tot)+".jpg", image)
x_result.append(min_x)
#videoWriter.write(image)
##############################
frame_id += 1
stamp_id += 1
success, image = cap.read()
vx_result = []
for i in range(1,len(h_param)-1,2):
h_param[i]=(float(h_param[i+1])+float(h_param[i+1]))/2
w_param[i]=(float(w_param[i+1])+float(w_param[i+1]))/2
lineparam=[ 4.29128904e+03, -1.99320880e+00] # new 05
lineparam= [ 4.29169018e+03, -2.00891006e+00]
fitd_w=[fitfun(i,lineparam[0],lineparam[1]) for i in w_param]
lineparam=[ 3.33988934e+03, -2.55092719e+00] # new 05
lineparam=[ 3.33810684e+03, -2.61628836e+00]
fitd_h=[fitfun(i,lineparam[0],lineparam[1]) for i in h_param]
fitd=[0.2*fitd_h[i]+0.8*fitd_w[i] for i in range(len(fitd_w))]
fitd_m=smooth_curve(list(fitd),10)
xm_result=fitd_m[:]
if np.mean(fitd)>20:
fitd_m=smooth_curve(list(fitd),30)
else:
fitd_m=smooth_curve(list(fitd),10)
if np.mean(fitd)>20:
fitds=smooth_curve(list(fitd),40)
else:
fitds=smooth_curve(list(fitd),15)
x_result=fitds[:]
fix_frame_time = False
if fix_frame_time:
FRAME_TIME = (time_stamp[-1] - time_stamp[0]) / (len(time_stamp) - 1)
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += [(x1 - x2) / (2 * FRAME_TIME)]
else:
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += ([(x1 - x2) / (time_stamp[fid+1] - time_stamp[fid - 1])])
# vx_result = vx_result + [vx_result[-1]]
vx_result = [vx_result[0]]+vx_result + [vx_result[-1]]
vx_result=smooth_curve(list(vx_result),10)
frame_data = []
for fid, (vx, x) in enumerate(zip(vx_result, xm_result)):
#frame_data += [{"vx": 0, "x": x, "fid": fid}]
frame_data += [{"vx": vx, "x": x, "fid": fid}]
result = {
"frame_data": frame_data,
"end_frame": frame_id,
"start_frame": 0,
"track_id": None,
"arealist": arealist,
}
outjson=os.path.join("/home/m13/test_pre/",outfile)
with open(outjson, "w") as output_file:
json.dump(result, output_file)
#videoWriter.release()
def bbox_vote(dets_NMS, dets_all, thresh=0.8):
|
def action(model, deploy,infile,outfile,timefile, nnn):
run_video(model, deploy,infile,outfile,timefile, nnn)
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
threads = []
nnn = 0
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
nnn += 1
t = threading.Thread(target = action, args = (model, deploy,infile,outfile,timefile, nnn))
threads.append(t)
#run_video(model, deploy,infile,outfile,timefile)
for t in threads:
#t.setDaemon(True)
t.start()
while True:
if (len(threading.enumerate()) < 3):
break
for t in threads:
t.join()
timer_tot.toc()
print(timer_tot.total_time)
'''
tot = 0
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
run_video(model, deploy,infile,outfile,timefile,11)
timer_tot.toc()
print(timer_tot.total_time)
#vidcap = cv2.VideoCapture('/data/mcdc_data/valid/valid_video_00.avi')
#success,image = vidcap.read()
#count = 0
#while success:
# success,image = vidcap.read()
'''
| dets_voted = np.zeros_like(dets_NMS) # Empty matrix with the same shape and type
_overlaps = bbox_overlaps(
np.ascontiguousarray(dets_NMS[:, 0:4], dtype=np.float),
np.ascontiguousarray(dets_all[:, 0:4], dtype=np.float))
# for each survived box
for i, det in enumerate(dets_NMS):
dets_overlapped = dets_all[np.where(_overlaps[i, :] >= thresh)[0]]
assert(len(dets_overlapped) > 0)
boxes = dets_overlapped[:, 0:4]
scores = dets_overlapped[:, 4]
out_box = np.dot(scores, boxes)
dets_voted[i][0:4] = out_box / sum(scores) # Weighted bounding boxes
dets_voted[i][4] = det[4] # Keep the original score
# Weighted scores (if enabled)
BBOX_VOTE_N_WEIGHTED_SCORE=1
BBOX_VOTE_WEIGHT_EMPTY=0.5
if BBOX_VOTE_N_WEIGHTED_SCORE > 1:
n_agreement = BBOX_VOTE_N_WEIGHTED_SCORE
w_empty = BBOX_VOTE_WEIGHT_EMPTY
n_detected = len(scores)
if n_detected >= n_agreement:
top_scores = -np.sort(-scores)[:n_agreement]
new_score = np.average(top_scores)
else:
new_score = np.average(scores) * (n_detected * 1.0 + (n_agreement - n_detected) * w_empty) / n_agreement
dets_voted[i][4] = min(new_score, dets_voted[i][4])
return dets_voted | identifier_body |
qwe58.py | #from __future__ import print_function
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import sys
from cython_bbox import bbox_overlaps
from projection import ground_point_to_bird_view_proj
from projection import bird_view_proj_to_ground_point as bv2gp
#import projection.brid_view_proj_to_ground_point as bv2gp
import os
from scipy.fftpack import fft,ifft
import threading
from scipy.optimize import curve_fit
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/caffe/python')
#sys.path.append('/home/m13/MCDC_FHS/py-R-FCN/lib')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/caffe/python')
sys.path.append('/home/m13/MCDC_FHS/py-R-FCN2/hy-frcnn/R-FCN-PSROIAlign/lib')
import caffe
from utils.timer import Timer
from utils.blob import im_list_to_blob
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from fast_rcnn.nms_wrapper import nms
# hah
import json
from pre import Filter
CLASSES = ('__background__','vehicle'
)
FRAME_TIME = 0.05023106289
Y_THRESHOLD = 0.35 #0.2
def _get_image_blob(im):
im_orig = im.astype(np.float32, copy=True)
im_orig -= np.array([[[102.9801, 115.9465, 122.7717]]])
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
#print im_size_min, im_size_max
processed_ims = []
im_scale_factors = []
for target_size in (900,): #scale
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
#print '1',im_scale
if np.round(im_scale * im_size_max) > 1300:
im_scale = float(1300) / float(im_size_max)
#print '2',im_scale
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
#print '#',im.shape, im_scale
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im, rois):
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def | (x_result,N):
plen=N
head = [x_result[0] for z in range(plen)]
tail = [x_result[-1] for z in range(plen)]
pad_result= head+x_result+tail
xm_result=[]
for i in range(len(x_result)):
xm_result.append(np.mean(pad_result[i:i+plen+1]))
xm_result.reverse()
head = [xm_result[0] for z in range(plen)]
tail = [xm_result[-1] for z in range(plen)]
pad_result= head+xm_result+tail
hx_result=[]
for i in range(len(x_result)):
hx_result.append(np.mean(pad_result[i:i+plen+1]))
hx_result.reverse()
return hx_result
def fitfun(x,a,b):
return a/x+b
def im_detect(net, im, boxes=None):
blobs, im_scales = _get_blobs(im, boxes)
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
#print blobs['data'].shape
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
scores = blobs_out['cls_prob']
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#print 'debug--- boxes.size()', pred_boxes.shape
return scores, pred_boxes
num=0
def vis_detections(im, class_name, dets, thresh=0.8):
"""Draw detected bounding boxes."""
global num
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
frame = im
im = im[:, :, (2, 1, 0)]
#fig, ax = plt.subplots(figsize=(12, 12))
#ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
print(num)
cv2.imwrite('./'+str(num)+".jpg", frame)
def run_video(model, deploy, vp,outfile,timefile,nb):
tot=0
with open("/data/mcdc_data/valid/camera_parameter.json") as f:
cam_param = json.load(f)
cam_param["camera_height"] *=1
#with open('/data/mcdc_data/valid/valid_video_00_time.txt') as f:
with open(timefile) as f: # TODO: modify it: video_00
time_stamp = [line[:-1] for line in f]
time_stamp = map(eval, time_stamp)
caffe.set_mode_gpu()
net = caffe.Net(deploy, model, caffe.TEST)
cap = cv2.VideoCapture(vp)
#fps = round(cap.get(cv2.CAP_PROP_FPS))
success, image = cap.read()
#video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_id = 0
stamp_id = 0
x_result = []
arealist=[]
h_param=[]
w_param=[]
while (success):
tot += 1
timer = Timer()
timer.tic()
#if frame_id > 20: break
#xmin_crop, xmax_crop, ymin_crop, ymax_crop = 300, 1300, 400, 1200
#croped_image=image[400:1200,300:1300,:]
scores, boxes = im_detect(net, image)
#boxes[: , 4] += xmin_crop
#boxes[: , 5] += ymin_crop
#print '----debug-----box', boxes[:4, :]
#exit(0)
timer.toc()
print (str(nb)+'Detection took {:.3f}s for ''{:d} object proposals').format(timer.total_time, boxes.shape[0])
CONF_THRESH = 0.80
NMS_THRESH = 0.3
min_y = 0
min_x = 51.0
#Max = 0
#arealist=[]
maxb = [0,0,10,10]
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1
cls_boxes = boxes[:, 4:8] #boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
cls_dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(cls_dets, NMS_THRESH)
# add vote
dets_NMSed = cls_dets[keep, :]
BBOX_VOTE_FLAG=True
if BBOX_VOTE_FLAG:
dets = bbox_vote(dets_NMSed, cls_dets)
else:
dets = dets_NMSed
#vis_detections_video(im, cls, dets, thresh=CONF_THRESH)
################################
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) == 0:
continue
#print(inds)
for i in inds:
#area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
bbox = dets[i, :4]
#bbox[0] += 300
#bbox[1] += 400
#bbox[2] += 300
#bbox[3] += 400
x, y = ground_point_to_bird_view_proj((bbox[2] + bbox[0])/2, bbox[3], cam_param)
#print(bbox)
if -cam_param['cam_to_left'] - Y_THRESHOLD < y and y < cam_param['cam_to_right'] + Y_THRESHOLD and x < min_x:
#Max = area
min_x, min_y = x, y
maxb = dets[i, :4] # max area bbox
#score = dets[i, :4]
#arealist+=[{"xmin":float(bbox[0]), "ymin":float(bbox[1]), "xmax": float(bbox[2]),"ymax":float(bbox[3])}]
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)
#h_param.append(maxb[2]-maxb[0])
#cv2.rectangle(image, (bbox))
arealist+=[{"xmin":float(maxb[0]), "ymin":float(maxb[1]), "xmax": float(maxb[2]),"ymax":float(maxb[3])}]
w_param.append(maxb[2]-maxb[0])
h_param.append(maxb[3]-maxb[1])
cv2.rectangle(image, (maxb[0], maxb[1]), (maxb[2], maxb[3]), (0, 255, 0), 2)
#cv2.imwrite('output/'+str(tot)+".jpg", image)
#for x in range(5, 50, 3):
# for y in range(-3, 4):
# u, v = bv2gp(x, y, cam_param)
# cv2.putText(image, str(x) + "," + str(y), (u, v), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 2)
# cv2.circle(image, (u, v), 3, (0, (x + y + 103) % 2 * 255, (x + y + 102) % 2 * 255), -1)
#cv2.imwrite('output/'+ 'qwe_' +str(nb-1) + '_' + str(tot)+".jpg", image)
x_result.append(min_x)
#videoWriter.write(image)
##############################
frame_id += 1
stamp_id += 1
success, image = cap.read()
vx_result = []
for i in range(1,len(h_param)-1,2):
h_param[i]=(float(h_param[i+1])+float(h_param[i+1]))/2
w_param[i]=(float(w_param[i+1])+float(w_param[i+1]))/2
lineparam=[ 4.29128904e+03, -1.99320880e+00] # new 05
lineparam= [ 4.29169018e+03, -2.00891006e+00]
fitd_w=[fitfun(i,lineparam[0],lineparam[1]) for i in w_param]
lineparam=[ 3.33988934e+03, -2.55092719e+00] # new 05
lineparam=[ 3.33810684e+03, -2.61628836e+00]
fitd_h=[fitfun(i,lineparam[0],lineparam[1]) for i in h_param]
fitd=[0.2*fitd_h[i]+0.8*fitd_w[i] for i in range(len(fitd_w))]
fitd_m=smooth_curve(list(fitd),10)
xm_result=fitd_m[:]
if np.mean(fitd)>20:
fitd_m=smooth_curve(list(fitd),30)
else:
fitd_m=smooth_curve(list(fitd),10)
if np.mean(fitd)>20:
fitds=smooth_curve(list(fitd),40)
else:
fitds=smooth_curve(list(fitd),15)
x_result=fitds[:]
fix_frame_time = False
if fix_frame_time:
FRAME_TIME = (time_stamp[-1] - time_stamp[0]) / (len(time_stamp) - 1)
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += [(x1 - x2) / (2 * FRAME_TIME)]
else:
for fid, (x1, x2) in enumerate(zip(x_result[2:], x_result[:-2]), 1):
vx_result += ([(x1 - x2) / (time_stamp[fid+1] - time_stamp[fid - 1])])
# vx_result = vx_result + [vx_result[-1]]
vx_result = [vx_result[0]]+vx_result + [vx_result[-1]]
vx_result=smooth_curve(list(vx_result),10)
frame_data = []
for fid, (vx, x) in enumerate(zip(vx_result, xm_result)):
#frame_data += [{"vx": 0, "x": x, "fid": fid}]
frame_data += [{"vx": vx, "x": x, "fid": fid}]
result = {
"frame_data": frame_data,
"end_frame": frame_id,
"start_frame": 0,
"track_id": None,
"arealist": arealist,
}
outjson=os.path.join("/home/m13/test_pre/",outfile)
with open(outjson, "w") as output_file:
json.dump(result, output_file)
#videoWriter.release()
def bbox_vote(dets_NMS, dets_all, thresh=0.8):
dets_voted = np.zeros_like(dets_NMS) # Empty matrix with the same shape and type
_overlaps = bbox_overlaps(
np.ascontiguousarray(dets_NMS[:, 0:4], dtype=np.float),
np.ascontiguousarray(dets_all[:, 0:4], dtype=np.float))
# for each survived box
for i, det in enumerate(dets_NMS):
dets_overlapped = dets_all[np.where(_overlaps[i, :] >= thresh)[0]]
assert(len(dets_overlapped) > 0)
boxes = dets_overlapped[:, 0:4]
scores = dets_overlapped[:, 4]
out_box = np.dot(scores, boxes)
dets_voted[i][0:4] = out_box / sum(scores) # Weighted bounding boxes
dets_voted[i][4] = det[4] # Keep the original score
# Weighted scores (if enabled)
BBOX_VOTE_N_WEIGHTED_SCORE=1
BBOX_VOTE_WEIGHT_EMPTY=0.5
if BBOX_VOTE_N_WEIGHTED_SCORE > 1:
n_agreement = BBOX_VOTE_N_WEIGHTED_SCORE
w_empty = BBOX_VOTE_WEIGHT_EMPTY
n_detected = len(scores)
if n_detected >= n_agreement:
top_scores = -np.sort(-scores)[:n_agreement]
new_score = np.average(top_scores)
else:
new_score = np.average(scores) * (n_detected * 1.0 + (n_agreement - n_detected) * w_empty) / n_agreement
dets_voted[i][4] = min(new_score, dets_voted[i][4])
return dets_voted
def action(model, deploy,infile,outfile,timefile, nnn):
run_video(model, deploy,infile,outfile,timefile, nnn)
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
threads = []
nnn = 0
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
nnn += 1
t = threading.Thread(target = action, args = (model, deploy,infile,outfile,timefile, nnn))
threads.append(t)
#run_video(model, deploy,infile,outfile,timefile)
for t in threads:
#t.setDaemon(True)
t.start()
while True:
if (len(threading.enumerate()) < 3):
break
for t in threads:
t.join()
timer_tot.toc()
print(timer_tot.total_time)
'''
tot = 0
if __name__ == '__main__':
model = sys.argv[2] #model
deploy = sys.argv[1] #deploy
imp = sys.argv[3] # video path
output = sys.argv[4] # output json
timer_tot = Timer()
timer_tot.tic()
filelist=os.listdir(imp)
for i in filelist:
if i.find(".avi")>-1:
if i.find("效果")>-1:
continue
outfile=i.split(".")[0]+"_pre.json"
infile=os.path.join(imp,i)
timefile=i.split(".")[0]+"_time.txt"
timefile=os.path.join(imp,timefile)
print(infile)
print(outfile)
print(timefile)
run_video(model, deploy,infile,outfile,timefile,11)
timer_tot.toc()
print(timer_tot.total_time)
#vidcap = cv2.VideoCapture('/data/mcdc_data/valid/valid_video_00.avi')
#success,image = vidcap.read()
#count = 0
#while success:
# success,image = vidcap.read()
'''
| smooth_curve | identifier_name |
address.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package route
import (
"runtime"
"syscall"
)
// An Addr represents an address associated with packet routing.
type Addr interface {
// Family returns an address family.
Family() int
}
// A LinkAddr represents a link-layer address.
type LinkAddr struct {
Index int // interface index when attached
Name string // interface name when attached
Addr []byte // link-layer address when attached
}
// Family implements the Family method of Addr interface.
func (a *LinkAddr) Family() int { return syscall.AF_LINK }
func (a *LinkAddr) lenAndSpace() (int, int) {
l := 8 + len(a.Name) + len(a.Addr)
return l, roundup(l)
}
func (a *LinkAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
nlen, alen := len(a.Name), len(a.Addr)
if nlen > 255 || alen > 255 {
return 0, errInvalidAddr
}
b[0] = byte(l)
b[1] = syscall.AF_LINK
if a.Index > 0 {
nativeEndian.PutUint16(b[2:4], uint16(a.Index))
}
data := b[8:]
if nlen > 0 {
b[5] = byte(nlen)
copy(data[:nlen], a.Name)
data = data[nlen:]
}
if alen > 0 {
b[6] = byte(alen)
copy(data[:alen], a.Addr)
data = data[alen:]
}
return ll, nil
}
func parseLinkAddr(b []byte) (Addr, error) {
if len(b) < 8 {
return nil, errInvalidAddr
}
_, a, err := parseKernelLinkAddr(syscall.AF_LINK, b[4:])
if err != nil {
return nil, err
}
a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
return a, nil
}
// parseKernelLinkAddr parses b as a link-layer address in
// conventional BSD kernel form.
func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
// The encoding looks like the following:
// +----------------------------+
// | Type (1 octet) |
// +----------------------------+
// | Name length (1 octet) |
// +----------------------------+
// | Address length (1 octet) |
// +----------------------------+
// | Selector length (1 octet) |
// +----------------------------+
// | Data (variable) |
// +----------------------------+
//
// On some platforms, all-bit-one of length field means "don't
// care".
nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
if nlen == 0xff {
nlen = 0
}
if alen == 0xff {
alen = 0
}
if slen == 0xff {
slen = 0
}
l := 4 + nlen + alen + slen
if len(b) < l {
return 0, nil, errInvalidAddr
}
data := b[4:]
var name string
var addr []byte
if nlen > 0 {
name = string(data[:nlen])
data = data[nlen:]
}
if alen > 0 {
addr = data[:alen]
data = data[alen:]
}
return l, &LinkAddr{Name: name, Addr: addr}, nil
}
// An Inet4Addr represents an internet address for IPv4.
type Inet4Addr struct {
IP [4]byte // IP address
}
// Family implements the Family method of Addr interface.
func (a *Inet4Addr) Family() int { return syscall.AF_INET }
func (a *Inet4Addr) lenAndSpace() (int, int) |
func (a *Inet4Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET
copy(b[4:8], a.IP[:])
return ll, nil
}
// An Inet6Addr represents an internet address for IPv6.
type Inet6Addr struct {
IP [16]byte // IP address
ZoneID int // zone identifier
}
// Family implements the Family method of Addr interface.
func (a *Inet6Addr) Family() int { return syscall.AF_INET6 }
func (a *Inet6Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)
}
func (a *Inet6Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET6
copy(b[8:24], a.IP[:])
if a.ZoneID > 0 {
nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))
}
return ll, nil
}
// parseInetAddr parses b as an internet address for IPv4 or IPv6.
func parseInetAddr(af int, b []byte) (Addr, error) {
switch af {
case syscall.AF_INET:
if len(b) < sizeofSockaddrInet {
return nil, errInvalidAddr
}
a := &Inet4Addr{}
copy(a.IP[:], b[4:8])
return a, nil
case syscall.AF_INET6:
if len(b) < sizeofSockaddrInet6 {
return nil, errInvalidAddr
}
a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
copy(a.IP[:], b[8:24])
if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
// KAME based IPv6 protocol stack usually
// embeds the interface index in the
// interface-local or link-local address as
// the kernel-internal form.
id := int(bigEndian.Uint16(a.IP[2:4]))
if id != 0 {
a.ZoneID = id
a.IP[2], a.IP[3] = 0, 0
}
}
return a, nil
default:
return nil, errInvalidAddr
}
}
// parseKernelInetAddr parses b as an internet address in conventional
// BSD kernel form.
func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
// The encoding looks similar to the NLRI encoding.
// +----------------------------+
// | Length (1 octet) |
// +----------------------------+
// | Address prefix (variable) |
// +----------------------------+
//
// The differences between the kernel form and the NLRI
// encoding are:
//
// - The length field of the kernel form indicates the prefix
// length in bytes, not in bits
//
// - In the kernel form, zero value of the length field
// doesn't mean 0.0.0.0/0 or ::/0
//
// - The kernel form appends leading bytes to the prefix field
// to make the <length, prefix> tuple to be conformed with
// the routing message boundary
l := int(b[0])
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
// On Darwin, an address in the kernel form is also
// used as a message filler.
if l == 0 || len(b) > roundup(l) {
l = roundup(l)
}
} else {
l = roundup(l)
}
if len(b) < l {
return 0, nil, errInvalidAddr
}
// Don't reorder case expressions.
// The case expressions for IPv6 must come first.
const (
off4 = 4 // offset of in_addr
off6 = 8 // offset of in6_addr
)
switch {
case b[0] == sizeofSockaddrInet6:
a := &Inet6Addr{}
copy(a.IP[:], b[off6:off6+16])
return int(b[0]), a, nil
case af == syscall.AF_INET6:
a := &Inet6Addr{}
if l-1 < off6 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off6:l])
}
return int(b[0]), a, nil
case b[0] == sizeofSockaddrInet:
a := &Inet4Addr{}
copy(a.IP[:], b[off4:off4+4])
return int(b[0]), a, nil
default: // an old fashion, AF_UNSPEC or unknown means AF_INET
a := &Inet4Addr{}
if l-1 < off4 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off4:l])
}
return int(b[0]), a, nil
}
}
// A DefaultAddr represents an address of various operating
// system-specific features.
type DefaultAddr struct {
af int
Raw []byte // raw format of address
}
// Family implements the Family method of Addr interface.
func (a *DefaultAddr) Family() int { return a.af }
func (a *DefaultAddr) lenAndSpace() (int, int) {
l := len(a.Raw)
return l, roundup(l)
}
func (a *DefaultAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
if l > 255 {
return 0, errInvalidAddr
}
b[1] = byte(l)
copy(b[:l], a.Raw)
return ll, nil
}
func parseDefaultAddr(b []byte) (Addr, error) {
if len(b) < 2 || len(b) < int(b[0]) {
return nil, errInvalidAddr
}
a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}
return a, nil
}
func addrsSpace(as []Addr) int {
var l int
for _, a := range as {
switch a := a.(type) {
case *LinkAddr:
_, ll := a.lenAndSpace()
l += ll
case *Inet4Addr:
_, ll := a.lenAndSpace()
l += ll
case *Inet6Addr:
_, ll := a.lenAndSpace()
l += ll
case *DefaultAddr:
_, ll := a.lenAndSpace()
l += ll
}
}
return l
}
// marshalAddrs marshals as and returns a bitmap indicating which
// address is stored in b.
func marshalAddrs(b []byte, as []Addr) (uint, error) {
var attrs uint
for i, a := range as {
switch a := a.(type) {
case *LinkAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet4Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet6Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *DefaultAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
}
}
return attrs, nil
}
func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {
var as [syscall.RTAX_MAX]Addr
af := int(syscall.AF_UNSPEC)
for i := uint(0); i < syscall.RTAX_MAX && len(b) >= roundup(0); i++ {
if attrs&(1<<i) == 0 {
continue
}
if i <= syscall.RTAX_BRD {
switch b[1] {
case syscall.AF_LINK:
a, err := parseLinkAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
case syscall.AF_INET, syscall.AF_INET6:
af = int(b[1])
a, err := parseInetAddr(af, b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
default:
l, a, err := fn(af, b)
if err != nil {
return nil, err
}
as[i] = a
ll := roundup(l)
if len(b) < ll {
b = b[l:]
} else {
b = b[ll:]
}
}
} else {
a, err := parseDefaultAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
}
}
// The only remaining bytes in b should be alignment.
// However, under some circumstances DragonFly BSD appears to put
// more addresses in the message than are indicated in the address
// bitmask, so don't check for this.
return as[:], nil
}
| {
return sizeofSockaddrInet, roundup(sizeofSockaddrInet)
} | identifier_body |
address.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package route
import (
"runtime"
"syscall"
)
// An Addr represents an address associated with packet routing.
type Addr interface {
// Family returns an address family.
Family() int
}
// A LinkAddr represents a link-layer address.
type LinkAddr struct {
Index int // interface index when attached
Name string // interface name when attached
Addr []byte // link-layer address when attached
}
// Family implements the Family method of Addr interface.
func (a *LinkAddr) Family() int { return syscall.AF_LINK }
func (a *LinkAddr) lenAndSpace() (int, int) {
l := 8 + len(a.Name) + len(a.Addr)
return l, roundup(l)
}
func (a *LinkAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
nlen, alen := len(a.Name), len(a.Addr)
if nlen > 255 || alen > 255 {
return 0, errInvalidAddr
}
b[0] = byte(l)
b[1] = syscall.AF_LINK
if a.Index > 0 {
nativeEndian.PutUint16(b[2:4], uint16(a.Index))
}
data := b[8:]
if nlen > 0 {
b[5] = byte(nlen)
copy(data[:nlen], a.Name)
data = data[nlen:]
}
if alen > 0 {
b[6] = byte(alen)
copy(data[:alen], a.Addr)
data = data[alen:]
}
return ll, nil
}
func parseLinkAddr(b []byte) (Addr, error) {
if len(b) < 8 {
return nil, errInvalidAddr
}
_, a, err := parseKernelLinkAddr(syscall.AF_LINK, b[4:])
if err != nil {
return nil, err
}
a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
return a, nil
}
// parseKernelLinkAddr parses b as a link-layer address in
// conventional BSD kernel form.
func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
// The encoding looks like the following:
// +----------------------------+
// | Type (1 octet) |
// +----------------------------+
// | Name length (1 octet) |
// +----------------------------+
// | Address length (1 octet) |
// +----------------------------+
// | Selector length (1 octet) |
// +----------------------------+
// | Data (variable) |
// +----------------------------+
//
// On some platforms, all-bit-one of length field means "don't
// care".
nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
if nlen == 0xff {
nlen = 0
}
if alen == 0xff {
alen = 0
}
if slen == 0xff {
slen = 0
}
l := 4 + nlen + alen + slen
if len(b) < l {
return 0, nil, errInvalidAddr
}
data := b[4:]
var name string
var addr []byte
if nlen > 0 {
name = string(data[:nlen])
data = data[nlen:]
}
if alen > 0 {
addr = data[:alen]
data = data[alen:]
}
return l, &LinkAddr{Name: name, Addr: addr}, nil
}
// An Inet4Addr represents an internet address for IPv4.
type Inet4Addr struct {
IP [4]byte // IP address
}
// Family implements the Family method of Addr interface.
func (a *Inet4Addr) Family() int { return syscall.AF_INET }
func (a *Inet4Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet, roundup(sizeofSockaddrInet)
}
func (a *Inet4Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET
copy(b[4:8], a.IP[:])
return ll, nil
}
// An Inet6Addr represents an internet address for IPv6.
type Inet6Addr struct {
IP [16]byte // IP address
ZoneID int // zone identifier
}
// Family implements the Family method of Addr interface.
func (a *Inet6Addr) Family() int { return syscall.AF_INET6 }
func (a *Inet6Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)
}
func (a *Inet6Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET6
copy(b[8:24], a.IP[:])
if a.ZoneID > 0 {
nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))
}
return ll, nil
}
// parseInetAddr parses b as an internet address for IPv4 or IPv6.
func | (af int, b []byte) (Addr, error) {
switch af {
case syscall.AF_INET:
if len(b) < sizeofSockaddrInet {
return nil, errInvalidAddr
}
a := &Inet4Addr{}
copy(a.IP[:], b[4:8])
return a, nil
case syscall.AF_INET6:
if len(b) < sizeofSockaddrInet6 {
return nil, errInvalidAddr
}
a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
copy(a.IP[:], b[8:24])
if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
// KAME based IPv6 protocol stack usually
// embeds the interface index in the
// interface-local or link-local address as
// the kernel-internal form.
id := int(bigEndian.Uint16(a.IP[2:4]))
if id != 0 {
a.ZoneID = id
a.IP[2], a.IP[3] = 0, 0
}
}
return a, nil
default:
return nil, errInvalidAddr
}
}
// parseKernelInetAddr parses b as an internet address in conventional
// BSD kernel form.
func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
// The encoding looks similar to the NLRI encoding.
// +----------------------------+
// | Length (1 octet) |
// +----------------------------+
// | Address prefix (variable) |
// +----------------------------+
//
// The differences between the kernel form and the NLRI
// encoding are:
//
// - The length field of the kernel form indicates the prefix
// length in bytes, not in bits
//
// - In the kernel form, zero value of the length field
// doesn't mean 0.0.0.0/0 or ::/0
//
// - The kernel form appends leading bytes to the prefix field
// to make the <length, prefix> tuple to be conformed with
// the routing message boundary
l := int(b[0])
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
// On Darwin, an address in the kernel form is also
// used as a message filler.
if l == 0 || len(b) > roundup(l) {
l = roundup(l)
}
} else {
l = roundup(l)
}
if len(b) < l {
return 0, nil, errInvalidAddr
}
// Don't reorder case expressions.
// The case expressions for IPv6 must come first.
const (
off4 = 4 // offset of in_addr
off6 = 8 // offset of in6_addr
)
switch {
case b[0] == sizeofSockaddrInet6:
a := &Inet6Addr{}
copy(a.IP[:], b[off6:off6+16])
return int(b[0]), a, nil
case af == syscall.AF_INET6:
a := &Inet6Addr{}
if l-1 < off6 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off6:l])
}
return int(b[0]), a, nil
case b[0] == sizeofSockaddrInet:
a := &Inet4Addr{}
copy(a.IP[:], b[off4:off4+4])
return int(b[0]), a, nil
default: // an old fashion, AF_UNSPEC or unknown means AF_INET
a := &Inet4Addr{}
if l-1 < off4 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off4:l])
}
return int(b[0]), a, nil
}
}
// A DefaultAddr represents an address of various operating
// system-specific features.
type DefaultAddr struct {
af int
Raw []byte // raw format of address
}
// Family implements the Family method of Addr interface.
func (a *DefaultAddr) Family() int { return a.af }
func (a *DefaultAddr) lenAndSpace() (int, int) {
l := len(a.Raw)
return l, roundup(l)
}
func (a *DefaultAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
if l > 255 {
return 0, errInvalidAddr
}
b[1] = byte(l)
copy(b[:l], a.Raw)
return ll, nil
}
func parseDefaultAddr(b []byte) (Addr, error) {
if len(b) < 2 || len(b) < int(b[0]) {
return nil, errInvalidAddr
}
a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}
return a, nil
}
func addrsSpace(as []Addr) int {
var l int
for _, a := range as {
switch a := a.(type) {
case *LinkAddr:
_, ll := a.lenAndSpace()
l += ll
case *Inet4Addr:
_, ll := a.lenAndSpace()
l += ll
case *Inet6Addr:
_, ll := a.lenAndSpace()
l += ll
case *DefaultAddr:
_, ll := a.lenAndSpace()
l += ll
}
}
return l
}
// marshalAddrs marshals as and returns a bitmap indicating which
// address is stored in b.
func marshalAddrs(b []byte, as []Addr) (uint, error) {
var attrs uint
for i, a := range as {
switch a := a.(type) {
case *LinkAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet4Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet6Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *DefaultAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
}
}
return attrs, nil
}
func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {
var as [syscall.RTAX_MAX]Addr
af := int(syscall.AF_UNSPEC)
for i := uint(0); i < syscall.RTAX_MAX && len(b) >= roundup(0); i++ {
if attrs&(1<<i) == 0 {
continue
}
if i <= syscall.RTAX_BRD {
switch b[1] {
case syscall.AF_LINK:
a, err := parseLinkAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
case syscall.AF_INET, syscall.AF_INET6:
af = int(b[1])
a, err := parseInetAddr(af, b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
default:
l, a, err := fn(af, b)
if err != nil {
return nil, err
}
as[i] = a
ll := roundup(l)
if len(b) < ll {
b = b[l:]
} else {
b = b[ll:]
}
}
} else {
a, err := parseDefaultAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
}
}
// The only remaining bytes in b should be alignment.
// However, under some circumstances DragonFly BSD appears to put
// more addresses in the message than are indicated in the address
// bitmask, so don't check for this.
return as[:], nil
}
| parseInetAddr | identifier_name |
address.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package route
import (
"runtime"
"syscall"
)
// An Addr represents an address associated with packet routing.
type Addr interface {
// Family returns an address family.
Family() int
}
// A LinkAddr represents a link-layer address.
type LinkAddr struct {
Index int // interface index when attached
Name string // interface name when attached
Addr []byte // link-layer address when attached
}
// Family implements the Family method of Addr interface.
func (a *LinkAddr) Family() int { return syscall.AF_LINK }
func (a *LinkAddr) lenAndSpace() (int, int) {
l := 8 + len(a.Name) + len(a.Addr)
return l, roundup(l)
}
func (a *LinkAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
nlen, alen := len(a.Name), len(a.Addr)
if nlen > 255 || alen > 255 {
return 0, errInvalidAddr
}
b[0] = byte(l)
b[1] = syscall.AF_LINK
if a.Index > 0 {
nativeEndian.PutUint16(b[2:4], uint16(a.Index))
}
data := b[8:]
if nlen > 0 {
b[5] = byte(nlen)
copy(data[:nlen], a.Name)
data = data[nlen:]
}
if alen > 0 {
b[6] = byte(alen)
copy(data[:alen], a.Addr)
data = data[alen:]
}
return ll, nil
}
func parseLinkAddr(b []byte) (Addr, error) {
if len(b) < 8 {
return nil, errInvalidAddr
}
_, a, err := parseKernelLinkAddr(syscall.AF_LINK, b[4:])
if err != nil {
return nil, err
}
a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
return a, nil
}
// parseKernelLinkAddr parses b as a link-layer address in
// conventional BSD kernel form.
func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
// The encoding looks like the following:
// +----------------------------+
// | Type (1 octet) |
// +----------------------------+
// | Name length (1 octet) |
// +----------------------------+
// | Address length (1 octet) |
// +----------------------------+
// | Selector length (1 octet) |
// +----------------------------+
// | Data (variable) |
// +----------------------------+
//
// On some platforms, all-bit-one of length field means "don't
// care".
nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
if nlen == 0xff {
nlen = 0
}
if alen == 0xff {
alen = 0
}
if slen == 0xff {
slen = 0
}
l := 4 + nlen + alen + slen
if len(b) < l {
return 0, nil, errInvalidAddr
}
data := b[4:]
var name string
var addr []byte
if nlen > 0 {
name = string(data[:nlen])
data = data[nlen:]
}
if alen > 0 {
addr = data[:alen]
data = data[alen:]
}
return l, &LinkAddr{Name: name, Addr: addr}, nil
}
// An Inet4Addr represents an internet address for IPv4.
type Inet4Addr struct {
IP [4]byte // IP address
}
// Family implements the Family method of Addr interface.
func (a *Inet4Addr) Family() int { return syscall.AF_INET }
func (a *Inet4Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet, roundup(sizeofSockaddrInet)
}
func (a *Inet4Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET
copy(b[4:8], a.IP[:])
return ll, nil
}
// An Inet6Addr represents an internet address for IPv6.
type Inet6Addr struct {
IP [16]byte // IP address
ZoneID int // zone identifier
}
// Family implements the Family method of Addr interface.
func (a *Inet6Addr) Family() int { return syscall.AF_INET6 }
func (a *Inet6Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)
}
func (a *Inet6Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET6
copy(b[8:24], a.IP[:])
if a.ZoneID > 0 {
nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))
}
return ll, nil
}
// parseInetAddr parses b as an internet address for IPv4 or IPv6.
func parseInetAddr(af int, b []byte) (Addr, error) {
switch af {
case syscall.AF_INET:
if len(b) < sizeofSockaddrInet {
return nil, errInvalidAddr
}
a := &Inet4Addr{}
copy(a.IP[:], b[4:8])
return a, nil
case syscall.AF_INET6:
if len(b) < sizeofSockaddrInet6 {
return nil, errInvalidAddr
}
a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
copy(a.IP[:], b[8:24])
if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
// KAME based IPv6 protocol stack usually
// embeds the interface index in the
// interface-local or link-local address as
// the kernel-internal form.
id := int(bigEndian.Uint16(a.IP[2:4]))
if id != 0 {
a.ZoneID = id
a.IP[2], a.IP[3] = 0, 0
}
}
return a, nil
default:
return nil, errInvalidAddr
}
}
// parseKernelInetAddr parses b as an internet address in conventional
// BSD kernel form.
func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
// The encoding looks similar to the NLRI encoding.
// +----------------------------+
// | Length (1 octet) |
// +----------------------------+
// | Address prefix (variable) |
// +----------------------------+
//
// The differences between the kernel form and the NLRI
// encoding are:
//
// - The length field of the kernel form indicates the prefix
// length in bytes, not in bits
//
// - In the kernel form, zero value of the length field
// doesn't mean 0.0.0.0/0 or ::/0
//
// - The kernel form appends leading bytes to the prefix field
// to make the <length, prefix> tuple to be conformed with
// the routing message boundary
l := int(b[0])
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
// On Darwin, an address in the kernel form is also
// used as a message filler.
if l == 0 || len(b) > roundup(l) {
l = roundup(l)
}
} else {
l = roundup(l)
}
if len(b) < l {
return 0, nil, errInvalidAddr
}
// Don't reorder case expressions.
// The case expressions for IPv6 must come first.
const (
off4 = 4 // offset of in_addr
off6 = 8 // offset of in6_addr
)
switch {
case b[0] == sizeofSockaddrInet6:
a := &Inet6Addr{}
copy(a.IP[:], b[off6:off6+16])
return int(b[0]), a, nil
case af == syscall.AF_INET6:
a := &Inet6Addr{}
if l-1 < off6 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off6:l])
}
return int(b[0]), a, nil
case b[0] == sizeofSockaddrInet:
a := &Inet4Addr{}
copy(a.IP[:], b[off4:off4+4])
return int(b[0]), a, nil
default: // an old fashion, AF_UNSPEC or unknown means AF_INET
a := &Inet4Addr{}
if l-1 < off4 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off4:l])
}
return int(b[0]), a, nil
}
}
// A DefaultAddr represents an address of various operating
// system-specific features.
type DefaultAddr struct {
af int
Raw []byte // raw format of address
}
// Family implements the Family method of Addr interface.
func (a *DefaultAddr) Family() int { return a.af }
func (a *DefaultAddr) lenAndSpace() (int, int) {
l := len(a.Raw)
return l, roundup(l)
}
func (a *DefaultAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
if l > 255 {
return 0, errInvalidAddr
}
b[1] = byte(l)
copy(b[:l], a.Raw)
return ll, nil
}
func parseDefaultAddr(b []byte) (Addr, error) {
if len(b) < 2 || len(b) < int(b[0]) {
return nil, errInvalidAddr
}
a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}
return a, nil
}
func addrsSpace(as []Addr) int {
var l int
for _, a := range as {
switch a := a.(type) {
case *LinkAddr:
_, ll := a.lenAndSpace()
l += ll
case *Inet4Addr:
_, ll := a.lenAndSpace()
l += ll
case *Inet6Addr:
_, ll := a.lenAndSpace()
l += ll
case *DefaultAddr:
_, ll := a.lenAndSpace()
l += ll
}
}
return l
}
// marshalAddrs marshals as and returns a bitmap indicating which
// address is stored in b.
func marshalAddrs(b []byte, as []Addr) (uint, error) {
var attrs uint
for i, a := range as {
switch a := a.(type) {
case *LinkAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet4Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet6Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *DefaultAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
}
}
return attrs, nil
}
func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {
var as [syscall.RTAX_MAX]Addr
af := int(syscall.AF_UNSPEC)
for i := uint(0); i < syscall.RTAX_MAX && len(b) >= roundup(0); i++ {
if attrs&(1<<i) == 0 {
continue
}
if i <= syscall.RTAX_BRD {
switch b[1] {
case syscall.AF_LINK:
a, err := parseLinkAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
case syscall.AF_INET, syscall.AF_INET6:
af = int(b[1])
a, err := parseInetAddr(af, b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
default:
l, a, err := fn(af, b)
if err != nil {
return nil, err
}
as[i] = a
ll := roundup(l)
if len(b) < ll {
b = b[l:]
} else {
b = b[ll:]
}
}
} else {
a, err := parseDefaultAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l |
b = b[l:]
}
}
// The only remaining bytes in b should be alignment.
// However, under some circumstances DragonFly BSD appears to put
// more addresses in the message than are indicated in the address
// bitmask, so don't check for this.
return as[:], nil
}
| {
return nil, errMessageTooShort
} | conditional_block |
address.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package route
import (
"runtime"
"syscall"
)
// An Addr represents an address associated with packet routing.
type Addr interface {
// Family returns an address family.
Family() int
}
// A LinkAddr represents a link-layer address.
type LinkAddr struct {
Index int // interface index when attached
Name string // interface name when attached
Addr []byte // link-layer address when attached
}
// Family implements the Family method of Addr interface.
func (a *LinkAddr) Family() int { return syscall.AF_LINK }
func (a *LinkAddr) lenAndSpace() (int, int) {
l := 8 + len(a.Name) + len(a.Addr)
return l, roundup(l)
}
func (a *LinkAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
nlen, alen := len(a.Name), len(a.Addr)
if nlen > 255 || alen > 255 {
return 0, errInvalidAddr
}
b[0] = byte(l)
b[1] = syscall.AF_LINK
if a.Index > 0 {
nativeEndian.PutUint16(b[2:4], uint16(a.Index))
}
data := b[8:]
if nlen > 0 {
b[5] = byte(nlen)
copy(data[:nlen], a.Name)
data = data[nlen:]
}
if alen > 0 {
b[6] = byte(alen)
copy(data[:alen], a.Addr)
data = data[alen:]
}
return ll, nil
}
func parseLinkAddr(b []byte) (Addr, error) {
if len(b) < 8 {
return nil, errInvalidAddr
}
_, a, err := parseKernelLinkAddr(syscall.AF_LINK, b[4:])
if err != nil {
return nil, err
}
a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
return a, nil
}
// parseKernelLinkAddr parses b as a link-layer address in
// conventional BSD kernel form.
func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
// The encoding looks like the following:
// +----------------------------+
// | Type (1 octet) |
// +----------------------------+
// | Name length (1 octet) |
// +----------------------------+
// | Address length (1 octet) |
// +----------------------------+
// | Selector length (1 octet) |
// +----------------------------+
// | Data (variable) |
// +----------------------------+
//
// On some platforms, all-bit-one of length field means "don't
// care".
nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
if nlen == 0xff {
nlen = 0
}
if alen == 0xff {
alen = 0
}
if slen == 0xff {
slen = 0
}
l := 4 + nlen + alen + slen
if len(b) < l {
return 0, nil, errInvalidAddr
}
data := b[4:]
var name string
var addr []byte
if nlen > 0 {
name = string(data[:nlen])
data = data[nlen:]
}
if alen > 0 {
addr = data[:alen]
data = data[alen:]
}
return l, &LinkAddr{Name: name, Addr: addr}, nil
}
// An Inet4Addr represents an internet address for IPv4.
type Inet4Addr struct {
IP [4]byte // IP address
}
// Family implements the Family method of Addr interface.
func (a *Inet4Addr) Family() int { return syscall.AF_INET }
func (a *Inet4Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet, roundup(sizeofSockaddrInet)
}
func (a *Inet4Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET
copy(b[4:8], a.IP[:])
return ll, nil
}
// An Inet6Addr represents an internet address for IPv6.
type Inet6Addr struct {
IP [16]byte // IP address
ZoneID int // zone identifier
}
// Family implements the Family method of Addr interface.
func (a *Inet6Addr) Family() int { return syscall.AF_INET6 }
func (a *Inet6Addr) lenAndSpace() (int, int) {
return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)
}
func (a *Inet6Addr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
b[0] = byte(l)
b[1] = syscall.AF_INET6
copy(b[8:24], a.IP[:])
if a.ZoneID > 0 {
nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))
}
return ll, nil
}
// parseInetAddr parses b as an internet address for IPv4 or IPv6.
func parseInetAddr(af int, b []byte) (Addr, error) {
switch af {
case syscall.AF_INET:
if len(b) < sizeofSockaddrInet {
return nil, errInvalidAddr
}
a := &Inet4Addr{}
copy(a.IP[:], b[4:8])
return a, nil
case syscall.AF_INET6:
if len(b) < sizeofSockaddrInet6 {
return nil, errInvalidAddr
}
a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
copy(a.IP[:], b[8:24])
if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
// KAME based IPv6 protocol stack usually
// embeds the interface index in the
// interface-local or link-local address as
// the kernel-internal form.
id := int(bigEndian.Uint16(a.IP[2:4]))
if id != 0 {
a.ZoneID = id
a.IP[2], a.IP[3] = 0, 0
}
}
return a, nil
default:
return nil, errInvalidAddr
}
}
// parseKernelInetAddr parses b as an internet address in conventional
// BSD kernel form.
func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
// The encoding looks similar to the NLRI encoding.
// +----------------------------+
// | Length (1 octet) |
// +----------------------------+
// | Address prefix (variable) |
// +----------------------------+
//
// The differences between the kernel form and the NLRI
// encoding are:
//
// - The length field of the kernel form indicates the prefix
// length in bytes, not in bits
//
// - In the kernel form, zero value of the length field
// doesn't mean 0.0.0.0/0 or ::/0
//
// - The kernel form appends leading bytes to the prefix field
// to make the <length, prefix> tuple to be conformed with
// the routing message boundary
l := int(b[0])
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
// On Darwin, an address in the kernel form is also
// used as a message filler.
if l == 0 || len(b) > roundup(l) {
l = roundup(l)
}
} else {
l = roundup(l)
}
if len(b) < l {
return 0, nil, errInvalidAddr
}
// Don't reorder case expressions.
// The case expressions for IPv6 must come first.
const (
off4 = 4 // offset of in_addr
off6 = 8 // offset of in6_addr
)
switch {
case b[0] == sizeofSockaddrInet6:
a := &Inet6Addr{}
copy(a.IP[:], b[off6:off6+16])
return int(b[0]), a, nil
case af == syscall.AF_INET6:
a := &Inet6Addr{}
if l-1 < off6 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off6:l])
}
return int(b[0]), a, nil
case b[0] == sizeofSockaddrInet:
a := &Inet4Addr{}
copy(a.IP[:], b[off4:off4+4])
return int(b[0]), a, nil
default: // an old fashion, AF_UNSPEC or unknown means AF_INET
a := &Inet4Addr{}
if l-1 < off4 {
copy(a.IP[:], b[1:l])
} else {
copy(a.IP[:], b[l-off4:l])
}
return int(b[0]), a, nil
}
}
// A DefaultAddr represents an address of various operating
// system-specific features.
type DefaultAddr struct {
af int
Raw []byte // raw format of address
}
// Family implements the Family method of Addr interface.
func (a *DefaultAddr) Family() int { return a.af }
func (a *DefaultAddr) lenAndSpace() (int, int) {
l := len(a.Raw)
return l, roundup(l)
}
func (a *DefaultAddr) marshal(b []byte) (int, error) {
l, ll := a.lenAndSpace()
if len(b) < ll {
return 0, errShortBuffer
}
if l > 255 {
return 0, errInvalidAddr
}
b[1] = byte(l)
copy(b[:l], a.Raw)
return ll, nil
}
func parseDefaultAddr(b []byte) (Addr, error) {
if len(b) < 2 || len(b) < int(b[0]) {
return nil, errInvalidAddr
}
a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}
return a, nil
}
func addrsSpace(as []Addr) int {
var l int
for _, a := range as {
switch a := a.(type) {
case *LinkAddr:
_, ll := a.lenAndSpace()
l += ll
case *Inet4Addr:
_, ll := a.lenAndSpace()
l += ll
case *Inet6Addr:
_, ll := a.lenAndSpace()
l += ll
case *DefaultAddr:
_, ll := a.lenAndSpace()
l += ll
}
}
return l
}
// marshalAddrs marshals as and returns a bitmap indicating which
// address is stored in b.
func marshalAddrs(b []byte, as []Addr) (uint, error) {
var attrs uint
for i, a := range as {
switch a := a.(type) {
case *LinkAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet4Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *Inet6Addr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
case *DefaultAddr:
l, err := a.marshal(b)
if err != nil {
return 0, err
}
b = b[l:]
attrs |= 1 << uint(i)
}
}
return attrs, nil
}
func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {
var as [syscall.RTAX_MAX]Addr
af := int(syscall.AF_UNSPEC)
for i := uint(0); i < syscall.RTAX_MAX && len(b) >= roundup(0); i++ {
if attrs&(1<<i) == 0 {
continue
}
if i <= syscall.RTAX_BRD {
switch b[1] {
case syscall.AF_LINK:
a, err := parseLinkAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
case syscall.AF_INET, syscall.AF_INET6:
af = int(b[1])
a, err := parseInetAddr(af, b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0]))
if len(b) < l {
return nil, errMessageTooShort
}
b = b[l:]
default:
l, a, err := fn(af, b)
if err != nil {
return nil, err
}
as[i] = a
ll := roundup(l)
if len(b) < ll {
b = b[l:]
} else {
b = b[ll:]
}
}
} else {
a, err := parseDefaultAddr(b)
if err != nil {
return nil, err
}
as[i] = a
l := roundup(int(b[0])) | b = b[l:]
}
}
// The only remaining bytes in b should be alignment.
// However, under some circumstances DragonFly BSD appears to put
// more addresses in the message than are indicated in the address
// bitmask, so don't check for this.
return as[:], nil
} | if len(b) < l {
return nil, errMessageTooShort
} | random_line_split |
main.rs | extern crate sdl2;
use sdl2::pixels::Color;
use sdl2::event::Event;
use sdl2::GameControllerSubsystem;
use sdl2::controller::GameController;
use sdl2::controller::Button;
use sdl2::render::Canvas;
use sdl2::render::Texture;
use sdl2::render::TextureCreator;
use sdl2::rect::Point;
use sdl2::rect::Rect;
use sdl2::controller::Axis;
use sdl2::ttf;
use sdl2::ttf::Font;
use sdl2::video::Window;
use sdl2::video::WindowContext;
use std::i16;
use std::thread::sleep;
use std::time::Duration;
use crate::structs::Vector2;
use crate::structs::Spaceship;
use crate::structs::Projectile;
use crate::structs::GameState;
use crate::structs::State;
mod structs;
//mod subroutines;
const DEADZONE: f32 = 0.20;
const PLAYER_WIDTH: u32 = 50;
const SCREEN_WIDTH: u32 = 1280;
const SCREEN_HEIGHT: u32 = 720;
fn open_controller(css: &GameControllerSubsystem, index: u32) -> Option<GameController> {
match css.open(index) {
Ok(cont) => {
println!("Successfully opened controller {}", index);
Some(cont)
}
Err(_e) => {
println!("Unable to open controller {}", index);
None
}
}
}
fn check_deadzone(mut stick: Vector2<f32>) -> Vector2<f32> {
if stick.x > -DEADZONE && stick.x < DEADZONE && stick.y > -DEADZONE && stick.y < DEADZONE {
stick.x = 0.0;
stick.y = 0.0;
}
stick
}
fn text_texture<'a>(text: &str, texture_creator: &'a TextureCreator<WindowContext>, font: &Font) -> Texture<'a> {
let color = Color::RGB(0, 255, 0);
match font.render(text).solid(color) {
Ok(surface) => {
match texture_creator.create_texture_from_surface(surface) {
Ok(t) => {
t
}
Err(e) => |
}
}
Err(e) => {
panic!("{}", e);
}
}
}
fn obtain_result<T, E: std::fmt::Display>(res: Result<T, E>) -> T {
match res {
Ok(r) => {
r
}
Err(e) => {
panic!("{}", e);
}
}
}
fn draw_centered_text(canvas: &mut Canvas<Window>, texture: &Texture, y_offset: i32) {
//Draw the title
let dst = {
let query = texture.query();
let xpos = (SCREEN_WIDTH / 2 - query.width / 2) as i32;
let ypos = (SCREEN_HEIGHT / 2 - query.height / 2) as i32 + y_offset;
Rect::new(xpos, ypos, query.width, query.height)
};
canvas.copy(texture, None, dst).unwrap();
}
fn delete_marked_entities<T>(optionvec: &mut Vec<Option<T>>, marks: Vec<usize>) {
for i in marks {
optionvec[i] = None;
}
}
fn insert_into_option_vec<T>(optionvec: &mut Vec<Option<T>>, item: T) {
let mut index = None;
for (i, p) in optionvec.iter().enumerate() {
if let None = p {
index = Some(i);
}
}
match index {
Some(i) => {
optionvec[i] = Some(item);
}
None => {
optionvec.push(Some(item));
}
}
}
fn clamp<T: std::cmp::PartialOrd>(value: T, lower_bound: T, upper_bound: T) -> T{
let mut clamped_value = value;
if clamped_value < lower_bound {
clamped_value = lower_bound;
}
if clamped_value > upper_bound {
clamped_value = upper_bound;
}
clamped_value
}
fn main() {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
//Create the actual window
let window = video_subsystem.window("Galaxy Monkey", SCREEN_WIDTH, SCREEN_HEIGHT).position_centered().build().unwrap();
//Create primary drawing interface
let mut canvas = window.into_canvas().build().unwrap();
//Create the texture creator
let texture_creator = canvas.texture_creator();
//Create the event_pump
let mut event_pump = sdl_context.event_pump().unwrap();
//Init the controller subsystem
let controller_ss = sdl_context.game_controller().unwrap();
//Init the timer subsystem
let mut timer_ss = sdl_context.timer().unwrap();
//Create array of controllers
let mut _controllers: [Option<GameController>; 4] = [None, None, None, None];
//Init the ttf subsystem
let ttf_context = obtain_result(ttf::init());
//Load the font
let font = obtain_result(ttf_context.load_font("fonts/CursedTimerULiL.ttf", 64));
//Create title screen texture
let game_title = text_texture("Galaxy Monkey", &texture_creator, &font);
//Create press start text
let press_start_text = text_texture("Press Start", &texture_creator, &font);
let mut press_start_position: i32 = 150;
//Round # texture variable
let mut round_number_texture = text_texture("Round 0", &texture_creator, &font);
//Timer variable for making "Press Start" flash
let mut press_start_timer = 0;
let mut displaying = true;
//Timer variable for transitioning between rounds
let mut round_transition_timer = 0;
let mut going_to_next_round = false;
//Initialize the game state
let mut game_state = {
let left_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let right_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let player = {
let x = (SCREEN_WIDTH / 2 - PLAYER_WIDTH) as f32;
let y = (SCREEN_HEIGHT / 2 - PLAYER_WIDTH) as f32;
let position = Vector2 {
x,
y
};
Spaceship {
position
}
};
let friendly_projectiles = Vec::new();
let enemies = Vec::new();
let round_number = 0;
GameState {
player,
state: State::StartMenu,
left_joystick,
right_joystick,
friendly_projectiles,
enemies,
round_number
}
};
let mut old_ticks = 0;
'running: loop {
//Get milliseconds since last frame
let ticks = timer_ss.ticks();
let time_delta = ticks - old_ticks;
match game_state.state {
State::StartMenu => {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::ControllerButtonDown {button: Button::Start, ..} |
Event::KeyDown {..} => {
game_state.state = State::Playing;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::MouseWheel {y, ..} => {
press_start_position -= y * 30;
}
_ => {}
}
}
//Clear the screen
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the title
draw_centered_text(&mut canvas, &game_title, -200);
//Draw press start
const INTERVAL: u32 = 500;
if ticks - press_start_timer > INTERVAL {
displaying = !displaying;
press_start_timer = ticks;
}
if displaying {
draw_centered_text(&mut canvas, &press_start_text, press_start_position);
}
}
State::Playing => {
//Process events
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::ControllerAxisMotion {axis: ax, value: v, ..} => {
match ax {
Axis::LeftX => {
game_state.left_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::LeftY => {
game_state.left_joystick.y = v as f32 / i16::MAX as f32;
}
Axis::RightX => {
game_state.right_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::RightY => {
game_state.right_joystick.y = v as f32 / i16::MAX as f32;
}
_ => {}
}
game_state.left_joystick = check_deadzone(game_state.left_joystick);
game_state.right_joystick = check_deadzone(game_state.right_joystick);
}
Event::KeyDown {keycode: Some(key), ..} => {
match key {
_ => {
println!("You pressed the unbound key: {}", key);
}
}
}
_ => {}
}
}
//Check if enemies option-vec is empty
let mut enemies_is_empty = true;
for enemy in game_state.enemies.iter() {
if let Some(_e) = enemy {
enemies_is_empty = false;
break;
}
}
//This will probably become the trigger for advancing rounds
if enemies_is_empty {
if !going_to_next_round {
//Start the timer
round_transition_timer = ticks;
//Increment round number
game_state.round_number += 1;
//Create round # texture
round_number_texture = text_texture(&format!("Round {}", game_state.round_number), &texture_creator, &font);
going_to_next_round = true;
}
const INTERVAL: u32 = 2500; //Timer duration in millis
if ticks - round_transition_timer > INTERVAL {
let new_enemy = {
let position = Vector2 {
x: 0.0,
y: 30.0
};
Spaceship {
position
}
};
//Insert enemy into vec
insert_into_option_vec(&mut game_state.enemies, new_enemy);
going_to_next_round = false;
}
}
//If the right stick is not neutral, fire a projectile
if game_state.right_joystick.x != 0.0 || game_state.right_joystick.y != 0.0 {
//Construct this new projectile
let projectile = {
let xpos = game_state.player.position.x + (PLAYER_WIDTH / 2) as f32;
let ypos = game_state.player.position.y + (PLAYER_WIDTH / 2) as f32;
let position = Vector2 {
x: xpos,
y: ypos
};
const PROJECTILE_SPEED: f32 = 10.0;
let angle = f32::atan(game_state.right_joystick.y / game_state.right_joystick.x);
let xvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::cos(angle))
} else {
PROJECTILE_SPEED * f32::cos(angle)
}
};
let yvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::sin(angle))
} else {
PROJECTILE_SPEED * f32::sin(angle)
}
};
let velocity = Vector2 {
x: xvel,
y: yvel
};
Projectile {
position,
velocity
}
};
//Insert new projectile into vec
insert_into_option_vec(&mut game_state.friendly_projectiles, projectile);
}
//Update the player
const PLAYER_SPEED: f32 = 3.0;
game_state.player.position.x += game_state.left_joystick.x * PLAYER_SPEED;
game_state.player.position.y += game_state.left_joystick.y * PLAYER_SPEED;
game_state.player.position.x = clamp(game_state.player.position.x, 0.0, (SCREEN_WIDTH - PLAYER_WIDTH) as f32);
game_state.player.position.y = clamp(game_state.player.position.y, 0.0, (SCREEN_HEIGHT - PLAYER_WIDTH) as f32);
//Update all enemies
let mut enemies_to_destroy = Vec::new();
for (i, enemy) in game_state.enemies.iter_mut().enumerate() {
if let Some(e) = enemy {
if e.position.x > SCREEN_WIDTH as f32 {
enemies_to_destroy.push(i);
}
e.position.x += 1.0;
}
}
//Set all offscreen enemies to None
delete_marked_entities(&mut game_state.enemies, enemies_to_destroy);
//Update all projectiles
let mut projectiles_to_destroy = Vec::new();
for (i, projectile) in game_state.friendly_projectiles.iter_mut().enumerate() {
if let Some(p) = projectile {
if p.position.x < 0.0 || p.position.x > SCREEN_WIDTH as f32 ||
p.position.y < 0.0 || p.position.y > SCREEN_HEIGHT as f32 {
projectiles_to_destroy.push(i);
}
p.position.x += p.velocity.x;
p.position.y += p.velocity.y;
}
}
//Set all offscreen projectiles to None
delete_marked_entities(&mut game_state.friendly_projectiles, projectiles_to_destroy);
//Clear the canvas
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the spaceship
canvas.set_draw_color(Color::RGB(150, 150, 150));
canvas.fill_rect(Rect::new(game_state.player.position.x as i32, game_state.player.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
//Draw all enemies
canvas.set_draw_color(Color::RGB(50, 120, 0));
for enemy in game_state.enemies.iter() {
if let Some(e) = enemy {
canvas.fill_rect(Rect::new(e.position.x as i32, e.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
}
}
//Draw all projectiles
canvas.set_draw_color(Color::RGB(150, 150, 150));
for projectile in game_state.friendly_projectiles.iter() {
if let Some(p) = projectile {
let point = Point::new(p.position.x as i32, p.position.y as i32);
canvas.draw_point(point).unwrap();
}
}
//Draw the round transition text if necessary
if going_to_next_round {
draw_centered_text(&mut canvas, &round_number_texture, 0);
}
}
}
canvas.present();
//Update old_ticks
old_ticks = ticks;
if time_delta < 8 {
sleep(Duration::from_millis((8 - time_delta) as u64));
}
}
}
| {
panic!("{}", e);
} | conditional_block |
main.rs | extern crate sdl2;
use sdl2::pixels::Color;
use sdl2::event::Event;
use sdl2::GameControllerSubsystem;
use sdl2::controller::GameController;
use sdl2::controller::Button;
use sdl2::render::Canvas;
use sdl2::render::Texture;
use sdl2::render::TextureCreator;
use sdl2::rect::Point;
use sdl2::rect::Rect;
use sdl2::controller::Axis;
use sdl2::ttf;
use sdl2::ttf::Font;
use sdl2::video::Window;
use sdl2::video::WindowContext;
use std::i16;
use std::thread::sleep;
use std::time::Duration;
use crate::structs::Vector2;
use crate::structs::Spaceship;
use crate::structs::Projectile;
use crate::structs::GameState;
use crate::structs::State;
mod structs;
//mod subroutines;
const DEADZONE: f32 = 0.20;
const PLAYER_WIDTH: u32 = 50;
const SCREEN_WIDTH: u32 = 1280;
const SCREEN_HEIGHT: u32 = 720;
fn open_controller(css: &GameControllerSubsystem, index: u32) -> Option<GameController> {
match css.open(index) {
Ok(cont) => {
println!("Successfully opened controller {}", index);
Some(cont)
}
Err(_e) => {
println!("Unable to open controller {}", index);
None
}
}
}
fn check_deadzone(mut stick: Vector2<f32>) -> Vector2<f32> {
if stick.x > -DEADZONE && stick.x < DEADZONE && stick.y > -DEADZONE && stick.y < DEADZONE {
stick.x = 0.0;
stick.y = 0.0;
}
stick
}
fn text_texture<'a>(text: &str, texture_creator: &'a TextureCreator<WindowContext>, font: &Font) -> Texture<'a> |
fn obtain_result<T, E: std::fmt::Display>(res: Result<T, E>) -> T {
match res {
Ok(r) => {
r
}
Err(e) => {
panic!("{}", e);
}
}
}
fn draw_centered_text(canvas: &mut Canvas<Window>, texture: &Texture, y_offset: i32) {
//Draw the title
let dst = {
let query = texture.query();
let xpos = (SCREEN_WIDTH / 2 - query.width / 2) as i32;
let ypos = (SCREEN_HEIGHT / 2 - query.height / 2) as i32 + y_offset;
Rect::new(xpos, ypos, query.width, query.height)
};
canvas.copy(texture, None, dst).unwrap();
}
fn delete_marked_entities<T>(optionvec: &mut Vec<Option<T>>, marks: Vec<usize>) {
for i in marks {
optionvec[i] = None;
}
}
fn insert_into_option_vec<T>(optionvec: &mut Vec<Option<T>>, item: T) {
let mut index = None;
for (i, p) in optionvec.iter().enumerate() {
if let None = p {
index = Some(i);
}
}
match index {
Some(i) => {
optionvec[i] = Some(item);
}
None => {
optionvec.push(Some(item));
}
}
}
fn clamp<T: std::cmp::PartialOrd>(value: T, lower_bound: T, upper_bound: T) -> T{
let mut clamped_value = value;
if clamped_value < lower_bound {
clamped_value = lower_bound;
}
if clamped_value > upper_bound {
clamped_value = upper_bound;
}
clamped_value
}
fn main() {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
//Create the actual window
let window = video_subsystem.window("Galaxy Monkey", SCREEN_WIDTH, SCREEN_HEIGHT).position_centered().build().unwrap();
//Create primary drawing interface
let mut canvas = window.into_canvas().build().unwrap();
//Create the texture creator
let texture_creator = canvas.texture_creator();
//Create the event_pump
let mut event_pump = sdl_context.event_pump().unwrap();
//Init the controller subsystem
let controller_ss = sdl_context.game_controller().unwrap();
//Init the timer subsystem
let mut timer_ss = sdl_context.timer().unwrap();
//Create array of controllers
let mut _controllers: [Option<GameController>; 4] = [None, None, None, None];
//Init the ttf subsystem
let ttf_context = obtain_result(ttf::init());
//Load the font
let font = obtain_result(ttf_context.load_font("fonts/CursedTimerULiL.ttf", 64));
//Create title screen texture
let game_title = text_texture("Galaxy Monkey", &texture_creator, &font);
//Create press start text
let press_start_text = text_texture("Press Start", &texture_creator, &font);
let mut press_start_position: i32 = 150;
//Round # texture variable
let mut round_number_texture = text_texture("Round 0", &texture_creator, &font);
//Timer variable for making "Press Start" flash
let mut press_start_timer = 0;
let mut displaying = true;
//Timer variable for transitioning between rounds
let mut round_transition_timer = 0;
let mut going_to_next_round = false;
//Initialize the game state
let mut game_state = {
let left_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let right_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let player = {
let x = (SCREEN_WIDTH / 2 - PLAYER_WIDTH) as f32;
let y = (SCREEN_HEIGHT / 2 - PLAYER_WIDTH) as f32;
let position = Vector2 {
x,
y
};
Spaceship {
position
}
};
let friendly_projectiles = Vec::new();
let enemies = Vec::new();
let round_number = 0;
GameState {
player,
state: State::StartMenu,
left_joystick,
right_joystick,
friendly_projectiles,
enemies,
round_number
}
};
let mut old_ticks = 0;
'running: loop {
//Get milliseconds since last frame
let ticks = timer_ss.ticks();
let time_delta = ticks - old_ticks;
match game_state.state {
State::StartMenu => {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::ControllerButtonDown {button: Button::Start, ..} |
Event::KeyDown {..} => {
game_state.state = State::Playing;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::MouseWheel {y, ..} => {
press_start_position -= y * 30;
}
_ => {}
}
}
//Clear the screen
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the title
draw_centered_text(&mut canvas, &game_title, -200);
//Draw press start
const INTERVAL: u32 = 500;
if ticks - press_start_timer > INTERVAL {
displaying = !displaying;
press_start_timer = ticks;
}
if displaying {
draw_centered_text(&mut canvas, &press_start_text, press_start_position);
}
}
State::Playing => {
//Process events
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::ControllerAxisMotion {axis: ax, value: v, ..} => {
match ax {
Axis::LeftX => {
game_state.left_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::LeftY => {
game_state.left_joystick.y = v as f32 / i16::MAX as f32;
}
Axis::RightX => {
game_state.right_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::RightY => {
game_state.right_joystick.y = v as f32 / i16::MAX as f32;
}
_ => {}
}
game_state.left_joystick = check_deadzone(game_state.left_joystick);
game_state.right_joystick = check_deadzone(game_state.right_joystick);
}
Event::KeyDown {keycode: Some(key), ..} => {
match key {
_ => {
println!("You pressed the unbound key: {}", key);
}
}
}
_ => {}
}
}
//Check if enemies option-vec is empty
let mut enemies_is_empty = true;
for enemy in game_state.enemies.iter() {
if let Some(_e) = enemy {
enemies_is_empty = false;
break;
}
}
//This will probably become the trigger for advancing rounds
if enemies_is_empty {
if !going_to_next_round {
//Start the timer
round_transition_timer = ticks;
//Increment round number
game_state.round_number += 1;
//Create round # texture
round_number_texture = text_texture(&format!("Round {}", game_state.round_number), &texture_creator, &font);
going_to_next_round = true;
}
const INTERVAL: u32 = 2500; //Timer duration in millis
if ticks - round_transition_timer > INTERVAL {
let new_enemy = {
let position = Vector2 {
x: 0.0,
y: 30.0
};
Spaceship {
position
}
};
//Insert enemy into vec
insert_into_option_vec(&mut game_state.enemies, new_enemy);
going_to_next_round = false;
}
}
//If the right stick is not neutral, fire a projectile
if game_state.right_joystick.x != 0.0 || game_state.right_joystick.y != 0.0 {
//Construct this new projectile
let projectile = {
let xpos = game_state.player.position.x + (PLAYER_WIDTH / 2) as f32;
let ypos = game_state.player.position.y + (PLAYER_WIDTH / 2) as f32;
let position = Vector2 {
x: xpos,
y: ypos
};
const PROJECTILE_SPEED: f32 = 10.0;
let angle = f32::atan(game_state.right_joystick.y / game_state.right_joystick.x);
let xvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::cos(angle))
} else {
PROJECTILE_SPEED * f32::cos(angle)
}
};
let yvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::sin(angle))
} else {
PROJECTILE_SPEED * f32::sin(angle)
}
};
let velocity = Vector2 {
x: xvel,
y: yvel
};
Projectile {
position,
velocity
}
};
//Insert new projectile into vec
insert_into_option_vec(&mut game_state.friendly_projectiles, projectile);
}
//Update the player
const PLAYER_SPEED: f32 = 3.0;
game_state.player.position.x += game_state.left_joystick.x * PLAYER_SPEED;
game_state.player.position.y += game_state.left_joystick.y * PLAYER_SPEED;
game_state.player.position.x = clamp(game_state.player.position.x, 0.0, (SCREEN_WIDTH - PLAYER_WIDTH) as f32);
game_state.player.position.y = clamp(game_state.player.position.y, 0.0, (SCREEN_HEIGHT - PLAYER_WIDTH) as f32);
//Update all enemies
let mut enemies_to_destroy = Vec::new();
for (i, enemy) in game_state.enemies.iter_mut().enumerate() {
if let Some(e) = enemy {
if e.position.x > SCREEN_WIDTH as f32 {
enemies_to_destroy.push(i);
}
e.position.x += 1.0;
}
}
//Set all offscreen enemies to None
delete_marked_entities(&mut game_state.enemies, enemies_to_destroy);
//Update all projectiles
let mut projectiles_to_destroy = Vec::new();
for (i, projectile) in game_state.friendly_projectiles.iter_mut().enumerate() {
if let Some(p) = projectile {
if p.position.x < 0.0 || p.position.x > SCREEN_WIDTH as f32 ||
p.position.y < 0.0 || p.position.y > SCREEN_HEIGHT as f32 {
projectiles_to_destroy.push(i);
}
p.position.x += p.velocity.x;
p.position.y += p.velocity.y;
}
}
//Set all offscreen projectiles to None
delete_marked_entities(&mut game_state.friendly_projectiles, projectiles_to_destroy);
//Clear the canvas
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the spaceship
canvas.set_draw_color(Color::RGB(150, 150, 150));
canvas.fill_rect(Rect::new(game_state.player.position.x as i32, game_state.player.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
//Draw all enemies
canvas.set_draw_color(Color::RGB(50, 120, 0));
for enemy in game_state.enemies.iter() {
if let Some(e) = enemy {
canvas.fill_rect(Rect::new(e.position.x as i32, e.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
}
}
//Draw all projectiles
canvas.set_draw_color(Color::RGB(150, 150, 150));
for projectile in game_state.friendly_projectiles.iter() {
if let Some(p) = projectile {
let point = Point::new(p.position.x as i32, p.position.y as i32);
canvas.draw_point(point).unwrap();
}
}
//Draw the round transition text if necessary
if going_to_next_round {
draw_centered_text(&mut canvas, &round_number_texture, 0);
}
}
}
canvas.present();
//Update old_ticks
old_ticks = ticks;
if time_delta < 8 {
sleep(Duration::from_millis((8 - time_delta) as u64));
}
}
}
| {
let color = Color::RGB(0, 255, 0);
match font.render(text).solid(color) {
Ok(surface) => {
match texture_creator.create_texture_from_surface(surface) {
Ok(t) => {
t
}
Err(e) => {
panic!("{}", e);
}
}
}
Err(e) => {
panic!("{}", e);
}
}
} | identifier_body |
main.rs | extern crate sdl2;
use sdl2::pixels::Color;
use sdl2::event::Event;
use sdl2::GameControllerSubsystem;
use sdl2::controller::GameController;
use sdl2::controller::Button;
use sdl2::render::Canvas;
use sdl2::render::Texture;
use sdl2::render::TextureCreator;
use sdl2::rect::Point;
use sdl2::rect::Rect;
use sdl2::controller::Axis;
use sdl2::ttf;
use sdl2::ttf::Font;
use sdl2::video::Window;
use sdl2::video::WindowContext;
use std::i16;
use std::thread::sleep;
use std::time::Duration;
use crate::structs::Vector2;
use crate::structs::Spaceship;
use crate::structs::Projectile;
use crate::structs::GameState;
use crate::structs::State;
mod structs;
//mod subroutines;
const DEADZONE: f32 = 0.20;
const PLAYER_WIDTH: u32 = 50;
const SCREEN_WIDTH: u32 = 1280;
const SCREEN_HEIGHT: u32 = 720;
fn open_controller(css: &GameControllerSubsystem, index: u32) -> Option<GameController> {
match css.open(index) {
Ok(cont) => {
println!("Successfully opened controller {}", index);
Some(cont)
}
Err(_e) => {
println!("Unable to open controller {}", index);
None
}
}
}
fn check_deadzone(mut stick: Vector2<f32>) -> Vector2<f32> {
if stick.x > -DEADZONE && stick.x < DEADZONE && stick.y > -DEADZONE && stick.y < DEADZONE {
stick.x = 0.0;
stick.y = 0.0;
}
stick
}
fn text_texture<'a>(text: &str, texture_creator: &'a TextureCreator<WindowContext>, font: &Font) -> Texture<'a> {
let color = Color::RGB(0, 255, 0);
match font.render(text).solid(color) {
Ok(surface) => {
match texture_creator.create_texture_from_surface(surface) {
Ok(t) => {
t
}
Err(e) => {
panic!("{}", e);
}
}
}
Err(e) => {
panic!("{}", e);
}
}
}
fn obtain_result<T, E: std::fmt::Display>(res: Result<T, E>) -> T {
match res {
Ok(r) => {
r
}
Err(e) => {
panic!("{}", e);
}
}
}
fn draw_centered_text(canvas: &mut Canvas<Window>, texture: &Texture, y_offset: i32) {
//Draw the title
let dst = {
let query = texture.query();
let xpos = (SCREEN_WIDTH / 2 - query.width / 2) as i32;
let ypos = (SCREEN_HEIGHT / 2 - query.height / 2) as i32 + y_offset;
Rect::new(xpos, ypos, query.width, query.height)
};
canvas.copy(texture, None, dst).unwrap();
}
fn delete_marked_entities<T>(optionvec: &mut Vec<Option<T>>, marks: Vec<usize>) {
for i in marks {
optionvec[i] = None;
}
}
fn insert_into_option_vec<T>(optionvec: &mut Vec<Option<T>>, item: T) {
let mut index = None;
for (i, p) in optionvec.iter().enumerate() {
if let None = p {
index = Some(i);
}
}
match index {
Some(i) => {
optionvec[i] = Some(item);
}
None => {
optionvec.push(Some(item));
}
}
}
fn clamp<T: std::cmp::PartialOrd>(value: T, lower_bound: T, upper_bound: T) -> T{
let mut clamped_value = value;
if clamped_value < lower_bound {
clamped_value = lower_bound;
}
if clamped_value > upper_bound {
clamped_value = upper_bound;
}
clamped_value
}
fn main() {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
//Create the actual window
let window = video_subsystem.window("Galaxy Monkey", SCREEN_WIDTH, SCREEN_HEIGHT).position_centered().build().unwrap();
//Create primary drawing interface
let mut canvas = window.into_canvas().build().unwrap();
//Create the texture creator
let texture_creator = canvas.texture_creator();
//Create the event_pump
let mut event_pump = sdl_context.event_pump().unwrap();
//Init the controller subsystem
let controller_ss = sdl_context.game_controller().unwrap();
//Init the timer subsystem
let mut timer_ss = sdl_context.timer().unwrap();
//Create array of controllers
let mut _controllers: [Option<GameController>; 4] = [None, None, None, None];
//Init the ttf subsystem
let ttf_context = obtain_result(ttf::init());
//Load the font
let font = obtain_result(ttf_context.load_font("fonts/CursedTimerULiL.ttf", 64));
//Create title screen texture
let game_title = text_texture("Galaxy Monkey", &texture_creator, &font);
//Create press start text
let press_start_text = text_texture("Press Start", &texture_creator, &font);
let mut press_start_position: i32 = 150;
//Round # texture variable
let mut round_number_texture = text_texture("Round 0", &texture_creator, &font);
//Timer variable for making "Press Start" flash
let mut press_start_timer = 0;
let mut displaying = true;
//Timer variable for transitioning between rounds
let mut round_transition_timer = 0;
let mut going_to_next_round = false;
//Initialize the game state
let mut game_state = {
let left_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let right_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let player = {
let x = (SCREEN_WIDTH / 2 - PLAYER_WIDTH) as f32;
let y = (SCREEN_HEIGHT / 2 - PLAYER_WIDTH) as f32;
let position = Vector2 {
x,
y
};
Spaceship {
position
}
};
let friendly_projectiles = Vec::new();
let enemies = Vec::new();
let round_number = 0;
GameState {
player,
state: State::StartMenu,
left_joystick,
right_joystick,
friendly_projectiles,
enemies,
round_number
}
};
let mut old_ticks = 0;
'running: loop {
//Get milliseconds since last frame
let ticks = timer_ss.ticks();
let time_delta = ticks - old_ticks;
match game_state.state {
State::StartMenu => {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::ControllerButtonDown {button: Button::Start, ..} |
Event::KeyDown {..} => {
game_state.state = State::Playing;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::MouseWheel {y, ..} => {
press_start_position -= y * 30;
}
_ => {}
}
}
//Clear the screen
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the title
draw_centered_text(&mut canvas, &game_title, -200);
//Draw press start
const INTERVAL: u32 = 500;
if ticks - press_start_timer > INTERVAL {
displaying = !displaying;
press_start_timer = ticks;
}
if displaying {
draw_centered_text(&mut canvas, &press_start_text, press_start_position);
}
}
State::Playing => {
//Process events
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::ControllerAxisMotion {axis: ax, value: v, ..} => {
match ax {
Axis::LeftX => {
game_state.left_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::LeftY => {
game_state.left_joystick.y = v as f32 / i16::MAX as f32;
}
Axis::RightX => {
game_state.right_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::RightY => {
game_state.right_joystick.y = v as f32 / i16::MAX as f32;
}
_ => {}
}
game_state.left_joystick = check_deadzone(game_state.left_joystick);
game_state.right_joystick = check_deadzone(game_state.right_joystick);
}
Event::KeyDown {keycode: Some(key), ..} => {
match key {
_ => {
println!("You pressed the unbound key: {}", key);
}
}
}
_ => {}
}
}
//Check if enemies option-vec is empty
let mut enemies_is_empty = true;
for enemy in game_state.enemies.iter() {
if let Some(_e) = enemy {
enemies_is_empty = false;
break;
}
}
//This will probably become the trigger for advancing rounds
if enemies_is_empty { | if !going_to_next_round {
//Start the timer
round_transition_timer = ticks;
//Increment round number
game_state.round_number += 1;
//Create round # texture
round_number_texture = text_texture(&format!("Round {}", game_state.round_number), &texture_creator, &font);
going_to_next_round = true;
}
const INTERVAL: u32 = 2500; //Timer duration in millis
if ticks - round_transition_timer > INTERVAL {
let new_enemy = {
let position = Vector2 {
x: 0.0,
y: 30.0
};
Spaceship {
position
}
};
//Insert enemy into vec
insert_into_option_vec(&mut game_state.enemies, new_enemy);
going_to_next_round = false;
}
}
//If the right stick is not neutral, fire a projectile
if game_state.right_joystick.x != 0.0 || game_state.right_joystick.y != 0.0 {
//Construct this new projectile
let projectile = {
let xpos = game_state.player.position.x + (PLAYER_WIDTH / 2) as f32;
let ypos = game_state.player.position.y + (PLAYER_WIDTH / 2) as f32;
let position = Vector2 {
x: xpos,
y: ypos
};
const PROJECTILE_SPEED: f32 = 10.0;
let angle = f32::atan(game_state.right_joystick.y / game_state.right_joystick.x);
let xvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::cos(angle))
} else {
PROJECTILE_SPEED * f32::cos(angle)
}
};
let yvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::sin(angle))
} else {
PROJECTILE_SPEED * f32::sin(angle)
}
};
let velocity = Vector2 {
x: xvel,
y: yvel
};
Projectile {
position,
velocity
}
};
//Insert new projectile into vec
insert_into_option_vec(&mut game_state.friendly_projectiles, projectile);
}
//Update the player
const PLAYER_SPEED: f32 = 3.0;
game_state.player.position.x += game_state.left_joystick.x * PLAYER_SPEED;
game_state.player.position.y += game_state.left_joystick.y * PLAYER_SPEED;
game_state.player.position.x = clamp(game_state.player.position.x, 0.0, (SCREEN_WIDTH - PLAYER_WIDTH) as f32);
game_state.player.position.y = clamp(game_state.player.position.y, 0.0, (SCREEN_HEIGHT - PLAYER_WIDTH) as f32);
//Update all enemies
let mut enemies_to_destroy = Vec::new();
for (i, enemy) in game_state.enemies.iter_mut().enumerate() {
if let Some(e) = enemy {
if e.position.x > SCREEN_WIDTH as f32 {
enemies_to_destroy.push(i);
}
e.position.x += 1.0;
}
}
//Set all offscreen enemies to None
delete_marked_entities(&mut game_state.enemies, enemies_to_destroy);
//Update all projectiles
let mut projectiles_to_destroy = Vec::new();
for (i, projectile) in game_state.friendly_projectiles.iter_mut().enumerate() {
if let Some(p) = projectile {
if p.position.x < 0.0 || p.position.x > SCREEN_WIDTH as f32 ||
p.position.y < 0.0 || p.position.y > SCREEN_HEIGHT as f32 {
projectiles_to_destroy.push(i);
}
p.position.x += p.velocity.x;
p.position.y += p.velocity.y;
}
}
//Set all offscreen projectiles to None
delete_marked_entities(&mut game_state.friendly_projectiles, projectiles_to_destroy);
//Clear the canvas
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the spaceship
canvas.set_draw_color(Color::RGB(150, 150, 150));
canvas.fill_rect(Rect::new(game_state.player.position.x as i32, game_state.player.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
//Draw all enemies
canvas.set_draw_color(Color::RGB(50, 120, 0));
for enemy in game_state.enemies.iter() {
if let Some(e) = enemy {
canvas.fill_rect(Rect::new(e.position.x as i32, e.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
}
}
//Draw all projectiles
canvas.set_draw_color(Color::RGB(150, 150, 150));
for projectile in game_state.friendly_projectiles.iter() {
if let Some(p) = projectile {
let point = Point::new(p.position.x as i32, p.position.y as i32);
canvas.draw_point(point).unwrap();
}
}
//Draw the round transition text if necessary
if going_to_next_round {
draw_centered_text(&mut canvas, &round_number_texture, 0);
}
}
}
canvas.present();
//Update old_ticks
old_ticks = ticks;
if time_delta < 8 {
sleep(Duration::from_millis((8 - time_delta) as u64));
}
}
} | random_line_split | |
main.rs | extern crate sdl2;
use sdl2::pixels::Color;
use sdl2::event::Event;
use sdl2::GameControllerSubsystem;
use sdl2::controller::GameController;
use sdl2::controller::Button;
use sdl2::render::Canvas;
use sdl2::render::Texture;
use sdl2::render::TextureCreator;
use sdl2::rect::Point;
use sdl2::rect::Rect;
use sdl2::controller::Axis;
use sdl2::ttf;
use sdl2::ttf::Font;
use sdl2::video::Window;
use sdl2::video::WindowContext;
use std::i16;
use std::thread::sleep;
use std::time::Duration;
use crate::structs::Vector2;
use crate::structs::Spaceship;
use crate::structs::Projectile;
use crate::structs::GameState;
use crate::structs::State;
mod structs;
//mod subroutines;
const DEADZONE: f32 = 0.20;
const PLAYER_WIDTH: u32 = 50;
const SCREEN_WIDTH: u32 = 1280;
const SCREEN_HEIGHT: u32 = 720;
fn open_controller(css: &GameControllerSubsystem, index: u32) -> Option<GameController> {
match css.open(index) {
Ok(cont) => {
println!("Successfully opened controller {}", index);
Some(cont)
}
Err(_e) => {
println!("Unable to open controller {}", index);
None
}
}
}
fn check_deadzone(mut stick: Vector2<f32>) -> Vector2<f32> {
if stick.x > -DEADZONE && stick.x < DEADZONE && stick.y > -DEADZONE && stick.y < DEADZONE {
stick.x = 0.0;
stick.y = 0.0;
}
stick
}
fn text_texture<'a>(text: &str, texture_creator: &'a TextureCreator<WindowContext>, font: &Font) -> Texture<'a> {
let color = Color::RGB(0, 255, 0);
match font.render(text).solid(color) {
Ok(surface) => {
match texture_creator.create_texture_from_surface(surface) {
Ok(t) => {
t
}
Err(e) => {
panic!("{}", e);
}
}
}
Err(e) => {
panic!("{}", e);
}
}
}
fn obtain_result<T, E: std::fmt::Display>(res: Result<T, E>) -> T {
match res {
Ok(r) => {
r
}
Err(e) => {
panic!("{}", e);
}
}
}
fn draw_centered_text(canvas: &mut Canvas<Window>, texture: &Texture, y_offset: i32) {
//Draw the title
let dst = {
let query = texture.query();
let xpos = (SCREEN_WIDTH / 2 - query.width / 2) as i32;
let ypos = (SCREEN_HEIGHT / 2 - query.height / 2) as i32 + y_offset;
Rect::new(xpos, ypos, query.width, query.height)
};
canvas.copy(texture, None, dst).unwrap();
}
fn delete_marked_entities<T>(optionvec: &mut Vec<Option<T>>, marks: Vec<usize>) {
for i in marks {
optionvec[i] = None;
}
}
fn insert_into_option_vec<T>(optionvec: &mut Vec<Option<T>>, item: T) {
let mut index = None;
for (i, p) in optionvec.iter().enumerate() {
if let None = p {
index = Some(i);
}
}
match index {
Some(i) => {
optionvec[i] = Some(item);
}
None => {
optionvec.push(Some(item));
}
}
}
fn | <T: std::cmp::PartialOrd>(value: T, lower_bound: T, upper_bound: T) -> T{
let mut clamped_value = value;
if clamped_value < lower_bound {
clamped_value = lower_bound;
}
if clamped_value > upper_bound {
clamped_value = upper_bound;
}
clamped_value
}
fn main() {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
//Create the actual window
let window = video_subsystem.window("Galaxy Monkey", SCREEN_WIDTH, SCREEN_HEIGHT).position_centered().build().unwrap();
//Create primary drawing interface
let mut canvas = window.into_canvas().build().unwrap();
//Create the texture creator
let texture_creator = canvas.texture_creator();
//Create the event_pump
let mut event_pump = sdl_context.event_pump().unwrap();
//Init the controller subsystem
let controller_ss = sdl_context.game_controller().unwrap();
//Init the timer subsystem
let mut timer_ss = sdl_context.timer().unwrap();
//Create array of controllers
let mut _controllers: [Option<GameController>; 4] = [None, None, None, None];
//Init the ttf subsystem
let ttf_context = obtain_result(ttf::init());
//Load the font
let font = obtain_result(ttf_context.load_font("fonts/CursedTimerULiL.ttf", 64));
//Create title screen texture
let game_title = text_texture("Galaxy Monkey", &texture_creator, &font);
//Create press start text
let press_start_text = text_texture("Press Start", &texture_creator, &font);
let mut press_start_position: i32 = 150;
//Round # texture variable
let mut round_number_texture = text_texture("Round 0", &texture_creator, &font);
//Timer variable for making "Press Start" flash
let mut press_start_timer = 0;
let mut displaying = true;
//Timer variable for transitioning between rounds
let mut round_transition_timer = 0;
let mut going_to_next_round = false;
//Initialize the game state
let mut game_state = {
let left_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let right_joystick = Vector2 {
x: 0.0,
y: 0.0
};
let player = {
let x = (SCREEN_WIDTH / 2 - PLAYER_WIDTH) as f32;
let y = (SCREEN_HEIGHT / 2 - PLAYER_WIDTH) as f32;
let position = Vector2 {
x,
y
};
Spaceship {
position
}
};
let friendly_projectiles = Vec::new();
let enemies = Vec::new();
let round_number = 0;
GameState {
player,
state: State::StartMenu,
left_joystick,
right_joystick,
friendly_projectiles,
enemies,
round_number
}
};
let mut old_ticks = 0;
'running: loop {
//Get milliseconds since last frame
let ticks = timer_ss.ticks();
let time_delta = ticks - old_ticks;
match game_state.state {
State::StartMenu => {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::ControllerButtonDown {button: Button::Start, ..} |
Event::KeyDown {..} => {
game_state.state = State::Playing;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::MouseWheel {y, ..} => {
press_start_position -= y * 30;
}
_ => {}
}
}
//Clear the screen
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the title
draw_centered_text(&mut canvas, &game_title, -200);
//Draw press start
const INTERVAL: u32 = 500;
if ticks - press_start_timer > INTERVAL {
displaying = !displaying;
press_start_timer = ticks;
}
if displaying {
draw_centered_text(&mut canvas, &press_start_text, press_start_position);
}
}
State::Playing => {
//Process events
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} |
Event::ControllerButtonDown {button: Button::Back, ..} => {
break 'running;
}
Event::JoyDeviceAdded {which: i, ..} => {
_controllers[i as usize] = open_controller(&controller_ss, i);
}
Event::ControllerAxisMotion {axis: ax, value: v, ..} => {
match ax {
Axis::LeftX => {
game_state.left_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::LeftY => {
game_state.left_joystick.y = v as f32 / i16::MAX as f32;
}
Axis::RightX => {
game_state.right_joystick.x = v as f32 / i16::MAX as f32;
}
Axis::RightY => {
game_state.right_joystick.y = v as f32 / i16::MAX as f32;
}
_ => {}
}
game_state.left_joystick = check_deadzone(game_state.left_joystick);
game_state.right_joystick = check_deadzone(game_state.right_joystick);
}
Event::KeyDown {keycode: Some(key), ..} => {
match key {
_ => {
println!("You pressed the unbound key: {}", key);
}
}
}
_ => {}
}
}
//Check if enemies option-vec is empty
let mut enemies_is_empty = true;
for enemy in game_state.enemies.iter() {
if let Some(_e) = enemy {
enemies_is_empty = false;
break;
}
}
//This will probably become the trigger for advancing rounds
if enemies_is_empty {
if !going_to_next_round {
//Start the timer
round_transition_timer = ticks;
//Increment round number
game_state.round_number += 1;
//Create round # texture
round_number_texture = text_texture(&format!("Round {}", game_state.round_number), &texture_creator, &font);
going_to_next_round = true;
}
const INTERVAL: u32 = 2500; //Timer duration in millis
if ticks - round_transition_timer > INTERVAL {
let new_enemy = {
let position = Vector2 {
x: 0.0,
y: 30.0
};
Spaceship {
position
}
};
//Insert enemy into vec
insert_into_option_vec(&mut game_state.enemies, new_enemy);
going_to_next_round = false;
}
}
//If the right stick is not neutral, fire a projectile
if game_state.right_joystick.x != 0.0 || game_state.right_joystick.y != 0.0 {
//Construct this new projectile
let projectile = {
let xpos = game_state.player.position.x + (PLAYER_WIDTH / 2) as f32;
let ypos = game_state.player.position.y + (PLAYER_WIDTH / 2) as f32;
let position = Vector2 {
x: xpos,
y: ypos
};
const PROJECTILE_SPEED: f32 = 10.0;
let angle = f32::atan(game_state.right_joystick.y / game_state.right_joystick.x);
let xvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::cos(angle))
} else {
PROJECTILE_SPEED * f32::cos(angle)
}
};
let yvel = {
if game_state.right_joystick.x < 0.0 {
-(PROJECTILE_SPEED * f32::sin(angle))
} else {
PROJECTILE_SPEED * f32::sin(angle)
}
};
let velocity = Vector2 {
x: xvel,
y: yvel
};
Projectile {
position,
velocity
}
};
//Insert new projectile into vec
insert_into_option_vec(&mut game_state.friendly_projectiles, projectile);
}
//Update the player
const PLAYER_SPEED: f32 = 3.0;
game_state.player.position.x += game_state.left_joystick.x * PLAYER_SPEED;
game_state.player.position.y += game_state.left_joystick.y * PLAYER_SPEED;
game_state.player.position.x = clamp(game_state.player.position.x, 0.0, (SCREEN_WIDTH - PLAYER_WIDTH) as f32);
game_state.player.position.y = clamp(game_state.player.position.y, 0.0, (SCREEN_HEIGHT - PLAYER_WIDTH) as f32);
//Update all enemies
let mut enemies_to_destroy = Vec::new();
for (i, enemy) in game_state.enemies.iter_mut().enumerate() {
if let Some(e) = enemy {
if e.position.x > SCREEN_WIDTH as f32 {
enemies_to_destroy.push(i);
}
e.position.x += 1.0;
}
}
//Set all offscreen enemies to None
delete_marked_entities(&mut game_state.enemies, enemies_to_destroy);
//Update all projectiles
let mut projectiles_to_destroy = Vec::new();
for (i, projectile) in game_state.friendly_projectiles.iter_mut().enumerate() {
if let Some(p) = projectile {
if p.position.x < 0.0 || p.position.x > SCREEN_WIDTH as f32 ||
p.position.y < 0.0 || p.position.y > SCREEN_HEIGHT as f32 {
projectiles_to_destroy.push(i);
}
p.position.x += p.velocity.x;
p.position.y += p.velocity.y;
}
}
//Set all offscreen projectiles to None
delete_marked_entities(&mut game_state.friendly_projectiles, projectiles_to_destroy);
//Clear the canvas
canvas.set_draw_color(Color::RGB(0, 0, 0));
canvas.clear();
//Draw the spaceship
canvas.set_draw_color(Color::RGB(150, 150, 150));
canvas.fill_rect(Rect::new(game_state.player.position.x as i32, game_state.player.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
//Draw all enemies
canvas.set_draw_color(Color::RGB(50, 120, 0));
for enemy in game_state.enemies.iter() {
if let Some(e) = enemy {
canvas.fill_rect(Rect::new(e.position.x as i32, e.position.y as i32, PLAYER_WIDTH, PLAYER_WIDTH)).unwrap();
}
}
//Draw all projectiles
canvas.set_draw_color(Color::RGB(150, 150, 150));
for projectile in game_state.friendly_projectiles.iter() {
if let Some(p) = projectile {
let point = Point::new(p.position.x as i32, p.position.y as i32);
canvas.draw_point(point).unwrap();
}
}
//Draw the round transition text if necessary
if going_to_next_round {
draw_centered_text(&mut canvas, &round_number_texture, 0);
}
}
}
canvas.present();
//Update old_ticks
old_ticks = ticks;
if time_delta < 8 {
sleep(Duration::from_millis((8 - time_delta) as u64));
}
}
}
| clamp | identifier_name |
bind.go | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
import (
"reflect"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/funcx"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/typex"
"github.com/apache/beam/sdks/v2/go/pkg/beam/internal/errors"
)
// TODO(herohde) 4/21/2017: Bind is where most user mistakes will likely show
// up. We should verify that common mistakes yield reasonable errors.
// Bind returns the inbound, outbound and underlying output types for a Fn,
// when bound to the underlying input types. The complication of bind is
// primarily that UserFns have loose signatures and bind must produce valid
// type information for the execution plan.
//
// For example,
//
// func (t EventTime, k typex.X, v int, emit func(string, typex.X))
//
// or
//
// func (context.Context, k typex.X, v int) (string, typex.X, error)
//
// are UserFns that may take one or two incoming fulltypes: either KV<X,int>
// or X with a singleton side input of type int. For the purpose of the
// shape of data processing, the two forms are equivalent. The non-data types,
// context.Context and error, are not part of the data signature, but in play
// only at runtime.
//
// If either was bound to the input type [KV<string,int>], bind would return:
//
// inbound: [Main: KV<X,int>]
// outbound: [KV<string,X>]
// output: [KV<string,string>]
//
// Note that it propagates the assignment of X to string in the output type.
//
// If either was instead bound to the input fulltypes [float, int], the
// result would be:
//
// inbound: [Main: X, Singleton: int]
// outbound: [KV<string,X>]
// output: [KV<string, float>]
//
// Here, the inbound shape and output types are different from before.
func Bind(fn *funcx.Fn, typedefs map[string]reflect.Type, in ...typex.FullType) ([]typex.FullType, []InputKind, []typex.FullType, []typex.FullType, error) {
addContext := func(err error, fn *funcx.Fn) error {
return errors.WithContextf(err, "binding fn %v", fn.Fn.Name())
}
inbound, kinds, err := findInbound(fn, in...)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
outbound, err := findOutbound(fn)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
subst, err := typex.Bind(inbound, in)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
for k, v := range typedefs {
if substK, exists := subst[k]; exists {
err := errors.Errorf("cannot substitute type %v with %v, already defined as %v", k, v, substK)
return nil, nil, nil, nil, addContext(err, fn)
}
subst[k] = v
}
out, err := typex.Substitute(outbound, subst)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
return inbound, kinds, outbound, out, nil
}
func findOutbound(fn *funcx.Fn) ([]typex.FullType, error) {
ret := trimIllegal(returnTypes(funcx.SubReturns(fn.Ret, fn.Returns(funcx.RetValue)...)))
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnEmit)...)
var outbound []typex.FullType
// The direct output is the "main" output, if any.
switch len(ret) {
case 0:
break // ok: no direct output.
case 1:
outbound = append(outbound, typex.New(ret[0]))
case 2:
outbound = append(outbound, typex.NewKV(typex.New(ret[0]), typex.New(ret[1])))
default:
return nil, errors.Errorf("too many return values: %v", ret)
}
for _, param := range params {
values, _ := funcx.UnfoldEmit(param.T)
trimmed := trimIllegal(values)
if len(trimmed) == 2 {
outbound = append(outbound, typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1])))
} else {
outbound = append(outbound, typex.New(trimmed[0]))
}
}
return outbound, nil
}
func returnTypes(list []funcx.ReturnParam) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
ret = append(ret, elm.T)
}
return ret
}
func findInbound(fn *funcx.Fn, in ...typex.FullType) ([]typex.FullType, []InputKind, error) {
// log.Printf("Bind inbound: %v %v", fn, in)
addContext := func(err error, p []funcx.FnParam, in any) error {
return errors.WithContextf(err, "binding params %v to input %v", p, in)
}
var inbound []typex.FullType
var kinds []InputKind
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnValue|funcx.FnIter|funcx.FnReIter|funcx.FnMultiMap)...)
index := 0
for _, input := range in {
arity, err := inboundArity(input, index == 0)
if err != nil {
return nil, nil, addContext(err, params, input)
}
if len(params)-index < arity {
return nil, nil, addContext(errors.New("too few params"), params[index:], input)
}
paramsToBind := params[index : index+arity]
elm, kind, err := tryBindInbound(input, paramsToBind, index == 0)
if err != nil {
return nil, nil, addContext(err, paramsToBind, input)
}
inbound = append(inbound, elm)
kinds = append(kinds, kind)
index += arity
}
if index < len(params) {
return nil, nil, addContext(errors.New("too few inputs: forgot an input or to annotate options?"), params, in)
}
if index > len(params) {
return nil, nil, addContext(errors.New("too many inputs"), params, in)
}
return inbound, kinds, nil
}
func tryBindInbound(t typex.FullType, args []funcx.FnParam, isMain bool) (typex.FullType, InputKind, error) {
kind := Main
var other typex.FullType
switch t.Class() {
case typex.Concrete, typex.Container:
if isMain {
other = typex.New(args[0].T)
} else {
// We accept various forms for side input. We have to disambiguate
// []string into a Singleton of type []string or a Slice of type
// string by matching up the incoming type and the param type.
arg := args[0]
switch arg.Kind {
case funcx.FnValue:
if args[0].T.Kind() == reflect.Slice && t.Type() == args[0].T.Elem() {
// TODO(herohde) 6/29/2017: we do not allow universal slices, for now.
kind = Slice
other = typex.New(args[0].T.Elem())
} else {
kind = Singleton
other = typex.New(args[0].T)
}
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.New(trimmed[0])
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.New(trimmed[0])
case funcx.FnMultiMap:
return nil, kind, errors.Errorf("input to MultiMap side input must be KV, got %v", t)
default:
return nil, kind, errors.Errorf("unexpected param kind: %v", arg)
}
}
case typex.Composite:
switch t.Type() {
case typex.KVType:
if isMain {
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
if args[1].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("value of %v cannot bind to %v", t, args[1])
}
other = typex.NewKV(typex.New(args[0].T), typex.New(args[1].T))
} else {
switch args[0].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnMultiMap:
values, _ := funcx.UnfoldMultiMap(args[0].T)
kind = MultiMap
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
default:
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
}
case typex.CoGBKType:
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
components := []typex.FullType{typex.New(args[0].T)}
for i := 1; i < len(args); i++ {
switch args[i].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
default:
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
}
other = typex.NewCoGBK(components...)
default:
return nil, kind, errors.Errorf("unexpected inbound type: %v", t.Type())
}
default:
return nil, kind, errors.Errorf("unexpected inbound class: %v", t.Class())
}
if !typex.IsStructurallyAssignable(t, other) {
return nil, kind, errors.Errorf("%v is not assignable to %v", t, other)
}
return other, kind, nil
}
func inboundArity(t typex.FullType, isMain bool) (int, error) {
if t.Class() == typex.Composite {
switch t.Type() {
case typex.KVType:
if isMain {
return 2, nil
}
// A KV side input must be a single iterator/map.
return 1, nil
case typex.CoGBKType:
return len(t.Components()), nil
default:
return 0, errors.Errorf("unexpected composite inbound type: %v", t.Type())
}
}
return 1, nil
}
func trimIllegal(list []reflect.Type) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
switch typex.ClassOf(elm) {
case typex.Concrete, typex.Universal, typex.Container:
ret = append(ret, elm)
}
}
return ret
} | // See the License for the specific language governing permissions and
// limitations under the License.
package graph | random_line_split |
bind.go | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graph
import (
"reflect"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/funcx"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/typex"
"github.com/apache/beam/sdks/v2/go/pkg/beam/internal/errors"
)
// TODO(herohde) 4/21/2017: Bind is where most user mistakes will likely show
// up. We should verify that common mistakes yield reasonable errors.
// Bind returns the inbound, outbound and underlying output types for a Fn,
// when bound to the underlying input types. The complication of bind is
// primarily that UserFns have loose signatures and bind must produce valid
// type information for the execution plan.
//
// For example,
//
// func (t EventTime, k typex.X, v int, emit func(string, typex.X))
//
// or
//
// func (context.Context, k typex.X, v int) (string, typex.X, error)
//
// are UserFns that may take one or two incoming fulltypes: either KV<X,int>
// or X with a singleton side input of type int. For the purpose of the
// shape of data processing, the two forms are equivalent. The non-data types,
// context.Context and error, are not part of the data signature, but in play
// only at runtime.
//
// If either was bound to the input type [KV<string,int>], bind would return:
//
// inbound: [Main: KV<X,int>]
// outbound: [KV<string,X>]
// output: [KV<string,string>]
//
// Note that it propagates the assignment of X to string in the output type.
//
// If either was instead bound to the input fulltypes [float, int], the
// result would be:
//
// inbound: [Main: X, Singleton: int]
// outbound: [KV<string,X>]
// output: [KV<string, float>]
//
// Here, the inbound shape and output types are different from before.
func Bind(fn *funcx.Fn, typedefs map[string]reflect.Type, in ...typex.FullType) ([]typex.FullType, []InputKind, []typex.FullType, []typex.FullType, error) {
addContext := func(err error, fn *funcx.Fn) error {
return errors.WithContextf(err, "binding fn %v", fn.Fn.Name())
}
inbound, kinds, err := findInbound(fn, in...)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
outbound, err := findOutbound(fn)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
subst, err := typex.Bind(inbound, in)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
for k, v := range typedefs {
if substK, exists := subst[k]; exists {
err := errors.Errorf("cannot substitute type %v with %v, already defined as %v", k, v, substK)
return nil, nil, nil, nil, addContext(err, fn)
}
subst[k] = v
}
out, err := typex.Substitute(outbound, subst)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
return inbound, kinds, outbound, out, nil
}
func findOutbound(fn *funcx.Fn) ([]typex.FullType, error) {
ret := trimIllegal(returnTypes(funcx.SubReturns(fn.Ret, fn.Returns(funcx.RetValue)...)))
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnEmit)...)
var outbound []typex.FullType
// The direct output is the "main" output, if any.
switch len(ret) {
case 0:
break // ok: no direct output.
case 1:
outbound = append(outbound, typex.New(ret[0]))
case 2:
outbound = append(outbound, typex.NewKV(typex.New(ret[0]), typex.New(ret[1])))
default:
return nil, errors.Errorf("too many return values: %v", ret)
}
for _, param := range params {
values, _ := funcx.UnfoldEmit(param.T)
trimmed := trimIllegal(values)
if len(trimmed) == 2 {
outbound = append(outbound, typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1])))
} else {
outbound = append(outbound, typex.New(trimmed[0]))
}
}
return outbound, nil
}
func returnTypes(list []funcx.ReturnParam) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
ret = append(ret, elm.T)
}
return ret
}
func findInbound(fn *funcx.Fn, in ...typex.FullType) ([]typex.FullType, []InputKind, error) {
// log.Printf("Bind inbound: %v %v", fn, in)
addContext := func(err error, p []funcx.FnParam, in any) error {
return errors.WithContextf(err, "binding params %v to input %v", p, in)
}
var inbound []typex.FullType
var kinds []InputKind
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnValue|funcx.FnIter|funcx.FnReIter|funcx.FnMultiMap)...)
index := 0
for _, input := range in {
arity, err := inboundArity(input, index == 0)
if err != nil {
return nil, nil, addContext(err, params, input)
}
if len(params)-index < arity {
return nil, nil, addContext(errors.New("too few params"), params[index:], input)
}
paramsToBind := params[index : index+arity]
elm, kind, err := tryBindInbound(input, paramsToBind, index == 0)
if err != nil {
return nil, nil, addContext(err, paramsToBind, input)
}
inbound = append(inbound, elm)
kinds = append(kinds, kind)
index += arity
}
if index < len(params) {
return nil, nil, addContext(errors.New("too few inputs: forgot an input or to annotate options?"), params, in)
}
if index > len(params) {
return nil, nil, addContext(errors.New("too many inputs"), params, in)
}
return inbound, kinds, nil
}
func tryBindInbound(t typex.FullType, args []funcx.FnParam, isMain bool) (typex.FullType, InputKind, error) {
kind := Main
var other typex.FullType
switch t.Class() {
case typex.Concrete, typex.Container:
if isMain {
other = typex.New(args[0].T)
} else {
// We accept various forms for side input. We have to disambiguate
// []string into a Singleton of type []string or a Slice of type
// string by matching up the incoming type and the param type.
arg := args[0]
switch arg.Kind {
case funcx.FnValue:
if args[0].T.Kind() == reflect.Slice && t.Type() == args[0].T.Elem() {
// TODO(herohde) 6/29/2017: we do not allow universal slices, for now.
kind = Slice
other = typex.New(args[0].T.Elem())
} else {
kind = Singleton
other = typex.New(args[0].T)
}
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.New(trimmed[0])
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.New(trimmed[0])
case funcx.FnMultiMap:
return nil, kind, errors.Errorf("input to MultiMap side input must be KV, got %v", t)
default:
return nil, kind, errors.Errorf("unexpected param kind: %v", arg)
}
}
case typex.Composite:
switch t.Type() {
case typex.KVType:
if isMain {
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
if args[1].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("value of %v cannot bind to %v", t, args[1])
}
other = typex.NewKV(typex.New(args[0].T), typex.New(args[1].T))
} else {
switch args[0].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnMultiMap:
values, _ := funcx.UnfoldMultiMap(args[0].T)
kind = MultiMap
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
default:
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
}
case typex.CoGBKType:
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
components := []typex.FullType{typex.New(args[0].T)}
for i := 1; i < len(args); i++ {
switch args[i].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
default:
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
}
other = typex.NewCoGBK(components...)
default:
return nil, kind, errors.Errorf("unexpected inbound type: %v", t.Type())
}
default:
return nil, kind, errors.Errorf("unexpected inbound class: %v", t.Class())
}
if !typex.IsStructurallyAssignable(t, other) {
return nil, kind, errors.Errorf("%v is not assignable to %v", t, other)
}
return other, kind, nil
}
func inboundArity(t typex.FullType, isMain bool) (int, error) {
if t.Class() == typex.Composite |
return 1, nil
}
func trimIllegal(list []reflect.Type) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
switch typex.ClassOf(elm) {
case typex.Concrete, typex.Universal, typex.Container:
ret = append(ret, elm)
}
}
return ret
}
| {
switch t.Type() {
case typex.KVType:
if isMain {
return 2, nil
}
// A KV side input must be a single iterator/map.
return 1, nil
case typex.CoGBKType:
return len(t.Components()), nil
default:
return 0, errors.Errorf("unexpected composite inbound type: %v", t.Type())
}
} | conditional_block |
bind.go | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graph
import (
"reflect"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/funcx"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/typex"
"github.com/apache/beam/sdks/v2/go/pkg/beam/internal/errors"
)
// TODO(herohde) 4/21/2017: Bind is where most user mistakes will likely show
// up. We should verify that common mistakes yield reasonable errors.
// Bind returns the inbound, outbound and underlying output types for a Fn,
// when bound to the underlying input types. The complication of bind is
// primarily that UserFns have loose signatures and bind must produce valid
// type information for the execution plan.
//
// For example,
//
// func (t EventTime, k typex.X, v int, emit func(string, typex.X))
//
// or
//
// func (context.Context, k typex.X, v int) (string, typex.X, error)
//
// are UserFns that may take one or two incoming fulltypes: either KV<X,int>
// or X with a singleton side input of type int. For the purpose of the
// shape of data processing, the two forms are equivalent. The non-data types,
// context.Context and error, are not part of the data signature, but in play
// only at runtime.
//
// If either was bound to the input type [KV<string,int>], bind would return:
//
// inbound: [Main: KV<X,int>]
// outbound: [KV<string,X>]
// output: [KV<string,string>]
//
// Note that it propagates the assignment of X to string in the output type.
//
// If either was instead bound to the input fulltypes [float, int], the
// result would be:
//
// inbound: [Main: X, Singleton: int]
// outbound: [KV<string,X>]
// output: [KV<string, float>]
//
// Here, the inbound shape and output types are different from before.
func | (fn *funcx.Fn, typedefs map[string]reflect.Type, in ...typex.FullType) ([]typex.FullType, []InputKind, []typex.FullType, []typex.FullType, error) {
addContext := func(err error, fn *funcx.Fn) error {
return errors.WithContextf(err, "binding fn %v", fn.Fn.Name())
}
inbound, kinds, err := findInbound(fn, in...)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
outbound, err := findOutbound(fn)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
subst, err := typex.Bind(inbound, in)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
for k, v := range typedefs {
if substK, exists := subst[k]; exists {
err := errors.Errorf("cannot substitute type %v with %v, already defined as %v", k, v, substK)
return nil, nil, nil, nil, addContext(err, fn)
}
subst[k] = v
}
out, err := typex.Substitute(outbound, subst)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
return inbound, kinds, outbound, out, nil
}
func findOutbound(fn *funcx.Fn) ([]typex.FullType, error) {
ret := trimIllegal(returnTypes(funcx.SubReturns(fn.Ret, fn.Returns(funcx.RetValue)...)))
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnEmit)...)
var outbound []typex.FullType
// The direct output is the "main" output, if any.
switch len(ret) {
case 0:
break // ok: no direct output.
case 1:
outbound = append(outbound, typex.New(ret[0]))
case 2:
outbound = append(outbound, typex.NewKV(typex.New(ret[0]), typex.New(ret[1])))
default:
return nil, errors.Errorf("too many return values: %v", ret)
}
for _, param := range params {
values, _ := funcx.UnfoldEmit(param.T)
trimmed := trimIllegal(values)
if len(trimmed) == 2 {
outbound = append(outbound, typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1])))
} else {
outbound = append(outbound, typex.New(trimmed[0]))
}
}
return outbound, nil
}
func returnTypes(list []funcx.ReturnParam) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
ret = append(ret, elm.T)
}
return ret
}
func findInbound(fn *funcx.Fn, in ...typex.FullType) ([]typex.FullType, []InputKind, error) {
// log.Printf("Bind inbound: %v %v", fn, in)
addContext := func(err error, p []funcx.FnParam, in any) error {
return errors.WithContextf(err, "binding params %v to input %v", p, in)
}
var inbound []typex.FullType
var kinds []InputKind
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnValue|funcx.FnIter|funcx.FnReIter|funcx.FnMultiMap)...)
index := 0
for _, input := range in {
arity, err := inboundArity(input, index == 0)
if err != nil {
return nil, nil, addContext(err, params, input)
}
if len(params)-index < arity {
return nil, nil, addContext(errors.New("too few params"), params[index:], input)
}
paramsToBind := params[index : index+arity]
elm, kind, err := tryBindInbound(input, paramsToBind, index == 0)
if err != nil {
return nil, nil, addContext(err, paramsToBind, input)
}
inbound = append(inbound, elm)
kinds = append(kinds, kind)
index += arity
}
if index < len(params) {
return nil, nil, addContext(errors.New("too few inputs: forgot an input or to annotate options?"), params, in)
}
if index > len(params) {
return nil, nil, addContext(errors.New("too many inputs"), params, in)
}
return inbound, kinds, nil
}
func tryBindInbound(t typex.FullType, args []funcx.FnParam, isMain bool) (typex.FullType, InputKind, error) {
kind := Main
var other typex.FullType
switch t.Class() {
case typex.Concrete, typex.Container:
if isMain {
other = typex.New(args[0].T)
} else {
// We accept various forms for side input. We have to disambiguate
// []string into a Singleton of type []string or a Slice of type
// string by matching up the incoming type and the param type.
arg := args[0]
switch arg.Kind {
case funcx.FnValue:
if args[0].T.Kind() == reflect.Slice && t.Type() == args[0].T.Elem() {
// TODO(herohde) 6/29/2017: we do not allow universal slices, for now.
kind = Slice
other = typex.New(args[0].T.Elem())
} else {
kind = Singleton
other = typex.New(args[0].T)
}
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.New(trimmed[0])
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.New(trimmed[0])
case funcx.FnMultiMap:
return nil, kind, errors.Errorf("input to MultiMap side input must be KV, got %v", t)
default:
return nil, kind, errors.Errorf("unexpected param kind: %v", arg)
}
}
case typex.Composite:
switch t.Type() {
case typex.KVType:
if isMain {
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
if args[1].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("value of %v cannot bind to %v", t, args[1])
}
other = typex.NewKV(typex.New(args[0].T), typex.New(args[1].T))
} else {
switch args[0].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnMultiMap:
values, _ := funcx.UnfoldMultiMap(args[0].T)
kind = MultiMap
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
default:
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
}
case typex.CoGBKType:
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
components := []typex.FullType{typex.New(args[0].T)}
for i := 1; i < len(args); i++ {
switch args[i].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
default:
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
}
other = typex.NewCoGBK(components...)
default:
return nil, kind, errors.Errorf("unexpected inbound type: %v", t.Type())
}
default:
return nil, kind, errors.Errorf("unexpected inbound class: %v", t.Class())
}
if !typex.IsStructurallyAssignable(t, other) {
return nil, kind, errors.Errorf("%v is not assignable to %v", t, other)
}
return other, kind, nil
}
func inboundArity(t typex.FullType, isMain bool) (int, error) {
if t.Class() == typex.Composite {
switch t.Type() {
case typex.KVType:
if isMain {
return 2, nil
}
// A KV side input must be a single iterator/map.
return 1, nil
case typex.CoGBKType:
return len(t.Components()), nil
default:
return 0, errors.Errorf("unexpected composite inbound type: %v", t.Type())
}
}
return 1, nil
}
func trimIllegal(list []reflect.Type) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
switch typex.ClassOf(elm) {
case typex.Concrete, typex.Universal, typex.Container:
ret = append(ret, elm)
}
}
return ret
}
| Bind | identifier_name |
bind.go | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graph
import (
"reflect"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/funcx"
"github.com/apache/beam/sdks/v2/go/pkg/beam/core/typex"
"github.com/apache/beam/sdks/v2/go/pkg/beam/internal/errors"
)
// TODO(herohde) 4/21/2017: Bind is where most user mistakes will likely show
// up. We should verify that common mistakes yield reasonable errors.
// Bind returns the inbound, outbound and underlying output types for a Fn,
// when bound to the underlying input types. The complication of bind is
// primarily that UserFns have loose signatures and bind must produce valid
// type information for the execution plan.
//
// For example,
//
// func (t EventTime, k typex.X, v int, emit func(string, typex.X))
//
// or
//
// func (context.Context, k typex.X, v int) (string, typex.X, error)
//
// are UserFns that may take one or two incoming fulltypes: either KV<X,int>
// or X with a singleton side input of type int. For the purpose of the
// shape of data processing, the two forms are equivalent. The non-data types,
// context.Context and error, are not part of the data signature, but in play
// only at runtime.
//
// If either was bound to the input type [KV<string,int>], bind would return:
//
// inbound: [Main: KV<X,int>]
// outbound: [KV<string,X>]
// output: [KV<string,string>]
//
// Note that it propagates the assignment of X to string in the output type.
//
// If either was instead bound to the input fulltypes [float, int], the
// result would be:
//
// inbound: [Main: X, Singleton: int]
// outbound: [KV<string,X>]
// output: [KV<string, float>]
//
// Here, the inbound shape and output types are different from before.
func Bind(fn *funcx.Fn, typedefs map[string]reflect.Type, in ...typex.FullType) ([]typex.FullType, []InputKind, []typex.FullType, []typex.FullType, error) {
addContext := func(err error, fn *funcx.Fn) error {
return errors.WithContextf(err, "binding fn %v", fn.Fn.Name())
}
inbound, kinds, err := findInbound(fn, in...)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
outbound, err := findOutbound(fn)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
subst, err := typex.Bind(inbound, in)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
for k, v := range typedefs {
if substK, exists := subst[k]; exists {
err := errors.Errorf("cannot substitute type %v with %v, already defined as %v", k, v, substK)
return nil, nil, nil, nil, addContext(err, fn)
}
subst[k] = v
}
out, err := typex.Substitute(outbound, subst)
if err != nil {
return nil, nil, nil, nil, addContext(err, fn)
}
return inbound, kinds, outbound, out, nil
}
func findOutbound(fn *funcx.Fn) ([]typex.FullType, error) {
ret := trimIllegal(returnTypes(funcx.SubReturns(fn.Ret, fn.Returns(funcx.RetValue)...)))
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnEmit)...)
var outbound []typex.FullType
// The direct output is the "main" output, if any.
switch len(ret) {
case 0:
break // ok: no direct output.
case 1:
outbound = append(outbound, typex.New(ret[0]))
case 2:
outbound = append(outbound, typex.NewKV(typex.New(ret[0]), typex.New(ret[1])))
default:
return nil, errors.Errorf("too many return values: %v", ret)
}
for _, param := range params {
values, _ := funcx.UnfoldEmit(param.T)
trimmed := trimIllegal(values)
if len(trimmed) == 2 {
outbound = append(outbound, typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1])))
} else {
outbound = append(outbound, typex.New(trimmed[0]))
}
}
return outbound, nil
}
func returnTypes(list []funcx.ReturnParam) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
ret = append(ret, elm.T)
}
return ret
}
func findInbound(fn *funcx.Fn, in ...typex.FullType) ([]typex.FullType, []InputKind, error) |
func tryBindInbound(t typex.FullType, args []funcx.FnParam, isMain bool) (typex.FullType, InputKind, error) {
kind := Main
var other typex.FullType
switch t.Class() {
case typex.Concrete, typex.Container:
if isMain {
other = typex.New(args[0].T)
} else {
// We accept various forms for side input. We have to disambiguate
// []string into a Singleton of type []string or a Slice of type
// string by matching up the incoming type and the param type.
arg := args[0]
switch arg.Kind {
case funcx.FnValue:
if args[0].T.Kind() == reflect.Slice && t.Type() == args[0].T.Elem() {
// TODO(herohde) 6/29/2017: we do not allow universal slices, for now.
kind = Slice
other = typex.New(args[0].T.Elem())
} else {
kind = Singleton
other = typex.New(args[0].T)
}
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.New(trimmed[0])
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.New(trimmed[0])
case funcx.FnMultiMap:
return nil, kind, errors.Errorf("input to MultiMap side input must be KV, got %v", t)
default:
return nil, kind, errors.Errorf("unexpected param kind: %v", arg)
}
}
case typex.Composite:
switch t.Type() {
case typex.KVType:
if isMain {
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
if args[1].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("value of %v cannot bind to %v", t, args[1])
}
other = typex.NewKV(typex.New(args[0].T), typex.New(args[1].T))
} else {
switch args[0].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = Iter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[0].T)
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
kind = ReIter
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
case funcx.FnMultiMap:
values, _ := funcx.UnfoldMultiMap(args[0].T)
kind = MultiMap
trimmed := trimIllegal(values)
if len(trimmed) != 2 {
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
other = typex.NewKV(typex.New(trimmed[0]), typex.New(trimmed[1]))
default:
return nil, kind, errors.Errorf("%v cannot bind to %v", t, args[0])
}
}
case typex.CoGBKType:
if args[0].Kind != funcx.FnValue {
return nil, kind, errors.Errorf("key of %v cannot bind to %v", t, args[0])
}
components := []typex.FullType{typex.New(args[0].T)}
for i := 1; i < len(args); i++ {
switch args[i].Kind {
case funcx.FnIter:
values, _ := funcx.UnfoldIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
case funcx.FnReIter:
values, _ := funcx.UnfoldReIter(args[i].T)
trimmed := trimIllegal(values)
if len(trimmed) != 1 {
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
components = append(components, typex.New(trimmed[0]))
default:
return nil, kind, errors.Errorf("values of %v cannot bind to %v", t, args[i])
}
}
other = typex.NewCoGBK(components...)
default:
return nil, kind, errors.Errorf("unexpected inbound type: %v", t.Type())
}
default:
return nil, kind, errors.Errorf("unexpected inbound class: %v", t.Class())
}
if !typex.IsStructurallyAssignable(t, other) {
return nil, kind, errors.Errorf("%v is not assignable to %v", t, other)
}
return other, kind, nil
}
func inboundArity(t typex.FullType, isMain bool) (int, error) {
if t.Class() == typex.Composite {
switch t.Type() {
case typex.KVType:
if isMain {
return 2, nil
}
// A KV side input must be a single iterator/map.
return 1, nil
case typex.CoGBKType:
return len(t.Components()), nil
default:
return 0, errors.Errorf("unexpected composite inbound type: %v", t.Type())
}
}
return 1, nil
}
func trimIllegal(list []reflect.Type) []reflect.Type {
var ret []reflect.Type
for _, elm := range list {
switch typex.ClassOf(elm) {
case typex.Concrete, typex.Universal, typex.Container:
ret = append(ret, elm)
}
}
return ret
}
| {
// log.Printf("Bind inbound: %v %v", fn, in)
addContext := func(err error, p []funcx.FnParam, in any) error {
return errors.WithContextf(err, "binding params %v to input %v", p, in)
}
var inbound []typex.FullType
var kinds []InputKind
params := funcx.SubParams(fn.Param, fn.Params(funcx.FnValue|funcx.FnIter|funcx.FnReIter|funcx.FnMultiMap)...)
index := 0
for _, input := range in {
arity, err := inboundArity(input, index == 0)
if err != nil {
return nil, nil, addContext(err, params, input)
}
if len(params)-index < arity {
return nil, nil, addContext(errors.New("too few params"), params[index:], input)
}
paramsToBind := params[index : index+arity]
elm, kind, err := tryBindInbound(input, paramsToBind, index == 0)
if err != nil {
return nil, nil, addContext(err, paramsToBind, input)
}
inbound = append(inbound, elm)
kinds = append(kinds, kind)
index += arity
}
if index < len(params) {
return nil, nil, addContext(errors.New("too few inputs: forgot an input or to annotate options?"), params, in)
}
if index > len(params) {
return nil, nil, addContext(errors.New("too many inputs"), params, in)
}
return inbound, kinds, nil
} | identifier_body |
helper.go | package util
import (
"bytes"
"crypto/md5"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
//helper工具集
type helper struct {
}
var Helper *helper
func init() {
Helper = NewHelper()
}
func NewHelper() *helper {
return &helper{
}
}
//获取当前时间戳-int类型数据
func (thisObj *helper) CurrTimeInt() int {
return int(time.Now().Unix())
}
//json解析
//@params interface{} t 基本上传参都是map数据类型,如map[string]interface{}或是map[string]string
func (thisObj *helper) JSONMarshal(t interface{}) ([]byte, error) {
buffer := &bytes.Buffer{}
encoder := json.NewEncoder(buffer)
encoder.SetEscapeHTML(false) //HTML标签是否转义处理 => true:{"test":"\u003cbr /\u003e"} false:{"test":"<br />"}
err := encoder.Encode(t)
return buffer.Bytes(), err
}
//创建随机字符串
//@params int keyLength 要创建字符串的长度
//@params int typeN 1 按数字随机,非1 按0~9,a~z,A~Z随机
func (thisObj *helper) CreateRandStr(randStrLength int, typeN int) string {
key := ""
var dic = make([]string, 0)
if typeN == 1 {
dic = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}
} else {
dicStr := "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,1,2,3,4,5,6,7,8,9,0,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z"
dic = strings.Split(dicStr, ",")
}
length := len(dic)
for i := 0; i < randStrLength; i++ {
rand.Seed(time.Now().UnixNano())
randNum := rand.Intn(length - 1)
key += dic[randNum]
}
return key
}
//http响应json-成功数据
//@params string message 响应消息
//@params interface{} data 响应数据,一般也是map数据类型
func (thisObj *helper) HttpResponseJsonSuccess(message string, data interface{}) {
//http响应结果
result := map[string]interface{}{"code": "success", "msg": message, "data": data}
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res, _ := json.Marshal(result)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//失败响应处理
//需要引用包: import ( "github.com/kataras/iris" )
//func (thisObj *helper) HelperResponseFailedJSON(c iris.Context, message string, code interface{}) {
// //获取前置中间件的设置的初始开始时间
// diffTime := c.Values().Get("requestCurrentTime")
// currentTime := time.Now().UnixNano() / 1e3 //计算得出 微秒
// timeConsumed := currentTime - diffTime.(int64)
// result := iris.Map{"code": code, "msg": message, "status": 0, "time_consumed": timeConsumed}
// c.JSON(result)
//}
//http响应seqMap的Json-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeq())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应seqMap的Json(转换成Unicode字符)-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqUnicodeJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeqUnicode())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应json
//@params map[string]interface{} data 响应数据
func (thisObj *helper) HttpResponseJSON(data map[string]interface{}) {
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
res, _ := json.Marshal(data)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//根据字符串调用对应对象的方法
//调用示例:
// type Test struct {
//
// }
//
// func NewTest()*Test{
// return &Test{
//
// }
// }
//
// func (thisObj *Test) Echo() int {
// return 123456
// }
// res,resOk := util.Helper.CallMethodReflect(NewTest(),"Echo")
// fmt.Println(res,resOk)
// fmt.Println(res[0].Int())
//输出结果:
// [<int Value>] true
// 123456
//@params interface{} any 指定对象(指针)
//@params string methodName 指定对象里被调用的方法
//@params []interface{} args 指定对象里被调用的方法传参
//@return []reflect.Value reflectValue 被调用对象返回的结果集
//@return bool runOk 调用是否成功,true 成功,false 失败(失败情况就得根据情况排查问题出在哪里)
func (thisObj *helper) CallMethodReflect(any interface{}, methodName string, args []interface{}) (reflectValue []reflect.Value, runOk bool) {
//准备调用方法的参数
inputs := make([]reflect.Value, len(args))
for i := range args {
inputs[i] = reflect.ValueOf(args[i])
}
//执行调用指定对象的方法
v := reflect.ValueOf(any).MethodByName(methodName)
//若指定对象被调用的方法不存在或无效
if v.String() == "<invalid Value>" {
runOk = false
return []reflect.Value{}, runOk
}
//调用成功
runOk = true
reflectValue = v.Call(inputs)
return reflectValue, runOk
}
//获取用户IP
//@desc X-Forwarded-For:若是代理过来的,会获取到多个ip,最后一个ip就是真实的
func (thisObj *helper) GetIP(req *http.Request) string {
remoteAddr := req.RemoteAddr
if ip := req.Header.Get("X-Real-IP"); ip != "" {
remoteAddr = ip
} else if ip = req.Header.Get("X-Forwarded-For"); ip != "" {
//原逻辑:容易获取多个ip
//remoteAddr = ip
//现逻辑:获取最后一个真实ip
//使用X-Forwarded-For获取到ip会有多个,如117.136.39.96, 183.232.151.36;修改成只获取最后一个.
//若多个ip存在时,按英文逗号分隔
ipArr := strings.Split(ip,",")
//若存在多个ip值时,需要去除空格;获取最后一位IP
ip := Trim(ipArr[len(ipArr)-1])
//先判断获取最后一位IP是否是公网IP,如果不是就获取第一个IP处理
if !thisObj.CheckIsPublicIP(net.ParseIP(ip)) {
ip = Trim(ipArr[0])
}
remoteAddr = ip
} else {
remoteAddr, _, _ = net.SplitHostPort(remoteAddr)
}
if remoteAddr == "::1" {
remoteAddr = "127.0.0.1"
}
return remoteAddr
}
//检测IP是否是公网IP
func (thisObj *helper) CheckIsPublicIP(IP net.IP) bool {
if IP.IsLoopback() || IP.IsLinkLocalMulticast() || IP.IsLinkLocalUnicast() {
return false
}
if ip4 := IP.To4(); ip4 != nil {
switch true {
case ip4[0] == 10:
return false
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
return false
case ip4[0] == 192 && ip4[1] == 168:
return false
default:
return true
}
}
return false
}
//把IP127.0.0.1格式转化为long类型
func (thisObj *helper) IP2long(ipStr string) uint32 {
str := net.ParseIP(ipStr)
if str == nil {
return 0
}
//IPv4
ip := str.To4()
if ip == nil {
//IPv6
ip = str.To16()
}
if ip == nil {
return 0
}
return binary.BigEndian.Uint32(ip)
}
//把long类型IP地址转化为127.0.0.1格式
func (thisObj *helper) Long2IP(ipLong uint32) string {
ipByte := make([]byte, 4)
binary.BigEndian.PutUint32(ipByte, ipLong)
ipStr := net.IP(ipByte)
return ipStr.String()
}
//检测是否是IPv4
func (thisObj *helper) IsIPv4(ip string) bool {
//返回的值不等于nil,就是如果ip不是IPv4地址,To4返回nil,当且仅当地址是IPv4地址时,此条件才应返回true.
if net.ParseIP(ip).To4()!=nil {
return true
}
return false
}
//检测是否是IPv6
func (thisObj *helper) IsIPv6(ip string) bool {
if thisObj.IsIPv4(ip) {
return false
}
return true
}
////多个IP地址集合解析(包含IPv6地址解析),如58.177.183.37,134.159.119.48
//func (thisObj *helper) IP2Region(params map[string]interface{}) (actionResult []map[string]interface{}) {
// //必传-多个ip地址集合
// ips := params["ips"].(string)
// //可选-是否需要排重
// isUnique := false
// if params["isUnique"]!=nil {
// isUnique = params["isUnique"].(bool)
// }
// //去除ip左右多出的逗号
// ips = strings.Trim(ips,",")
//
// //多个ip地址数组
// ipsArr := strings.Split(ips,",")
// //ip排重
// ipsMap := make(map[string]map[string]interface{})
//
// //IPV4-解析
// ip2regionLibrary := NewIp2regionLibrary()
// defer ip2regionLibrary.Close()
// //IPV6-解析
// ipv6regionLibrary := NewIpv6regionLibrary(map[string]interface{}{})
// defer ipv6regionLibrary.Close()
//
// //操作结果存储
// actionResult = make([]map[string]interface{},0)
// for _,ip := range ipsArr {
// //操作结果每项数据
// actionResultItem := map[string]interface{}{
// "ipStr":ip,
// "ipAddr":"---",
// }
//
// //------------ ip地址值转换处理 ------------
// //模拟php的代码:
// // 有可能传递过来的是ip2long转换的整型ip值 转换成ip字符串
// // $ipStr = is_numeric($ip) ? long2ip($ip) : $ip;
// ipStr := ""
// //若是整型值
// if thisObj.IsNumeric(ip) {
// //转成字符串
// ipInt,ipIntErr := strconv.Atoi(ip)
// //若转换出错
// if ipIntErr!=nil {
// actionResultItem["ipAddr"] = "IP_TO_INT_ERROR"
// actionResult = append(actionResult,actionResultItem)
// continue
// }
// //将整型值解析成ip地址
// ipStr = thisObj.Long2IP(uint32(ipInt))
// }else{
// //存储ip地址
// ipStr = ip
// }
//
// //确认都是字符串ip返回
// actionResultItem["ipStr"] = ipStr
//
// //是否排重
// if isUnique {
// //------------ 排重处理 ------------
// //若该IP已处理过,则不进行处理
// if ipsMap[ipStr]!=nil {
// continue
// }
// }else{
// //------------ 不排重处理 ------------
// //若是不排重,且之前找到过了,则不需要再重复找了
// if ipsMap[ipStr]!=nil {
// actionResult = append(actionResult,ipsMap[ipStr])
// continue
// }
// }
//
// //------------ ip地址解析处理 ------------
// //如果是IPv4的处理
// if thisObj.IsIPv4(ipStr) {
// //IPV4地址解析处理
// ipv4Res,ipv4ResErr := ip2regionLibrary.DoIpAddressAnalyze(ipStr)
// if ipv4ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv4Res:",ipv4Res)
// //输出结果:
// //ipStr: 134.159.119.47
// //ipv4Res: map[City:香港 CityId:0 Country:中国 ISP:澳大利亚电信 Province:香港 Region:0]
//
// if len(ipv4Res)>0 {
// actionResultItem["ipAddr"] = ipv4Res["Country"].(string)+"-"+ipv4Res["Province"].(string)+"-"+ipv4Res["City"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_NOT_FOUND"
// }
// }
// //如果是IPv6的处理
// }else if thisObj.IsIPv6(ipStr) {
// ipv6Res,ipv6ResErr := ipv6regionLibrary.GetIPv6Address(map[string]interface{}{
// "ipv6":ipStr,
// })
// if ipv6ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv6Res:",ipv6Res)
// //输出结果:
// //ipStr: 2408:84e7:4ad:1776:9c97:e79d:1e94:e59e
// //ipv6Res: map[address:中国山西省 中国联通3GNET网络(全省通用) id:50440 ipv6:2408:84e7:4ad:1776:9c97:e79d:1e94:e59e]
//
// if len(ipv6Res)>0 {
// actionResultItem["ipAddr"] = ipv6Res["address"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_NOT_FOUND"
// }
// }
// //既不是IPv4,也不是IPv6,则是错误的
// }else{
// actionResultItem["ipAddr"] = "IP_IS_NOT_V4_OR_V6"
// }
//
// //存储操作结果
// actionResult = append(actionResult,actionResultItem)
//
// //存储查找过的结果
// ipsMap[ipStr] = actionResultItem
// }
//
// return actionResult
//}
// IsNumeric is_numeric()
// Numeric strings consist of optional sign, any number of digits, optional decimal part and optional exponential part.
// Thus +0123.45e6 is a valid numeric value.
// In PHP hexadecimal (e.g. 0xf4c3b00c) is not supported, but IsNumeric is supported.
func (cthis *helper) IsNumeric(val interface{}) bool {
switch val.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return true
case float32, float64, complex64, complex128:
return true
case string:
str := val.(string)
//现逻辑
//@update 2020.03.21 新增若是传入空格的修正处理
// 若这里不进行空格去除处理,到下面894行执行的时候会报错 runtime error: index out of range [0] with length 0,
// 因为等strings.TrimSpace(str)执行去除空格后,str[0]是不可能有值的!
str = strings.TrimSpace(str)
if str == "" {
return false
}
//原逻辑
// Trim any whitespace
//str = strings.TrimSpace(str)
if str[0] == '-' || str[0] == '+' {
if len(str) == 1 {
return false
}
str = str[1:]
}
// hex
if len(str) > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') {
for _, h := range str[2:] {
if !((h >= '0' && h <= '9') || (h >= 'a' && h <= 'f') || (h >= 'A' && h <= 'F')) {
return false
}
}
return true
}
// 0-9, Point, Scientific
p, s, l := 0, 0, len(str)
for i, v := range str {
if v == '.' { // Point
if p > 0 || s > 0 || i+1 == l {
return false
}
p = i
} else if v == 'e' || v == 'E' { // Scientific
if i == 0 || s > 0 || i+1 == l {
return false
}
s = i
} else if v < '0' || v > '9' {
return false
}
}
return true
}
return false
}
//GET请求url-路径拼接
//@params string urlPath 请求url,如http://192.168.0.1:58909
//@params map[string]string data 请求参数
//@params bool isEncode 特殊字符是否转义处理
//@params bool isHttp 是否将https转换成http处理
func (thisObj *helper) GETUrlPath(urlPath string, data map[string]string, isEncode bool, isHttp bool) string {
//是否是http请求,如果是,则将https替换成http
if isHttp {
urlPath = strings.Replace(urlPath, "https://", "http://", 1)
}
//拼接uri
urlPath += "?"
for k, v := range data {
if isEncode {
v = url.QueryEscape(v)
}
urlPath += k + "=" + v + "&"
}
urlPath = strings.TrimRight(urlPath, "&")
return urlPath
}
//用户名称尾部隐藏处理
//@params string userName 用户名
func (thisObj *helper) UserNameTailHidden(userName string) string {
usernameShow := ""
//先将字符串转成rune
// rune 等同于int32,常用来处理unicode或utf-8字符
userNameRune := []rune(userName)
//获取用户名称字符串长度(不是底层字节长度,如你好,就是2个字符串长度)
userNameLen := len(userNameRune)
//大于3个长度,则从尾部截取3个替换成***
if userNameLen > 3 {
usernameShow = string(userNameRune[:userNameLen-3])
} else {
switch userNameLen {
case 3:
usernameShow = string(userNameRune[:userNameLen-2])
case 2:
usernameShow = string(userNameRune[:userNameLen-1])
//若是1个长度,则以该值为起始,效果如:张***
case 1:
usernameShow = userName
}
}
return usernameShow + "***"
}
//总页数相关处理
//@params int page 当前页
//@params int pageRows 每页数量
//@params int dataTotal 数据总数
func (thisObj *helper) PageTotal(page int, pageRows int, dataTotal int) map[string]int {
//计算总页数
totalPages := int(math.Ceil(float64(dataTotal) / float64(pageRows)))
//总页数最小值限制
if totalPages < 1 {
totalPages = 1
}
//当前页最小值限制
if page < 1 {
page = 1
}
//当前页最大值限制
if page > totalPages {
page = totalPages
}
//计算每页提取数
limitStart := (page - 1) * pageRows
return map[string]int{
"totalPages": totalPages,
"currentPage": page,
"limitStart": limitStart,
}
}
//根据用户id获取订单号
func (thisObj *helper) GetOrderIdByUserId(userId int,orderIdLen int) string {
//当前时间的时分秒+微妙
timeString := time.Now().Format("150405.000000")
//拼接当前用户id
str := timeString + strconv.Itoa(userId)
//转成md5值
md5str := fmt.Sprintf("%x", md5.Sum([]byte(str)))
//生成指定位数强唯一的订单号
orderId := string([]byte(md5str)[:orderIdLen])
return orderId
}
//根据findKey查找RequestParams值
//@params interface{} requestParams get或post的请求参数
//@params string findKey 要查找的key
//@params string findValDataType 查找值的数据类型
//@params interface{} defaultVal 默认值
func (thisObj *helper) FindRequestParamsVal(requestParams interface{},findKey string,findValDataType string,defaultVal interface{}) (interface{},error) {
switch requestParams.(type) {
//如get请求参数
case map[string]string:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]string)[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
//如post请求参数
case map[string]interface{}:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]interface{})[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
}
panic("requestParams_dataType_notFound")
}
//模拟刪除map[string]interface{}里的元素值
//适用于delete()函数执行删除后,造成原map值也跟着被删除,解决该问题的处理
func (thisObj *helper) DeleteElementByMapStrInterface(mapData map[string]interface{},deleteKey string) map[string]interface{} {
mapDataTemp := make(map[string]interface{})
for k,v := range mapData {
//若是与删除key相等,则不进行存储
if k==deleteKey {
continue
}
mapDataTemp[k] = v
}
return mapDataTemp
}
//error对象转字符串输出
func (thisObj *helper) ErrorToString(err error) string {
if err!=nil {
return err.Error()
}
return "nil"
} | identifier_name | ||
helper.go | package util
import (
"bytes"
"crypto/md5"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
//helper工具集
type helper struct {
}
var Helper *helper
func init() {
Helper = NewHelper()
}
func NewHelper() *helper {
return &helper{
}
}
//获取当前时间戳-int类型数据
func (thisObj *helper) CurrTimeInt() int {
return int(time.Now().Unix())
}
//json解析
//@params interface{} t 基本上传参都是map数据类型,如map[string]interface{}或是map[string]string
func (thisObj *helper) JSONMarshal(t interface{}) ([]byte, error) {
buffer := &bytes.Buffer{}
encoder := json.NewEncoder(buffer)
encoder.SetEscapeHTML(false) //HTML标签是否转义处理 => true:{"test":"\u003cbr /\u003e"} false:{"test":"<br />"}
err := encoder.Encode(t)
return buffer.Bytes(), err
}
//创建随机字符串
//@params int keyLength 要创建字符串的长度
//@params int typeN 1 按数字随机,非1 按0~9,a~z,A~Z随机
func (thisObj *helper) CreateRandStr(randStrLength int, typeN int) string {
key := ""
var dic = make([]string, 0)
if typeN == 1 {
dic = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"} | dic = strings.Split(dicStr, ",")
}
length := len(dic)
for i := 0; i < randStrLength; i++ {
rand.Seed(time.Now().UnixNano())
randNum := rand.Intn(length - 1)
key += dic[randNum]
}
return key
}
//http响应json-成功数据
//@params string message 响应消息
//@params interface{} data 响应数据,一般也是map数据类型
func (thisObj *helper) HttpResponseJsonSuccess(message string, data interface{}) {
//http响应结果
result := map[string]interface{}{"code": "success", "msg": message, "data": data}
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res, _ := json.Marshal(result)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//失败响应处理
//需要引用包: import ( "github.com/kataras/iris" )
//func (thisObj *helper) HelperResponseFailedJSON(c iris.Context, message string, code interface{}) {
// //获取前置中间件的设置的初始开始时间
// diffTime := c.Values().Get("requestCurrentTime")
// currentTime := time.Now().UnixNano() / 1e3 //计算得出 微秒
// timeConsumed := currentTime - diffTime.(int64)
// result := iris.Map{"code": code, "msg": message, "status": 0, "time_consumed": timeConsumed}
// c.JSON(result)
//}
//http响应seqMap的Json-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeq())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应seqMap的Json(转换成Unicode字符)-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqUnicodeJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeqUnicode())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应json
//@params map[string]interface{} data 响应数据
func (thisObj *helper) HttpResponseJSON(data map[string]interface{}) {
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
res, _ := json.Marshal(data)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//根据字符串调用对应对象的方法
//调用示例:
// type Test struct {
//
// }
//
// func NewTest()*Test{
// return &Test{
//
// }
// }
//
// func (thisObj *Test) Echo() int {
// return 123456
// }
// res,resOk := util.Helper.CallMethodReflect(NewTest(),"Echo")
// fmt.Println(res,resOk)
// fmt.Println(res[0].Int())
//输出结果:
// [<int Value>] true
// 123456
//@params interface{} any 指定对象(指针)
//@params string methodName 指定对象里被调用的方法
//@params []interface{} args 指定对象里被调用的方法传参
//@return []reflect.Value reflectValue 被调用对象返回的结果集
//@return bool runOk 调用是否成功,true 成功,false 失败(失败情况就得根据情况排查问题出在哪里)
func (thisObj *helper) CallMethodReflect(any interface{}, methodName string, args []interface{}) (reflectValue []reflect.Value, runOk bool) {
//准备调用方法的参数
inputs := make([]reflect.Value, len(args))
for i := range args {
inputs[i] = reflect.ValueOf(args[i])
}
//执行调用指定对象的方法
v := reflect.ValueOf(any).MethodByName(methodName)
//若指定对象被调用的方法不存在或无效
if v.String() == "<invalid Value>" {
runOk = false
return []reflect.Value{}, runOk
}
//调用成功
runOk = true
reflectValue = v.Call(inputs)
return reflectValue, runOk
}
//获取用户IP
//@desc X-Forwarded-For:若是代理过来的,会获取到多个ip,最后一个ip就是真实的
func (thisObj *helper) GetIP(req *http.Request) string {
remoteAddr := req.RemoteAddr
if ip := req.Header.Get("X-Real-IP"); ip != "" {
remoteAddr = ip
} else if ip = req.Header.Get("X-Forwarded-For"); ip != "" {
//原逻辑:容易获取多个ip
//remoteAddr = ip
//现逻辑:获取最后一个真实ip
//使用X-Forwarded-For获取到ip会有多个,如117.136.39.96, 183.232.151.36;修改成只获取最后一个.
//若多个ip存在时,按英文逗号分隔
ipArr := strings.Split(ip,",")
//若存在多个ip值时,需要去除空格;获取最后一位IP
ip := Trim(ipArr[len(ipArr)-1])
//先判断获取最后一位IP是否是公网IP,如果不是就获取第一个IP处理
if !thisObj.CheckIsPublicIP(net.ParseIP(ip)) {
ip = Trim(ipArr[0])
}
remoteAddr = ip
} else {
remoteAddr, _, _ = net.SplitHostPort(remoteAddr)
}
if remoteAddr == "::1" {
remoteAddr = "127.0.0.1"
}
return remoteAddr
}
//检测IP是否是公网IP
func (thisObj *helper) CheckIsPublicIP(IP net.IP) bool {
if IP.IsLoopback() || IP.IsLinkLocalMulticast() || IP.IsLinkLocalUnicast() {
return false
}
if ip4 := IP.To4(); ip4 != nil {
switch true {
case ip4[0] == 10:
return false
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
return false
case ip4[0] == 192 && ip4[1] == 168:
return false
default:
return true
}
}
return false
}
//把IP127.0.0.1格式转化为long类型
func (thisObj *helper) IP2long(ipStr string) uint32 {
str := net.ParseIP(ipStr)
if str == nil {
return 0
}
//IPv4
ip := str.To4()
if ip == nil {
//IPv6
ip = str.To16()
}
if ip == nil {
return 0
}
return binary.BigEndian.Uint32(ip)
}
//把long类型IP地址转化为127.0.0.1格式
func (thisObj *helper) Long2IP(ipLong uint32) string {
ipByte := make([]byte, 4)
binary.BigEndian.PutUint32(ipByte, ipLong)
ipStr := net.IP(ipByte)
return ipStr.String()
}
//检测是否是IPv4
func (thisObj *helper) IsIPv4(ip string) bool {
//返回的值不等于nil,就是如果ip不是IPv4地址,To4返回nil,当且仅当地址是IPv4地址时,此条件才应返回true.
if net.ParseIP(ip).To4()!=nil {
return true
}
return false
}
//检测是否是IPv6
func (thisObj *helper) IsIPv6(ip string) bool {
if thisObj.IsIPv4(ip) {
return false
}
return true
}
////多个IP地址集合解析(包含IPv6地址解析),如58.177.183.37,134.159.119.48
//func (thisObj *helper) IP2Region(params map[string]interface{}) (actionResult []map[string]interface{}) {
// //必传-多个ip地址集合
// ips := params["ips"].(string)
// //可选-是否需要排重
// isUnique := false
// if params["isUnique"]!=nil {
// isUnique = params["isUnique"].(bool)
// }
// //去除ip左右多出的逗号
// ips = strings.Trim(ips,",")
//
// //多个ip地址数组
// ipsArr := strings.Split(ips,",")
// //ip排重
// ipsMap := make(map[string]map[string]interface{})
//
// //IPV4-解析
// ip2regionLibrary := NewIp2regionLibrary()
// defer ip2regionLibrary.Close()
// //IPV6-解析
// ipv6regionLibrary := NewIpv6regionLibrary(map[string]interface{}{})
// defer ipv6regionLibrary.Close()
//
// //操作结果存储
// actionResult = make([]map[string]interface{},0)
// for _,ip := range ipsArr {
// //操作结果每项数据
// actionResultItem := map[string]interface{}{
// "ipStr":ip,
// "ipAddr":"---",
// }
//
// //------------ ip地址值转换处理 ------------
// //模拟php的代码:
// // 有可能传递过来的是ip2long转换的整型ip值 转换成ip字符串
// // $ipStr = is_numeric($ip) ? long2ip($ip) : $ip;
// ipStr := ""
// //若是整型值
// if thisObj.IsNumeric(ip) {
// //转成字符串
// ipInt,ipIntErr := strconv.Atoi(ip)
// //若转换出错
// if ipIntErr!=nil {
// actionResultItem["ipAddr"] = "IP_TO_INT_ERROR"
// actionResult = append(actionResult,actionResultItem)
// continue
// }
// //将整型值解析成ip地址
// ipStr = thisObj.Long2IP(uint32(ipInt))
// }else{
// //存储ip地址
// ipStr = ip
// }
//
// //确认都是字符串ip返回
// actionResultItem["ipStr"] = ipStr
//
// //是否排重
// if isUnique {
// //------------ 排重处理 ------------
// //若该IP已处理过,则不进行处理
// if ipsMap[ipStr]!=nil {
// continue
// }
// }else{
// //------------ 不排重处理 ------------
// //若是不排重,且之前找到过了,则不需要再重复找了
// if ipsMap[ipStr]!=nil {
// actionResult = append(actionResult,ipsMap[ipStr])
// continue
// }
// }
//
// //------------ ip地址解析处理 ------------
// //如果是IPv4的处理
// if thisObj.IsIPv4(ipStr) {
// //IPV4地址解析处理
// ipv4Res,ipv4ResErr := ip2regionLibrary.DoIpAddressAnalyze(ipStr)
// if ipv4ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv4Res:",ipv4Res)
// //输出结果:
// //ipStr: 134.159.119.47
// //ipv4Res: map[City:香港 CityId:0 Country:中国 ISP:澳大利亚电信 Province:香港 Region:0]
//
// if len(ipv4Res)>0 {
// actionResultItem["ipAddr"] = ipv4Res["Country"].(string)+"-"+ipv4Res["Province"].(string)+"-"+ipv4Res["City"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_NOT_FOUND"
// }
// }
// //如果是IPv6的处理
// }else if thisObj.IsIPv6(ipStr) {
// ipv6Res,ipv6ResErr := ipv6regionLibrary.GetIPv6Address(map[string]interface{}{
// "ipv6":ipStr,
// })
// if ipv6ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv6Res:",ipv6Res)
// //输出结果:
// //ipStr: 2408:84e7:4ad:1776:9c97:e79d:1e94:e59e
// //ipv6Res: map[address:中国山西省 中国联通3GNET网络(全省通用) id:50440 ipv6:2408:84e7:4ad:1776:9c97:e79d:1e94:e59e]
//
// if len(ipv6Res)>0 {
// actionResultItem["ipAddr"] = ipv6Res["address"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_NOT_FOUND"
// }
// }
// //既不是IPv4,也不是IPv6,则是错误的
// }else{
// actionResultItem["ipAddr"] = "IP_IS_NOT_V4_OR_V6"
// }
//
// //存储操作结果
// actionResult = append(actionResult,actionResultItem)
//
// //存储查找过的结果
// ipsMap[ipStr] = actionResultItem
// }
//
// return actionResult
//}
// IsNumeric is_numeric()
// Numeric strings consist of optional sign, any number of digits, optional decimal part and optional exponential part.
// Thus +0123.45e6 is a valid numeric value.
// In PHP hexadecimal (e.g. 0xf4c3b00c) is not supported, but IsNumeric is supported.
func (cthis *helper) IsNumeric(val interface{}) bool {
switch val.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return true
case float32, float64, complex64, complex128:
return true
case string:
str := val.(string)
//现逻辑
//@update 2020.03.21 新增若是传入空格的修正处理
// 若这里不进行空格去除处理,到下面894行执行的时候会报错 runtime error: index out of range [0] with length 0,
// 因为等strings.TrimSpace(str)执行去除空格后,str[0]是不可能有值的!
str = strings.TrimSpace(str)
if str == "" {
return false
}
//原逻辑
// Trim any whitespace
//str = strings.TrimSpace(str)
if str[0] == '-' || str[0] == '+' {
if len(str) == 1 {
return false
}
str = str[1:]
}
// hex
if len(str) > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') {
for _, h := range str[2:] {
if !((h >= '0' && h <= '9') || (h >= 'a' && h <= 'f') || (h >= 'A' && h <= 'F')) {
return false
}
}
return true
}
// 0-9, Point, Scientific
p, s, l := 0, 0, len(str)
for i, v := range str {
if v == '.' { // Point
if p > 0 || s > 0 || i+1 == l {
return false
}
p = i
} else if v == 'e' || v == 'E' { // Scientific
if i == 0 || s > 0 || i+1 == l {
return false
}
s = i
} else if v < '0' || v > '9' {
return false
}
}
return true
}
return false
}
//GET请求url-路径拼接
//@params string urlPath 请求url,如http://192.168.0.1:58909
//@params map[string]string data 请求参数
//@params bool isEncode 特殊字符是否转义处理
//@params bool isHttp 是否将https转换成http处理
func (thisObj *helper) GETUrlPath(urlPath string, data map[string]string, isEncode bool, isHttp bool) string {
//是否是http请求,如果是,则将https替换成http
if isHttp {
urlPath = strings.Replace(urlPath, "https://", "http://", 1)
}
//拼接uri
urlPath += "?"
for k, v := range data {
if isEncode {
v = url.QueryEscape(v)
}
urlPath += k + "=" + v + "&"
}
urlPath = strings.TrimRight(urlPath, "&")
return urlPath
}
//用户名称尾部隐藏处理
//@params string userName 用户名
func (thisObj *helper) UserNameTailHidden(userName string) string {
usernameShow := ""
//先将字符串转成rune
// rune 等同于int32,常用来处理unicode或utf-8字符
userNameRune := []rune(userName)
//获取用户名称字符串长度(不是底层字节长度,如你好,就是2个字符串长度)
userNameLen := len(userNameRune)
//大于3个长度,则从尾部截取3个替换成***
if userNameLen > 3 {
usernameShow = string(userNameRune[:userNameLen-3])
} else {
switch userNameLen {
case 3:
usernameShow = string(userNameRune[:userNameLen-2])
case 2:
usernameShow = string(userNameRune[:userNameLen-1])
//若是1个长度,则以该值为起始,效果如:张***
case 1:
usernameShow = userName
}
}
return usernameShow + "***"
}
//总页数相关处理
//@params int page 当前页
//@params int pageRows 每页数量
//@params int dataTotal 数据总数
func (thisObj *helper) PageTotal(page int, pageRows int, dataTotal int) map[string]int {
//计算总页数
totalPages := int(math.Ceil(float64(dataTotal) / float64(pageRows)))
//总页数最小值限制
if totalPages < 1 {
totalPages = 1
}
//当前页最小值限制
if page < 1 {
page = 1
}
//当前页最大值限制
if page > totalPages {
page = totalPages
}
//计算每页提取数
limitStart := (page - 1) * pageRows
return map[string]int{
"totalPages": totalPages,
"currentPage": page,
"limitStart": limitStart,
}
}
//根据用户id获取订单号
func (thisObj *helper) GetOrderIdByUserId(userId int,orderIdLen int) string {
//当前时间的时分秒+微妙
timeString := time.Now().Format("150405.000000")
//拼接当前用户id
str := timeString + strconv.Itoa(userId)
//转成md5值
md5str := fmt.Sprintf("%x", md5.Sum([]byte(str)))
//生成指定位数强唯一的订单号
orderId := string([]byte(md5str)[:orderIdLen])
return orderId
}
//根据findKey查找RequestParams值
//@params interface{} requestParams get或post的请求参数
//@params string findKey 要查找的key
//@params string findValDataType 查找值的数据类型
//@params interface{} defaultVal 默认值
func (thisObj *helper) FindRequestParamsVal(requestParams interface{},findKey string,findValDataType string,defaultVal interface{}) (interface{},error) {
switch requestParams.(type) {
//如get请求参数
case map[string]string:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]string)[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
//如post请求参数
case map[string]interface{}:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]interface{})[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
}
panic("requestParams_dataType_notFound")
}
//模拟刪除map[string]interface{}里的元素值
//适用于delete()函数执行删除后,造成原map值也跟着被删除,解决该问题的处理
func (thisObj *helper) DeleteElementByMapStrInterface(mapData map[string]interface{},deleteKey string) map[string]interface{} {
mapDataTemp := make(map[string]interface{})
for k,v := range mapData {
//若是与删除key相等,则不进行存储
if k==deleteKey {
continue
}
mapDataTemp[k] = v
}
return mapDataTemp
}
//error对象转字符串输出
func (thisObj *helper) ErrorToString(err error) string {
if err!=nil {
return err.Error()
}
return "nil"
} | } else {
dicStr := "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,1,2,3,4,5,6,7,8,9,0,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z" | random_line_split |
helper.go | package util
import (
"bytes"
"crypto/md5"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
//helper工具集
type helper struct {
}
var Helper *helper
func init() {
Hel | NewHelper() *helper {
return &helper{
}
}
//获取当前时间戳-int类型数据
func (thisObj *helper) CurrTimeInt() int {
return int(time.Now().Unix())
}
//json解析
//@params interface{} t 基本上传参都是map数据类型,如map[string]interface{}或是map[string]string
func (thisObj *helper) JSONMarshal(t interface{}) ([]byte, error) {
buffer := &bytes.Buffer{}
encoder := json.NewEncoder(buffer)
encoder.SetEscapeHTML(false) //HTML标签是否转义处理 => true:{"test":"\u003cbr /\u003e"} false:{"test":"<br />"}
err := encoder.Encode(t)
return buffer.Bytes(), err
}
//创建随机字符串
//@params int keyLength 要创建字符串的长度
//@params int typeN 1 按数字随机,非1 按0~9,a~z,A~Z随机
func (thisObj *helper) CreateRandStr(randStrLength int, typeN int) string {
key := ""
var dic = make([]string, 0)
if typeN == 1 {
dic = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}
} else {
dicStr := "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,1,2,3,4,5,6,7,8,9,0,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z"
dic = strings.Split(dicStr, ",")
}
length := len(dic)
for i := 0; i < randStrLength; i++ {
rand.Seed(time.Now().UnixNano())
randNum := rand.Intn(length - 1)
key += dic[randNum]
}
return key
}
//http响应json-成功数据
//@params string message 响应消息
//@params interface{} data 响应数据,一般也是map数据类型
func (thisObj *helper) HttpResponseJsonSuccess(message string, data interface{}) {
//http响应结果
result := map[string]interface{}{"code": "success", "msg": message, "data": data}
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res, _ := json.Marshal(result)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//失败响应处理
//需要引用包: import ( "github.com/kataras/iris" )
//func (thisObj *helper) HelperResponseFailedJSON(c iris.Context, message string, code interface{}) {
// //获取前置中间件的设置的初始开始时间
// diffTime := c.Values().Get("requestCurrentTime")
// currentTime := time.Now().UnixNano() / 1e3 //计算得出 微秒
// timeConsumed := currentTime - diffTime.(int64)
// result := iris.Map{"code": code, "msg": message, "status": 0, "time_consumed": timeConsumed}
// c.JSON(result)
//}
//http响应seqMap的Json-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeq())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应seqMap的Json(转换成Unicode字符)-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqUnicodeJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeqUnicode())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应json
//@params map[string]interface{} data 响应数据
func (thisObj *helper) HttpResponseJSON(data map[string]interface{}) {
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
res, _ := json.Marshal(data)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//根据字符串调用对应对象的方法
//调用示例:
// type Test struct {
//
// }
//
// func NewTest()*Test{
// return &Test{
//
// }
// }
//
// func (thisObj *Test) Echo() int {
// return 123456
// }
// res,resOk := util.Helper.CallMethodReflect(NewTest(),"Echo")
// fmt.Println(res,resOk)
// fmt.Println(res[0].Int())
//输出结果:
// [<int Value>] true
// 123456
//@params interface{} any 指定对象(指针)
//@params string methodName 指定对象里被调用的方法
//@params []interface{} args 指定对象里被调用的方法传参
//@return []reflect.Value reflectValue 被调用对象返回的结果集
//@return bool runOk 调用是否成功,true 成功,false 失败(失败情况就得根据情况排查问题出在哪里)
func (thisObj *helper) CallMethodReflect(any interface{}, methodName string, args []interface{}) (reflectValue []reflect.Value, runOk bool) {
//准备调用方法的参数
inputs := make([]reflect.Value, len(args))
for i := range args {
inputs[i] = reflect.ValueOf(args[i])
}
//执行调用指定对象的方法
v := reflect.ValueOf(any).MethodByName(methodName)
//若指定对象被调用的方法不存在或无效
if v.String() == "<invalid Value>" {
runOk = false
return []reflect.Value{}, runOk
}
//调用成功
runOk = true
reflectValue = v.Call(inputs)
return reflectValue, runOk
}
//获取用户IP
//@desc X-Forwarded-For:若是代理过来的,会获取到多个ip,最后一个ip就是真实的
func (thisObj *helper) GetIP(req *http.Request) string {
remoteAddr := req.RemoteAddr
if ip := req.Header.Get("X-Real-IP"); ip != "" {
remoteAddr = ip
} else if ip = req.Header.Get("X-Forwarded-For"); ip != "" {
//原逻辑:容易获取多个ip
//remoteAddr = ip
//现逻辑:获取最后一个真实ip
//使用X-Forwarded-For获取到ip会有多个,如117.136.39.96, 183.232.151.36;修改成只获取最后一个.
//若多个ip存在时,按英文逗号分隔
ipArr := strings.Split(ip,",")
//若存在多个ip值时,需要去除空格;获取最后一位IP
ip := Trim(ipArr[len(ipArr)-1])
//先判断获取最后一位IP是否是公网IP,如果不是就获取第一个IP处理
if !thisObj.CheckIsPublicIP(net.ParseIP(ip)) {
ip = Trim(ipArr[0])
}
remoteAddr = ip
} else {
remoteAddr, _, _ = net.SplitHostPort(remoteAddr)
}
if remoteAddr == "::1" {
remoteAddr = "127.0.0.1"
}
return remoteAddr
}
//检测IP是否是公网IP
func (thisObj *helper) CheckIsPublicIP(IP net.IP) bool {
if IP.IsLoopback() || IP.IsLinkLocalMulticast() || IP.IsLinkLocalUnicast() {
return false
}
if ip4 := IP.To4(); ip4 != nil {
switch true {
case ip4[0] == 10:
return false
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
return false
case ip4[0] == 192 && ip4[1] == 168:
return false
default:
return true
}
}
return false
}
//把IP127.0.0.1格式转化为long类型
func (thisObj *helper) IP2long(ipStr string) uint32 {
str := net.ParseIP(ipStr)
if str == nil {
return 0
}
//IPv4
ip := str.To4()
if ip == nil {
//IPv6
ip = str.To16()
}
if ip == nil {
return 0
}
return binary.BigEndian.Uint32(ip)
}
//把long类型IP地址转化为127.0.0.1格式
func (thisObj *helper) Long2IP(ipLong uint32) string {
ipByte := make([]byte, 4)
binary.BigEndian.PutUint32(ipByte, ipLong)
ipStr := net.IP(ipByte)
return ipStr.String()
}
//检测是否是IPv4
func (thisObj *helper) IsIPv4(ip string) bool {
//返回的值不等于nil,就是如果ip不是IPv4地址,To4返回nil,当且仅当地址是IPv4地址时,此条件才应返回true.
if net.ParseIP(ip).To4()!=nil {
return true
}
return false
}
//检测是否是IPv6
func (thisObj *helper) IsIPv6(ip string) bool {
if thisObj.IsIPv4(ip) {
return false
}
return true
}
////多个IP地址集合解析(包含IPv6地址解析),如58.177.183.37,134.159.119.48
//func (thisObj *helper) IP2Region(params map[string]interface{}) (actionResult []map[string]interface{}) {
// //必传-多个ip地址集合
// ips := params["ips"].(string)
// //可选-是否需要排重
// isUnique := false
// if params["isUnique"]!=nil {
// isUnique = params["isUnique"].(bool)
// }
// //去除ip左右多出的逗号
// ips = strings.Trim(ips,",")
//
// //多个ip地址数组
// ipsArr := strings.Split(ips,",")
// //ip排重
// ipsMap := make(map[string]map[string]interface{})
//
// //IPV4-解析
// ip2regionLibrary := NewIp2regionLibrary()
// defer ip2regionLibrary.Close()
// //IPV6-解析
// ipv6regionLibrary := NewIpv6regionLibrary(map[string]interface{}{})
// defer ipv6regionLibrary.Close()
//
// //操作结果存储
// actionResult = make([]map[string]interface{},0)
// for _,ip := range ipsArr {
// //操作结果每项数据
// actionResultItem := map[string]interface{}{
// "ipStr":ip,
// "ipAddr":"---",
// }
//
// //------------ ip地址值转换处理 ------------
// //模拟php的代码:
// // 有可能传递过来的是ip2long转换的整型ip值 转换成ip字符串
// // $ipStr = is_numeric($ip) ? long2ip($ip) : $ip;
// ipStr := ""
// //若是整型值
// if thisObj.IsNumeric(ip) {
// //转成字符串
// ipInt,ipIntErr := strconv.Atoi(ip)
// //若转换出错
// if ipIntErr!=nil {
// actionResultItem["ipAddr"] = "IP_TO_INT_ERROR"
// actionResult = append(actionResult,actionResultItem)
// continue
// }
// //将整型值解析成ip地址
// ipStr = thisObj.Long2IP(uint32(ipInt))
// }else{
// //存储ip地址
// ipStr = ip
// }
//
// //确认都是字符串ip返回
// actionResultItem["ipStr"] = ipStr
//
// //是否排重
// if isUnique {
// //------------ 排重处理 ------------
// //若该IP已处理过,则不进行处理
// if ipsMap[ipStr]!=nil {
// continue
// }
// }else{
// //------------ 不排重处理 ------------
// //若是不排重,且之前找到过了,则不需要再重复找了
// if ipsMap[ipStr]!=nil {
// actionResult = append(actionResult,ipsMap[ipStr])
// continue
// }
// }
//
// //------------ ip地址解析处理 ------------
// //如果是IPv4的处理
// if thisObj.IsIPv4(ipStr) {
// //IPV4地址解析处理
// ipv4Res,ipv4ResErr := ip2regionLibrary.DoIpAddressAnalyze(ipStr)
// if ipv4ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv4Res:",ipv4Res)
// //输出结果:
// //ipStr: 134.159.119.47
// //ipv4Res: map[City:香港 CityId:0 Country:中国 ISP:澳大利亚电信 Province:香港 Region:0]
//
// if len(ipv4Res)>0 {
// actionResultItem["ipAddr"] = ipv4Res["Country"].(string)+"-"+ipv4Res["Province"].(string)+"-"+ipv4Res["City"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_NOT_FOUND"
// }
// }
// //如果是IPv6的处理
// }else if thisObj.IsIPv6(ipStr) {
// ipv6Res,ipv6ResErr := ipv6regionLibrary.GetIPv6Address(map[string]interface{}{
// "ipv6":ipStr,
// })
// if ipv6ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv6Res:",ipv6Res)
// //输出结果:
// //ipStr: 2408:84e7:4ad:1776:9c97:e79d:1e94:e59e
// //ipv6Res: map[address:中国山西省 中国联通3GNET网络(全省通用) id:50440 ipv6:2408:84e7:4ad:1776:9c97:e79d:1e94:e59e]
//
// if len(ipv6Res)>0 {
// actionResultItem["ipAddr"] = ipv6Res["address"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_NOT_FOUND"
// }
// }
// //既不是IPv4,也不是IPv6,则是错误的
// }else{
// actionResultItem["ipAddr"] = "IP_IS_NOT_V4_OR_V6"
// }
//
// //存储操作结果
// actionResult = append(actionResult,actionResultItem)
//
// //存储查找过的结果
// ipsMap[ipStr] = actionResultItem
// }
//
// return actionResult
//}
// IsNumeric is_numeric()
// Numeric strings consist of optional sign, any number of digits, optional decimal part and optional exponential part.
// Thus +0123.45e6 is a valid numeric value.
// In PHP hexadecimal (e.g. 0xf4c3b00c) is not supported, but IsNumeric is supported.
func (cthis *helper) IsNumeric(val interface{}) bool {
switch val.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return true
case float32, float64, complex64, complex128:
return true
case string:
str := val.(string)
//现逻辑
//@update 2020.03.21 新增若是传入空格的修正处理
// 若这里不进行空格去除处理,到下面894行执行的时候会报错 runtime error: index out of range [0] with length 0,
// 因为等strings.TrimSpace(str)执行去除空格后,str[0]是不可能有值的!
str = strings.TrimSpace(str)
if str == "" {
return false
}
//原逻辑
// Trim any whitespace
//str = strings.TrimSpace(str)
if str[0] == '-' || str[0] == '+' {
if len(str) == 1 {
return false
}
str = str[1:]
}
// hex
if len(str) > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') {
for _, h := range str[2:] {
if !((h >= '0' && h <= '9') || (h >= 'a' && h <= 'f') || (h >= 'A' && h <= 'F')) {
return false
}
}
return true
}
// 0-9, Point, Scientific
p, s, l := 0, 0, len(str)
for i, v := range str {
if v == '.' { // Point
if p > 0 || s > 0 || i+1 == l {
return false
}
p = i
} else if v == 'e' || v == 'E' { // Scientific
if i == 0 || s > 0 || i+1 == l {
return false
}
s = i
} else if v < '0' || v > '9' {
return false
}
}
return true
}
return false
}
//GET请求url-路径拼接
//@params string urlPath 请求url,如http://192.168.0.1:58909
//@params map[string]string data 请求参数
//@params bool isEncode 特殊字符是否转义处理
//@params bool isHttp 是否将https转换成http处理
func (thisObj *helper) GETUrlPath(urlPath string, data map[string]string, isEncode bool, isHttp bool) string {
//是否是http请求,如果是,则将https替换成http
if isHttp {
urlPath = strings.Replace(urlPath, "https://", "http://", 1)
}
//拼接uri
urlPath += "?"
for k, v := range data {
if isEncode {
v = url.QueryEscape(v)
}
urlPath += k + "=" + v + "&"
}
urlPath = strings.TrimRight(urlPath, "&")
return urlPath
}
//用户名称尾部隐藏处理
//@params string userName 用户名
func (thisObj *helper) UserNameTailHidden(userName string) string {
usernameShow := ""
//先将字符串转成rune
// rune 等同于int32,常用来处理unicode或utf-8字符
userNameRune := []rune(userName)
//获取用户名称字符串长度(不是底层字节长度,如你好,就是2个字符串长度)
userNameLen := len(userNameRune)
//大于3个长度,则从尾部截取3个替换成***
if userNameLen > 3 {
usernameShow = string(userNameRune[:userNameLen-3])
} else {
switch userNameLen {
case 3:
usernameShow = string(userNameRune[:userNameLen-2])
case 2:
usernameShow = string(userNameRune[:userNameLen-1])
//若是1个长度,则以该值为起始,效果如:张***
case 1:
usernameShow = userName
}
}
return usernameShow + "***"
}
//总页数相关处理
//@params int page 当前页
//@params int pageRows 每页数量
//@params int dataTotal 数据总数
func (thisObj *helper) PageTotal(page int, pageRows int, dataTotal int) map[string]int {
//计算总页数
totalPages := int(math.Ceil(float64(dataTotal) / float64(pageRows)))
//总页数最小值限制
if totalPages < 1 {
totalPages = 1
}
//当前页最小值限制
if page < 1 {
page = 1
}
//当前页最大值限制
if page > totalPages {
page = totalPages
}
//计算每页提取数
limitStart := (page - 1) * pageRows
return map[string]int{
"totalPages": totalPages,
"currentPage": page,
"limitStart": limitStart,
}
}
//根据用户id获取订单号
func (thisObj *helper) GetOrderIdByUserId(userId int,orderIdLen int) string {
//当前时间的时分秒+微妙
timeString := time.Now().Format("150405.000000")
//拼接当前用户id
str := timeString + strconv.Itoa(userId)
//转成md5值
md5str := fmt.Sprintf("%x", md5.Sum([]byte(str)))
//生成指定位数强唯一的订单号
orderId := string([]byte(md5str)[:orderIdLen])
return orderId
}
//根据findKey查找RequestParams值
//@params interface{} requestParams get或post的请求参数
//@params string findKey 要查找的key
//@params string findValDataType 查找值的数据类型
//@params interface{} defaultVal 默认值
func (thisObj *helper) FindRequestParamsVal(requestParams interface{},findKey string,findValDataType string,defaultVal interface{}) (interface{},error) {
switch requestParams.(type) {
//如get请求参数
case map[string]string:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]string)[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
//如post请求参数
case map[string]interface{}:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]interface{})[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
}
panic("requestParams_dataType_notFound")
}
//模拟刪除map[string]interface{}里的元素值
//适用于delete()函数执行删除后,造成原map值也跟着被删除,解决该问题的处理
func (thisObj *helper) DeleteElementByMapStrInterface(mapData map[string]interface{},deleteKey string) map[string]interface{} {
mapDataTemp := make(map[string]interface{})
for k,v := range mapData {
//若是与删除key相等,则不进行存储
if k==deleteKey {
continue
}
mapDataTemp[k] = v
}
return mapDataTemp
}
//error对象转字符串输出
func (thisObj *helper) ErrorToString(err error) string {
if err!=nil {
return err.Error()
}
return "nil"
} | per = NewHelper()
}
func | identifier_body |
helper.go | package util
import (
"bytes"
"crypto/md5"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
//helper工具集
type helper struct {
}
var Helper *helper
func init() {
Helper = NewHelper()
}
func NewHelper() *helper {
return &helper{
}
}
//获取当前时间戳-int类型数据
func (thisObj *helper) CurrTimeInt() int {
return int(time.Now().Unix())
}
//json解析
//@params interface{} t 基本上传参都是map数据类型,如map[string]interface{}或是map[string]string
func (thisObj *helper) JSONMarshal(t interface{}) ([]byte, error) {
buffer := &bytes.Buffer{}
encoder := json.NewEncoder(buffer)
encoder.SetEscapeHTML(false) //HTML标签是否转义处理 => true:{"test":"\u003cbr /\u003e"} false:{"test":"<br />"}
err := encoder.Encode(t)
return buffer.Bytes(), err
}
//创建随机字符串
//@params int keyLength 要创建字符串的长度
//@params int typeN 1 按数字随机,非1 按0~9,a~z,A~Z随机
func (thisObj *helper) CreateRandStr(randStrLength int, typeN int) string {
key := ""
var dic = make([]string, 0)
if typeN == 1 {
dic = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}
} else {
dicStr := "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,1,2,3,4,5,6,7,8,9,0,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z"
dic = strings.Split(dicStr, ",")
}
length := len(dic)
for i := 0; i < randStrLength; i++ {
rand.Seed(time.Now().UnixNano())
randNum := rand.Intn(length - 1)
key += dic[randNum]
}
return key
}
//http响应json-成功数据
//@params string message 响应消息
//@params interface{} data 响应数据,一般也是map数据类型
func (thisObj *helper) HttpResponseJsonSuccess(message string, data interface{}) {
//http响应结果
result := map[string]interface{}{"code": "success", "msg": message, "data": data}
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res, _ := json.Marshal(result)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//失败响应处理
//需要引用包: import ( "github.com/kataras/iris" )
//func (thisObj *helper) HelperResponseFailedJSON(c iris.Context, message string, code interface{}) {
// //获取前置中间件的设置的初始开始时间
// diffTime := c.Values().Get("requestCurrentTime")
// currentTime := time.Now().UnixNano() / 1e3 //计算得出 微秒
// timeConsumed := currentTime - diffTime.(int64)
// result := iris.Map{"code": code, "msg": message, "status": 0, "time_consumed": timeConsumed}
// c.JSON(result)
//}
//http响应seqMap的Json-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeq())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应seqMap的Json(转换成Unicode字符)-成功数据
//@params string message 响应消息
//@params string data 响应数据,解析成为json的字符串数据,如`{"test":"123"}`
func (thisObj *helper) HttpResponseSeqUnicodeJsonSuccess(message string, data string) {
//seqMap存储数据
result := NewSeqMap()
result.Put("code", "success")
result.Put("msg", message)
result.Put("data", data)
//设置头信息
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
//解析成json输出
res := []byte(result.JsonSeqUnicode())
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//http响应json
//@params map[string]interface{} data 响应数据
func (thisObj *helper) HttpResponseJSON(data map[string]interface{}) {
//xxx.Http.SetHeader("Content-Type", "application/json;charset=utf-8")
res, _ := json.Marshal(data)
if len(res) > 4*1024 {
//xxx.Http.WriteGzip(res)
} else {
//xxx.Http.Write(res)
}
}
//根据字符串调用对应对象的方法
//调用示例:
// type Test struct {
//
// }
//
// func NewTest()*Test{
// return &Test{
//
// }
// }
//
// func (thisObj *Test) Echo() int {
// return 123456
// }
// res,resOk := util.Helper.CallMethodReflect(NewTest(),"Echo")
// fmt.Println(res,resOk)
// fmt.Println(res[0].Int())
//输出结果:
// [<int Value>] true
// 123456
//@params interface{} any 指定对象(指针)
//@params string methodName 指定对象里被调用的方法
//@params []interface{} args 指定对象里被调用的方法传参
//@return []reflect.Value reflectValue 被调用对象返回的结果集
//@return bool runOk 调用是否成功,true 成功,false 失败(失败情况就得根据情况排查问题出在哪里)
func (thisObj *helper) CallMethodReflect(any interface{}, methodName string, args []interface{}) (reflectValue []reflect.Value, runOk bool) {
//准备调用方法的参数
inputs := make([]reflect.Value, len(args))
for i := range args {
inputs[i] = reflect.ValueOf(args[i])
}
//执行调用指定对象的方法
v := reflect.ValueOf(any).MethodByName(methodName)
//若指定对象被调用的方法不存在或无效
if v.String() == "<invalid Value>" {
runOk = false
return []reflect.Value{}, runOk
}
//调用成功
runOk = true
reflectValue = v.Call(inputs)
return reflectValue, runOk
}
//获取用户IP
//@desc X-Forwarded-For:若是代理过来的,会获取到多个ip,最后一个ip就是真实的
func (thisObj *helper) GetIP(req *http.Request) string {
remoteAddr := req.RemoteAddr
if ip := req.Header.Get("X-Real-IP"); ip != "" {
remoteAddr = ip
} else if ip = req.Header.Get("X-Forwarded-For"); ip != "" {
//原逻辑:容易获取多个ip
//remoteAddr = ip
//现逻辑:获取最后一个真实ip
//使用X-Forwarded-For获取到ip会有多个,如117.136.39.96, 183.232.151.36;修改成只获取最后一个.
//若多个ip存在时,按英文逗号分隔
ipArr := strings.Split(ip,",")
//若存在多个ip值时,需要去除空格;获取最后一位IP
ip := Trim(ipArr[len(ipArr)-1])
//先判断获取最后一位IP是否是公网IP,如果不是就获取第一个IP处理
if !thisObj.CheckIsPublicIP(net.ParseIP(ip)) {
ip = Trim(ipArr[0])
}
remoteAddr = ip
} else {
remoteAddr, _, _ = net.SplitHostPort(remoteAddr)
}
if remoteAddr == "::1" {
remoteAddr = "127.0.0.1"
}
return remoteAddr
}
//检测IP是否是公网IP
func (thisObj *helper) CheckIsPublicIP(IP net.IP) bool {
if IP.IsLoopback() || IP.IsLinkLocalMulticast() || IP.IsLinkLocalUnicast() {
return false
}
if ip4 := IP.To4(); ip4 != nil {
switch true {
case ip4[0] == 10:
return false
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
return false
case ip4[0] == 192 && ip4[1] == 168:
return false
default:
return true
}
}
return false
}
//把IP127.0.0.1格式转化为long类型
func (thisObj *helper) IP2long(ipStr string) uint32 {
str := net.ParseIP(ipStr)
if str == nil {
return 0
}
//IPv4
ip := str.To4()
if ip == nil {
//IPv6
ip = str.To16()
}
if ip == nil {
return 0
}
return binary.BigEndian.Uint32(ip)
}
//把long类型IP地址转化为127.0.0.1格式
func (thisObj *helper) Long2IP(ipLong uint32) string {
ipByte := make([]byte, 4)
binary.BigEndian.PutUint32(ipByte, ipLong)
ipStr := net.IP(ipByte)
return ipStr.String()
}
//检测是否是IPv4
func (thisObj *helper) IsIPv4(ip string) bool {
//返回的值不等于nil,就是如果ip不是IPv4地址,To4返回nil,当且仅当地址是IPv4地址时,此条件才应返回true.
if net.ParseIP(ip).To4()!=nil {
return true
}
return false
}
//检测是否是IPv6
func (thisObj *helper) IsIPv6(ip string) bool {
if thisObj.IsIPv4(ip) {
return false
}
return true
}
////多个IP地址集合解析(包含IPv6地址解析),如58.177.183.37,134.159.119.48
//func (thisObj *helper) IP2Region(params map[string]interface{}) (actionResult []map[string]interface{}) {
// //必传-多个ip地址集合
// ips := params["ips"].(string)
// //可选-是否需要排重
// isUnique := false
// if params["isUnique"]!=nil {
// isUnique = params["isUnique"].(bool)
// }
// //去除ip左右多出的逗号
// ips = strings.Trim(ips,",")
//
// //多个ip地址数组
// ipsArr := strings.Split(ips,",")
// //ip排重
// ipsMap := make(map[string]map[string]interface{})
//
// //IPV4-解析
// ip2regionLibrary := NewIp2regionLibrary()
// defer ip2regionLibrary.Close()
// //IPV6-解析
// ipv6regionLibrary := NewIpv6regionLibrary(map[string]interface{}{})
// defer ipv6regionLibrary.Close()
//
// //操作结果存储
// actionResult = make([]map[string]interface{},0)
// for _,ip := range ipsArr {
// //操作结果每项数据
// actionResultItem := map[string]interface{}{
// "ipStr":ip,
// "ipAddr":"---",
// }
//
// //------------ ip地址值转换处理 ----- | p的代码:
// // 有可能传递过来的是ip2long转换的整型ip值 转换成ip字符串
// // $ipStr = is_numeric($ip) ? long2ip($ip) : $ip;
// ipStr := ""
// //若是整型值
// if thisObj.IsNumeric(ip) {
// //转成字符串
// ipInt,ipIntErr := strconv.Atoi(ip)
// //若转换出错
// if ipIntErr!=nil {
// actionResultItem["ipAddr"] = "IP_TO_INT_ERROR"
// actionResult = append(actionResult,actionResultItem)
// continue
// }
// //将整型值解析成ip地址
// ipStr = thisObj.Long2IP(uint32(ipInt))
// }else{
// //存储ip地址
// ipStr = ip
// }
//
// //确认都是字符串ip返回
// actionResultItem["ipStr"] = ipStr
//
// //是否排重
// if isUnique {
// //------------ 排重处理 ------------
// //若该IP已处理过,则不进行处理
// if ipsMap[ipStr]!=nil {
// continue
// }
// }else{
// //------------ 不排重处理 ------------
// //若是不排重,且之前找到过了,则不需要再重复找了
// if ipsMap[ipStr]!=nil {
// actionResult = append(actionResult,ipsMap[ipStr])
// continue
// }
// }
//
// //------------ ip地址解析处理 ------------
// //如果是IPv4的处理
// if thisObj.IsIPv4(ipStr) {
// //IPV4地址解析处理
// ipv4Res,ipv4ResErr := ip2regionLibrary.DoIpAddressAnalyze(ipStr)
// if ipv4ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv4Res:",ipv4Res)
// //输出结果:
// //ipStr: 134.159.119.47
// //ipv4Res: map[City:香港 CityId:0 Country:中国 ISP:澳大利亚电信 Province:香港 Region:0]
//
// if len(ipv4Res)>0 {
// actionResultItem["ipAddr"] = ipv4Res["Country"].(string)+"-"+ipv4Res["Province"].(string)+"-"+ipv4Res["City"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV4_ADDRESS_NOT_FOUND"
// }
// }
// //如果是IPv6的处理
// }else if thisObj.IsIPv6(ipStr) {
// ipv6Res,ipv6ResErr := ipv6regionLibrary.GetIPv6Address(map[string]interface{}{
// "ipv6":ipStr,
// })
// if ipv6ResErr!=nil {
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_ANALYZE_ERROR"
// }else{
// //调试输出:
// //fmt.Println("ipStr:",ipStr)
// //fmt.Println("ipv6Res:",ipv6Res)
// //输出结果:
// //ipStr: 2408:84e7:4ad:1776:9c97:e79d:1e94:e59e
// //ipv6Res: map[address:中国山西省 中国联通3GNET网络(全省通用) id:50440 ipv6:2408:84e7:4ad:1776:9c97:e79d:1e94:e59e]
//
// if len(ipv6Res)>0 {
// actionResultItem["ipAddr"] = ipv6Res["address"].(string)
// }else{
// actionResultItem["ipAddr"] = "IPV6_ADDRESS_NOT_FOUND"
// }
// }
// //既不是IPv4,也不是IPv6,则是错误的
// }else{
// actionResultItem["ipAddr"] = "IP_IS_NOT_V4_OR_V6"
// }
//
// //存储操作结果
// actionResult = append(actionResult,actionResultItem)
//
// //存储查找过的结果
// ipsMap[ipStr] = actionResultItem
// }
//
// return actionResult
//}
// IsNumeric is_numeric()
// Numeric strings consist of optional sign, any number of digits, optional decimal part and optional exponential part.
// Thus +0123.45e6 is a valid numeric value.
// In PHP hexadecimal (e.g. 0xf4c3b00c) is not supported, but IsNumeric is supported.
func (cthis *helper) IsNumeric(val interface{}) bool {
switch val.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return true
case float32, float64, complex64, complex128:
return true
case string:
str := val.(string)
//现逻辑
//@update 2020.03.21 新增若是传入空格的修正处理
// 若这里不进行空格去除处理,到下面894行执行的时候会报错 runtime error: index out of range [0] with length 0,
// 因为等strings.TrimSpace(str)执行去除空格后,str[0]是不可能有值的!
str = strings.TrimSpace(str)
if str == "" {
return false
}
//原逻辑
// Trim any whitespace
//str = strings.TrimSpace(str)
if str[0] == '-' || str[0] == '+' {
if len(str) == 1 {
return false
}
str = str[1:]
}
// hex
if len(str) > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') {
for _, h := range str[2:] {
if !((h >= '0' && h <= '9') || (h >= 'a' && h <= 'f') || (h >= 'A' && h <= 'F')) {
return false
}
}
return true
}
// 0-9, Point, Scientific
p, s, l := 0, 0, len(str)
for i, v := range str {
if v == '.' { // Point
if p > 0 || s > 0 || i+1 == l {
return false
}
p = i
} else if v == 'e' || v == 'E' { // Scientific
if i == 0 || s > 0 || i+1 == l {
return false
}
s = i
} else if v < '0' || v > '9' {
return false
}
}
return true
}
return false
}
//GET请求url-路径拼接
//@params string urlPath 请求url,如http://192.168.0.1:58909
//@params map[string]string data 请求参数
//@params bool isEncode 特殊字符是否转义处理
//@params bool isHttp 是否将https转换成http处理
func (thisObj *helper) GETUrlPath(urlPath string, data map[string]string, isEncode bool, isHttp bool) string {
//是否是http请求,如果是,则将https替换成http
if isHttp {
urlPath = strings.Replace(urlPath, "https://", "http://", 1)
}
//拼接uri
urlPath += "?"
for k, v := range data {
if isEncode {
v = url.QueryEscape(v)
}
urlPath += k + "=" + v + "&"
}
urlPath = strings.TrimRight(urlPath, "&")
return urlPath
}
//用户名称尾部隐藏处理
//@params string userName 用户名
func (thisObj *helper) UserNameTailHidden(userName string) string {
usernameShow := ""
//先将字符串转成rune
// rune 等同于int32,常用来处理unicode或utf-8字符
userNameRune := []rune(userName)
//获取用户名称字符串长度(不是底层字节长度,如你好,就是2个字符串长度)
userNameLen := len(userNameRune)
//大于3个长度,则从尾部截取3个替换成***
if userNameLen > 3 {
usernameShow = string(userNameRune[:userNameLen-3])
} else {
switch userNameLen {
case 3:
usernameShow = string(userNameRune[:userNameLen-2])
case 2:
usernameShow = string(userNameRune[:userNameLen-1])
//若是1个长度,则以该值为起始,效果如:张***
case 1:
usernameShow = userName
}
}
return usernameShow + "***"
}
//总页数相关处理
//@params int page 当前页
//@params int pageRows 每页数量
//@params int dataTotal 数据总数
func (thisObj *helper) PageTotal(page int, pageRows int, dataTotal int) map[string]int {
//计算总页数
totalPages := int(math.Ceil(float64(dataTotal) / float64(pageRows)))
//总页数最小值限制
if totalPages < 1 {
totalPages = 1
}
//当前页最小值限制
if page < 1 {
page = 1
}
//当前页最大值限制
if page > totalPages {
page = totalPages
}
//计算每页提取数
limitStart := (page - 1) * pageRows
return map[string]int{
"totalPages": totalPages,
"currentPage": page,
"limitStart": limitStart,
}
}
//根据用户id获取订单号
func (thisObj *helper) GetOrderIdByUserId(userId int,orderIdLen int) string {
//当前时间的时分秒+微妙
timeString := time.Now().Format("150405.000000")
//拼接当前用户id
str := timeString + strconv.Itoa(userId)
//转成md5值
md5str := fmt.Sprintf("%x", md5.Sum([]byte(str)))
//生成指定位数强唯一的订单号
orderId := string([]byte(md5str)[:orderIdLen])
return orderId
}
//根据findKey查找RequestParams值
//@params interface{} requestParams get或post的请求参数
//@params string findKey 要查找的key
//@params string findValDataType 查找值的数据类型
//@params interface{} defaultVal 默认值
func (thisObj *helper) FindRequestParamsVal(requestParams interface{},findKey string,findValDataType string,defaultVal interface{}) (interface{},error) {
switch requestParams.(type) {
//如get请求参数
case map[string]string:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]string)[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
//如post请求参数
case map[string]interface{}:
{
//根据findKey找值
findVal,findValExi := requestParams.(map[string]interface{})[findKey]
//若值不存在,则已默认值返回
if !findValExi {
return defaultVal,errors.New("val_not_found")
}
//找到值的数据类型
switch findValDataType {
case "int":
return InterfaceToInt(findVal)
case "string":
return InterfaceToStr(findVal)
}
}
}
panic("requestParams_dataType_notFound")
}
//模拟刪除map[string]interface{}里的元素值
//适用于delete()函数执行删除后,造成原map值也跟着被删除,解决该问题的处理
func (thisObj *helper) DeleteElementByMapStrInterface(mapData map[string]interface{},deleteKey string) map[string]interface{} {
mapDataTemp := make(map[string]interface{})
for k,v := range mapData {
//若是与删除key相等,则不进行存储
if k==deleteKey {
continue
}
mapDataTemp[k] = v
}
return mapDataTemp
}
//error对象转字符串输出
func (thisObj *helper) ErrorToString(err error) string {
if err!=nil {
return err.Error()
}
return "nil"
} | -------
// //模拟ph | conditional_block |
q19.rs | /*
LDBC SNB BI query 19. Interaction path between cities
https://ldbc.github.io/ldbc_snb_docs_snapshot/bi-read-19.pdf
*/
use differential_dataflow::collection::AsCollection;
use differential_dataflow::input::Input;
use differential_dataflow::operators::{Join, Count, Iterate, Reduce};
use timely::dataflow::ProbeHandle;
use crate::lib::loader::*;
use crate::lib::types::*;
use crate::lib::helpers::{input_insert_vec, limit, print_trace};
use differential_dataflow::operators::arrange::ArrangeBySelf;
use timely::dataflow::operators::{Probe, Map, Delay};
use std::time::Instant;
use std::cmp::{min, max};
pub fn | (path: String, change_path: String, params: &Vec<String>) {
// unpack parameters
let param_city1 = params[0].parse::<u64>().unwrap();
let param_city2 = params[1].parse::<u64>().unwrap();
timely::execute_from_args(std::env::args(), move |worker| {
let mut timer = worker.timer();
let index = worker.index();
let peers = worker.peers();
let mut probe = ProbeHandle::new();
// create dataflow
let (
mut trace,
mut located_in_input,
mut knows_input,
mut has_creator_input,
mut reply_of_input,
) =
worker.dataflow::<usize,_,_>(|scope| {
let (located_in_input, locatedin) = scope.new_collection::<DynamicConnection, _>();
let (knows_input, knows) = scope.new_collection::<DynamicConnection, _>();
// creators for comments AND posts
let (has_creator_input, has_creator) = scope.new_collection::<DynamicConnection, _>();
// replyOf for comments AND posts
let (reply_of_input, reply_of) = scope.new_collection::<DynamicConnection, _>();
// people of city1
let people1 = locatedin
.filter(move |conn| param_city1.eq(conn.b()))
.map(|conn| conn.a().clone())
;
// people of city2
let people2 = locatedin
.filter(move |conn| param_city2.eq(conn.b()))
.map(|conn| conn.a().clone())
;
// bidirectional knows relation
let bi_knows = knows
.map(|conn| (conn.b().clone(), conn.a().clone()))
.concat(
&knows.map(|conn| (conn.a().clone(), conn.b().clone()))
)
;
// calculate weights starting from personB
let weights = bi_knows
.join_map( // join messages of personB
&has_creator.map(|conn| (conn.b().clone(), conn.a().clone())),
|pb, pa, m| (m.clone(), (pb.clone(), pa.clone())),
)
.join_map( // join a reply of the message
&reply_of.map(|conn| (conn.b().clone(), conn.a().clone())),
|_mp, (pb, pa), mc| (mc.clone(), (pb.clone(), pa.clone()))
)
.join_map( // join creator of last message (personA)
&has_creator.map(|conn| (conn.a().clone(), conn.b().clone())),
|_m, (pb, pa), pm| (pb.clone(), pa.clone(), pm.clone())
)
.filter( // check if the last message's creator is the other person we started with
|(_pb, pa, pm)| pa.eq(pm)
)
// drop duplicated message creator, and make sure the lower id is the first in the tuple
// this is needed for the aggregation
.map(
|(pb, pa, _)| (min(pa, pb), max(pa, pb))
)
// aggregate (p1, p2) pairs,
// which will result in the number of interactions between these people
.count()
// map result for next steps
.map(
// fixme: hack solution as floats cannot be used directly (not implementing Ord)
|((p1, p2), c)| (p1, (p2, (((1.0/c as f32)*10000000000.0) as isize)))
)
; // -> (src, (dst, weight))
// create bidirectional weights
let weights = weights
.concat(&weights.map(|(p1, (p2, c))| (p2, (p1, c))))
;
// root nodes are people from city one.
// ((p, p) 0) represents an initial path from p to p with 0 weight.
let nodes = people1
.map(|p| ((p, p), 0))
;
// calculate shortest paths from people in city 1
// based on https://github.com/frankmcsherry/blog/blob/master/posts/2019-05-20.md
let shortest_paths = nodes
.iterate(|dists| { // calculate shortest path to every other node
let edges = weights.enter(&dists.scope());
let nodes = nodes.enter(&dists.scope());
dists // -> ((src, dst), distance)
.map(|((root, dst), dist)| (dst.clone(), (root.clone(), dist.clone())))
// join next step and calculate weights
.join_map(
&edges,
|_src, (root, distance), (dst, weight)| ((root.clone(), dst.clone()), distance + weight)
) // -> ((root, dst), distance),
// add original nodes
.concat(&nodes) // -> ((root, dst), distance)
// Timely magic, to speed up updates with some time manipulation (see blogpost above)
.inner
.map_in_place(|((_d, w), t, _r)|
t.inner = std::cmp::max(t.inner, *w as u64)
)
.delay(|(_, t, _), _| t.clone())
.as_collection()
// finally keep only the shortest path between two nodes (grouped by (src, dst))
.reduce(|_key, input, output| output.push((*input[0].0, 1)))
}) // -> ((src, dst), distance)
// map for semijoin
.map(|((src, dst), distance)| (dst, (src, distance)))
// filter out result which are not between people from city1 and city2
.semijoin(&people2)
;
// add sorting options and final results
let result = shortest_paths
.map(|(dst, (src, distance))| (
(std::isize::MAX - distance, src, dst), // sort: -distance, +src, +dst
vec![src.to_string(), dst.to_string(), (distance as f64/10000000000.0).to_string()]
))
;
let arrangement = limit(&result, 20)
.arrange_by_self();
arrangement.stream.probe_with(&mut probe);
return (
arrangement.trace,
located_in_input, knows_input, has_creator_input, reply_of_input
);
});
// add inputs
let mut next_time: usize = 1;
input_insert_vec(
load_dynamic_connection("dynamic/person_isLocatedIn_place_0_0.csv", path.as_str(), index, peers),
&mut located_in_input,
next_time
);
input_insert_vec(
load_dynamic_connection("dynamic/person_knows_person_0_0.csv", path.as_str(), index, peers),
&mut knows_input,
next_time
);
// insert hasCreator relations
input_insert_vec(
load_dynamic_connection("dynamic/post_hasCreator_person_0_0.csv", path.as_str(), index, peers),
&mut has_creator_input,
0 // do not advance just yet
);
input_insert_vec(
load_dynamic_connection("dynamic/comment_hasCreator_person_0_0.csv", path.as_str(), index, peers),
&mut has_creator_input,
next_time
);
// insert replyOf relations
input_insert_vec(
load_dynamic_connection("dynamic/comment_replyOf_post_0_0.csv", path.as_str(), index, peers),
&mut reply_of_input,
0 // do not advance just yet
);
input_insert_vec(
load_dynamic_connection("dynamic/comment_replyOf_comment_0_0.csv", path.as_str(), index, peers),
&mut reply_of_input,
next_time
);
eprintln!("LOADED;{:}", timer.elapsed().as_secs_f64());
timer = Instant::now();
// Compute...
while probe.less_than(knows_input.time()) {
worker.step();
}
eprintln!("CALCULATED;{:.10}", timer.elapsed().as_secs_f64());
// print results
print_trace(&mut trace, next_time);
if change_path.eq(&"-".to_string()) {
eprintln!("No change set was given.");
return;
}
println!(" ---------------------------------------------------------------------- ");
// introduce change set
next_time += 1;
timer = Instant::now();
// parse change set file
for mut change_row in load_data(change_path.as_str(), index, peers) {
let create = match change_row.remove(0).as_str() {
"create" => true,
"remove" => false,
x => { panic!("Unknown change. It should be 'remove' or 'create': {}", x); }
};
let input = change_row.remove(0);
let mut row_iter = change_row.into_iter();
let created = parse_datetime(row_iter.next().unwrap());
let id1 = row_iter.next().unwrap().parse::<Id>().unwrap();
let id2 = row_iter.next().unwrap().parse::<Id>().unwrap();
let d = DynamicConnection::new(created, id1, id2);
match input.as_str() {
"person-knows-person" => {
if create {
knows_input.insert(d);
} else {
knows_input.remove(d);
}
},
"person-islocatedin-place" => {
if create {
located_in_input.insert(d);
} else {
located_in_input.remove(d);
}
}
x => { panic!("Unknown change type: {}", x); }
}
}
// advance and flush all inputs...
has_creator_input.advance_to(next_time);
has_creator_input.flush();
reply_of_input.advance_to(next_time);
reply_of_input.flush();
knows_input.advance_to(next_time);
knows_input.flush();
located_in_input.advance_to(next_time);
located_in_input.flush();
// Compute change set...
while probe.less_than(&next_time) {
worker.step();
}
eprintln!("CHANGE_CALCULATED;{:.10}", timer.elapsed().as_secs_f64());
// print changed results
print_trace(&mut trace, next_time);
}).expect("Timely computation failed");
} | run | identifier_name |
q19.rs | /*
LDBC SNB BI query 19. Interaction path between cities
https://ldbc.github.io/ldbc_snb_docs_snapshot/bi-read-19.pdf
*/
use differential_dataflow::collection::AsCollection;
use differential_dataflow::input::Input;
use differential_dataflow::operators::{Join, Count, Iterate, Reduce};
use timely::dataflow::ProbeHandle;
use crate::lib::loader::*;
use crate::lib::types::*;
use crate::lib::helpers::{input_insert_vec, limit, print_trace};
use differential_dataflow::operators::arrange::ArrangeBySelf;
use timely::dataflow::operators::{Probe, Map, Delay};
use std::time::Instant;
use std::cmp::{min, max};
pub fn run(path: String, change_path: String, params: &Vec<String>) | {
// unpack parameters
let param_city1 = params[0].parse::<u64>().unwrap();
let param_city2 = params[1].parse::<u64>().unwrap();
timely::execute_from_args(std::env::args(), move |worker| {
let mut timer = worker.timer();
let index = worker.index();
let peers = worker.peers();
let mut probe = ProbeHandle::new();
// create dataflow
let (
mut trace,
mut located_in_input,
mut knows_input,
mut has_creator_input,
mut reply_of_input,
) =
worker.dataflow::<usize,_,_>(|scope| {
let (located_in_input, locatedin) = scope.new_collection::<DynamicConnection, _>();
let (knows_input, knows) = scope.new_collection::<DynamicConnection, _>();
// creators for comments AND posts
let (has_creator_input, has_creator) = scope.new_collection::<DynamicConnection, _>();
// replyOf for comments AND posts
let (reply_of_input, reply_of) = scope.new_collection::<DynamicConnection, _>();
// people of city1
let people1 = locatedin
.filter(move |conn| param_city1.eq(conn.b()))
.map(|conn| conn.a().clone())
;
// people of city2
let people2 = locatedin
.filter(move |conn| param_city2.eq(conn.b()))
.map(|conn| conn.a().clone())
;
// bidirectional knows relation
let bi_knows = knows
.map(|conn| (conn.b().clone(), conn.a().clone()))
.concat(
&knows.map(|conn| (conn.a().clone(), conn.b().clone()))
)
;
// calculate weights starting from personB
let weights = bi_knows
.join_map( // join messages of personB
&has_creator.map(|conn| (conn.b().clone(), conn.a().clone())),
|pb, pa, m| (m.clone(), (pb.clone(), pa.clone())),
)
.join_map( // join a reply of the message
&reply_of.map(|conn| (conn.b().clone(), conn.a().clone())),
|_mp, (pb, pa), mc| (mc.clone(), (pb.clone(), pa.clone()))
)
.join_map( // join creator of last message (personA)
&has_creator.map(|conn| (conn.a().clone(), conn.b().clone())),
|_m, (pb, pa), pm| (pb.clone(), pa.clone(), pm.clone())
)
.filter( // check if the last message's creator is the other person we started with
|(_pb, pa, pm)| pa.eq(pm)
)
// drop duplicated message creator, and make sure the lower id is the first in the tuple
// this is needed for the aggregation
.map(
|(pb, pa, _)| (min(pa, pb), max(pa, pb))
)
// aggregate (p1, p2) pairs,
// which will result in the number of interactions between these people
.count()
// map result for next steps
.map(
// fixme: hack solution as floats cannot be used directly (not implementing Ord)
|((p1, p2), c)| (p1, (p2, (((1.0/c as f32)*10000000000.0) as isize)))
)
; // -> (src, (dst, weight))
// create bidirectional weights
let weights = weights
.concat(&weights.map(|(p1, (p2, c))| (p2, (p1, c))))
;
// root nodes are people from city one.
// ((p, p) 0) represents an initial path from p to p with 0 weight.
let nodes = people1
.map(|p| ((p, p), 0))
;
// calculate shortest paths from people in city 1
// based on https://github.com/frankmcsherry/blog/blob/master/posts/2019-05-20.md
let shortest_paths = nodes
.iterate(|dists| { // calculate shortest path to every other node
let edges = weights.enter(&dists.scope());
let nodes = nodes.enter(&dists.scope());
dists // -> ((src, dst), distance)
.map(|((root, dst), dist)| (dst.clone(), (root.clone(), dist.clone())))
// join next step and calculate weights
.join_map(
&edges,
|_src, (root, distance), (dst, weight)| ((root.clone(), dst.clone()), distance + weight)
) // -> ((root, dst), distance),
// add original nodes
.concat(&nodes) // -> ((root, dst), distance)
// Timely magic, to speed up updates with some time manipulation (see blogpost above)
.inner
.map_in_place(|((_d, w), t, _r)|
t.inner = std::cmp::max(t.inner, *w as u64)
)
.delay(|(_, t, _), _| t.clone())
.as_collection()
// finally keep only the shortest path between two nodes (grouped by (src, dst))
.reduce(|_key, input, output| output.push((*input[0].0, 1)))
}) // -> ((src, dst), distance)
// map for semijoin
.map(|((src, dst), distance)| (dst, (src, distance)))
// filter out result which are not between people from city1 and city2
.semijoin(&people2)
;
// add sorting options and final results
let result = shortest_paths
.map(|(dst, (src, distance))| (
(std::isize::MAX - distance, src, dst), // sort: -distance, +src, +dst
vec![src.to_string(), dst.to_string(), (distance as f64/10000000000.0).to_string()]
))
;
let arrangement = limit(&result, 20)
.arrange_by_self();
arrangement.stream.probe_with(&mut probe);
return (
arrangement.trace,
located_in_input, knows_input, has_creator_input, reply_of_input
);
});
// add inputs
let mut next_time: usize = 1;
input_insert_vec(
load_dynamic_connection("dynamic/person_isLocatedIn_place_0_0.csv", path.as_str(), index, peers),
&mut located_in_input,
next_time
);
input_insert_vec(
load_dynamic_connection("dynamic/person_knows_person_0_0.csv", path.as_str(), index, peers),
&mut knows_input,
next_time
);
// insert hasCreator relations
input_insert_vec(
load_dynamic_connection("dynamic/post_hasCreator_person_0_0.csv", path.as_str(), index, peers),
&mut has_creator_input,
0 // do not advance just yet
);
input_insert_vec(
load_dynamic_connection("dynamic/comment_hasCreator_person_0_0.csv", path.as_str(), index, peers),
&mut has_creator_input,
next_time
);
// insert replyOf relations
input_insert_vec(
load_dynamic_connection("dynamic/comment_replyOf_post_0_0.csv", path.as_str(), index, peers),
&mut reply_of_input,
0 // do not advance just yet
);
input_insert_vec(
load_dynamic_connection("dynamic/comment_replyOf_comment_0_0.csv", path.as_str(), index, peers),
&mut reply_of_input,
next_time
);
eprintln!("LOADED;{:}", timer.elapsed().as_secs_f64());
timer = Instant::now();
// Compute...
while probe.less_than(knows_input.time()) {
worker.step();
}
eprintln!("CALCULATED;{:.10}", timer.elapsed().as_secs_f64());
// print results
print_trace(&mut trace, next_time);
if change_path.eq(&"-".to_string()) {
eprintln!("No change set was given.");
return;
}
println!(" ---------------------------------------------------------------------- ");
// introduce change set
next_time += 1;
timer = Instant::now();
// parse change set file
for mut change_row in load_data(change_path.as_str(), index, peers) {
let create = match change_row.remove(0).as_str() {
"create" => true,
"remove" => false,
x => { panic!("Unknown change. It should be 'remove' or 'create': {}", x); }
};
let input = change_row.remove(0);
let mut row_iter = change_row.into_iter();
let created = parse_datetime(row_iter.next().unwrap());
let id1 = row_iter.next().unwrap().parse::<Id>().unwrap();
let id2 = row_iter.next().unwrap().parse::<Id>().unwrap();
let d = DynamicConnection::new(created, id1, id2);
match input.as_str() {
"person-knows-person" => {
if create {
knows_input.insert(d);
} else {
knows_input.remove(d);
}
},
"person-islocatedin-place" => {
if create {
located_in_input.insert(d);
} else {
located_in_input.remove(d);
}
}
x => { panic!("Unknown change type: {}", x); }
}
}
// advance and flush all inputs...
has_creator_input.advance_to(next_time);
has_creator_input.flush();
reply_of_input.advance_to(next_time);
reply_of_input.flush();
knows_input.advance_to(next_time);
knows_input.flush();
located_in_input.advance_to(next_time);
located_in_input.flush();
// Compute change set...
while probe.less_than(&next_time) {
worker.step();
}
eprintln!("CHANGE_CALCULATED;{:.10}", timer.elapsed().as_secs_f64());
// print changed results
print_trace(&mut trace, next_time);
}).expect("Timely computation failed");
} | identifier_body | |
q19.rs | /*
LDBC SNB BI query 19. Interaction path between cities
https://ldbc.github.io/ldbc_snb_docs_snapshot/bi-read-19.pdf
*/
use differential_dataflow::collection::AsCollection;
use differential_dataflow::input::Input;
use differential_dataflow::operators::{Join, Count, Iterate, Reduce};
use timely::dataflow::ProbeHandle;
use crate::lib::loader::*;
use crate::lib::types::*;
use crate::lib::helpers::{input_insert_vec, limit, print_trace};
use differential_dataflow::operators::arrange::ArrangeBySelf;
use timely::dataflow::operators::{Probe, Map, Delay};
use std::time::Instant;
use std::cmp::{min, max};
pub fn run(path: String, change_path: String, params: &Vec<String>) {
// unpack parameters
let param_city1 = params[0].parse::<u64>().unwrap();
let param_city2 = params[1].parse::<u64>().unwrap();
timely::execute_from_args(std::env::args(), move |worker| {
let mut timer = worker.timer();
let index = worker.index();
let peers = worker.peers();
let mut probe = ProbeHandle::new();
// create dataflow
let (
mut trace,
mut located_in_input,
mut knows_input,
mut has_creator_input,
mut reply_of_input,
) =
worker.dataflow::<usize,_,_>(|scope| {
let (located_in_input, locatedin) = scope.new_collection::<DynamicConnection, _>();
let (knows_input, knows) = scope.new_collection::<DynamicConnection, _>();
// creators for comments AND posts
let (has_creator_input, has_creator) = scope.new_collection::<DynamicConnection, _>();
// replyOf for comments AND posts
let (reply_of_input, reply_of) = scope.new_collection::<DynamicConnection, _>();
// people of city1
let people1 = locatedin
.filter(move |conn| param_city1.eq(conn.b()))
.map(|conn| conn.a().clone())
;
// people of city2
let people2 = locatedin
.filter(move |conn| param_city2.eq(conn.b()))
.map(|conn| conn.a().clone())
;
// bidirectional knows relation
let bi_knows = knows
.map(|conn| (conn.b().clone(), conn.a().clone()))
.concat( | &knows.map(|conn| (conn.a().clone(), conn.b().clone()))
)
;
// calculate weights starting from personB
let weights = bi_knows
.join_map( // join messages of personB
&has_creator.map(|conn| (conn.b().clone(), conn.a().clone())),
|pb, pa, m| (m.clone(), (pb.clone(), pa.clone())),
)
.join_map( // join a reply of the message
&reply_of.map(|conn| (conn.b().clone(), conn.a().clone())),
|_mp, (pb, pa), mc| (mc.clone(), (pb.clone(), pa.clone()))
)
.join_map( // join creator of last message (personA)
&has_creator.map(|conn| (conn.a().clone(), conn.b().clone())),
|_m, (pb, pa), pm| (pb.clone(), pa.clone(), pm.clone())
)
.filter( // check if the last message's creator is the other person we started with
|(_pb, pa, pm)| pa.eq(pm)
)
// drop duplicated message creator, and make sure the lower id is the first in the tuple
// this is needed for the aggregation
.map(
|(pb, pa, _)| (min(pa, pb), max(pa, pb))
)
// aggregate (p1, p2) pairs,
// which will result in the number of interactions between these people
.count()
// map result for next steps
.map(
// fixme: hack solution as floats cannot be used directly (not implementing Ord)
|((p1, p2), c)| (p1, (p2, (((1.0/c as f32)*10000000000.0) as isize)))
)
; // -> (src, (dst, weight))
// create bidirectional weights
let weights = weights
.concat(&weights.map(|(p1, (p2, c))| (p2, (p1, c))))
;
// root nodes are people from city one.
// ((p, p) 0) represents an initial path from p to p with 0 weight.
let nodes = people1
.map(|p| ((p, p), 0))
;
// calculate shortest paths from people in city 1
// based on https://github.com/frankmcsherry/blog/blob/master/posts/2019-05-20.md
let shortest_paths = nodes
.iterate(|dists| { // calculate shortest path to every other node
let edges = weights.enter(&dists.scope());
let nodes = nodes.enter(&dists.scope());
dists // -> ((src, dst), distance)
.map(|((root, dst), dist)| (dst.clone(), (root.clone(), dist.clone())))
// join next step and calculate weights
.join_map(
&edges,
|_src, (root, distance), (dst, weight)| ((root.clone(), dst.clone()), distance + weight)
) // -> ((root, dst), distance),
// add original nodes
.concat(&nodes) // -> ((root, dst), distance)
// Timely magic, to speed up updates with some time manipulation (see blogpost above)
.inner
.map_in_place(|((_d, w), t, _r)|
t.inner = std::cmp::max(t.inner, *w as u64)
)
.delay(|(_, t, _), _| t.clone())
.as_collection()
// finally keep only the shortest path between two nodes (grouped by (src, dst))
.reduce(|_key, input, output| output.push((*input[0].0, 1)))
}) // -> ((src, dst), distance)
// map for semijoin
.map(|((src, dst), distance)| (dst, (src, distance)))
// filter out result which are not between people from city1 and city2
.semijoin(&people2)
;
// add sorting options and final results
let result = shortest_paths
.map(|(dst, (src, distance))| (
(std::isize::MAX - distance, src, dst), // sort: -distance, +src, +dst
vec![src.to_string(), dst.to_string(), (distance as f64/10000000000.0).to_string()]
))
;
let arrangement = limit(&result, 20)
.arrange_by_self();
arrangement.stream.probe_with(&mut probe);
return (
arrangement.trace,
located_in_input, knows_input, has_creator_input, reply_of_input
);
});
// add inputs
let mut next_time: usize = 1;
input_insert_vec(
load_dynamic_connection("dynamic/person_isLocatedIn_place_0_0.csv", path.as_str(), index, peers),
&mut located_in_input,
next_time
);
input_insert_vec(
load_dynamic_connection("dynamic/person_knows_person_0_0.csv", path.as_str(), index, peers),
&mut knows_input,
next_time
);
// insert hasCreator relations
input_insert_vec(
load_dynamic_connection("dynamic/post_hasCreator_person_0_0.csv", path.as_str(), index, peers),
&mut has_creator_input,
0 // do not advance just yet
);
input_insert_vec(
load_dynamic_connection("dynamic/comment_hasCreator_person_0_0.csv", path.as_str(), index, peers),
&mut has_creator_input,
next_time
);
// insert replyOf relations
input_insert_vec(
load_dynamic_connection("dynamic/comment_replyOf_post_0_0.csv", path.as_str(), index, peers),
&mut reply_of_input,
0 // do not advance just yet
);
input_insert_vec(
load_dynamic_connection("dynamic/comment_replyOf_comment_0_0.csv", path.as_str(), index, peers),
&mut reply_of_input,
next_time
);
eprintln!("LOADED;{:}", timer.elapsed().as_secs_f64());
timer = Instant::now();
// Compute...
while probe.less_than(knows_input.time()) {
worker.step();
}
eprintln!("CALCULATED;{:.10}", timer.elapsed().as_secs_f64());
// print results
print_trace(&mut trace, next_time);
if change_path.eq(&"-".to_string()) {
eprintln!("No change set was given.");
return;
}
println!(" ---------------------------------------------------------------------- ");
// introduce change set
next_time += 1;
timer = Instant::now();
// parse change set file
for mut change_row in load_data(change_path.as_str(), index, peers) {
let create = match change_row.remove(0).as_str() {
"create" => true,
"remove" => false,
x => { panic!("Unknown change. It should be 'remove' or 'create': {}", x); }
};
let input = change_row.remove(0);
let mut row_iter = change_row.into_iter();
let created = parse_datetime(row_iter.next().unwrap());
let id1 = row_iter.next().unwrap().parse::<Id>().unwrap();
let id2 = row_iter.next().unwrap().parse::<Id>().unwrap();
let d = DynamicConnection::new(created, id1, id2);
match input.as_str() {
"person-knows-person" => {
if create {
knows_input.insert(d);
} else {
knows_input.remove(d);
}
},
"person-islocatedin-place" => {
if create {
located_in_input.insert(d);
} else {
located_in_input.remove(d);
}
}
x => { panic!("Unknown change type: {}", x); }
}
}
// advance and flush all inputs...
has_creator_input.advance_to(next_time);
has_creator_input.flush();
reply_of_input.advance_to(next_time);
reply_of_input.flush();
knows_input.advance_to(next_time);
knows_input.flush();
located_in_input.advance_to(next_time);
located_in_input.flush();
// Compute change set...
while probe.less_than(&next_time) {
worker.step();
}
eprintln!("CHANGE_CALCULATED;{:.10}", timer.elapsed().as_secs_f64());
// print changed results
print_trace(&mut trace, next_time);
}).expect("Timely computation failed");
} | random_line_split | |
train_full_random.py | import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import sys
import time
import matplotlib.pyplot as plt
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
import neuron_models as nm
import lab_manager as lm
import experiments as ex
from sklearn.utils import shuffle as rshuffle
import time
# import skimage
from brian2 import *
plt.style.use('ggplot')
#compile into C++ standalone
#works, but asking it to compile too much will freeze computer -- uses all cores to compile
# Note: The documentations states this only works for a fixed number of run statements
# and not loops. This doesn't appear to be the case, although maybe it works with
# only a few number of loops? I've gotten 4 runs to work.
# tstart variable also doesn't work because it uses a single value for it.
comp = False
# C++ standalone code to run quicker
if comp:
set_device('cpp_standalone', debug=True, build_on_run=False)
# Parallelize using OpenMP. Might not work....
# also only works with C++ standalone/comp = True
# prefs.devices.cpp_standalone.openmp_threads = 4
start_scope()
#path to the data folder
MNIST_data_path = 'data_set/'
# MNIST_data_path = '/home/jplatt/Mothnet/MNIST_data/'
#path to folder to save data
prefix = 'total_data/'
#doesn't work if timestep > 0.05ms
defaultclock.dt = .05*ms
#number of images to run
#num_examples = int(raw_input('Number of images to train: ')) #len(training)
#plot some diagnostics at the end
plot = True
# A way to use a synapse to store LFP, useful when you don't want to
# trace the voltages of all AL neurons.
lfp_syn = True
#-----------------------------------------------------------
#Tunable Parameters: the parameters to change in the network
#0-10
numbers_to_inc = frozenset([0, 1])
#size of network
N_AL = 1000 #must be >= 784
N_KC = 10000
N_BL = 3 #should be the same as the number of classes
#learning rate
# 0.1
eta = 0.01 #fraction of total conductance per spike
"""
Amount of inhibition between AL Neurons.
Enforces WLC dynamics and needs to be scaled
with the size of the network
"""
#0.1
in_AL = 0.1
'''Excititation between AL -> KCs'''
#0.2
ex_ALKC = .25
#excitation kenyon cells to beta lobes
#1.5
ex_KCBL = 0.5
#Lateral inhibition beta lobe
#4
in_BLBL = 1.0
#excitation KC->GGN
#0.01
ex_KCGGN = 0.001
#inhibition GGN->KC
#0.2
in_GGNKC = 0.3
| #probability inter-AL connections
#0.5
PAL = 0.5
#AL->KCs
#0.01
PALKC = 0.02
#KCs->BLs
#0.3
PKCBL = 0.3
taupre = 15*ms #width of STDP
taupost = taupre
input_intensity = 0.3 #scale input
reset_time = 30 #ms
# needed for gradual current
tr = 20*ms # rise time
tf = 20*ms # fall time
width = 150*ms # duration of constat input
max_inp = input_intensity*nA # max current, can be just unit if amplitude is specified with active_
# total run time, too lazy to change name
time_per_image = (width + tr + tf)/ms
#initialize tstart - this is because the reset is called first
tstart = reset_time*ms
#save the parameters to load in for testing
tunable_params = {'N_AL': N_AL,
'N_KC': N_KC,
'N_BL': N_BL,
'eta': eta,
'in_AL': in_AL,
'ex_ALKC': ex_ALKC,
'ex_KCBL': ex_KCBL,
'in_BLBL': in_BLBL,
'ex_KCGGN': ex_KCGGN,
'in_GGNKC': in_GGNKC,
'PAL': PAL,
'PALKC': PALKC,
'PKCBL': PKCBL,
'input_intensity': input_intensity,
'time_per_image': time_per_image,
'reset_time': reset_time,
'width': width,
'tr': tr,
'tf': tf,
'taupre': taupre,
'taupost': taupost}
pickle.dump( tunable_params, open(prefix+"connections/tunable_params.p", "wb" ) )
#--------------------------------------------------------
#Antennal Lobe parameters
al_para = dict(N = N_AL,
g_syn = in_AL,
neuron_class = nm.n_FitzHugh_Nagumo,
syn_class = nm.s_FitzHughNagumo_inh,
PAL = PAL,
mon = [],
lfp_syn = lfp_syn
)
#Kenyon cell parameters
kc_para = dict( N = N_KC,
neuron_class = nm.n_lif,
mon = []
)
#GGN parameters
ggn_para = dict(N = 1,
neuron_class = nm.n_li,
mon = ['v'])
#Beta lobe neuron parameters
bl_para = dict(N = N_BL,
g_syn = in_BLBL,
neuron_class = nm.n_lif,
syn_class = nm.s_lif_in,
mon = ['v'])
#connect all the layers together
conn_para = dict(synALKC_class = nm.s_lif_ex,
ex_ALKC = ex_ALKC,
synKCGGN_class = nm.s_lif_ex,
ex_KCGGN = ex_ALKC,
synGGNKC_class = nm.s_gapjunc_in,
in_GGNKC = in_GGNKC,
synKCBL_class = nm.s_lifSTDP_ex,
ex_KCBL = ex_KCBL,
etaSTDP = eta,
taupreSTDP = taupre,
taupostSTDP = taupost,
PALKC = PALKC,
PKCBL = PKCBL)
t_sim = time.time()
#create the network object
net = Network()
# monlfp if using synapse to monitor
G_AL, S_AL, trace_AL, spikes_AL, G_LFP, S_LFP, trace_LFP = lm.get_AL(al_para, net)
G_KC, trace_KC, spikes_KC = lm.get_KCs(kc_para, net)
G_GGN, trace_GGN = lm.get_GGN(ggn_para, net)
G_BL, S_BL, trace_BL, spikes_BL = lm.get_BL(bl_para, net)
states = [G_AL, G_KC, G_GGN, G_BL]
S_ALKC, S_KCGGN, S_GGNKC, S_KCBL = lm.connect_network(conn_para, states, net)
#----------------------------------------------------------
'''
There are a few components to using a time dependent current.
(1) The string I. This is just a string which describes the function and units
of the time dependent current.
(2) The run_regularly function. This exists in the namespace of the NeuronGroup
you're adding it to. It can execute code strings at regular intervals. This
object needs to be assigned a name and added to the network separately if the
neuron group has previous been added.
(3) Additional parameters: If there are any additional parameters in the string
(rise time, fall time, etc), they need to be definied in the execution script.
(4) Random input - This can be combined with get_rand_I to select the neurons
that receive this time dependent current. If the input is scaled by max_inp,
the last argument in get_rand_I should be 1. Do not double scale input!
'''
# tr, tf, width, max_inp need to be defined
I = ex.get_gradual_current()
# dt here can be smaller than default clock. This is dt of the data provided.
G_run = G_AL.run_regularly('I_inj = {}'.format(I),dt = 0.05*ms)
net.add(G_run)
# troubleshooting function
#@network_operation(dt=5*ms)
#def f2(t):
# print(G_AL.I_inj[0])
#net.add(f2)
# random input
num_classes = 3
samples_per_class = 1
n_samples = int(samples_per_class*num_classes)
p_inj = 0.3
X = np.zeros((num_classes,N_AL))
for j in range(num_classes):
X[j,:] = ex.get_rand_I(N_AL,p_inj,1)
# troubleshooting array
#test_array = np.zeros(N_AL)
#test_array[0] = 0.5
#test_array[999] = 1.0
#run the network
scale_noise = 0.05
# Run random input with gradual current
for i in range(n_samples):
# turns off all neurons
G_AL.scale = 0.0
net.run(reset_time*ms)
print('Ran reset')
G_AL.scale = X[i%num_classes,:] + scale_noise*input_intensity*np.random.uniform(low=0,high=1,size=len(X[i%num_classes,:]))
print(G_AL.scale)
#G_AL.active_ = test_array
net.run(time_per_image*ms, report='text')
tstart = tstart + time_per_image*ms + reset_time*ms
print('Simulation time: {0} seconds'.format(time.time()-t_sim))
# run if built in C++ standalone -- this takes a LONG time
if comp:
print("Compiling...")
device.build(directory=prefix+'run_dir', compile=True, run=True, debug=True)
# store function not defined for C++ standalone
else:
net.store(name = 'trained', filename = prefix+'connections/trained')
#save some of the data
np.savez(prefix+'connections/S_AL.npz', i = S_AL.i, j = S_AL.j)
np.savez(prefix+'connections/S_KCBL.npz', i = S_KCBL.i, j = S_KCBL.j)
np.savez(prefix+'connections/S_ALKC.npz', i = S_ALKC.i, j = S_ALKC.j)
np.save(prefix+'output/spikesAL_t' ,spikes_AL.t/ms)
np.save(prefix+'output/spikesAL_i',spikes_AL.i)
# np.save(prefix+'traceAL_V', trace_AL.V)
np.save(prefix+'output/spikesKC_t' ,spikes_KC.t/ms)
np.save(prefix+'output/spikesKC_i',spikes_KC.i)
# np.save(prefix+'traceKC_V', trace_KC.v)
np.save(prefix+'output/spikesBL_t' ,spikes_BL.t/ms)
np.save(prefix+'output/spikesBL_i',spikes_BL.i)
np.save(prefix+'output/traceBL_V', trace_BL.v)
np.save(prefix+'output/trace_t', trace_BL.t)
np.save(prefix+'output/traceGGN_V', trace_GGN.v)
np.save(prefix+'output/weights', S_KCBL.w_syn)
np.save(prefix+'input',X)
#plot diagnostics
if plot:
fig1 = plt.figure()
plt.plot(spikes_AL.t/ms, spikes_AL.i, '.')
plt.title('Spikes AL')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron Number')
fig1.savefig(prefix+'images/spikes_AL_train.png', bbox_inches = 'tight')
fig2 = plt.figure()
plt.plot(spikes_KC.t/ms, spikes_KC.i, '.')
plt.title('Spikes KC')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron Number')
fig2.savefig(prefix+'images/spikes_KC_train.png', bbox_inches = 'tight')
fig3 = plt.figure()
plt.plot(spikes_BL.t/ms, spikes_BL.i, '.')
plt.title('Spikes BL')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron Number')
plt.ylim(-0.5, N_BL-0.5)
fig3.savefig(prefix+'images/spikes_BL_train.png', bbox_inches = 'tight')
fig4 = plt.figure()
hist(S_KCBL.w_syn / ex_KCBL, 20)
xlabel('Weight / gmax')
plt.title('Weights BL')
fig4.savefig(prefix+'images/weights_BL_train.png', bbox_inches = 'tight')
fig5 = plt.figure()
plt.plot(trace_GGN.t/ms,trace_GGN.v[0]/mV)
plt.title('Trace GGN')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
fig5.savefig(prefix+'images/trace_GGN_train.png', bbox_inches = 'tight')
fig7 = plt.figure()
plt.subplot(2,1,1)
plt.plot(trace_BL.t/ms, trace_BL.v[0]/mV)
plt.subplot(2,1,2)
plt.plot(trace_BL.t/ms, trace_BL.v[1], 'b')
plt.suptitle('Trace BL train')
fig7.savefig(prefix+'images/trace_BL_train.png', bbox_inches = 'tight')
#fig8 = plt.figure()
#plt.plot(trace_AL.t/ms, mean(trace_AL.I_inj, axis=0)/nA)
#plt.title('Injected current')
#plt.xlabel('Time (ms)')
#plt.ylabel('Avg Current (nA)')
if lfp_syn:
fig9 = plt.figure()
plt.plot(trace_LFP.t/ms, trace_LFP.V[0]/mV)
plt.title('LFP AL')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
fig9.savefig(prefix+'images/lfp_AL_train.png', bbox_inches = 'tight')
else:
fig6 = plt.figure()
plt.plot(trace_AL.t/ms, mean(trace_AL.V, axis = 0)/mV)
plt.title('LFP AL')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
fig6.savefig(prefix+'images/lfp_AL_train.png', bbox_inches = 'tight')
#plt.show() | random_line_split | |
train_full_random.py | import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import sys
import time
import matplotlib.pyplot as plt
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
import neuron_models as nm
import lab_manager as lm
import experiments as ex
from sklearn.utils import shuffle as rshuffle
import time
# import skimage
from brian2 import *
plt.style.use('ggplot')
#compile into C++ standalone
#works, but asking it to compile too much will freeze computer -- uses all cores to compile
# Note: The documentations states this only works for a fixed number of run statements
# and not loops. This doesn't appear to be the case, although maybe it works with
# only a few number of loops? I've gotten 4 runs to work.
# tstart variable also doesn't work because it uses a single value for it.
comp = False
# C++ standalone code to run quicker
if comp:
|
# Parallelize using OpenMP. Might not work....
# also only works with C++ standalone/comp = True
# prefs.devices.cpp_standalone.openmp_threads = 4
start_scope()
#path to the data folder
MNIST_data_path = 'data_set/'
# MNIST_data_path = '/home/jplatt/Mothnet/MNIST_data/'
#path to folder to save data
prefix = 'total_data/'
#doesn't work if timestep > 0.05ms
defaultclock.dt = .05*ms
#number of images to run
#num_examples = int(raw_input('Number of images to train: ')) #len(training)
#plot some diagnostics at the end
plot = True
# A way to use a synapse to store LFP, useful when you don't want to
# trace the voltages of all AL neurons.
lfp_syn = True
#-----------------------------------------------------------
#Tunable Parameters: the parameters to change in the network
#0-10
numbers_to_inc = frozenset([0, 1])
#size of network
N_AL = 1000 #must be >= 784
N_KC = 10000
N_BL = 3 #should be the same as the number of classes
#learning rate
# 0.1
eta = 0.01 #fraction of total conductance per spike
"""
Amount of inhibition between AL Neurons.
Enforces WLC dynamics and needs to be scaled
with the size of the network
"""
#0.1
in_AL = 0.1
'''Excititation between AL -> KCs'''
#0.2
ex_ALKC = .25
#excitation kenyon cells to beta lobes
#1.5
ex_KCBL = 0.5
#Lateral inhibition beta lobe
#4
in_BLBL = 1.0
#excitation KC->GGN
#0.01
ex_KCGGN = 0.001
#inhibition GGN->KC
#0.2
in_GGNKC = 0.3
#probability inter-AL connections
#0.5
PAL = 0.5
#AL->KCs
#0.01
PALKC = 0.02
#KCs->BLs
#0.3
PKCBL = 0.3
taupre = 15*ms #width of STDP
taupost = taupre
input_intensity = 0.3 #scale input
reset_time = 30 #ms
# needed for gradual current
tr = 20*ms # rise time
tf = 20*ms # fall time
width = 150*ms # duration of constat input
max_inp = input_intensity*nA # max current, can be just unit if amplitude is specified with active_
# total run time, too lazy to change name
time_per_image = (width + tr + tf)/ms
#initialize tstart - this is because the reset is called first
tstart = reset_time*ms
#save the parameters to load in for testing
tunable_params = {'N_AL': N_AL,
'N_KC': N_KC,
'N_BL': N_BL,
'eta': eta,
'in_AL': in_AL,
'ex_ALKC': ex_ALKC,
'ex_KCBL': ex_KCBL,
'in_BLBL': in_BLBL,
'ex_KCGGN': ex_KCGGN,
'in_GGNKC': in_GGNKC,
'PAL': PAL,
'PALKC': PALKC,
'PKCBL': PKCBL,
'input_intensity': input_intensity,
'time_per_image': time_per_image,
'reset_time': reset_time,
'width': width,
'tr': tr,
'tf': tf,
'taupre': taupre,
'taupost': taupost}
pickle.dump( tunable_params, open(prefix+"connections/tunable_params.p", "wb" ) )
#--------------------------------------------------------
#Antennal Lobe parameters
al_para = dict(N = N_AL,
g_syn = in_AL,
neuron_class = nm.n_FitzHugh_Nagumo,
syn_class = nm.s_FitzHughNagumo_inh,
PAL = PAL,
mon = [],
lfp_syn = lfp_syn
)
#Kenyon cell parameters
kc_para = dict( N = N_KC,
neuron_class = nm.n_lif,
mon = []
)
#GGN parameters
ggn_para = dict(N = 1,
neuron_class = nm.n_li,
mon = ['v'])
#Beta lobe neuron parameters
bl_para = dict(N = N_BL,
g_syn = in_BLBL,
neuron_class = nm.n_lif,
syn_class = nm.s_lif_in,
mon = ['v'])
#connect all the layers together
conn_para = dict(synALKC_class = nm.s_lif_ex,
ex_ALKC = ex_ALKC,
synKCGGN_class = nm.s_lif_ex,
ex_KCGGN = ex_ALKC,
synGGNKC_class = nm.s_gapjunc_in,
in_GGNKC = in_GGNKC,
synKCBL_class = nm.s_lifSTDP_ex,
ex_KCBL = ex_KCBL,
etaSTDP = eta,
taupreSTDP = taupre,
taupostSTDP = taupost,
PALKC = PALKC,
PKCBL = PKCBL)
t_sim = time.time()
#create the network object
net = Network()
# monlfp if using synapse to monitor
G_AL, S_AL, trace_AL, spikes_AL, G_LFP, S_LFP, trace_LFP = lm.get_AL(al_para, net)
G_KC, trace_KC, spikes_KC = lm.get_KCs(kc_para, net)
G_GGN, trace_GGN = lm.get_GGN(ggn_para, net)
G_BL, S_BL, trace_BL, spikes_BL = lm.get_BL(bl_para, net)
states = [G_AL, G_KC, G_GGN, G_BL]
S_ALKC, S_KCGGN, S_GGNKC, S_KCBL = lm.connect_network(conn_para, states, net)
#----------------------------------------------------------
'''
There are a few components to using a time dependent current.
(1) The string I. This is just a string which describes the function and units
of the time dependent current.
(2) The run_regularly function. This exists in the namespace of the NeuronGroup
you're adding it to. It can execute code strings at regular intervals. This
object needs to be assigned a name and added to the network separately if the
neuron group has previous been added.
(3) Additional parameters: If there are any additional parameters in the string
(rise time, fall time, etc), they need to be definied in the execution script.
(4) Random input - This can be combined with get_rand_I to select the neurons
that receive this time dependent current. If the input is scaled by max_inp,
the last argument in get_rand_I should be 1. Do not double scale input!
'''
# tr, tf, width, max_inp need to be defined
I = ex.get_gradual_current()
# dt here can be smaller than default clock. This is dt of the data provided.
G_run = G_AL.run_regularly('I_inj = {}'.format(I),dt = 0.05*ms)
net.add(G_run)
# troubleshooting function
#@network_operation(dt=5*ms)
#def f2(t):
# print(G_AL.I_inj[0])
#net.add(f2)
# random input
num_classes = 3
samples_per_class = 1
n_samples = int(samples_per_class*num_classes)
p_inj = 0.3
X = np.zeros((num_classes,N_AL))
for j in range(num_classes):
X[j,:] = ex.get_rand_I(N_AL,p_inj,1)
# troubleshooting array
#test_array = np.zeros(N_AL)
#test_array[0] = 0.5
#test_array[999] = 1.0
#run the network
scale_noise = 0.05
# Run random input with gradual current
for i in range(n_samples):
# turns off all neurons
G_AL.scale = 0.0
net.run(reset_time*ms)
print('Ran reset')
G_AL.scale = X[i%num_classes,:] + scale_noise*input_intensity*np.random.uniform(low=0,high=1,size=len(X[i%num_classes,:]))
print(G_AL.scale)
#G_AL.active_ = test_array
net.run(time_per_image*ms, report='text')
tstart = tstart + time_per_image*ms + reset_time*ms
print('Simulation time: {0} seconds'.format(time.time()-t_sim))
# run if built in C++ standalone -- this takes a LONG time
if comp:
print("Compiling...")
device.build(directory=prefix+'run_dir', compile=True, run=True, debug=True)
# store function not defined for C++ standalone
else:
net.store(name = 'trained', filename = prefix+'connections/trained')
#save some of the data
np.savez(prefix+'connections/S_AL.npz', i = S_AL.i, j = S_AL.j)
np.savez(prefix+'connections/S_KCBL.npz', i = S_KCBL.i, j = S_KCBL.j)
np.savez(prefix+'connections/S_ALKC.npz', i = S_ALKC.i, j = S_ALKC.j)
np.save(prefix+'output/spikesAL_t' ,spikes_AL.t/ms)
np.save(prefix+'output/spikesAL_i',spikes_AL.i)
# np.save(prefix+'traceAL_V', trace_AL.V)
np.save(prefix+'output/spikesKC_t' ,spikes_KC.t/ms)
np.save(prefix+'output/spikesKC_i',spikes_KC.i)
# np.save(prefix+'traceKC_V', trace_KC.v)
np.save(prefix+'output/spikesBL_t' ,spikes_BL.t/ms)
np.save(prefix+'output/spikesBL_i',spikes_BL.i)
np.save(prefix+'output/traceBL_V', trace_BL.v)
np.save(prefix+'output/trace_t', trace_BL.t)
np.save(prefix+'output/traceGGN_V', trace_GGN.v)
np.save(prefix+'output/weights', S_KCBL.w_syn)
np.save(prefix+'input',X)
#plot diagnostics
if plot:
fig1 = plt.figure()
plt.plot(spikes_AL.t/ms, spikes_AL.i, '.')
plt.title('Spikes AL')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron Number')
fig1.savefig(prefix+'images/spikes_AL_train.png', bbox_inches = 'tight')
fig2 = plt.figure()
plt.plot(spikes_KC.t/ms, spikes_KC.i, '.')
plt.title('Spikes KC')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron Number')
fig2.savefig(prefix+'images/spikes_KC_train.png', bbox_inches = 'tight')
fig3 = plt.figure()
plt.plot(spikes_BL.t/ms, spikes_BL.i, '.')
plt.title('Spikes BL')
plt.xlabel('Time (ms)')
plt.ylabel('Neuron Number')
plt.ylim(-0.5, N_BL-0.5)
fig3.savefig(prefix+'images/spikes_BL_train.png', bbox_inches = 'tight')
fig4 = plt.figure()
hist(S_KCBL.w_syn / ex_KCBL, 20)
xlabel('Weight / gmax')
plt.title('Weights BL')
fig4.savefig(prefix+'images/weights_BL_train.png', bbox_inches = 'tight')
fig5 = plt.figure()
plt.plot(trace_GGN.t/ms,trace_GGN.v[0]/mV)
plt.title('Trace GGN')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
fig5.savefig(prefix+'images/trace_GGN_train.png', bbox_inches = 'tight')
fig7 = plt.figure()
plt.subplot(2,1,1)
plt.plot(trace_BL.t/ms, trace_BL.v[0]/mV)
plt.subplot(2,1,2)
plt.plot(trace_BL.t/ms, trace_BL.v[1], 'b')
plt.suptitle('Trace BL train')
fig7.savefig(prefix+'images/trace_BL_train.png', bbox_inches = 'tight')
#fig8 = plt.figure()
#plt.plot(trace_AL.t/ms, mean(trace_AL.I_inj, axis=0)/nA)
#plt.title('Injected current')
#plt.xlabel('Time (ms)')
#plt.ylabel('Avg Current (nA)')
if lfp_syn:
fig9 = plt.figure()
plt.plot(trace_LFP.t/ms, trace_LFP.V[0]/mV)
plt.title('LFP AL')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
fig9.savefig(prefix+'images/lfp_AL_train.png', bbox_inches = 'tight')
else:
fig6 = plt.figure()
plt.plot(trace_AL.t/ms, mean(trace_AL.V, axis = 0)/mV)
plt.title('LFP AL')
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
fig6.savefig(prefix+'images/lfp_AL_train.png', bbox_inches = 'tight')
#plt.show()
| set_device('cpp_standalone', debug=True, build_on_run=False) | conditional_block |
conf_utils.py | #!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Tempest configuration utilities."""
from __future__ import print_function
import json
import logging
import fileinput
import os
import subprocess
import pkg_resources
from six.moves import configparser
import yaml
from functest.utils import config
from functest.utils import env
from functest.utils import functest_utils
RALLY_CONF_PATH = "/etc/rally/rally.conf"
RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
'functest', 'ci/rally_aarch64_patch.conf')
GLANCE_IMAGE_PATH = os.path.join(
getattr(config.CONF, 'dir_functest_images'),
getattr(config.CONF, 'openstack_image_file_name'))
TEMPEST_CUSTOM = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
CI_INSTALLER_TYPE = env.get('INSTALLER_TYPE')
""" logging configuration """
LOGGER = logging.getLogger(__name__)
def create_rally_deployment(environ=None):
"""Create new rally deployment"""
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config...")
with open(RALLY_AARCH64_PATCH_PATH, "r") as pfile:
rally_patch_conf = pfile.read()
for line in fileinput.input(RALLY_CONF_PATH):
print(line, end=' ')
if "cirros|testvm" in line:
print(rally_patch_conf)
LOGGER.info("Creating Rally environment...")
try:
cmd = ['rally', 'deployment', 'destroy',
'--deployment',
str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'deployment', 'create', '--fromenv',
'--name', str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd, env=environ)
LOGGER.info("%s\n%s", " ".join(cmd), output)
cmd = ['rally', 'deployment', 'check']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_deployment_id()
def create_verifier():
"""Create new verifier"""
LOGGER.info("Create verifier from existing repo...")
cmd = ['rally', 'verify', 'delete-verifier',
'--id', str(getattr(config.CONF, 'tempest_verifier_name')),
'--force']
try:
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'verify', 'create-verifier',
'--source', str(getattr(config.CONF, 'dir_repo_tempest')),
'--name', str(getattr(config.CONF, 'tempest_verifier_name')),
'--type', 'tempest', '--system-wide']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_id()
def get_verifier_id():
"""
Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
verifier_uuid = proc.stdout.readline().rstrip()
return verifier_uuid
def get_verifier_deployment_id():
"""
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
deployment_uuid = proc.stdout.readline().rstrip()
return deployment_uuid
def get_verifier_repo_dir(verifier_id):
"""
Returns installed verifier repo directory for Tempest
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
def get_verifier_deployment_dir(verifier_id, deployment_id):
"""
Returns Rally deployment directory for current verifier
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
def update_tempest_conf_file(conf_file, rconfig):
"""Update defined paramters into tempest config file"""
with open(TEMPEST_CONF_YAML) as yfile:
conf_yaml = yaml.safe_load(yfile)
if conf_yaml:
|
with open(conf_file, 'wb') as config_file:
rconfig.write(config_file)
def configure_tempest_update_params(
tempest_conf_file, image_id=None, flavor_id=None,
compute_cnt=1, image_alt_id=None, flavor_alt_id=None,
admin_role_name='admin', cidr='192.168.120.0/24',
domain_id='default'):
# pylint: disable=too-many-branches,too-many-arguments,too-many-statements
"""
Add/update needed parameters into tempest.conf file
"""
LOGGER.debug("Updating selected tempest.conf parameters...")
rconfig = configparser.RawConfigParser()
rconfig.read(tempest_conf_file)
rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))
if image_id is not None:
rconfig.set('compute', 'image_ref', image_id)
if image_alt_id is not None:
rconfig.set('compute', 'image_ref_alt', image_alt_id)
if flavor_id is not None:
rconfig.set('compute', 'flavor_ref', flavor_id)
if flavor_alt_id is not None:
rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)
if compute_cnt > 1:
# enable multinode tests
rconfig.set('compute', 'min_compute_nodes', compute_cnt)
rconfig.set('compute-feature-enabled', 'live_migration', True)
filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',
'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',
'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
rconfig.set(
'compute-feature-enabled', 'scheduler_available_filters',
functest_utils.convert_list_to_ini(filters))
if os.environ.get('OS_REGION_NAME'):
rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig.set(
'auth', 'tempest_roles',
functest_utils.convert_list_to_ini([env.get("NEW_USER_ROLE")]))
if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
rconfig.set('auth', 'use_dynamic_credentials', False)
account_file = os.path.join(
getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
assert os.path.exists(
account_file), "{} doesn't exist".format(account_file)
rconfig.set('auth', 'test_accounts_file', account_file)
rconfig.set('identity', 'auth_version', 'v3')
rconfig.set('identity', 'admin_role', admin_role_name)
rconfig.set('identity', 'admin_domain_scope', True)
rconfig.set('identity', 'default_domain_id', domain_id)
if not rconfig.has_section('network'):
rconfig.add_section('network')
rconfig.set('network', 'default_network', cidr)
rconfig.set('network', 'project_network_cidr', cidr)
rconfig.set('network', 'project_networks_reachable', False)
rconfig.set(
'validation', 'ssh_timeout',
getattr(config.CONF, 'tempest_validation_ssh_timeout'))
rconfig.set('object-storage', 'operator_role',
getattr(config.CONF, 'tempest_object_storage_operator_role'))
rconfig.set(
'identity', 'v3_endpoint_type',
os.environ.get('OS_INTERFACE', 'public'))
sections = rconfig.sections()
services_list = [
'compute', 'volume', 'image', 'network', 'data-processing',
'object-storage', 'orchestration']
for service in services_list:
if service not in sections:
rconfig.add_section(service)
rconfig.set(
service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))
LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
'into tempest.conf file')
update_tempest_conf_file(tempest_conf_file, rconfig)
def configure_verifier(deployment_dir):
"""
Execute rally verify configure-verifier, which generates tempest.conf
"""
cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',
'--id', str(getattr(config.CONF, 'tempest_verifier_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
LOGGER.debug("Looking for tempest.conf file...")
tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
if not os.path.isfile(tempest_conf_file):
LOGGER.error("Tempest configuration file %s NOT found.",
tempest_conf_file)
return None
return tempest_conf_file
| sections = rconfig.sections()
for section in conf_yaml:
if section not in sections:
rconfig.add_section(section)
sub_conf = conf_yaml.get(section)
for key, value in sub_conf.items():
rconfig.set(section, key, value) | conditional_block |
conf_utils.py | #!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Tempest configuration utilities."""
from __future__ import print_function
import json
import logging
import fileinput
import os
import subprocess
import pkg_resources
from six.moves import configparser
import yaml
from functest.utils import config
from functest.utils import env
from functest.utils import functest_utils
RALLY_CONF_PATH = "/etc/rally/rally.conf"
RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
'functest', 'ci/rally_aarch64_patch.conf')
GLANCE_IMAGE_PATH = os.path.join(
getattr(config.CONF, 'dir_functest_images'),
getattr(config.CONF, 'openstack_image_file_name'))
TEMPEST_CUSTOM = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
CI_INSTALLER_TYPE = env.get('INSTALLER_TYPE')
""" logging configuration """
LOGGER = logging.getLogger(__name__)
def create_rally_deployment(environ=None):
"""Create new rally deployment"""
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config...")
with open(RALLY_AARCH64_PATCH_PATH, "r") as pfile:
rally_patch_conf = pfile.read()
for line in fileinput.input(RALLY_CONF_PATH):
print(line, end=' ')
if "cirros|testvm" in line:
print(rally_patch_conf)
LOGGER.info("Creating Rally environment...")
try:
cmd = ['rally', 'deployment', 'destroy',
'--deployment',
str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'deployment', 'create', '--fromenv',
'--name', str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd, env=environ)
LOGGER.info("%s\n%s", " ".join(cmd), output)
cmd = ['rally', 'deployment', 'check']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_deployment_id()
def create_verifier():
"""Create new verifier"""
LOGGER.info("Create verifier from existing repo...")
cmd = ['rally', 'verify', 'delete-verifier',
'--id', str(getattr(config.CONF, 'tempest_verifier_name')),
'--force']
try:
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'verify', 'create-verifier',
'--source', str(getattr(config.CONF, 'dir_repo_tempest')),
'--name', str(getattr(config.CONF, 'tempest_verifier_name')),
'--type', 'tempest', '--system-wide']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_id()
def get_verifier_id():
"""
Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
verifier_uuid = proc.stdout.readline().rstrip()
return verifier_uuid
| Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
deployment_uuid = proc.stdout.readline().rstrip()
return deployment_uuid
def get_verifier_repo_dir(verifier_id):
"""
Returns installed verifier repo directory for Tempest
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
def get_verifier_deployment_dir(verifier_id, deployment_id):
"""
Returns Rally deployment directory for current verifier
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
def update_tempest_conf_file(conf_file, rconfig):
"""Update defined paramters into tempest config file"""
with open(TEMPEST_CONF_YAML) as yfile:
conf_yaml = yaml.safe_load(yfile)
if conf_yaml:
sections = rconfig.sections()
for section in conf_yaml:
if section not in sections:
rconfig.add_section(section)
sub_conf = conf_yaml.get(section)
for key, value in sub_conf.items():
rconfig.set(section, key, value)
with open(conf_file, 'wb') as config_file:
rconfig.write(config_file)
def configure_tempest_update_params(
tempest_conf_file, image_id=None, flavor_id=None,
compute_cnt=1, image_alt_id=None, flavor_alt_id=None,
admin_role_name='admin', cidr='192.168.120.0/24',
domain_id='default'):
# pylint: disable=too-many-branches,too-many-arguments,too-many-statements
"""
Add/update needed parameters into tempest.conf file
"""
LOGGER.debug("Updating selected tempest.conf parameters...")
rconfig = configparser.RawConfigParser()
rconfig.read(tempest_conf_file)
rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))
if image_id is not None:
rconfig.set('compute', 'image_ref', image_id)
if image_alt_id is not None:
rconfig.set('compute', 'image_ref_alt', image_alt_id)
if flavor_id is not None:
rconfig.set('compute', 'flavor_ref', flavor_id)
if flavor_alt_id is not None:
rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)
if compute_cnt > 1:
# enable multinode tests
rconfig.set('compute', 'min_compute_nodes', compute_cnt)
rconfig.set('compute-feature-enabled', 'live_migration', True)
filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',
'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',
'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
rconfig.set(
'compute-feature-enabled', 'scheduler_available_filters',
functest_utils.convert_list_to_ini(filters))
if os.environ.get('OS_REGION_NAME'):
rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig.set(
'auth', 'tempest_roles',
functest_utils.convert_list_to_ini([env.get("NEW_USER_ROLE")]))
if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
rconfig.set('auth', 'use_dynamic_credentials', False)
account_file = os.path.join(
getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
assert os.path.exists(
account_file), "{} doesn't exist".format(account_file)
rconfig.set('auth', 'test_accounts_file', account_file)
rconfig.set('identity', 'auth_version', 'v3')
rconfig.set('identity', 'admin_role', admin_role_name)
rconfig.set('identity', 'admin_domain_scope', True)
rconfig.set('identity', 'default_domain_id', domain_id)
if not rconfig.has_section('network'):
rconfig.add_section('network')
rconfig.set('network', 'default_network', cidr)
rconfig.set('network', 'project_network_cidr', cidr)
rconfig.set('network', 'project_networks_reachable', False)
rconfig.set(
'validation', 'ssh_timeout',
getattr(config.CONF, 'tempest_validation_ssh_timeout'))
rconfig.set('object-storage', 'operator_role',
getattr(config.CONF, 'tempest_object_storage_operator_role'))
rconfig.set(
'identity', 'v3_endpoint_type',
os.environ.get('OS_INTERFACE', 'public'))
sections = rconfig.sections()
services_list = [
'compute', 'volume', 'image', 'network', 'data-processing',
'object-storage', 'orchestration']
for service in services_list:
if service not in sections:
rconfig.add_section(service)
rconfig.set(
service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))
LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
'into tempest.conf file')
update_tempest_conf_file(tempest_conf_file, rconfig)
def configure_verifier(deployment_dir):
"""
Execute rally verify configure-verifier, which generates tempest.conf
"""
cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',
'--id', str(getattr(config.CONF, 'tempest_verifier_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
LOGGER.debug("Looking for tempest.conf file...")
tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
if not os.path.isfile(tempest_conf_file):
LOGGER.error("Tempest configuration file %s NOT found.",
tempest_conf_file)
return None
return tempest_conf_file | def get_verifier_deployment_id():
""" | random_line_split |
conf_utils.py | #!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Tempest configuration utilities."""
from __future__ import print_function
import json
import logging
import fileinput
import os
import subprocess
import pkg_resources
from six.moves import configparser
import yaml
from functest.utils import config
from functest.utils import env
from functest.utils import functest_utils
RALLY_CONF_PATH = "/etc/rally/rally.conf"
RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
'functest', 'ci/rally_aarch64_patch.conf')
GLANCE_IMAGE_PATH = os.path.join(
getattr(config.CONF, 'dir_functest_images'),
getattr(config.CONF, 'openstack_image_file_name'))
TEMPEST_CUSTOM = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
CI_INSTALLER_TYPE = env.get('INSTALLER_TYPE')
""" logging configuration """
LOGGER = logging.getLogger(__name__)
def create_rally_deployment(environ=None):
|
def create_verifier():
"""Create new verifier"""
LOGGER.info("Create verifier from existing repo...")
cmd = ['rally', 'verify', 'delete-verifier',
'--id', str(getattr(config.CONF, 'tempest_verifier_name')),
'--force']
try:
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'verify', 'create-verifier',
'--source', str(getattr(config.CONF, 'dir_repo_tempest')),
'--name', str(getattr(config.CONF, 'tempest_verifier_name')),
'--type', 'tempest', '--system-wide']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_id()
def get_verifier_id():
"""
Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
verifier_uuid = proc.stdout.readline().rstrip()
return verifier_uuid
def get_verifier_deployment_id():
"""
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
deployment_uuid = proc.stdout.readline().rstrip()
return deployment_uuid
def get_verifier_repo_dir(verifier_id):
"""
Returns installed verifier repo directory for Tempest
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
def get_verifier_deployment_dir(verifier_id, deployment_id):
"""
Returns Rally deployment directory for current verifier
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
def update_tempest_conf_file(conf_file, rconfig):
"""Update defined paramters into tempest config file"""
with open(TEMPEST_CONF_YAML) as yfile:
conf_yaml = yaml.safe_load(yfile)
if conf_yaml:
sections = rconfig.sections()
for section in conf_yaml:
if section not in sections:
rconfig.add_section(section)
sub_conf = conf_yaml.get(section)
for key, value in sub_conf.items():
rconfig.set(section, key, value)
with open(conf_file, 'wb') as config_file:
rconfig.write(config_file)
def configure_tempest_update_params(
tempest_conf_file, image_id=None, flavor_id=None,
compute_cnt=1, image_alt_id=None, flavor_alt_id=None,
admin_role_name='admin', cidr='192.168.120.0/24',
domain_id='default'):
# pylint: disable=too-many-branches,too-many-arguments,too-many-statements
"""
Add/update needed parameters into tempest.conf file
"""
LOGGER.debug("Updating selected tempest.conf parameters...")
rconfig = configparser.RawConfigParser()
rconfig.read(tempest_conf_file)
rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))
if image_id is not None:
rconfig.set('compute', 'image_ref', image_id)
if image_alt_id is not None:
rconfig.set('compute', 'image_ref_alt', image_alt_id)
if flavor_id is not None:
rconfig.set('compute', 'flavor_ref', flavor_id)
if flavor_alt_id is not None:
rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)
if compute_cnt > 1:
# enable multinode tests
rconfig.set('compute', 'min_compute_nodes', compute_cnt)
rconfig.set('compute-feature-enabled', 'live_migration', True)
filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',
'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',
'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
rconfig.set(
'compute-feature-enabled', 'scheduler_available_filters',
functest_utils.convert_list_to_ini(filters))
if os.environ.get('OS_REGION_NAME'):
rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig.set(
'auth', 'tempest_roles',
functest_utils.convert_list_to_ini([env.get("NEW_USER_ROLE")]))
if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
rconfig.set('auth', 'use_dynamic_credentials', False)
account_file = os.path.join(
getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
assert os.path.exists(
account_file), "{} doesn't exist".format(account_file)
rconfig.set('auth', 'test_accounts_file', account_file)
rconfig.set('identity', 'auth_version', 'v3')
rconfig.set('identity', 'admin_role', admin_role_name)
rconfig.set('identity', 'admin_domain_scope', True)
rconfig.set('identity', 'default_domain_id', domain_id)
if not rconfig.has_section('network'):
rconfig.add_section('network')
rconfig.set('network', 'default_network', cidr)
rconfig.set('network', 'project_network_cidr', cidr)
rconfig.set('network', 'project_networks_reachable', False)
rconfig.set(
'validation', 'ssh_timeout',
getattr(config.CONF, 'tempest_validation_ssh_timeout'))
rconfig.set('object-storage', 'operator_role',
getattr(config.CONF, 'tempest_object_storage_operator_role'))
rconfig.set(
'identity', 'v3_endpoint_type',
os.environ.get('OS_INTERFACE', 'public'))
sections = rconfig.sections()
services_list = [
'compute', 'volume', 'image', 'network', 'data-processing',
'object-storage', 'orchestration']
for service in services_list:
if service not in sections:
rconfig.add_section(service)
rconfig.set(
service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))
LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
'into tempest.conf file')
update_tempest_conf_file(tempest_conf_file, rconfig)
def configure_verifier(deployment_dir):
"""
Execute rally verify configure-verifier, which generates tempest.conf
"""
cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',
'--id', str(getattr(config.CONF, 'tempest_verifier_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
LOGGER.debug("Looking for tempest.conf file...")
tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
if not os.path.isfile(tempest_conf_file):
LOGGER.error("Tempest configuration file %s NOT found.",
tempest_conf_file)
return None
return tempest_conf_file
| """Create new rally deployment"""
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config...")
with open(RALLY_AARCH64_PATCH_PATH, "r") as pfile:
rally_patch_conf = pfile.read()
for line in fileinput.input(RALLY_CONF_PATH):
print(line, end=' ')
if "cirros|testvm" in line:
print(rally_patch_conf)
LOGGER.info("Creating Rally environment...")
try:
cmd = ['rally', 'deployment', 'destroy',
'--deployment',
str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'deployment', 'create', '--fromenv',
'--name', str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd, env=environ)
LOGGER.info("%s\n%s", " ".join(cmd), output)
cmd = ['rally', 'deployment', 'check']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_deployment_id() | identifier_body |
conf_utils.py | #!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Tempest configuration utilities."""
from __future__ import print_function
import json
import logging
import fileinput
import os
import subprocess
import pkg_resources
from six.moves import configparser
import yaml
from functest.utils import config
from functest.utils import env
from functest.utils import functest_utils
RALLY_CONF_PATH = "/etc/rally/rally.conf"
RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
'functest', 'ci/rally_aarch64_patch.conf')
GLANCE_IMAGE_PATH = os.path.join(
getattr(config.CONF, 'dir_functest_images'),
getattr(config.CONF, 'openstack_image_file_name'))
TEMPEST_CUSTOM = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
CI_INSTALLER_TYPE = env.get('INSTALLER_TYPE')
""" logging configuration """
LOGGER = logging.getLogger(__name__)
def create_rally_deployment(environ=None):
"""Create new rally deployment"""
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config...")
with open(RALLY_AARCH64_PATCH_PATH, "r") as pfile:
rally_patch_conf = pfile.read()
for line in fileinput.input(RALLY_CONF_PATH):
print(line, end=' ')
if "cirros|testvm" in line:
print(rally_patch_conf)
LOGGER.info("Creating Rally environment...")
try:
cmd = ['rally', 'deployment', 'destroy',
'--deployment',
str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'deployment', 'create', '--fromenv',
'--name', str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd, env=environ)
LOGGER.info("%s\n%s", " ".join(cmd), output)
cmd = ['rally', 'deployment', 'check']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_deployment_id()
def create_verifier():
"""Create new verifier"""
LOGGER.info("Create verifier from existing repo...")
cmd = ['rally', 'verify', 'delete-verifier',
'--id', str(getattr(config.CONF, 'tempest_verifier_name')),
'--force']
try:
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'verify', 'create-verifier',
'--source', str(getattr(config.CONF, 'dir_repo_tempest')),
'--name', str(getattr(config.CONF, 'tempest_verifier_name')),
'--type', 'tempest', '--system-wide']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
return get_verifier_id()
def get_verifier_id():
"""
Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
verifier_uuid = proc.stdout.readline().rstrip()
return verifier_uuid
def get_verifier_deployment_id():
"""
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
deployment_uuid = proc.stdout.readline().rstrip()
return deployment_uuid
def get_verifier_repo_dir(verifier_id):
"""
Returns installed verifier repo directory for Tempest
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
def get_verifier_deployment_dir(verifier_id, deployment_id):
"""
Returns Rally deployment directory for current verifier
"""
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
def update_tempest_conf_file(conf_file, rconfig):
"""Update defined paramters into tempest config file"""
with open(TEMPEST_CONF_YAML) as yfile:
conf_yaml = yaml.safe_load(yfile)
if conf_yaml:
sections = rconfig.sections()
for section in conf_yaml:
if section not in sections:
rconfig.add_section(section)
sub_conf = conf_yaml.get(section)
for key, value in sub_conf.items():
rconfig.set(section, key, value)
with open(conf_file, 'wb') as config_file:
rconfig.write(config_file)
def | (
tempest_conf_file, image_id=None, flavor_id=None,
compute_cnt=1, image_alt_id=None, flavor_alt_id=None,
admin_role_name='admin', cidr='192.168.120.0/24',
domain_id='default'):
# pylint: disable=too-many-branches,too-many-arguments,too-many-statements
"""
Add/update needed parameters into tempest.conf file
"""
LOGGER.debug("Updating selected tempest.conf parameters...")
rconfig = configparser.RawConfigParser()
rconfig.read(tempest_conf_file)
rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))
if image_id is not None:
rconfig.set('compute', 'image_ref', image_id)
if image_alt_id is not None:
rconfig.set('compute', 'image_ref_alt', image_alt_id)
if flavor_id is not None:
rconfig.set('compute', 'flavor_ref', flavor_id)
if flavor_alt_id is not None:
rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)
if compute_cnt > 1:
# enable multinode tests
rconfig.set('compute', 'min_compute_nodes', compute_cnt)
rconfig.set('compute-feature-enabled', 'live_migration', True)
filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',
'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',
'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
rconfig.set(
'compute-feature-enabled', 'scheduler_available_filters',
functest_utils.convert_list_to_ini(filters))
if os.environ.get('OS_REGION_NAME'):
rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig.set(
'auth', 'tempest_roles',
functest_utils.convert_list_to_ini([env.get("NEW_USER_ROLE")]))
if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
rconfig.set('auth', 'use_dynamic_credentials', False)
account_file = os.path.join(
getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
assert os.path.exists(
account_file), "{} doesn't exist".format(account_file)
rconfig.set('auth', 'test_accounts_file', account_file)
rconfig.set('identity', 'auth_version', 'v3')
rconfig.set('identity', 'admin_role', admin_role_name)
rconfig.set('identity', 'admin_domain_scope', True)
rconfig.set('identity', 'default_domain_id', domain_id)
if not rconfig.has_section('network'):
rconfig.add_section('network')
rconfig.set('network', 'default_network', cidr)
rconfig.set('network', 'project_network_cidr', cidr)
rconfig.set('network', 'project_networks_reachable', False)
rconfig.set(
'validation', 'ssh_timeout',
getattr(config.CONF, 'tempest_validation_ssh_timeout'))
rconfig.set('object-storage', 'operator_role',
getattr(config.CONF, 'tempest_object_storage_operator_role'))
rconfig.set(
'identity', 'v3_endpoint_type',
os.environ.get('OS_INTERFACE', 'public'))
sections = rconfig.sections()
services_list = [
'compute', 'volume', 'image', 'network', 'data-processing',
'object-storage', 'orchestration']
for service in services_list:
if service not in sections:
rconfig.add_section(service)
rconfig.set(
service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))
LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
'into tempest.conf file')
update_tempest_conf_file(tempest_conf_file, rconfig)
def configure_verifier(deployment_dir):
"""
Execute rally verify configure-verifier, which generates tempest.conf
"""
cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',
'--id', str(getattr(config.CONF, 'tempest_verifier_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output)
LOGGER.debug("Looking for tempest.conf file...")
tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
if not os.path.isfile(tempest_conf_file):
LOGGER.error("Tempest configuration file %s NOT found.",
tempest_conf_file)
return None
return tempest_conf_file
| configure_tempest_update_params | identifier_name |
navfunc.py | #!/usr/bin/python
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
# author: Luciano Augusto Kruk
# website: www.kruk.eng.br
#
# description: Package of functions for quaternions and
# geodetic coordinates handling.
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
import numpy as np
import math as mt
from numpy import zeros,sin,cos,empty,sqrt;
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CGEO:
def __init__(self, lat, lon, h):
self.lat = lat;
self.lon = lon;
self.h = h;
def __repr__(self):
K = 180./mt.pi;
return "<CGEO: lat=%1.2f[deg] lon=%1.2f[deg] h=%1.1f[m]>" % (self.lat*K, self.lon*K, self.h)
class CRECT:
def __init__(self, r_e):
if type(r_e) is np.ndarray:
self.x = r_e.squeeze()[0];
self.y = r_e.squeeze()[1];
self.z = r_e.squeeze()[2];
elif type(r_e) in (tuple, list):
self.x = r_e[0]
self.y = r_e[1]
self.z = r_e[2]
else:
print "ainda nao suportado!"
def __repr__(self):
K = 180./mt.pi;
return "<CRECT: x=%1.2f[m] y=%1.2f[m] z=%1.2f[m]>" % (self.x, self.y, self.z)
def aslist(self):
return [
self.x,
self.y,
self.z
];
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CNAVFUNC:
"""
Geodetic Funcions
"""
# Earth Elliptic Model #
earth_a = 6378137.0; # [m]
earth_b = 6356752.3142; # [m]
wie = 1.0 * 7.2921151467e-5;
wie_e = np.asarray([0,0,wie]).reshape((3,1));
earth_f = (earth_a-earth_b)/earth_a;
earth_e = sqrt(earth_f*(2.0-earth_f));
earth_e2 = (earth_e**2.0);
def __init__(self):
pass
def Rlambda(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_lbd
"""
return (self.earth_a*(1.-self.earth_e2)) / ((1.-(self.earth_e2*(sin(lat_rad)**2)))**1.5);
def Rphi(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_phi
"""
return self.earth_a / sqrt(1.-(self.earth_e2*(sin(lat_rad)**2.0)));
def euler2Q(self, (phi, theta, psi)):
"""
Navigation -- from euler to Q.
: parameter : phi [rad]
: parameter : theta [rad]
: parameter : psi [rad]
: output : Q4
"""
half_phi = 0.5*phi;
half_theta = 0.5*theta;
half_psi = 0.5*psi;
return np.asarray([
(cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),
(sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),
(cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),
(cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))
]);
def Q2euler(self, q):
"""
Navigation -- from Q to euler.
: input : q
: output : phi [rad]
: output : theta [rad]
: output : psi [rad]
"""
phi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));
psi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));
try:
theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));
except ValueError:
print "ERRO: norm(Q) = %f" % np.sqrt(np.sum(q**2))
theta = 0;
return (phi, theta, psi)
def Q2C(self, q):
"""
Navigation -- from Q to C.
If Q represents the transformation from 'a' to 'b', the matrix
'C' represents 'Ca2b'.
: input : q
: output : C
"""
#q = q.squeeze();
C = np.empty((3,3));
C[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);
C[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));
C[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));
C[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));
C[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);
C[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));
C[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));
C[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));
C[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);
return C
def C2Q(self, C): |
output: nparray() with Q
"""
return self.euler2Q(self.C2euler(C))
def C2euler(self, C):
"""
Navigation -- from C to (phi,theta,psi)[rad]
output: tuple with angles in [rad]
"""
assert(C[2,2] != 0)
assert(C[0,0] != 0)
assert(C[0,2]>=-1 and C[0,2]<=1)
phi = np.arctan2(C[1,2], C[2,2])
theta = np.arcsin(-C[0,2])
psi = np.arctan2(C[0,1], C[0,0])
return (phi, theta, psi)
def q1_prod_q2(self, q1, q2):
"""
Navigation -- multiplies two quaternions
Let q1 represent C_a2b, and q2 represent C_b2c.
The product C_a2c = C_b2c.C_a2b might be represented
by q3 = q1.q2
output: np.array quaternion q3=q1.q2
"""
q3 = np.array([
(q1[0]*q2[0])-(q2[1]*q1[1])-(q2[2]*q1[2])-(q2[3]*q1[3]),
(q2[0]*q1[1])+(q2[1]*q1[0])+(q2[2]*q1[3])-(q2[3]*q1[2]),
(q2[0]*q1[2])+(q2[2]*q1[0])-(q2[1]*q1[3])+(q2[3]*q1[1]),
(q2[0]*q1[3])+(q2[3]*q1[0])+(q2[1]*q1[2])-(q2[2]*q1[1])
])
return q3
def matrix_Q2euler(self, q):
"""
Converts a matrix with quaternions (N x 4) to euler angles (N x 3).
"""
N = q.shape[0]
dcm = np.zeros((N, 3))
for i in range(N):
phi,theta,psi = self.Q2euler(q[i,:])
dcm[i,:] = [phi, theta, psi]
return dcm
def Re2n(self, lat, lon):
"""
Navigation -- calculates Re2n(lat,lon)
: input : lat [rad]
: input : lon [rad]
: output : Re2n
"""
Re2n = np.empty((3,3));
Re2n[0,0] = -sin(lat)*cos(lon);
Re2n[0,1] = -sin(lat)*sin(lon);
Re2n[0,2] = cos(lat);
Re2n[1,0] = -sin(lon);
Re2n[1,1] = cos(lon);
Re2n[1,2] = 0;
Re2n[2,0] = -cos(lat)*cos(lon);
Re2n[2,1] = -cos(lat)*sin(lon);
Re2n[2,2] = -sin(lat);
return Re2n
def geo2rect(self, geo):
"""
Converter coordenadas ECEF geodeticas para retangulares.
pgeo [in] Coordenadas geodeticas.
prect [out] Coordenadas retangulares.
"""
s = sin(geo.lat);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
return CRECT((
(RN + geo.h) * cos(geo.lat) * cos(geo.lon),
(RN + geo.h) * cos(geo.lat) * sin(geo.lon),
((RN * (1.0 - self.earth_e2)) + geo.h) * sin(geo.lat)
))
def rect2geo(self, rect):
"""
Converter coordenadas ECEF retangulares para geodeticas.
pgeo [out] Coordenadas geodeticas.
prect [in] Coordenadas retangulares.
"""
p = sqrt((rect.x * rect.x) + (rect.y * rect.y));
geo = CGEO(0,0,0);
geo.h = 0;
RN = self.earth_a;
for i in range(100): # timeout
#print "[lat h] = [%1.09f %1.03f]" % (geo.lat, geo.h)
lastlat = geo.lat;
lasth = geo.h;
# algoritmo de conversao:
s = rect.z / (((1.0 - self.earth_e2) * RN) + geo.h);
geo.lat = mt.atan((rect.z + (self.earth_e2 * RN * s)) / p);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
geo.h = (p / cos(geo.lat)) - RN;
# erro:
d = ((lastlat - geo.lat) * (lastlat - geo.lat)) + ((lasth - geo.h) * (lasth - geo.h));
if (d < 1e-9):
break;
geo.lon = mt.atan2(rect.y, rect.x);
return geo
def dqdt(self, q, w):
"""
The derivative of the quaternions is $\dot{q} = 1/2 .B(w).q$
This funtion returns $\dot{q}$.
"""
K = 1e1
cq = np.asarray(q).reshape((4,1))
epslon = 1.0 - np.sum(cq**2.0)
B = np.asarray([
[ 0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0]
])
dq = (0.5 * np.dot(B,cq)) + (K*epslon*cq)
return list(dq.squeeze())
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
if (__name__ == "__main__"):
g = CNAVFUNC();
print g.Rlambda(20./57)
print g.Rphi(30./57)
q = g.euler2Q((10./57, 20./57, -30./57))
print np.asarray(g.Q2euler(q))*57
print g.Q2C(q)
print g.Re2n(0,0)
print g.Re2n(1,0.9)
geo = CGEO(10./57, -30./57, 33);
print geo
rec = g.geo2rect(geo)
print rec
geo = g.rect2geo(rec)
print geo
#----------------------#
# some dynamic tests:
#----------------------#
from scipy.integrate import odeint;
from numpy import dot;
print
# I: inertial frame
# b: body frame
qI2b = g.euler2Q((0,0,0))
# angular rotation between I and b:
# \omega_{Ib}^I
w = np.asarray([2./57, 0, 0]).reshape((3,1))
def eqdiff(q,t,w):
RI2b = g.Q2C(q)
dqdt = g.dqdt(q, dot(RI2b,w))
return dqdt
# a vector described at I:
F = np.asarray([0,0,1]).reshape((3,1))
print "F = "
print F.T
for t in [1,5,20,90]:
# after t seconds, the quaternions should be:
y = odeint(eqdiff, list(qI2b), [0,t], (w,))[1,:]
# with these euler angles:
euler = g.Q2euler(y)
# and described at b:
F_b = dot(g.Q2C(y), F)
print "F_b(phi = %1.03f) = [%1.03f %1.03f %1.03f]" % (
57.*euler[0], F_b[0], F_b[1], F_b[2])
#----------------------#
# some convertion tests:
#----------------------#
euler = (10./57, -40./57, 163./57)
Q = g.euler2Q(euler)
print "euler = "
print np.degrees(euler)
print np.degrees(g.Q2euler(g.euler2Q(euler)))
euler_2 = g.Q2euler(g.C2Q(g.Q2C(g.euler2Q(euler))))
print np.degrees(np.asarray(euler_2))
#----------------------#
# quaternion product:
#----------------------#
print
print "quaternion product"
q_a2b = g.euler2Q((-10., 33., -55.))
q_b2c = g.euler2Q((44., -38., 77.))
print "C_a2c = C_b2c . Ci_a2b ="
print dot(g.Q2C(q_b2c), g.Q2C(q_a2b))
print "C(q_b2c . qa2b) ="
print g.Q2C(g.q1_prod_q2(q_b2c, q_a2b))
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>> | """
Navigation -- from C to Q | random_line_split |
navfunc.py | #!/usr/bin/python
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
# author: Luciano Augusto Kruk
# website: www.kruk.eng.br
#
# description: Package of functions for quaternions and
# geodetic coordinates handling.
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
import numpy as np
import math as mt
from numpy import zeros,sin,cos,empty,sqrt;
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CGEO:
def __init__(self, lat, lon, h):
self.lat = lat;
self.lon = lon;
self.h = h;
def __repr__(self):
K = 180./mt.pi;
return "<CGEO: lat=%1.2f[deg] lon=%1.2f[deg] h=%1.1f[m]>" % (self.lat*K, self.lon*K, self.h)
class CRECT:
def __init__(self, r_e):
if type(r_e) is np.ndarray:
self.x = r_e.squeeze()[0];
self.y = r_e.squeeze()[1];
self.z = r_e.squeeze()[2];
elif type(r_e) in (tuple, list):
self.x = r_e[0]
self.y = r_e[1]
self.z = r_e[2]
else:
print "ainda nao suportado!"
def __repr__(self):
K = 180./mt.pi;
return "<CRECT: x=%1.2f[m] y=%1.2f[m] z=%1.2f[m]>" % (self.x, self.y, self.z)
def aslist(self):
return [
self.x,
self.y,
self.z
];
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CNAVFUNC:
"""
Geodetic Funcions
"""
# Earth Elliptic Model #
earth_a = 6378137.0; # [m]
earth_b = 6356752.3142; # [m]
wie = 1.0 * 7.2921151467e-5;
wie_e = np.asarray([0,0,wie]).reshape((3,1));
earth_f = (earth_a-earth_b)/earth_a;
earth_e = sqrt(earth_f*(2.0-earth_f));
earth_e2 = (earth_e**2.0);
def __init__(self):
pass
def Rlambda(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_lbd
"""
return (self.earth_a*(1.-self.earth_e2)) / ((1.-(self.earth_e2*(sin(lat_rad)**2)))**1.5);
def Rphi(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_phi
"""
return self.earth_a / sqrt(1.-(self.earth_e2*(sin(lat_rad)**2.0)));
def euler2Q(self, (phi, theta, psi)):
"""
Navigation -- from euler to Q.
: parameter : phi [rad]
: parameter : theta [rad]
: parameter : psi [rad]
: output : Q4
"""
half_phi = 0.5*phi;
half_theta = 0.5*theta;
half_psi = 0.5*psi;
return np.asarray([
(cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),
(sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),
(cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),
(cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))
]);
def Q2euler(self, q):
"""
Navigation -- from Q to euler.
: input : q
: output : phi [rad]
: output : theta [rad]
: output : psi [rad]
"""
phi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));
psi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));
try:
theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));
except ValueError:
print "ERRO: norm(Q) = %f" % np.sqrt(np.sum(q**2))
theta = 0;
return (phi, theta, psi)
def Q2C(self, q):
"""
Navigation -- from Q to C.
If Q represents the transformation from 'a' to 'b', the matrix
'C' represents 'Ca2b'.
: input : q
: output : C
"""
#q = q.squeeze();
C = np.empty((3,3));
C[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);
C[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));
C[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));
C[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));
C[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);
C[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));
C[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));
C[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));
C[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);
return C
def C2Q(self, C):
"""
Navigation -- from C to Q
output: nparray() with Q
"""
return self.euler2Q(self.C2euler(C))
def C2euler(self, C):
"""
Navigation -- from C to (phi,theta,psi)[rad]
output: tuple with angles in [rad]
"""
assert(C[2,2] != 0)
assert(C[0,0] != 0)
assert(C[0,2]>=-1 and C[0,2]<=1)
phi = np.arctan2(C[1,2], C[2,2])
theta = np.arcsin(-C[0,2])
psi = np.arctan2(C[0,1], C[0,0])
return (phi, theta, psi)
def q1_prod_q2(self, q1, q2):
"""
Navigation -- multiplies two quaternions
Let q1 represent C_a2b, and q2 represent C_b2c.
The product C_a2c = C_b2c.C_a2b might be represented
by q3 = q1.q2
output: np.array quaternion q3=q1.q2
"""
q3 = np.array([
(q1[0]*q2[0])-(q2[1]*q1[1])-(q2[2]*q1[2])-(q2[3]*q1[3]),
(q2[0]*q1[1])+(q2[1]*q1[0])+(q2[2]*q1[3])-(q2[3]*q1[2]),
(q2[0]*q1[2])+(q2[2]*q1[0])-(q2[1]*q1[3])+(q2[3]*q1[1]),
(q2[0]*q1[3])+(q2[3]*q1[0])+(q2[1]*q1[2])-(q2[2]*q1[1])
])
return q3
def matrix_Q2euler(self, q):
"""
Converts a matrix with quaternions (N x 4) to euler angles (N x 3).
"""
N = q.shape[0]
dcm = np.zeros((N, 3))
for i in range(N):
phi,theta,psi = self.Q2euler(q[i,:])
dcm[i,:] = [phi, theta, psi]
return dcm
def Re2n(self, lat, lon):
"""
Navigation -- calculates Re2n(lat,lon)
: input : lat [rad]
: input : lon [rad]
: output : Re2n
"""
Re2n = np.empty((3,3));
Re2n[0,0] = -sin(lat)*cos(lon);
Re2n[0,1] = -sin(lat)*sin(lon);
Re2n[0,2] = cos(lat);
Re2n[1,0] = -sin(lon);
Re2n[1,1] = cos(lon);
Re2n[1,2] = 0;
Re2n[2,0] = -cos(lat)*cos(lon);
Re2n[2,1] = -cos(lat)*sin(lon);
Re2n[2,2] = -sin(lat);
return Re2n
def geo2rect(self, geo):
|
def rect2geo(self, rect):
"""
Converter coordenadas ECEF retangulares para geodeticas.
pgeo [out] Coordenadas geodeticas.
prect [in] Coordenadas retangulares.
"""
p = sqrt((rect.x * rect.x) + (rect.y * rect.y));
geo = CGEO(0,0,0);
geo.h = 0;
RN = self.earth_a;
for i in range(100): # timeout
#print "[lat h] = [%1.09f %1.03f]" % (geo.lat, geo.h)
lastlat = geo.lat;
lasth = geo.h;
# algoritmo de conversao:
s = rect.z / (((1.0 - self.earth_e2) * RN) + geo.h);
geo.lat = mt.atan((rect.z + (self.earth_e2 * RN * s)) / p);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
geo.h = (p / cos(geo.lat)) - RN;
# erro:
d = ((lastlat - geo.lat) * (lastlat - geo.lat)) + ((lasth - geo.h) * (lasth - geo.h));
if (d < 1e-9):
break;
geo.lon = mt.atan2(rect.y, rect.x);
return geo
def dqdt(self, q, w):
"""
The derivative of the quaternions is $\dot{q} = 1/2 .B(w).q$
This funtion returns $\dot{q}$.
"""
K = 1e1
cq = np.asarray(q).reshape((4,1))
epslon = 1.0 - np.sum(cq**2.0)
B = np.asarray([
[ 0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0]
])
dq = (0.5 * np.dot(B,cq)) + (K*epslon*cq)
return list(dq.squeeze())
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
if (__name__ == "__main__"):
g = CNAVFUNC();
print g.Rlambda(20./57)
print g.Rphi(30./57)
q = g.euler2Q((10./57, 20./57, -30./57))
print np.asarray(g.Q2euler(q))*57
print g.Q2C(q)
print g.Re2n(0,0)
print g.Re2n(1,0.9)
geo = CGEO(10./57, -30./57, 33);
print geo
rec = g.geo2rect(geo)
print rec
geo = g.rect2geo(rec)
print geo
#----------------------#
# some dynamic tests:
#----------------------#
from scipy.integrate import odeint;
from numpy import dot;
print
# I: inertial frame
# b: body frame
qI2b = g.euler2Q((0,0,0))
# angular rotation between I and b:
# \omega_{Ib}^I
w = np.asarray([2./57, 0, 0]).reshape((3,1))
def eqdiff(q,t,w):
RI2b = g.Q2C(q)
dqdt = g.dqdt(q, dot(RI2b,w))
return dqdt
# a vector described at I:
F = np.asarray([0,0,1]).reshape((3,1))
print "F = "
print F.T
for t in [1,5,20,90]:
# after t seconds, the quaternions should be:
y = odeint(eqdiff, list(qI2b), [0,t], (w,))[1,:]
# with these euler angles:
euler = g.Q2euler(y)
# and described at b:
F_b = dot(g.Q2C(y), F)
print "F_b(phi = %1.03f) = [%1.03f %1.03f %1.03f]" % (
57.*euler[0], F_b[0], F_b[1], F_b[2])
#----------------------#
# some convertion tests:
#----------------------#
euler = (10./57, -40./57, 163./57)
Q = g.euler2Q(euler)
print "euler = "
print np.degrees(euler)
print np.degrees(g.Q2euler(g.euler2Q(euler)))
euler_2 = g.Q2euler(g.C2Q(g.Q2C(g.euler2Q(euler))))
print np.degrees(np.asarray(euler_2))
#----------------------#
# quaternion product:
#----------------------#
print
print "quaternion product"
q_a2b = g.euler2Q((-10., 33., -55.))
q_b2c = g.euler2Q((44., -38., 77.))
print "C_a2c = C_b2c . Ci_a2b ="
print dot(g.Q2C(q_b2c), g.Q2C(q_a2b))
print "C(q_b2c . qa2b) ="
print g.Q2C(g.q1_prod_q2(q_b2c, q_a2b))
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
| """
Converter coordenadas ECEF geodeticas para retangulares.
pgeo [in] Coordenadas geodeticas.
prect [out] Coordenadas retangulares.
"""
s = sin(geo.lat);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
return CRECT((
(RN + geo.h) * cos(geo.lat) * cos(geo.lon),
(RN + geo.h) * cos(geo.lat) * sin(geo.lon),
((RN * (1.0 - self.earth_e2)) + geo.h) * sin(geo.lat)
)) | identifier_body |
navfunc.py | #!/usr/bin/python
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
# author: Luciano Augusto Kruk
# website: www.kruk.eng.br
#
# description: Package of functions for quaternions and
# geodetic coordinates handling.
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
import numpy as np
import math as mt
from numpy import zeros,sin,cos,empty,sqrt;
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CGEO:
def __init__(self, lat, lon, h):
self.lat = lat;
self.lon = lon;
self.h = h;
def | (self):
K = 180./mt.pi;
return "<CGEO: lat=%1.2f[deg] lon=%1.2f[deg] h=%1.1f[m]>" % (self.lat*K, self.lon*K, self.h)
class CRECT:
def __init__(self, r_e):
if type(r_e) is np.ndarray:
self.x = r_e.squeeze()[0];
self.y = r_e.squeeze()[1];
self.z = r_e.squeeze()[2];
elif type(r_e) in (tuple, list):
self.x = r_e[0]
self.y = r_e[1]
self.z = r_e[2]
else:
print "ainda nao suportado!"
def __repr__(self):
K = 180./mt.pi;
return "<CRECT: x=%1.2f[m] y=%1.2f[m] z=%1.2f[m]>" % (self.x, self.y, self.z)
def aslist(self):
return [
self.x,
self.y,
self.z
];
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CNAVFUNC:
"""
Geodetic Funcions
"""
# Earth Elliptic Model #
earth_a = 6378137.0; # [m]
earth_b = 6356752.3142; # [m]
wie = 1.0 * 7.2921151467e-5;
wie_e = np.asarray([0,0,wie]).reshape((3,1));
earth_f = (earth_a-earth_b)/earth_a;
earth_e = sqrt(earth_f*(2.0-earth_f));
earth_e2 = (earth_e**2.0);
def __init__(self):
pass
def Rlambda(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_lbd
"""
return (self.earth_a*(1.-self.earth_e2)) / ((1.-(self.earth_e2*(sin(lat_rad)**2)))**1.5);
def Rphi(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_phi
"""
return self.earth_a / sqrt(1.-(self.earth_e2*(sin(lat_rad)**2.0)));
def euler2Q(self, (phi, theta, psi)):
"""
Navigation -- from euler to Q.
: parameter : phi [rad]
: parameter : theta [rad]
: parameter : psi [rad]
: output : Q4
"""
half_phi = 0.5*phi;
half_theta = 0.5*theta;
half_psi = 0.5*psi;
return np.asarray([
(cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),
(sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),
(cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),
(cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))
]);
def Q2euler(self, q):
"""
Navigation -- from Q to euler.
: input : q
: output : phi [rad]
: output : theta [rad]
: output : psi [rad]
"""
phi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));
psi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));
try:
theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));
except ValueError:
print "ERRO: norm(Q) = %f" % np.sqrt(np.sum(q**2))
theta = 0;
return (phi, theta, psi)
def Q2C(self, q):
"""
Navigation -- from Q to C.
If Q represents the transformation from 'a' to 'b', the matrix
'C' represents 'Ca2b'.
: input : q
: output : C
"""
#q = q.squeeze();
C = np.empty((3,3));
C[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);
C[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));
C[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));
C[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));
C[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);
C[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));
C[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));
C[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));
C[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);
return C
def C2Q(self, C):
"""
Navigation -- from C to Q
output: nparray() with Q
"""
return self.euler2Q(self.C2euler(C))
def C2euler(self, C):
"""
Navigation -- from C to (phi,theta,psi)[rad]
output: tuple with angles in [rad]
"""
assert(C[2,2] != 0)
assert(C[0,0] != 0)
assert(C[0,2]>=-1 and C[0,2]<=1)
phi = np.arctan2(C[1,2], C[2,2])
theta = np.arcsin(-C[0,2])
psi = np.arctan2(C[0,1], C[0,0])
return (phi, theta, psi)
def q1_prod_q2(self, q1, q2):
"""
Navigation -- multiplies two quaternions
Let q1 represent C_a2b, and q2 represent C_b2c.
The product C_a2c = C_b2c.C_a2b might be represented
by q3 = q1.q2
output: np.array quaternion q3=q1.q2
"""
q3 = np.array([
(q1[0]*q2[0])-(q2[1]*q1[1])-(q2[2]*q1[2])-(q2[3]*q1[3]),
(q2[0]*q1[1])+(q2[1]*q1[0])+(q2[2]*q1[3])-(q2[3]*q1[2]),
(q2[0]*q1[2])+(q2[2]*q1[0])-(q2[1]*q1[3])+(q2[3]*q1[1]),
(q2[0]*q1[3])+(q2[3]*q1[0])+(q2[1]*q1[2])-(q2[2]*q1[1])
])
return q3
def matrix_Q2euler(self, q):
"""
Converts a matrix with quaternions (N x 4) to euler angles (N x 3).
"""
N = q.shape[0]
dcm = np.zeros((N, 3))
for i in range(N):
phi,theta,psi = self.Q2euler(q[i,:])
dcm[i,:] = [phi, theta, psi]
return dcm
def Re2n(self, lat, lon):
"""
Navigation -- calculates Re2n(lat,lon)
: input : lat [rad]
: input : lon [rad]
: output : Re2n
"""
Re2n = np.empty((3,3));
Re2n[0,0] = -sin(lat)*cos(lon);
Re2n[0,1] = -sin(lat)*sin(lon);
Re2n[0,2] = cos(lat);
Re2n[1,0] = -sin(lon);
Re2n[1,1] = cos(lon);
Re2n[1,2] = 0;
Re2n[2,0] = -cos(lat)*cos(lon);
Re2n[2,1] = -cos(lat)*sin(lon);
Re2n[2,2] = -sin(lat);
return Re2n
def geo2rect(self, geo):
"""
Converter coordenadas ECEF geodeticas para retangulares.
pgeo [in] Coordenadas geodeticas.
prect [out] Coordenadas retangulares.
"""
s = sin(geo.lat);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
return CRECT((
(RN + geo.h) * cos(geo.lat) * cos(geo.lon),
(RN + geo.h) * cos(geo.lat) * sin(geo.lon),
((RN * (1.0 - self.earth_e2)) + geo.h) * sin(geo.lat)
))
def rect2geo(self, rect):
"""
Converter coordenadas ECEF retangulares para geodeticas.
pgeo [out] Coordenadas geodeticas.
prect [in] Coordenadas retangulares.
"""
p = sqrt((rect.x * rect.x) + (rect.y * rect.y));
geo = CGEO(0,0,0);
geo.h = 0;
RN = self.earth_a;
for i in range(100): # timeout
#print "[lat h] = [%1.09f %1.03f]" % (geo.lat, geo.h)
lastlat = geo.lat;
lasth = geo.h;
# algoritmo de conversao:
s = rect.z / (((1.0 - self.earth_e2) * RN) + geo.h);
geo.lat = mt.atan((rect.z + (self.earth_e2 * RN * s)) / p);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
geo.h = (p / cos(geo.lat)) - RN;
# erro:
d = ((lastlat - geo.lat) * (lastlat - geo.lat)) + ((lasth - geo.h) * (lasth - geo.h));
if (d < 1e-9):
break;
geo.lon = mt.atan2(rect.y, rect.x);
return geo
def dqdt(self, q, w):
"""
The derivative of the quaternions is $\dot{q} = 1/2 .B(w).q$
This funtion returns $\dot{q}$.
"""
K = 1e1
cq = np.asarray(q).reshape((4,1))
epslon = 1.0 - np.sum(cq**2.0)
B = np.asarray([
[ 0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0]
])
dq = (0.5 * np.dot(B,cq)) + (K*epslon*cq)
return list(dq.squeeze())
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
if (__name__ == "__main__"):
g = CNAVFUNC();
print g.Rlambda(20./57)
print g.Rphi(30./57)
q = g.euler2Q((10./57, 20./57, -30./57))
print np.asarray(g.Q2euler(q))*57
print g.Q2C(q)
print g.Re2n(0,0)
print g.Re2n(1,0.9)
geo = CGEO(10./57, -30./57, 33);
print geo
rec = g.geo2rect(geo)
print rec
geo = g.rect2geo(rec)
print geo
#----------------------#
# some dynamic tests:
#----------------------#
from scipy.integrate import odeint;
from numpy import dot;
print
# I: inertial frame
# b: body frame
qI2b = g.euler2Q((0,0,0))
# angular rotation between I and b:
# \omega_{Ib}^I
w = np.asarray([2./57, 0, 0]).reshape((3,1))
def eqdiff(q,t,w):
RI2b = g.Q2C(q)
dqdt = g.dqdt(q, dot(RI2b,w))
return dqdt
# a vector described at I:
F = np.asarray([0,0,1]).reshape((3,1))
print "F = "
print F.T
for t in [1,5,20,90]:
# after t seconds, the quaternions should be:
y = odeint(eqdiff, list(qI2b), [0,t], (w,))[1,:]
# with these euler angles:
euler = g.Q2euler(y)
# and described at b:
F_b = dot(g.Q2C(y), F)
print "F_b(phi = %1.03f) = [%1.03f %1.03f %1.03f]" % (
57.*euler[0], F_b[0], F_b[1], F_b[2])
#----------------------#
# some convertion tests:
#----------------------#
euler = (10./57, -40./57, 163./57)
Q = g.euler2Q(euler)
print "euler = "
print np.degrees(euler)
print np.degrees(g.Q2euler(g.euler2Q(euler)))
euler_2 = g.Q2euler(g.C2Q(g.Q2C(g.euler2Q(euler))))
print np.degrees(np.asarray(euler_2))
#----------------------#
# quaternion product:
#----------------------#
print
print "quaternion product"
q_a2b = g.euler2Q((-10., 33., -55.))
q_b2c = g.euler2Q((44., -38., 77.))
print "C_a2c = C_b2c . Ci_a2b ="
print dot(g.Q2C(q_b2c), g.Q2C(q_a2b))
print "C(q_b2c . qa2b) ="
print g.Q2C(g.q1_prod_q2(q_b2c, q_a2b))
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
| __repr__ | identifier_name |
navfunc.py | #!/usr/bin/python
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
# author: Luciano Augusto Kruk
# website: www.kruk.eng.br
#
# description: Package of functions for quaternions and
# geodetic coordinates handling.
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
import numpy as np
import math as mt
from numpy import zeros,sin,cos,empty,sqrt;
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CGEO:
def __init__(self, lat, lon, h):
self.lat = lat;
self.lon = lon;
self.h = h;
def __repr__(self):
K = 180./mt.pi;
return "<CGEO: lat=%1.2f[deg] lon=%1.2f[deg] h=%1.1f[m]>" % (self.lat*K, self.lon*K, self.h)
class CRECT:
def __init__(self, r_e):
if type(r_e) is np.ndarray:
self.x = r_e.squeeze()[0];
self.y = r_e.squeeze()[1];
self.z = r_e.squeeze()[2];
elif type(r_e) in (tuple, list):
|
else:
print "ainda nao suportado!"
def __repr__(self):
K = 180./mt.pi;
return "<CRECT: x=%1.2f[m] y=%1.2f[m] z=%1.2f[m]>" % (self.x, self.y, self.z)
def aslist(self):
return [
self.x,
self.y,
self.z
];
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
class CNAVFUNC:
"""
Geodetic Funcions
"""
# Earth Elliptic Model #
earth_a = 6378137.0; # [m]
earth_b = 6356752.3142; # [m]
wie = 1.0 * 7.2921151467e-5;
wie_e = np.asarray([0,0,wie]).reshape((3,1));
earth_f = (earth_a-earth_b)/earth_a;
earth_e = sqrt(earth_f*(2.0-earth_f));
earth_e2 = (earth_e**2.0);
def __init__(self):
pass
def Rlambda(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_lbd
"""
return (self.earth_a*(1.-self.earth_e2)) / ((1.-(self.earth_e2*(sin(lat_rad)**2)))**1.5);
def Rphi(self, lat_rad):
"""
: parameter : lat_rad [rad] latitude
: output : R_phi
"""
return self.earth_a / sqrt(1.-(self.earth_e2*(sin(lat_rad)**2.0)));
def euler2Q(self, (phi, theta, psi)):
"""
Navigation -- from euler to Q.
: parameter : phi [rad]
: parameter : theta [rad]
: parameter : psi [rad]
: output : Q4
"""
half_phi = 0.5*phi;
half_theta = 0.5*theta;
half_psi = 0.5*psi;
return np.asarray([
(cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),
(sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),
(cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),
(cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))
]);
def Q2euler(self, q):
"""
Navigation -- from Q to euler.
: input : q
: output : phi [rad]
: output : theta [rad]
: output : psi [rad]
"""
phi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));
psi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));
try:
theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));
except ValueError:
print "ERRO: norm(Q) = %f" % np.sqrt(np.sum(q**2))
theta = 0;
return (phi, theta, psi)
def Q2C(self, q):
"""
Navigation -- from Q to C.
If Q represents the transformation from 'a' to 'b', the matrix
'C' represents 'Ca2b'.
: input : q
: output : C
"""
#q = q.squeeze();
C = np.empty((3,3));
C[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);
C[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));
C[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));
C[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));
C[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);
C[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));
C[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));
C[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));
C[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);
return C
def C2Q(self, C):
"""
Navigation -- from C to Q
output: nparray() with Q
"""
return self.euler2Q(self.C2euler(C))
def C2euler(self, C):
"""
Navigation -- from C to (phi,theta,psi)[rad]
output: tuple with angles in [rad]
"""
assert(C[2,2] != 0)
assert(C[0,0] != 0)
assert(C[0,2]>=-1 and C[0,2]<=1)
phi = np.arctan2(C[1,2], C[2,2])
theta = np.arcsin(-C[0,2])
psi = np.arctan2(C[0,1], C[0,0])
return (phi, theta, psi)
def q1_prod_q2(self, q1, q2):
"""
Navigation -- multiplies two quaternions
Let q1 represent C_a2b, and q2 represent C_b2c.
The product C_a2c = C_b2c.C_a2b might be represented
by q3 = q1.q2
output: np.array quaternion q3=q1.q2
"""
q3 = np.array([
(q1[0]*q2[0])-(q2[1]*q1[1])-(q2[2]*q1[2])-(q2[3]*q1[3]),
(q2[0]*q1[1])+(q2[1]*q1[0])+(q2[2]*q1[3])-(q2[3]*q1[2]),
(q2[0]*q1[2])+(q2[2]*q1[0])-(q2[1]*q1[3])+(q2[3]*q1[1]),
(q2[0]*q1[3])+(q2[3]*q1[0])+(q2[1]*q1[2])-(q2[2]*q1[1])
])
return q3
def matrix_Q2euler(self, q):
"""
Converts a matrix with quaternions (N x 4) to euler angles (N x 3).
"""
N = q.shape[0]
dcm = np.zeros((N, 3))
for i in range(N):
phi,theta,psi = self.Q2euler(q[i,:])
dcm[i,:] = [phi, theta, psi]
return dcm
def Re2n(self, lat, lon):
"""
Navigation -- calculates Re2n(lat,lon)
: input : lat [rad]
: input : lon [rad]
: output : Re2n
"""
Re2n = np.empty((3,3));
Re2n[0,0] = -sin(lat)*cos(lon);
Re2n[0,1] = -sin(lat)*sin(lon);
Re2n[0,2] = cos(lat);
Re2n[1,0] = -sin(lon);
Re2n[1,1] = cos(lon);
Re2n[1,2] = 0;
Re2n[2,0] = -cos(lat)*cos(lon);
Re2n[2,1] = -cos(lat)*sin(lon);
Re2n[2,2] = -sin(lat);
return Re2n
def geo2rect(self, geo):
"""
Converter coordenadas ECEF geodeticas para retangulares.
pgeo [in] Coordenadas geodeticas.
prect [out] Coordenadas retangulares.
"""
s = sin(geo.lat);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
return CRECT((
(RN + geo.h) * cos(geo.lat) * cos(geo.lon),
(RN + geo.h) * cos(geo.lat) * sin(geo.lon),
((RN * (1.0 - self.earth_e2)) + geo.h) * sin(geo.lat)
))
def rect2geo(self, rect):
"""
Converter coordenadas ECEF retangulares para geodeticas.
pgeo [out] Coordenadas geodeticas.
prect [in] Coordenadas retangulares.
"""
p = sqrt((rect.x * rect.x) + (rect.y * rect.y));
geo = CGEO(0,0,0);
geo.h = 0;
RN = self.earth_a;
for i in range(100): # timeout
#print "[lat h] = [%1.09f %1.03f]" % (geo.lat, geo.h)
lastlat = geo.lat;
lasth = geo.h;
# algoritmo de conversao:
s = rect.z / (((1.0 - self.earth_e2) * RN) + geo.h);
geo.lat = mt.atan((rect.z + (self.earth_e2 * RN * s)) / p);
RN = self.earth_a / sqrt(1.0 - (self.earth_e2 * s * s));
geo.h = (p / cos(geo.lat)) - RN;
# erro:
d = ((lastlat - geo.lat) * (lastlat - geo.lat)) + ((lasth - geo.h) * (lasth - geo.h));
if (d < 1e-9):
break;
geo.lon = mt.atan2(rect.y, rect.x);
return geo
def dqdt(self, q, w):
"""
The derivative of the quaternions is $\dot{q} = 1/2 .B(w).q$
This funtion returns $\dot{q}$.
"""
K = 1e1
cq = np.asarray(q).reshape((4,1))
epslon = 1.0 - np.sum(cq**2.0)
B = np.asarray([
[ 0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0]
])
dq = (0.5 * np.dot(B,cq)) + (K*epslon*cq)
return list(dq.squeeze())
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
if (__name__ == "__main__"):
g = CNAVFUNC();
print g.Rlambda(20./57)
print g.Rphi(30./57)
q = g.euler2Q((10./57, 20./57, -30./57))
print np.asarray(g.Q2euler(q))*57
print g.Q2C(q)
print g.Re2n(0,0)
print g.Re2n(1,0.9)
geo = CGEO(10./57, -30./57, 33);
print geo
rec = g.geo2rect(geo)
print rec
geo = g.rect2geo(rec)
print geo
#----------------------#
# some dynamic tests:
#----------------------#
from scipy.integrate import odeint;
from numpy import dot;
print
# I: inertial frame
# b: body frame
qI2b = g.euler2Q((0,0,0))
# angular rotation between I and b:
# \omega_{Ib}^I
w = np.asarray([2./57, 0, 0]).reshape((3,1))
def eqdiff(q,t,w):
RI2b = g.Q2C(q)
dqdt = g.dqdt(q, dot(RI2b,w))
return dqdt
# a vector described at I:
F = np.asarray([0,0,1]).reshape((3,1))
print "F = "
print F.T
for t in [1,5,20,90]:
# after t seconds, the quaternions should be:
y = odeint(eqdiff, list(qI2b), [0,t], (w,))[1,:]
# with these euler angles:
euler = g.Q2euler(y)
# and described at b:
F_b = dot(g.Q2C(y), F)
print "F_b(phi = %1.03f) = [%1.03f %1.03f %1.03f]" % (
57.*euler[0], F_b[0], F_b[1], F_b[2])
#----------------------#
# some convertion tests:
#----------------------#
euler = (10./57, -40./57, 163./57)
Q = g.euler2Q(euler)
print "euler = "
print np.degrees(euler)
print np.degrees(g.Q2euler(g.euler2Q(euler)))
euler_2 = g.Q2euler(g.C2Q(g.Q2C(g.euler2Q(euler))))
print np.degrees(np.asarray(euler_2))
#----------------------#
# quaternion product:
#----------------------#
print
print "quaternion product"
q_a2b = g.euler2Q((-10., 33., -55.))
q_b2c = g.euler2Q((44., -38., 77.))
print "C_a2c = C_b2c . Ci_a2b ="
print dot(g.Q2C(q_b2c), g.Q2C(q_a2b))
print "C(q_b2c . qa2b) ="
print g.Q2C(g.q1_prod_q2(q_b2c, q_a2b))
#>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>--<<..>>
| self.x = r_e[0]
self.y = r_e[1]
self.z = r_e[2] | conditional_block |
cluster_save_all_model_flam.py | from __future__ import division
import numpy as np
from numpy import nansum
from scipy.interpolate import griddata
import multiprocessing as mp
from scipy.integrate import simps
import os
import sys
import time
import datetime
# Get data and filter curve directories
if 'agave' in os.uname()[1]:
figs_data_dir = "/home/bajoshi/models_and_photometry/"
cluster_spz_scripts = "/home/bajoshi/spz_scripts/"
filter_curve_dir = figs_data_dir + 'filter_curves/'
elif 'firstlight' in os.uname()[1]:
figs_data_dir = '/Users/baj/Desktop/FIGS/'
cluster_spz_scripts = '/Users/baj/Desktop/FIGS/massive-galaxies/cluster_codes/'
filter_curve_dir = figs_data_dir + 'massive-galaxies/grismz_pipeline/'
else: # If running this code on the laptop
filter_curve_dir = '/Users/bhavinjoshi/Desktop/FIGS/massive-galaxies/grismz_pipeline/'
figs_data_dir = '/Volumes/Bhavins_backup/bc03_models_npy_spectra/'
import cluster_do_fitting as cf
def create_dl_lookup_table(zrange):
print "Creating lookup table for luminosity distances."
dl_mpc = np.zeros(len(zrange))
dl_cm = np.zeros(len(zrange))
for j in range(len(zrange)):
z = zrange[j]
dl_mpc[j] = cf.get_lum_dist(z) # in Mpc
dl_cm[j] = dl_mpc[j] * 3.086e24 # convert Mpc to cm |
# Save a txt file
data = np.array(zip(zrange, dl_mpc, dl_cm), dtype=[('zrange', float), ('dl_mpc', float), ('dl_cm', float)])
np.savetxt('dl_lookup_table.txt', data, fmt=['%.3f', '%.6e', '%.6e'], delimiter=' ', header='z dl_mpc dl_cm')
print "Luminosity distance lookup table saved in txt file.",
print "In same folder as this code."
return None
def compute_filter_flam(filt, filtername, start, model_comp_spec, model_lam_grid, \
total_models, zrange, dl_tbl):
print "\n", "Working on filter:", filtername
if filtername == 'u':
filt['trans'] /= 100.0 # They've given throughput percentages for the u-band
filt_flam_model = np.zeros(shape=(len(zrange), total_models), dtype=np.float64)
"""
print model_comp_spec.nbytes / (1024 * 1024)
print model_lam_grid.nbytes / (1024 * 1024)
print filt_flam_model.nbytes / (1024 * 1024)
print zrange.nbytes / (1024 * 1024)
print sys.getsizeof(filt) / (1024 * 1024)
sys.exit(0)
"""
for j in range(len(zrange)):
z = zrange[j]
#print "At z:", z
# ------------------------------ Now compute model filter magnitudes ------------------------------ #
# Redshift the base models
dl = dl_tbl['dl_cm'][j] # has to be in cm
#print "Lum dist [cm]:", dl
#model_comp_spec_z = model_comp_spec / (4 * np.pi * dl * dl * (1+z))
model_lam_grid_z = model_lam_grid * (1+z)
# first interpolate the transmission curve to the model lam grid
filt_interp = griddata(points=filt['wav'], values=filt['trans'], xi=model_lam_grid_z, \
method='linear')
# Set nan values in interpolated filter to 0.0
filt_nan_idx = np.where(np.isnan(filt_interp))[0]
filt_interp[filt_nan_idx] = 0.0
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(filt['wav'], filt['trans'])
ax.plot(model_lam_grid_z, filt_interp)
plt.show()
sys.exit(0)
"""
# multiply model spectrum to filter curve
#num_vec = nansum(model_comp_spec * filt_interp / (4 * np.pi * dl * dl * (1+z)), axis=1)
den = simps(y=filt_interp, x=model_lam_grid_z)
for i in range(total_models):
num = simps(y=model_comp_spec[i] * filt_interp / (4 * np.pi * dl * dl * (1+z)), x=model_lam_grid_z)
filt_flam_model[j, i] = num / den
# transverse array to make shape consistent with others
# I did it this way so that in the above for loop each filter is looped over only once
# i.e. minimizing the number of times each filter is gridded on to the model grid
#filt_flam_model_t = filt_flam_model.T
# save the model flux densities
np.save(figs_data_dir + 'all_model_flam_' + filtername + '_chab.npy', filt_flam_model)
print "Computation done and saved for:", filtername,
print "Total time taken:", time.time() - start
del filt_flam_model
return None
def main():
# Start time
start = time.time()
dt = datetime.datetime
print "Starting at --", dt.now()
# Redshift grid for models
zrange = np.arange(0.005, 6.005, 0.005)
print "Redshift grid for models:"
print zrange
# Read in lookup table for luminosity distances
if not os.path.isfile('dl_lookup_table.txt'):
create_dl_lookup_table(zrange)
dl_tbl = np.genfromtxt('dl_lookup_table.txt', dtype=None, names=True)
# Read in models with emission lines adn put in numpy array
total_models = 37761
chosen_imf = 'Chabrier'
if chosen_imf == 'Salpeter':
cspout_str = ''
elif chosen_imf == 'Chabrier':
cspout_str = '_chabrier'
# Read model lambda grid # In agnstroms
model_lam_grid_withlines_mmap = np.load(figs_data_dir + 'model_lam_grid_withlines' + cspout_str + '.npy', mmap_mode='r')
"""
# This is already done for the chabrier models by default
if not os.path.isfile(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy'):
print "Convert models to physical units."
# ---------------- This block only needs to be run once ---------------- #
model_comp_spec_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_withlines' + cspout_str + '.npy', mmap_mode='r')
# Convert model spectra to correct L_lambda units i.e., erg s^-1 A^-1
# They are given in units of solar luminosity per angstrom
# Therefore they need to be multiplied by L_sol = 3.826e33 erg s^-1
L_sol = 3.826e33
model_comp_spec_llam = model_comp_spec_withlines_mmap * L_sol
np.save(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', model_comp_spec_llam)
del model_comp_spec_withlines_mmap
# ---------------- End of code block to convert to Llam ---------------- #
"""
# Now read the model spectra # In erg s^-1 A^-1
model_comp_spec_llam_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', mmap_mode='r')
# total run time up to now
print "All models now in numpy array and have emission lines. Total time taken up to now --",
print time.time() - start, "seconds."
# ------------------------------- Read in filter curves ------------------------------- #
"""
The HST filters, in their PYSYNPHOT form, cannot be used in conjunction with
the joblib module. So this function will read them and save them into numpy arrays.
This function has to be run once to convert the HST filters to text files that
can be read with genfromtxt.
"""
#save_hst_filters_to_npy()
uband_curve = np.genfromtxt(filter_curve_dir + 'kpno_mosaic_u.txt', dtype=None, \
names=['wav', 'trans'], skip_header=14)
f435w_filt_curve = np.genfromtxt(filter_curve_dir + 'f435w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f606w_filt_curve = np.genfromtxt(filter_curve_dir + 'f606w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f775w_filt_curve = np.genfromtxt(filter_curve_dir + 'f775w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f850lp_filt_curve = np.genfromtxt(filter_curve_dir + 'f850lp_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f125w_filt_curve = np.genfromtxt(filter_curve_dir + 'f125w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f140w_filt_curve = np.genfromtxt(filter_curve_dir + 'f140w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f160w_filt_curve = np.genfromtxt(filter_curve_dir + 'f160w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
irac1_curve = np.genfromtxt(filter_curve_dir + 'irac1.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac2_curve = np.genfromtxt(filter_curve_dir + 'irac2.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac3_curve = np.genfromtxt(filter_curve_dir + 'irac3.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac4_curve = np.genfromtxt(filter_curve_dir + 'irac4.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
# IRAC wavelengths are in mixrons # convert to angstroms
irac1_curve['wav'] *= 1e4
irac2_curve['wav'] *= 1e4
irac3_curve['wav'] *= 1e4
irac4_curve['wav'] *= 1e4
all_filters = [uband_curve, f435w_filt_curve, f606w_filt_curve, f775w_filt_curve, f850lp_filt_curve, \
f125w_filt_curve, f140w_filt_curve, f160w_filt_curve, irac1_curve, irac2_curve, irac3_curve, irac4_curve]
all_filter_names = ['u', 'f435w', 'f606w', 'f775w', 'f850lp', \
'f125w', 'f140w', 'f160w', 'irac1', 'irac2', 'irac3', 'irac4']
# Loop over all redshifts and filters and compute magnitudes
max_cores = len(all_filters)
for i in range(int(np.ceil(len(all_filters)/max_cores))):
jmin = i*max_cores
jmax = (i+1)*max_cores
if jmax > len(all_filters):
jmax = len(all_filters)
# Will use as many cores as filters
processes = [mp.Process(target=compute_filter_flam, args=(all_filters[j], all_filter_names[j], start, \
model_comp_spec_llam_withlines_mmap, model_lam_grid_withlines_mmap, total_models, zrange, dl_tbl)) \
for j in range(len(all_filters[jmin:jmax]))]
for p in processes:
p.start()
print "Current process ID:", p.pid
for p in processes:
p.join()
print "Finished with filters:", all_filter_names[jmin:jmax]
# This is for agave because sometimes it will exit the for loop
# above without going through all the filters. So you don't
# want agave combining them in the wrong shape.
# SImply comment out the above part and run the block below
# once the flam computation is done.
print "Now combining all filter computations into a single npy file."
# Read in all individual filter flam
u = np.load(figs_data_dir + 'all_model_flam_u_chab.npy')
f435w = np.load(figs_data_dir + 'all_model_flam_f435w_chab.npy')
f606w = np.load(figs_data_dir + 'all_model_flam_f606w_chab.npy')
f775w = np.load(figs_data_dir + 'all_model_flam_f775w_chab.npy')
f850lp = np.load(figs_data_dir + 'all_model_flam_f850lp_chab.npy')
f125w = np.load(figs_data_dir + 'all_model_flam_f125w_chab.npy')
f140w = np.load(figs_data_dir + 'all_model_flam_f140w_chab.npy')
f160w = np.load(figs_data_dir + 'all_model_flam_f160w_chab.npy')
irac1 = np.load(figs_data_dir + 'all_model_flam_irac1_chab.npy')
irac2 = np.load(figs_data_dir + 'all_model_flam_irac2_chab.npy')
irac3 = np.load(figs_data_dir + 'all_model_flam_irac3_chab.npy')
irac4 = np.load(figs_data_dir + 'all_model_flam_irac4_chab.npy')
# now loop over all and write final output
all_indiv_flam = [u, f435w, f606w, f775w, f850lp, f125w, f140w, f160w, irac1, irac2, irac3, irac4]
all_model_flam = np.zeros(shape=(12, 1200, 37761))
for k in range(len(all_indiv_flam)):
all_model_flam[k] = all_indiv_flam[k]
np.save(figs_data_dir + 'all_model_flam_chabrier.npy', all_model_flam)
print "All done. Total time taken:", time.time() - start
return None
if __name__ == '__main__':
main()
sys.exit(0) | random_line_split | |
cluster_save_all_model_flam.py | from __future__ import division
import numpy as np
from numpy import nansum
from scipy.interpolate import griddata
import multiprocessing as mp
from scipy.integrate import simps
import os
import sys
import time
import datetime
# Get data and filter curve directories
if 'agave' in os.uname()[1]:
figs_data_dir = "/home/bajoshi/models_and_photometry/"
cluster_spz_scripts = "/home/bajoshi/spz_scripts/"
filter_curve_dir = figs_data_dir + 'filter_curves/'
elif 'firstlight' in os.uname()[1]:
figs_data_dir = '/Users/baj/Desktop/FIGS/'
cluster_spz_scripts = '/Users/baj/Desktop/FIGS/massive-galaxies/cluster_codes/'
filter_curve_dir = figs_data_dir + 'massive-galaxies/grismz_pipeline/'
else: # If running this code on the laptop
filter_curve_dir = '/Users/bhavinjoshi/Desktop/FIGS/massive-galaxies/grismz_pipeline/'
figs_data_dir = '/Volumes/Bhavins_backup/bc03_models_npy_spectra/'
import cluster_do_fitting as cf
def create_dl_lookup_table(zrange):
print "Creating lookup table for luminosity distances."
dl_mpc = np.zeros(len(zrange))
dl_cm = np.zeros(len(zrange))
for j in range(len(zrange)):
z = zrange[j]
dl_mpc[j] = cf.get_lum_dist(z) # in Mpc
dl_cm[j] = dl_mpc[j] * 3.086e24 # convert Mpc to cm
# Save a txt file
data = np.array(zip(zrange, dl_mpc, dl_cm), dtype=[('zrange', float), ('dl_mpc', float), ('dl_cm', float)])
np.savetxt('dl_lookup_table.txt', data, fmt=['%.3f', '%.6e', '%.6e'], delimiter=' ', header='z dl_mpc dl_cm')
print "Luminosity distance lookup table saved in txt file.",
print "In same folder as this code."
return None
def compute_filter_flam(filt, filtername, start, model_comp_spec, model_lam_grid, \
total_models, zrange, dl_tbl):
print "\n", "Working on filter:", filtername
if filtername == 'u':
filt['trans'] /= 100.0 # They've given throughput percentages for the u-band
filt_flam_model = np.zeros(shape=(len(zrange), total_models), dtype=np.float64)
"""
print model_comp_spec.nbytes / (1024 * 1024)
print model_lam_grid.nbytes / (1024 * 1024)
print filt_flam_model.nbytes / (1024 * 1024)
print zrange.nbytes / (1024 * 1024)
print sys.getsizeof(filt) / (1024 * 1024)
sys.exit(0)
"""
for j in range(len(zrange)):
z = zrange[j]
#print "At z:", z
# ------------------------------ Now compute model filter magnitudes ------------------------------ #
# Redshift the base models
dl = dl_tbl['dl_cm'][j] # has to be in cm
#print "Lum dist [cm]:", dl
#model_comp_spec_z = model_comp_spec / (4 * np.pi * dl * dl * (1+z))
model_lam_grid_z = model_lam_grid * (1+z)
# first interpolate the transmission curve to the model lam grid
filt_interp = griddata(points=filt['wav'], values=filt['trans'], xi=model_lam_grid_z, \
method='linear')
# Set nan values in interpolated filter to 0.0
filt_nan_idx = np.where(np.isnan(filt_interp))[0]
filt_interp[filt_nan_idx] = 0.0
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(filt['wav'], filt['trans'])
ax.plot(model_lam_grid_z, filt_interp)
plt.show()
sys.exit(0)
"""
# multiply model spectrum to filter curve
#num_vec = nansum(model_comp_spec * filt_interp / (4 * np.pi * dl * dl * (1+z)), axis=1)
den = simps(y=filt_interp, x=model_lam_grid_z)
for i in range(total_models):
|
# transverse array to make shape consistent with others
# I did it this way so that in the above for loop each filter is looped over only once
# i.e. minimizing the number of times each filter is gridded on to the model grid
#filt_flam_model_t = filt_flam_model.T
# save the model flux densities
np.save(figs_data_dir + 'all_model_flam_' + filtername + '_chab.npy', filt_flam_model)
print "Computation done and saved for:", filtername,
print "Total time taken:", time.time() - start
del filt_flam_model
return None
def main():
# Start time
start = time.time()
dt = datetime.datetime
print "Starting at --", dt.now()
# Redshift grid for models
zrange = np.arange(0.005, 6.005, 0.005)
print "Redshift grid for models:"
print zrange
# Read in lookup table for luminosity distances
if not os.path.isfile('dl_lookup_table.txt'):
create_dl_lookup_table(zrange)
dl_tbl = np.genfromtxt('dl_lookup_table.txt', dtype=None, names=True)
# Read in models with emission lines adn put in numpy array
total_models = 37761
chosen_imf = 'Chabrier'
if chosen_imf == 'Salpeter':
cspout_str = ''
elif chosen_imf == 'Chabrier':
cspout_str = '_chabrier'
# Read model lambda grid # In agnstroms
model_lam_grid_withlines_mmap = np.load(figs_data_dir + 'model_lam_grid_withlines' + cspout_str + '.npy', mmap_mode='r')
"""
# This is already done for the chabrier models by default
if not os.path.isfile(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy'):
print "Convert models to physical units."
# ---------------- This block only needs to be run once ---------------- #
model_comp_spec_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_withlines' + cspout_str + '.npy', mmap_mode='r')
# Convert model spectra to correct L_lambda units i.e., erg s^-1 A^-1
# They are given in units of solar luminosity per angstrom
# Therefore they need to be multiplied by L_sol = 3.826e33 erg s^-1
L_sol = 3.826e33
model_comp_spec_llam = model_comp_spec_withlines_mmap * L_sol
np.save(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', model_comp_spec_llam)
del model_comp_spec_withlines_mmap
# ---------------- End of code block to convert to Llam ---------------- #
"""
# Now read the model spectra # In erg s^-1 A^-1
model_comp_spec_llam_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', mmap_mode='r')
# total run time up to now
print "All models now in numpy array and have emission lines. Total time taken up to now --",
print time.time() - start, "seconds."
# ------------------------------- Read in filter curves ------------------------------- #
"""
The HST filters, in their PYSYNPHOT form, cannot be used in conjunction with
the joblib module. So this function will read them and save them into numpy arrays.
This function has to be run once to convert the HST filters to text files that
can be read with genfromtxt.
"""
#save_hst_filters_to_npy()
uband_curve = np.genfromtxt(filter_curve_dir + 'kpno_mosaic_u.txt', dtype=None, \
names=['wav', 'trans'], skip_header=14)
f435w_filt_curve = np.genfromtxt(filter_curve_dir + 'f435w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f606w_filt_curve = np.genfromtxt(filter_curve_dir + 'f606w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f775w_filt_curve = np.genfromtxt(filter_curve_dir + 'f775w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f850lp_filt_curve = np.genfromtxt(filter_curve_dir + 'f850lp_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f125w_filt_curve = np.genfromtxt(filter_curve_dir + 'f125w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f140w_filt_curve = np.genfromtxt(filter_curve_dir + 'f140w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f160w_filt_curve = np.genfromtxt(filter_curve_dir + 'f160w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
irac1_curve = np.genfromtxt(filter_curve_dir + 'irac1.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac2_curve = np.genfromtxt(filter_curve_dir + 'irac2.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac3_curve = np.genfromtxt(filter_curve_dir + 'irac3.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac4_curve = np.genfromtxt(filter_curve_dir + 'irac4.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
# IRAC wavelengths are in mixrons # convert to angstroms
irac1_curve['wav'] *= 1e4
irac2_curve['wav'] *= 1e4
irac3_curve['wav'] *= 1e4
irac4_curve['wav'] *= 1e4
all_filters = [uband_curve, f435w_filt_curve, f606w_filt_curve, f775w_filt_curve, f850lp_filt_curve, \
f125w_filt_curve, f140w_filt_curve, f160w_filt_curve, irac1_curve, irac2_curve, irac3_curve, irac4_curve]
all_filter_names = ['u', 'f435w', 'f606w', 'f775w', 'f850lp', \
'f125w', 'f140w', 'f160w', 'irac1', 'irac2', 'irac3', 'irac4']
# Loop over all redshifts and filters and compute magnitudes
max_cores = len(all_filters)
for i in range(int(np.ceil(len(all_filters)/max_cores))):
jmin = i*max_cores
jmax = (i+1)*max_cores
if jmax > len(all_filters):
jmax = len(all_filters)
# Will use as many cores as filters
processes = [mp.Process(target=compute_filter_flam, args=(all_filters[j], all_filter_names[j], start, \
model_comp_spec_llam_withlines_mmap, model_lam_grid_withlines_mmap, total_models, zrange, dl_tbl)) \
for j in range(len(all_filters[jmin:jmax]))]
for p in processes:
p.start()
print "Current process ID:", p.pid
for p in processes:
p.join()
print "Finished with filters:", all_filter_names[jmin:jmax]
# This is for agave because sometimes it will exit the for loop
# above without going through all the filters. So you don't
# want agave combining them in the wrong shape.
# SImply comment out the above part and run the block below
# once the flam computation is done.
print "Now combining all filter computations into a single npy file."
# Read in all individual filter flam
u = np.load(figs_data_dir + 'all_model_flam_u_chab.npy')
f435w = np.load(figs_data_dir + 'all_model_flam_f435w_chab.npy')
f606w = np.load(figs_data_dir + 'all_model_flam_f606w_chab.npy')
f775w = np.load(figs_data_dir + 'all_model_flam_f775w_chab.npy')
f850lp = np.load(figs_data_dir + 'all_model_flam_f850lp_chab.npy')
f125w = np.load(figs_data_dir + 'all_model_flam_f125w_chab.npy')
f140w = np.load(figs_data_dir + 'all_model_flam_f140w_chab.npy')
f160w = np.load(figs_data_dir + 'all_model_flam_f160w_chab.npy')
irac1 = np.load(figs_data_dir + 'all_model_flam_irac1_chab.npy')
irac2 = np.load(figs_data_dir + 'all_model_flam_irac2_chab.npy')
irac3 = np.load(figs_data_dir + 'all_model_flam_irac3_chab.npy')
irac4 = np.load(figs_data_dir + 'all_model_flam_irac4_chab.npy')
# now loop over all and write final output
all_indiv_flam = [u, f435w, f606w, f775w, f850lp, f125w, f140w, f160w, irac1, irac2, irac3, irac4]
all_model_flam = np.zeros(shape=(12, 1200, 37761))
for k in range(len(all_indiv_flam)):
all_model_flam[k] = all_indiv_flam[k]
np.save(figs_data_dir + 'all_model_flam_chabrier.npy', all_model_flam)
print "All done. Total time taken:", time.time() - start
return None
if __name__ == '__main__':
main()
sys.exit(0) | num = simps(y=model_comp_spec[i] * filt_interp / (4 * np.pi * dl * dl * (1+z)), x=model_lam_grid_z)
filt_flam_model[j, i] = num / den | conditional_block |
cluster_save_all_model_flam.py | from __future__ import division
import numpy as np
from numpy import nansum
from scipy.interpolate import griddata
import multiprocessing as mp
from scipy.integrate import simps
import os
import sys
import time
import datetime
# Get data and filter curve directories
if 'agave' in os.uname()[1]:
figs_data_dir = "/home/bajoshi/models_and_photometry/"
cluster_spz_scripts = "/home/bajoshi/spz_scripts/"
filter_curve_dir = figs_data_dir + 'filter_curves/'
elif 'firstlight' in os.uname()[1]:
figs_data_dir = '/Users/baj/Desktop/FIGS/'
cluster_spz_scripts = '/Users/baj/Desktop/FIGS/massive-galaxies/cluster_codes/'
filter_curve_dir = figs_data_dir + 'massive-galaxies/grismz_pipeline/'
else: # If running this code on the laptop
filter_curve_dir = '/Users/bhavinjoshi/Desktop/FIGS/massive-galaxies/grismz_pipeline/'
figs_data_dir = '/Volumes/Bhavins_backup/bc03_models_npy_spectra/'
import cluster_do_fitting as cf
def create_dl_lookup_table(zrange):
print "Creating lookup table for luminosity distances."
dl_mpc = np.zeros(len(zrange))
dl_cm = np.zeros(len(zrange))
for j in range(len(zrange)):
z = zrange[j]
dl_mpc[j] = cf.get_lum_dist(z) # in Mpc
dl_cm[j] = dl_mpc[j] * 3.086e24 # convert Mpc to cm
# Save a txt file
data = np.array(zip(zrange, dl_mpc, dl_cm), dtype=[('zrange', float), ('dl_mpc', float), ('dl_cm', float)])
np.savetxt('dl_lookup_table.txt', data, fmt=['%.3f', '%.6e', '%.6e'], delimiter=' ', header='z dl_mpc dl_cm')
print "Luminosity distance lookup table saved in txt file.",
print "In same folder as this code."
return None
def compute_filter_flam(filt, filtername, start, model_comp_spec, model_lam_grid, \
total_models, zrange, dl_tbl):
|
def main():
# Start time
start = time.time()
dt = datetime.datetime
print "Starting at --", dt.now()
# Redshift grid for models
zrange = np.arange(0.005, 6.005, 0.005)
print "Redshift grid for models:"
print zrange
# Read in lookup table for luminosity distances
if not os.path.isfile('dl_lookup_table.txt'):
create_dl_lookup_table(zrange)
dl_tbl = np.genfromtxt('dl_lookup_table.txt', dtype=None, names=True)
# Read in models with emission lines adn put in numpy array
total_models = 37761
chosen_imf = 'Chabrier'
if chosen_imf == 'Salpeter':
cspout_str = ''
elif chosen_imf == 'Chabrier':
cspout_str = '_chabrier'
# Read model lambda grid # In agnstroms
model_lam_grid_withlines_mmap = np.load(figs_data_dir + 'model_lam_grid_withlines' + cspout_str + '.npy', mmap_mode='r')
"""
# This is already done for the chabrier models by default
if not os.path.isfile(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy'):
print "Convert models to physical units."
# ---------------- This block only needs to be run once ---------------- #
model_comp_spec_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_withlines' + cspout_str + '.npy', mmap_mode='r')
# Convert model spectra to correct L_lambda units i.e., erg s^-1 A^-1
# They are given in units of solar luminosity per angstrom
# Therefore they need to be multiplied by L_sol = 3.826e33 erg s^-1
L_sol = 3.826e33
model_comp_spec_llam = model_comp_spec_withlines_mmap * L_sol
np.save(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', model_comp_spec_llam)
del model_comp_spec_withlines_mmap
# ---------------- End of code block to convert to Llam ---------------- #
"""
# Now read the model spectra # In erg s^-1 A^-1
model_comp_spec_llam_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', mmap_mode='r')
# total run time up to now
print "All models now in numpy array and have emission lines. Total time taken up to now --",
print time.time() - start, "seconds."
# ------------------------------- Read in filter curves ------------------------------- #
"""
The HST filters, in their PYSYNPHOT form, cannot be used in conjunction with
the joblib module. So this function will read them and save them into numpy arrays.
This function has to be run once to convert the HST filters to text files that
can be read with genfromtxt.
"""
#save_hst_filters_to_npy()
uband_curve = np.genfromtxt(filter_curve_dir + 'kpno_mosaic_u.txt', dtype=None, \
names=['wav', 'trans'], skip_header=14)
f435w_filt_curve = np.genfromtxt(filter_curve_dir + 'f435w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f606w_filt_curve = np.genfromtxt(filter_curve_dir + 'f606w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f775w_filt_curve = np.genfromtxt(filter_curve_dir + 'f775w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f850lp_filt_curve = np.genfromtxt(filter_curve_dir + 'f850lp_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f125w_filt_curve = np.genfromtxt(filter_curve_dir + 'f125w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f140w_filt_curve = np.genfromtxt(filter_curve_dir + 'f140w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f160w_filt_curve = np.genfromtxt(filter_curve_dir + 'f160w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
irac1_curve = np.genfromtxt(filter_curve_dir + 'irac1.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac2_curve = np.genfromtxt(filter_curve_dir + 'irac2.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac3_curve = np.genfromtxt(filter_curve_dir + 'irac3.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac4_curve = np.genfromtxt(filter_curve_dir + 'irac4.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
# IRAC wavelengths are in mixrons # convert to angstroms
irac1_curve['wav'] *= 1e4
irac2_curve['wav'] *= 1e4
irac3_curve['wav'] *= 1e4
irac4_curve['wav'] *= 1e4
all_filters = [uband_curve, f435w_filt_curve, f606w_filt_curve, f775w_filt_curve, f850lp_filt_curve, \
f125w_filt_curve, f140w_filt_curve, f160w_filt_curve, irac1_curve, irac2_curve, irac3_curve, irac4_curve]
all_filter_names = ['u', 'f435w', 'f606w', 'f775w', 'f850lp', \
'f125w', 'f140w', 'f160w', 'irac1', 'irac2', 'irac3', 'irac4']
# Loop over all redshifts and filters and compute magnitudes
max_cores = len(all_filters)
for i in range(int(np.ceil(len(all_filters)/max_cores))):
jmin = i*max_cores
jmax = (i+1)*max_cores
if jmax > len(all_filters):
jmax = len(all_filters)
# Will use as many cores as filters
processes = [mp.Process(target=compute_filter_flam, args=(all_filters[j], all_filter_names[j], start, \
model_comp_spec_llam_withlines_mmap, model_lam_grid_withlines_mmap, total_models, zrange, dl_tbl)) \
for j in range(len(all_filters[jmin:jmax]))]
for p in processes:
p.start()
print "Current process ID:", p.pid
for p in processes:
p.join()
print "Finished with filters:", all_filter_names[jmin:jmax]
# This is for agave because sometimes it will exit the for loop
# above without going through all the filters. So you don't
# want agave combining them in the wrong shape.
# SImply comment out the above part and run the block below
# once the flam computation is done.
print "Now combining all filter computations into a single npy file."
# Read in all individual filter flam
u = np.load(figs_data_dir + 'all_model_flam_u_chab.npy')
f435w = np.load(figs_data_dir + 'all_model_flam_f435w_chab.npy')
f606w = np.load(figs_data_dir + 'all_model_flam_f606w_chab.npy')
f775w = np.load(figs_data_dir + 'all_model_flam_f775w_chab.npy')
f850lp = np.load(figs_data_dir + 'all_model_flam_f850lp_chab.npy')
f125w = np.load(figs_data_dir + 'all_model_flam_f125w_chab.npy')
f140w = np.load(figs_data_dir + 'all_model_flam_f140w_chab.npy')
f160w = np.load(figs_data_dir + 'all_model_flam_f160w_chab.npy')
irac1 = np.load(figs_data_dir + 'all_model_flam_irac1_chab.npy')
irac2 = np.load(figs_data_dir + 'all_model_flam_irac2_chab.npy')
irac3 = np.load(figs_data_dir + 'all_model_flam_irac3_chab.npy')
irac4 = np.load(figs_data_dir + 'all_model_flam_irac4_chab.npy')
# now loop over all and write final output
all_indiv_flam = [u, f435w, f606w, f775w, f850lp, f125w, f140w, f160w, irac1, irac2, irac3, irac4]
all_model_flam = np.zeros(shape=(12, 1200, 37761))
for k in range(len(all_indiv_flam)):
all_model_flam[k] = all_indiv_flam[k]
np.save(figs_data_dir + 'all_model_flam_chabrier.npy', all_model_flam)
print "All done. Total time taken:", time.time() - start
return None
if __name__ == '__main__':
main()
sys.exit(0) | print "\n", "Working on filter:", filtername
if filtername == 'u':
filt['trans'] /= 100.0 # They've given throughput percentages for the u-band
filt_flam_model = np.zeros(shape=(len(zrange), total_models), dtype=np.float64)
"""
print model_comp_spec.nbytes / (1024 * 1024)
print model_lam_grid.nbytes / (1024 * 1024)
print filt_flam_model.nbytes / (1024 * 1024)
print zrange.nbytes / (1024 * 1024)
print sys.getsizeof(filt) / (1024 * 1024)
sys.exit(0)
"""
for j in range(len(zrange)):
z = zrange[j]
#print "At z:", z
# ------------------------------ Now compute model filter magnitudes ------------------------------ #
# Redshift the base models
dl = dl_tbl['dl_cm'][j] # has to be in cm
#print "Lum dist [cm]:", dl
#model_comp_spec_z = model_comp_spec / (4 * np.pi * dl * dl * (1+z))
model_lam_grid_z = model_lam_grid * (1+z)
# first interpolate the transmission curve to the model lam grid
filt_interp = griddata(points=filt['wav'], values=filt['trans'], xi=model_lam_grid_z, \
method='linear')
# Set nan values in interpolated filter to 0.0
filt_nan_idx = np.where(np.isnan(filt_interp))[0]
filt_interp[filt_nan_idx] = 0.0
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(filt['wav'], filt['trans'])
ax.plot(model_lam_grid_z, filt_interp)
plt.show()
sys.exit(0)
"""
# multiply model spectrum to filter curve
#num_vec = nansum(model_comp_spec * filt_interp / (4 * np.pi * dl * dl * (1+z)), axis=1)
den = simps(y=filt_interp, x=model_lam_grid_z)
for i in range(total_models):
num = simps(y=model_comp_spec[i] * filt_interp / (4 * np.pi * dl * dl * (1+z)), x=model_lam_grid_z)
filt_flam_model[j, i] = num / den
# transverse array to make shape consistent with others
# I did it this way so that in the above for loop each filter is looped over only once
# i.e. minimizing the number of times each filter is gridded on to the model grid
#filt_flam_model_t = filt_flam_model.T
# save the model flux densities
np.save(figs_data_dir + 'all_model_flam_' + filtername + '_chab.npy', filt_flam_model)
print "Computation done and saved for:", filtername,
print "Total time taken:", time.time() - start
del filt_flam_model
return None | identifier_body |
cluster_save_all_model_flam.py | from __future__ import division
import numpy as np
from numpy import nansum
from scipy.interpolate import griddata
import multiprocessing as mp
from scipy.integrate import simps
import os
import sys
import time
import datetime
# Get data and filter curve directories
if 'agave' in os.uname()[1]:
figs_data_dir = "/home/bajoshi/models_and_photometry/"
cluster_spz_scripts = "/home/bajoshi/spz_scripts/"
filter_curve_dir = figs_data_dir + 'filter_curves/'
elif 'firstlight' in os.uname()[1]:
figs_data_dir = '/Users/baj/Desktop/FIGS/'
cluster_spz_scripts = '/Users/baj/Desktop/FIGS/massive-galaxies/cluster_codes/'
filter_curve_dir = figs_data_dir + 'massive-galaxies/grismz_pipeline/'
else: # If running this code on the laptop
filter_curve_dir = '/Users/bhavinjoshi/Desktop/FIGS/massive-galaxies/grismz_pipeline/'
figs_data_dir = '/Volumes/Bhavins_backup/bc03_models_npy_spectra/'
import cluster_do_fitting as cf
def create_dl_lookup_table(zrange):
print "Creating lookup table for luminosity distances."
dl_mpc = np.zeros(len(zrange))
dl_cm = np.zeros(len(zrange))
for j in range(len(zrange)):
z = zrange[j]
dl_mpc[j] = cf.get_lum_dist(z) # in Mpc
dl_cm[j] = dl_mpc[j] * 3.086e24 # convert Mpc to cm
# Save a txt file
data = np.array(zip(zrange, dl_mpc, dl_cm), dtype=[('zrange', float), ('dl_mpc', float), ('dl_cm', float)])
np.savetxt('dl_lookup_table.txt', data, fmt=['%.3f', '%.6e', '%.6e'], delimiter=' ', header='z dl_mpc dl_cm')
print "Luminosity distance lookup table saved in txt file.",
print "In same folder as this code."
return None
def | (filt, filtername, start, model_comp_spec, model_lam_grid, \
total_models, zrange, dl_tbl):
print "\n", "Working on filter:", filtername
if filtername == 'u':
filt['trans'] /= 100.0 # They've given throughput percentages for the u-band
filt_flam_model = np.zeros(shape=(len(zrange), total_models), dtype=np.float64)
"""
print model_comp_spec.nbytes / (1024 * 1024)
print model_lam_grid.nbytes / (1024 * 1024)
print filt_flam_model.nbytes / (1024 * 1024)
print zrange.nbytes / (1024 * 1024)
print sys.getsizeof(filt) / (1024 * 1024)
sys.exit(0)
"""
for j in range(len(zrange)):
z = zrange[j]
#print "At z:", z
# ------------------------------ Now compute model filter magnitudes ------------------------------ #
# Redshift the base models
dl = dl_tbl['dl_cm'][j] # has to be in cm
#print "Lum dist [cm]:", dl
#model_comp_spec_z = model_comp_spec / (4 * np.pi * dl * dl * (1+z))
model_lam_grid_z = model_lam_grid * (1+z)
# first interpolate the transmission curve to the model lam grid
filt_interp = griddata(points=filt['wav'], values=filt['trans'], xi=model_lam_grid_z, \
method='linear')
# Set nan values in interpolated filter to 0.0
filt_nan_idx = np.where(np.isnan(filt_interp))[0]
filt_interp[filt_nan_idx] = 0.0
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(filt['wav'], filt['trans'])
ax.plot(model_lam_grid_z, filt_interp)
plt.show()
sys.exit(0)
"""
# multiply model spectrum to filter curve
#num_vec = nansum(model_comp_spec * filt_interp / (4 * np.pi * dl * dl * (1+z)), axis=1)
den = simps(y=filt_interp, x=model_lam_grid_z)
for i in range(total_models):
num = simps(y=model_comp_spec[i] * filt_interp / (4 * np.pi * dl * dl * (1+z)), x=model_lam_grid_z)
filt_flam_model[j, i] = num / den
# transverse array to make shape consistent with others
# I did it this way so that in the above for loop each filter is looped over only once
# i.e. minimizing the number of times each filter is gridded on to the model grid
#filt_flam_model_t = filt_flam_model.T
# save the model flux densities
np.save(figs_data_dir + 'all_model_flam_' + filtername + '_chab.npy', filt_flam_model)
print "Computation done and saved for:", filtername,
print "Total time taken:", time.time() - start
del filt_flam_model
return None
def main():
# Start time
start = time.time()
dt = datetime.datetime
print "Starting at --", dt.now()
# Redshift grid for models
zrange = np.arange(0.005, 6.005, 0.005)
print "Redshift grid for models:"
print zrange
# Read in lookup table for luminosity distances
if not os.path.isfile('dl_lookup_table.txt'):
create_dl_lookup_table(zrange)
dl_tbl = np.genfromtxt('dl_lookup_table.txt', dtype=None, names=True)
# Read in models with emission lines adn put in numpy array
total_models = 37761
chosen_imf = 'Chabrier'
if chosen_imf == 'Salpeter':
cspout_str = ''
elif chosen_imf == 'Chabrier':
cspout_str = '_chabrier'
# Read model lambda grid # In agnstroms
model_lam_grid_withlines_mmap = np.load(figs_data_dir + 'model_lam_grid_withlines' + cspout_str + '.npy', mmap_mode='r')
"""
# This is already done for the chabrier models by default
if not os.path.isfile(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy'):
print "Convert models to physical units."
# ---------------- This block only needs to be run once ---------------- #
model_comp_spec_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_withlines' + cspout_str + '.npy', mmap_mode='r')
# Convert model spectra to correct L_lambda units i.e., erg s^-1 A^-1
# They are given in units of solar luminosity per angstrom
# Therefore they need to be multiplied by L_sol = 3.826e33 erg s^-1
L_sol = 3.826e33
model_comp_spec_llam = model_comp_spec_withlines_mmap * L_sol
np.save(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', model_comp_spec_llam)
del model_comp_spec_withlines_mmap
# ---------------- End of code block to convert to Llam ---------------- #
"""
# Now read the model spectra # In erg s^-1 A^-1
model_comp_spec_llam_withlines_mmap = np.load(figs_data_dir + 'model_comp_spec_llam_withlines' + cspout_str + '.npy', mmap_mode='r')
# total run time up to now
print "All models now in numpy array and have emission lines. Total time taken up to now --",
print time.time() - start, "seconds."
# ------------------------------- Read in filter curves ------------------------------- #
"""
The HST filters, in their PYSYNPHOT form, cannot be used in conjunction with
the joblib module. So this function will read them and save them into numpy arrays.
This function has to be run once to convert the HST filters to text files that
can be read with genfromtxt.
"""
#save_hst_filters_to_npy()
uband_curve = np.genfromtxt(filter_curve_dir + 'kpno_mosaic_u.txt', dtype=None, \
names=['wav', 'trans'], skip_header=14)
f435w_filt_curve = np.genfromtxt(filter_curve_dir + 'f435w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f606w_filt_curve = np.genfromtxt(filter_curve_dir + 'f606w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f775w_filt_curve = np.genfromtxt(filter_curve_dir + 'f775w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f850lp_filt_curve = np.genfromtxt(filter_curve_dir + 'f850lp_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f125w_filt_curve = np.genfromtxt(filter_curve_dir + 'f125w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f140w_filt_curve = np.genfromtxt(filter_curve_dir + 'f140w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
f160w_filt_curve = np.genfromtxt(filter_curve_dir + 'f160w_filt_curve.txt', \
dtype=None, names=['wav', 'trans'])
irac1_curve = np.genfromtxt(filter_curve_dir + 'irac1.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac2_curve = np.genfromtxt(filter_curve_dir + 'irac2.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac3_curve = np.genfromtxt(filter_curve_dir + 'irac3.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
irac4_curve = np.genfromtxt(filter_curve_dir + 'irac4.txt', dtype=None, \
names=['wav', 'trans'], skip_header=3)
# IRAC wavelengths are in mixrons # convert to angstroms
irac1_curve['wav'] *= 1e4
irac2_curve['wav'] *= 1e4
irac3_curve['wav'] *= 1e4
irac4_curve['wav'] *= 1e4
all_filters = [uband_curve, f435w_filt_curve, f606w_filt_curve, f775w_filt_curve, f850lp_filt_curve, \
f125w_filt_curve, f140w_filt_curve, f160w_filt_curve, irac1_curve, irac2_curve, irac3_curve, irac4_curve]
all_filter_names = ['u', 'f435w', 'f606w', 'f775w', 'f850lp', \
'f125w', 'f140w', 'f160w', 'irac1', 'irac2', 'irac3', 'irac4']
# Loop over all redshifts and filters and compute magnitudes
max_cores = len(all_filters)
for i in range(int(np.ceil(len(all_filters)/max_cores))):
jmin = i*max_cores
jmax = (i+1)*max_cores
if jmax > len(all_filters):
jmax = len(all_filters)
# Will use as many cores as filters
processes = [mp.Process(target=compute_filter_flam, args=(all_filters[j], all_filter_names[j], start, \
model_comp_spec_llam_withlines_mmap, model_lam_grid_withlines_mmap, total_models, zrange, dl_tbl)) \
for j in range(len(all_filters[jmin:jmax]))]
for p in processes:
p.start()
print "Current process ID:", p.pid
for p in processes:
p.join()
print "Finished with filters:", all_filter_names[jmin:jmax]
# This is for agave because sometimes it will exit the for loop
# above without going through all the filters. So you don't
# want agave combining them in the wrong shape.
# SImply comment out the above part and run the block below
# once the flam computation is done.
print "Now combining all filter computations into a single npy file."
# Read in all individual filter flam
u = np.load(figs_data_dir + 'all_model_flam_u_chab.npy')
f435w = np.load(figs_data_dir + 'all_model_flam_f435w_chab.npy')
f606w = np.load(figs_data_dir + 'all_model_flam_f606w_chab.npy')
f775w = np.load(figs_data_dir + 'all_model_flam_f775w_chab.npy')
f850lp = np.load(figs_data_dir + 'all_model_flam_f850lp_chab.npy')
f125w = np.load(figs_data_dir + 'all_model_flam_f125w_chab.npy')
f140w = np.load(figs_data_dir + 'all_model_flam_f140w_chab.npy')
f160w = np.load(figs_data_dir + 'all_model_flam_f160w_chab.npy')
irac1 = np.load(figs_data_dir + 'all_model_flam_irac1_chab.npy')
irac2 = np.load(figs_data_dir + 'all_model_flam_irac2_chab.npy')
irac3 = np.load(figs_data_dir + 'all_model_flam_irac3_chab.npy')
irac4 = np.load(figs_data_dir + 'all_model_flam_irac4_chab.npy')
# now loop over all and write final output
all_indiv_flam = [u, f435w, f606w, f775w, f850lp, f125w, f140w, f160w, irac1, irac2, irac3, irac4]
all_model_flam = np.zeros(shape=(12, 1200, 37761))
for k in range(len(all_indiv_flam)):
all_model_flam[k] = all_indiv_flam[k]
np.save(figs_data_dir + 'all_model_flam_chabrier.npy', all_model_flam)
print "All done. Total time taken:", time.time() - start
return None
if __name__ == '__main__':
main()
sys.exit(0) | compute_filter_flam | identifier_name |
lib.rs | // Copyright (c) Microsoft. All rights reserved.
#![deny(rust_2018_idioms, warnings)]
#![deny(clippy::all, clippy::pedantic)]
#![allow(
clippy::default_trait_access,
clippy::doc_markdown, // clippy want the "IoT" of "IoT Hub" in a code fence
clippy::missing_errors_doc,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::shadow_unrelated,
clippy::too_many_arguments,
clippy::too_many_lines,
clippy::type_complexity,
clippy::use_self,
)]
pub mod app;
mod error;
pub mod logging;
pub mod signal;
pub mod watchdog;
pub mod workload;
pub mod workload_manager;
pub mod unix;
use futures::sync::mpsc;
use identity_client::IdentityClient;
use std::fs::DirBuilder;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::{collections::BTreeMap, fs::OpenOptions, io::Read};
use workload_manager::WorkloadManager;
use edgelet_core::{
crypto::{AZIOT_EDGED_CA_ALIAS, MANIFEST_TRUST_BUNDLE_ALIAS, TRUST_BUNDLE_ALIAS},
settings::AutoReprovisioningMode,
};
use edgelet_core::{
Authenticator, Listen, MakeModuleRuntime, Module, ModuleAction, ModuleRuntime,
ModuleRuntimeErrorReason, ModuleSpec, RuntimeSettings, WorkloadConfig,
};
use edgelet_http::logging::LoggingService;
use edgelet_http::{ConcurrencyThrottling, HyperExt, API_VERSION};
use edgelet_http_mgmt::ManagementService;
use edgelet_utils::log_failure;
pub use error::{Error, ErrorKind, InitializeErrorReason};
use failure::{Context, Fail, ResultExt};
use futures::future::Either;
use futures::sync::{
mpsc::{UnboundedReceiver, UnboundedSender},
oneshot::{self, Receiver},
};
use futures::{future, Future, Stream};
use hyper::server::conn::Http;
use hyper::{Body, Request};
use log::{debug, error, info, Level};
use serde::de::DeserializeOwned;
use serde::Serialize;
use sha2::{Digest, Sha256};
use crate::watchdog::Watchdog;
use crate::workload::WorkloadData;
const MGMT_SOCKET_DEFAULT_PERMISSION: u32 = 0o660;
const EDGE_RUNTIME_MODULEID: &str = "$edgeAgent";
const EDGE_RUNTIME_MODULE_NAME: &str = "edgeAgent";
const AUTH_SCHEME: &str = "sasToken";
/// The following constants are all environment variables names injected into
/// the Edge Agent container.
///
/// This variable holds the host name of the IoT Hub instance that edge agent
/// is expected to work with.
const HOSTNAME_KEY: &str = "IOTEDGE_IOTHUBHOSTNAME";
/// This variable holds the host name for the parent edge device. This name is used
/// by the edge agent to connect to parent edge hub for identity and twin operations.
const GATEWAY_HOSTNAME_KEY: &str = "IOTEDGE_GATEWAYHOSTNAME";
/// This variable holds the host name for the edge device. This name is used
/// by the edge agent to provide the edge hub container an alias name in the
/// network so that TLS cert validation works.
const EDGEDEVICE_HOSTNAME_KEY: &str = "EdgeDeviceHostName";
/// This variable holds the IoT Hub device identifier.
const DEVICEID_KEY: &str = "IOTEDGE_DEVICEID";
/// This variable holds the IoT Hub module identifier.
const MODULEID_KEY: &str = "IOTEDGE_MODULEID";
/// This variable holds the URI to use for connecting to the workload endpoint
/// in aziot-edged. This is used by the edge agent to connect to the workload API
/// for its own needs and is also used for volume mounting into module
/// containers when the URI refers to a Unix domain socket.
const WORKLOAD_URI_KEY: &str = "IOTEDGE_WORKLOADURI";
/// This variable holds the URI to use for connecting to the management
/// endpoint in aziot-edged. This is used by the edge agent for managing module
/// lifetimes and module identities.
const MANAGEMENT_URI_KEY: &str = "IOTEDGE_MANAGEMENTURI";
/// This variable holds the authentication scheme that modules are to use when
/// connecting to other server modules (like Edge Hub). The authentication
/// scheme can mean either that we are to use SAS tokens or a TLS client cert.
const AUTHSCHEME_KEY: &str = "IOTEDGE_AUTHSCHEME";
/// This is the key for the edge runtime mode.
const EDGE_RUNTIME_MODE_KEY: &str = "Mode";
/// This is the edge runtime mode - it should be iotedged, when aziot-edged starts edge runtime in single node mode.
const EDGE_RUNTIME_MODE: &str = "iotedged";
/// This is the key for the largest API version that this edgelet supports
const API_VERSION_KEY: &str = "IOTEDGE_APIVERSION";
/// This is the name of the cache subdirectory for settings state
const EDGE_SETTINGS_SUBDIR: &str = "cache";
/// This is the name of the settings backup file
const EDGE_PROVISIONING_STATE_FILENAME: &str = "provisioning_state";
// This is the name of the directory that contains the module folder
// with worlkload sockets inside, on the host
const WORKLOAD_LISTEN_MNT_URI: &str = "IOTEDGE_WORKLOADLISTEN_MNTURI";
// 2 hours
const AZIOT_EDGE_ID_CERT_MAX_DURATION_SECS: i64 = 2 * 3600;
// 90 days
const AZIOT_EDGE_SERVER_CERT_MAX_DURATION_SECS: i64 = 90 * 24 * 3600;
const STOP_TIME: Duration = Duration::from_secs(30);
/// This is the interval at which to poll Identity Service for device information.
const IS_GET_DEVICE_INFO_RETRY_INTERVAL_SECS: Duration = Duration::from_secs(5);
#[derive(Clone, serde::Serialize, serde::Deserialize, Debug)]
pub struct ProvisioningResult {
device_id: String,
gateway_host_name: String,
hub_name: String,
}
pub struct Main<M>
where
M: MakeModuleRuntime,
{
settings: M::Settings,
}
impl<M> Main<M>
where
M: MakeModuleRuntime + Send + 'static,
M::ModuleRuntime: 'static + Authenticator<Request = Request<Body>> + Clone + Send + Sync,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize + edgelet_core::module::NestedEdgeBodge,
M::Settings: 'static + Clone + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
<M::ModuleRuntime as Authenticator>::Error: Fail + Sync,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
pub fn new(settings: M::Settings) -> Self {
Main { settings }
}
// Allowing cognitive complexity errors for now. TODO: Refactor method later.
#[allow(clippy::cognitive_complexity)]
pub fn run_until<F, G>(self, make_shutdown_signal: G) -> Result<(), Error>
where
F: Future<Item = (), Error = ()> + Send + 'static,
G: Fn() -> F,
{
let Main { mut settings } = self;
let mut tokio_runtime = tokio::runtime::Runtime::new()
.context(ErrorKind::Initialize(InitializeErrorReason::Tokio))?;
let cache_subdir_path = Path::new(&settings.homedir()).join(EDGE_SETTINGS_SUBDIR);
// make sure the cache directory exists
DirBuilder::new()
.recursive(true)
.create(&cache_subdir_path)
.context(ErrorKind::Initialize(
InitializeErrorReason::CreateCacheDirectory,
))?;
let (create_socket_channel_snd, create_socket_channel_rcv) =
mpsc::unbounded::<ModuleAction>();
let runtime = init_runtime::<M>(
settings.clone(),
&mut tokio_runtime,
create_socket_channel_snd,
)?;
let url = settings.endpoints().aziot_identityd_url().clone();
let client = Arc::new(Mutex::new(identity_client::IdentityClient::new(
aziot_identity_common_http::ApiVersion::V2020_09_01,
&url,
)));
let provisioning_result = loop {
info!("Obtaining edge device provisioning data...");
match settings.auto_reprovisioning_mode() {
AutoReprovisioningMode::AlwaysOnStartup => {
tokio_runtime.block_on(reprovision_device(&client))?
}
AutoReprovisioningMode::Dynamic | AutoReprovisioningMode::OnErrorOnly => {}
}
let result =
tokio_runtime.block_on(
client
.lock()
.unwrap()
.get_device()
.map_err(|err| {
Error::from(err.context(ErrorKind::Initialize(
InitializeErrorReason::GetDeviceInfo,
)))
})
.and_then(|identity| match identity {
aziot_identity_common::Identity::Aziot(spec) => {
debug!("{}:{}", spec.hub_name, spec.device_id.0);
Ok(ProvisioningResult {
device_id: spec.device_id.0,
gateway_host_name: spec.gateway_host,
hub_name: spec.hub_name,
})
}
aziot_identity_common::Identity::Local(_) => Err(Error::from(
ErrorKind::Initialize(InitializeErrorReason::InvalidIdentityType),
)),
}),
);
match result {
Ok(provisioning_result) => {
break provisioning_result;
}
Err(err) => {
log_failure(Level::Warn, &err);
std::thread::sleep(IS_GET_DEVICE_INFO_RETRY_INTERVAL_SECS);
log::warn!("Retrying getting edge device provisioning information.");
}
};
};
info!("Finished provisioning edge device.");
// Normally aziot-edged will stop all modules when it shuts down. But if it crashed,
// modules will continue to run. On Linux systems where aziot-edged is responsible for
// creating/binding the socket (e.g., CentOS 7.5, which uses systemd but does not
// support systemd socket activation), modules will be left holding stale file
// descriptors for the workload and management APIs and calls on these APIs will
// begin to fail. Resilient modules should be able to deal with this, but we'll
// restart all modules to ensure a clean start.
info!("Stopping all modules...");
tokio_runtime
.block_on(runtime.stop_all(Some(STOP_TIME)))
.context(ErrorKind::Initialize(
InitializeErrorReason::StopExistingModules,
))?;
info!("Finished stopping modules.");
// Detect if the device was changed and if the device needs to be reconfigured
check_device_reconfigure::<M>(
&cache_subdir_path,
EDGE_PROVISIONING_STATE_FILENAME,
&provisioning_result,
&runtime,
&mut tokio_runtime,
)?;
settings
.agent_mut()
.parent_hostname_resolve(&provisioning_result.gateway_host_name);
let cfg = WorkloadData::new(
provisioning_result.hub_name,
provisioning_result.device_id,
settings
.edge_ca_cert()
.unwrap_or(AZIOT_EDGED_CA_ALIAS)
.to_string(),
settings
.edge_ca_key()
.unwrap_or(AZIOT_EDGED_CA_ALIAS)
.to_string(),
settings
.trust_bundle_cert()
.unwrap_or(TRUST_BUNDLE_ALIAS)
.to_string(),
settings
.manifest_trust_bundle_cert()
.unwrap_or(MANIFEST_TRUST_BUNDLE_ALIAS)
.to_string(),
AZIOT_EDGE_ID_CERT_MAX_DURATION_SECS,
AZIOT_EDGE_SERVER_CERT_MAX_DURATION_SECS,
);
let should_reprovision = start_api::<_, _, M>(
&settings,
&provisioning_result.gateway_host_name,
&runtime,
cfg,
make_shutdown_signal(),
&mut tokio_runtime,
create_socket_channel_rcv,
)?;
if should_reprovision {
tokio_runtime.block_on(reprovision_device(&client))?;
}
info!("Shutdown complete.");
Ok(())
}
}
fn reprovision_device(
identity_client: &Arc<Mutex<IdentityClient>>,
) -> impl Future<Item = (), Error = Error> {
let id_mgr = identity_client.lock().unwrap();
id_mgr
.reprovision_device()
.map_err(|err| Error::from(err.context(ErrorKind::ReprovisionFailure)))
}
fn check_device_reconfigure<M>(
subdir: &Path,
filename: &str,
provisioning_result: &ProvisioningResult,
runtime: &M::ModuleRuntime,
tokio_runtime: &mut tokio::runtime::Runtime,
) -> Result<(), Error>
where
M: MakeModuleRuntime + 'static,
{
info!("Detecting if device information has changed...");
let path = subdir.join(filename);
let diff = diff_with_cached(provisioning_result, &path);
if diff {
info!("Change to provisioning state detected.");
reconfigure::<M>(
subdir,
filename,
provisioning_result,
runtime,
tokio_runtime,
)?;
}
Ok(())
}
fn compute_provisioning_result_digest(
provisioning_result: &ProvisioningResult,
) -> Result<String, DiffError> {
let s = serde_json::to_string(provisioning_result)?;
Ok(base64::encode(&Sha256::digest_str(&s)))
}
fn diff_with_cached(provisioning_result: &ProvisioningResult, path: &Path) -> bool {
fn diff_with_cached_inner(
provisioning_result: &ProvisioningResult,
path: &Path,
) -> Result<bool, DiffError> {
let mut file = OpenOptions::new().read(true).open(path)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)?;
let encoded = compute_provisioning_result_digest(provisioning_result)?;
if encoded == buffer {
debug!("Provisioning state matches supplied provisioning result.");
Ok(false)
} else {
Ok(true)
}
}
match diff_with_cached_inner(provisioning_result, path) {
Ok(result) => result,
Err(err) => {
log_failure(Level::Debug, &err);
debug!("Error reading config backup.");
true
}
}
}
#[derive(Debug, Fail)]
#[fail(display = "Could not load provisioning result")]
pub struct DiffError(#[cause] Context<Box<dyn std::fmt::Display + Send + Sync>>);
impl From<std::io::Error> for DiffError {
fn from(err: std::io::Error) -> Self {
DiffError(Context::new(Box::new(err)))
}
}
impl From<serde_json::Error> for DiffError {
fn from(err: serde_json::Error) -> Self {
DiffError(Context::new(Box::new(err)))
}
}
fn reconfigure<M>(
subdir: &Path,
filename: &str,
provisioning_result: &ProvisioningResult,
runtime: &M::ModuleRuntime,
tokio_runtime: &mut tokio::runtime::Runtime,
) -> Result<(), Error>
where
M: MakeModuleRuntime + 'static,
{
info!("Removing all modules...");
tokio_runtime
.block_on(runtime.remove_all())
.context(ErrorKind::Initialize(
InitializeErrorReason::RemoveExistingModules,
))?;
info!("Finished removing modules.");
let path = subdir.join(filename);
// store provisioning result
let digest = compute_provisioning_result_digest(provisioning_result).context(
ErrorKind::Initialize(InitializeErrorReason::SaveProvisioning),
)?;
std::fs::write(path, digest.into_bytes()).context(ErrorKind::Initialize(
InitializeErrorReason::SaveProvisioning,
))?;
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn start_api<F, W, M>(
settings: &M::Settings,
parent_hostname: &str,
runtime: &M::ModuleRuntime,
workload_config: W,
shutdown_signal: F,
tokio_runtime: &mut tokio::runtime::Runtime,
create_socket_channel_rcv: UnboundedReceiver<ModuleAction>,
) -> Result<bool, Error>
where
F: Future<Item = (), Error = ()> + Send + 'static,
W: WorkloadConfig + Clone + Send + Sync + 'static,
M::ModuleRuntime: Authenticator<Request = Request<Body>> + Send + Sync + Clone + 'static,
M: MakeModuleRuntime + 'static,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize,
M::Settings: 'static,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
<M::ModuleRuntime as Authenticator>::Error: Fail + Sync,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
|
fn init_runtime<M>(
settings: M::Settings,
tokio_runtime: &mut tokio::runtime::Runtime,
create_socket_channel_snd: UnboundedSender<ModuleAction>,
) -> Result<M::ModuleRuntime, Error>
where
M: MakeModuleRuntime + Send + 'static,
M::ModuleRuntime: Send,
M::Future: 'static,
{
info!("Initializing the module runtime...");
let runtime = tokio_runtime
.block_on(M::make_runtime(settings, create_socket_channel_snd))
.context(ErrorKind::Initialize(InitializeErrorReason::ModuleRuntime))?;
info!("Finished initializing the module runtime.");
Ok(runtime)
}
fn start_runtime<M>(
runtime: M::ModuleRuntime,
hostname: &str,
parent_hostname: &str,
device_id: &str,
settings: &M::Settings,
shutdown: Receiver<()>,
) -> Result<impl Future<Item = (), Error = Error>, Error>
where
M: MakeModuleRuntime,
M::ModuleRuntime: Clone + 'static,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
let spec = settings.agent().clone();
let env = build_env(spec.env(), hostname, parent_hostname, device_id, settings);
let spec = ModuleSpec::<<M::ModuleRuntime as ModuleRuntime>::Config>::new(
EDGE_RUNTIME_MODULE_NAME.to_string(),
spec.type_().to_string(),
spec.config().clone(),
env,
spec.image_pull_policy(),
)
.context(ErrorKind::Initialize(InitializeErrorReason::EdgeRuntime))?;
let watchdog = Watchdog::new(
runtime,
settings.watchdog().max_retries(),
settings.endpoints().aziot_identityd_url(),
);
let runtime_future = watchdog
.run_until(spec, EDGE_RUNTIME_MODULEID, shutdown.map_err(|_| ()))
.map_err(Error::from);
Ok(runtime_future)
}
// Add the environment variables needed by the EdgeAgent.
fn build_env<S>(
spec_env: &BTreeMap<String, String>,
hostname: &str,
parent_hostname: &str,
device_id: &str,
settings: &S,
) -> BTreeMap<String, String>
where
S: RuntimeSettings,
{
let mut env = BTreeMap::new();
env.insert(HOSTNAME_KEY.to_string(), hostname.to_string());
env.insert(
EDGEDEVICE_HOSTNAME_KEY.to_string(),
settings.hostname().to_string().to_lowercase(),
);
if parent_hostname.to_lowercase() != hostname.to_lowercase() {
env.insert(
GATEWAY_HOSTNAME_KEY.to_string(),
parent_hostname.to_string(),
);
}
env.insert(DEVICEID_KEY.to_string(), device_id.to_string());
env.insert(MODULEID_KEY.to_string(), EDGE_RUNTIME_MODULEID.to_string());
#[cfg(feature = "runtime-docker")]
let (workload_uri, management_uri, home_dir) = (
settings.connect().workload_uri().to_string(),
settings.connect().management_uri().to_string(),
settings.homedir().to_str().unwrap().to_string(),
);
env.insert(WORKLOAD_URI_KEY.to_string(), workload_uri);
env.insert(MANAGEMENT_URI_KEY.to_string(), management_uri);
env.insert(
WORKLOAD_LISTEN_MNT_URI.to_string(),
Listen::workload_mnt_uri(&home_dir),
);
env.insert(AUTHSCHEME_KEY.to_string(), AUTH_SCHEME.to_string());
env.insert(
EDGE_RUNTIME_MODE_KEY.to_string(),
EDGE_RUNTIME_MODE.to_string(),
);
for (key, val) in spec_env.iter() {
env.insert(key.clone(), val.clone());
}
env.insert(API_VERSION_KEY.to_string(), API_VERSION.to_string());
env
}
fn start_management<M>(
settings: &M::Settings,
runtime: &M::ModuleRuntime,
shutdown: Receiver<()>,
initiate_shutdown_and_reprovision: mpsc::UnboundedSender<()>,
) -> impl Future<Item = (), Error = Error>
where
M: MakeModuleRuntime,
M::ModuleRuntime: Authenticator<Request = Request<Body>> + Send + Sync + Clone + 'static,
<<M::ModuleRuntime as Authenticator>::AuthenticateFuture as Future>::Error: Fail,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config: DeserializeOwned + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
{
info!("Starting management API...");
let label = "mgmt".to_string();
let url = settings.listen().management_uri().clone();
let identity_uri = settings.endpoints().aziot_identityd_url().clone();
let identity_client = Arc::new(Mutex::new(identity_client::IdentityClient::new(
aziot_identity_common_http::ApiVersion::V2020_09_01,
&identity_uri,
)));
ManagementService::new(runtime, identity_client, initiate_shutdown_and_reprovision)
.then(move |service| -> Result<_, Error> {
let service = service.context(ErrorKind::Initialize(
InitializeErrorReason::ManagementService,
))?;
let service = LoggingService::new(label, service);
let run = Http::new()
.bind_url(url.clone(), service, MGMT_SOCKET_DEFAULT_PERMISSION)
.map_err(|err| {
err.context(ErrorKind::Initialize(
InitializeErrorReason::ManagementService,
))
})?
.run_until(shutdown.map_err(|_| ()), ConcurrencyThrottling::NoLimit)
.map_err(|err| Error::from(err.context(ErrorKind::ManagementService)));
info!("Listening on {} with 1 thread for management API.", url);
Ok(run)
})
.flatten()
}
#[cfg(test)]
mod tests {
use std::fmt;
use edgelet_docker::Settings;
use super::{Fail, RuntimeSettings};
static GOOD_SETTINGS_EDGE_CA_CERT_ID: &str = "test/linux/sample_settings.edge.ca.id.toml";
#[derive(Clone, Copy, Debug, Fail)]
pub struct Error;
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Error")
}
}
lazy_static::lazy_static! {
static ref ENV_LOCK: std::sync::Mutex<()> = Default::default();
}
#[test]
fn settings_for_edge_ca_cert() {
let _env_lock = ENV_LOCK.lock().expect("env lock poisoned");
std::env::set_var("AZIOT_EDGED_CONFIG", GOOD_SETTINGS_EDGE_CA_CERT_ID);
let settings = Settings::new().unwrap();
assert_eq!(settings.edge_ca_cert(), Some("iotedge-test-ca"));
}
}
| {
let iot_hub_name = workload_config.iot_hub_name().to_string();
let device_id = workload_config.device_id().to_string();
let (mgmt_tx, mgmt_rx) = oneshot::channel();
let (mgmt_stop_and_reprovision_tx, mgmt_stop_and_reprovision_rx) = mpsc::unbounded();
let mgmt = start_management::<M>(settings, runtime, mgmt_rx, mgmt_stop_and_reprovision_tx);
WorkloadManager::start_manager::<M>(
settings,
runtime,
workload_config,
tokio_runtime,
create_socket_channel_rcv,
)?;
let (runt_tx, runt_rx) = oneshot::channel();
let edge_rt = start_runtime::<M>(
runtime.clone(),
&iot_hub_name,
parent_hostname,
&device_id,
&settings,
runt_rx,
)?;
// This mpsc sender/receiver is used for getting notifications from the mgmt service
// indicating that the daemon should shut down and attempt to reprovision the device.
let mgmt_stop_and_reprovision_signaled =
mgmt_stop_and_reprovision_rx
.into_future()
.then(|res| match res {
Ok((Some(()), _)) | Ok((None, _)) => Ok(()),
Err(((), _)) => Err(Error::from(ErrorKind::ManagementService)),
});
let mgmt_stop_and_reprovision_signaled = match settings.auto_reprovisioning_mode() {
AutoReprovisioningMode::Dynamic => {
futures::future::Either::B(mgmt_stop_and_reprovision_signaled)
}
AutoReprovisioningMode::AlwaysOnStartup | AutoReprovisioningMode::OnErrorOnly => {
futures::future::Either::A(future::empty())
}
};
let edge_rt_with_mgmt_signal = edge_rt.select2(mgmt_stop_and_reprovision_signaled).then(
|res| match res {
Ok(Either::A((_edge_rt_ok, _mgmt_stop_and_reprovision_signaled_future))) => {
info!("Edge runtime will stop because of the shutdown signal.");
future::ok(false)
}
Ok(Either::B((_mgmt_stop_and_reprovision_signaled_ok, _edge_rt_future))) => {
info!("Edge runtime will stop because of the device reprovisioning signal.");
future::ok(true)
}
Err(Either::A((edge_rt_err, _mgmt_stop_and_reprovision_signaled_future))) => {
error!("Edge runtime will stop because the shutdown signal raised an error.");
future::err(edge_rt_err)
},
Err(Either::B((mgmt_stop_and_reprovision_signaled_err, _edge_rt_future))) => {
error!("Edge runtime will stop because the device reprovisioning signal raised an error.");
future::err(mgmt_stop_and_reprovision_signaled_err)
}
},
);
// Wait for the watchdog to finish, and then send signal to the workload and management services.
// This way the edgeAgent can finish shutting down all modules.
let edge_rt_with_cleanup = edge_rt_with_mgmt_signal.then(move |res| {
mgmt_tx.send(()).unwrap_or(());
// A -> EdgeRt + Mgmt Stop and Reprovision Signal Future
// B -> Restart Signal Future
match res {
Ok(should_reprovision) => future::ok(should_reprovision),
Err(err) => future::err(err),
}
});
let shutdown = shutdown_signal.map(move |_| {
debug!("shutdown signaled");
// Signal the watchdog to shutdown
runt_tx.send(()).unwrap_or(());
});
tokio_runtime.spawn(shutdown);
let services = mgmt.join(edge_rt_with_cleanup).then(|result| match result {
Ok(((), should_reprovision)) => Ok(should_reprovision),
Err(err) => Err(err),
});
let should_reprovision = tokio_runtime.block_on(services)?;
Ok(should_reprovision)
} | identifier_body |
lib.rs | // Copyright (c) Microsoft. All rights reserved.
#![deny(rust_2018_idioms, warnings)]
#![deny(clippy::all, clippy::pedantic)]
#![allow(
clippy::default_trait_access,
clippy::doc_markdown, // clippy want the "IoT" of "IoT Hub" in a code fence
clippy::missing_errors_doc,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::shadow_unrelated,
clippy::too_many_arguments,
clippy::too_many_lines,
clippy::type_complexity,
clippy::use_self,
)]
pub mod app;
mod error;
pub mod logging;
pub mod signal;
pub mod watchdog;
pub mod workload;
pub mod workload_manager;
pub mod unix;
use futures::sync::mpsc;
use identity_client::IdentityClient;
use std::fs::DirBuilder;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::{collections::BTreeMap, fs::OpenOptions, io::Read};
use workload_manager::WorkloadManager;
use edgelet_core::{
crypto::{AZIOT_EDGED_CA_ALIAS, MANIFEST_TRUST_BUNDLE_ALIAS, TRUST_BUNDLE_ALIAS},
settings::AutoReprovisioningMode,
};
use edgelet_core::{
Authenticator, Listen, MakeModuleRuntime, Module, ModuleAction, ModuleRuntime,
ModuleRuntimeErrorReason, ModuleSpec, RuntimeSettings, WorkloadConfig,
};
use edgelet_http::logging::LoggingService;
use edgelet_http::{ConcurrencyThrottling, HyperExt, API_VERSION};
use edgelet_http_mgmt::ManagementService;
use edgelet_utils::log_failure;
pub use error::{Error, ErrorKind, InitializeErrorReason};
use failure::{Context, Fail, ResultExt};
use futures::future::Either;
use futures::sync::{
mpsc::{UnboundedReceiver, UnboundedSender},
oneshot::{self, Receiver},
};
use futures::{future, Future, Stream};
use hyper::server::conn::Http;
use hyper::{Body, Request};
use log::{debug, error, info, Level};
use serde::de::DeserializeOwned;
use serde::Serialize;
use sha2::{Digest, Sha256};
use crate::watchdog::Watchdog;
use crate::workload::WorkloadData;
const MGMT_SOCKET_DEFAULT_PERMISSION: u32 = 0o660;
const EDGE_RUNTIME_MODULEID: &str = "$edgeAgent";
const EDGE_RUNTIME_MODULE_NAME: &str = "edgeAgent";
const AUTH_SCHEME: &str = "sasToken";
/// The following constants are all environment variables names injected into
/// the Edge Agent container.
///
/// This variable holds the host name of the IoT Hub instance that edge agent
/// is expected to work with.
const HOSTNAME_KEY: &str = "IOTEDGE_IOTHUBHOSTNAME";
/// This variable holds the host name for the parent edge device. This name is used
/// by the edge agent to connect to parent edge hub for identity and twin operations.
const GATEWAY_HOSTNAME_KEY: &str = "IOTEDGE_GATEWAYHOSTNAME";
/// This variable holds the host name for the edge device. This name is used
/// by the edge agent to provide the edge hub container an alias name in the
/// network so that TLS cert validation works.
const EDGEDEVICE_HOSTNAME_KEY: &str = "EdgeDeviceHostName";
/// This variable holds the IoT Hub device identifier.
const DEVICEID_KEY: &str = "IOTEDGE_DEVICEID";
/// This variable holds the IoT Hub module identifier.
const MODULEID_KEY: &str = "IOTEDGE_MODULEID";
/// This variable holds the URI to use for connecting to the workload endpoint
/// in aziot-edged. This is used by the edge agent to connect to the workload API
/// for its own needs and is also used for volume mounting into module
/// containers when the URI refers to a Unix domain socket.
const WORKLOAD_URI_KEY: &str = "IOTEDGE_WORKLOADURI";
/// This variable holds the URI to use for connecting to the management
/// endpoint in aziot-edged. This is used by the edge agent for managing module
/// lifetimes and module identities.
const MANAGEMENT_URI_KEY: &str = "IOTEDGE_MANAGEMENTURI";
/// This variable holds the authentication scheme that modules are to use when
/// connecting to other server modules (like Edge Hub). The authentication
/// scheme can mean either that we are to use SAS tokens or a TLS client cert.
const AUTHSCHEME_KEY: &str = "IOTEDGE_AUTHSCHEME";
/// This is the key for the edge runtime mode.
const EDGE_RUNTIME_MODE_KEY: &str = "Mode";
/// This is the edge runtime mode - it should be iotedged, when aziot-edged starts edge runtime in single node mode.
const EDGE_RUNTIME_MODE: &str = "iotedged";
/// This is the key for the largest API version that this edgelet supports
const API_VERSION_KEY: &str = "IOTEDGE_APIVERSION";
/// This is the name of the cache subdirectory for settings state
const EDGE_SETTINGS_SUBDIR: &str = "cache";
/// This is the name of the settings backup file
const EDGE_PROVISIONING_STATE_FILENAME: &str = "provisioning_state";
// This is the name of the directory that contains the module folder
// with worlkload sockets inside, on the host
const WORKLOAD_LISTEN_MNT_URI: &str = "IOTEDGE_WORKLOADLISTEN_MNTURI";
// 2 hours
const AZIOT_EDGE_ID_CERT_MAX_DURATION_SECS: i64 = 2 * 3600;
// 90 days
const AZIOT_EDGE_SERVER_CERT_MAX_DURATION_SECS: i64 = 90 * 24 * 3600;
const STOP_TIME: Duration = Duration::from_secs(30);
/// This is the interval at which to poll Identity Service for device information.
const IS_GET_DEVICE_INFO_RETRY_INTERVAL_SECS: Duration = Duration::from_secs(5);
#[derive(Clone, serde::Serialize, serde::Deserialize, Debug)]
pub struct ProvisioningResult {
device_id: String,
gateway_host_name: String,
hub_name: String,
}
pub struct Main<M>
where
M: MakeModuleRuntime,
{
settings: M::Settings,
}
impl<M> Main<M>
where
M: MakeModuleRuntime + Send + 'static,
M::ModuleRuntime: 'static + Authenticator<Request = Request<Body>> + Clone + Send + Sync,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize + edgelet_core::module::NestedEdgeBodge,
M::Settings: 'static + Clone + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
<M::ModuleRuntime as Authenticator>::Error: Fail + Sync,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
pub fn new(settings: M::Settings) -> Self {
Main { settings }
}
// Allowing cognitive complexity errors for now. TODO: Refactor method later.
#[allow(clippy::cognitive_complexity)]
pub fn run_until<F, G>(self, make_shutdown_signal: G) -> Result<(), Error>
where
F: Future<Item = (), Error = ()> + Send + 'static,
G: Fn() -> F,
{
let Main { mut settings } = self;
let mut tokio_runtime = tokio::runtime::Runtime::new()
.context(ErrorKind::Initialize(InitializeErrorReason::Tokio))?;
let cache_subdir_path = Path::new(&settings.homedir()).join(EDGE_SETTINGS_SUBDIR);
// make sure the cache directory exists
DirBuilder::new()
.recursive(true)
.create(&cache_subdir_path)
.context(ErrorKind::Initialize(
InitializeErrorReason::CreateCacheDirectory,
))?;
let (create_socket_channel_snd, create_socket_channel_rcv) =
mpsc::unbounded::<ModuleAction>();
let runtime = init_runtime::<M>(
settings.clone(),
&mut tokio_runtime,
create_socket_channel_snd,
)?;
let url = settings.endpoints().aziot_identityd_url().clone();
let client = Arc::new(Mutex::new(identity_client::IdentityClient::new(
aziot_identity_common_http::ApiVersion::V2020_09_01,
&url,
)));
let provisioning_result = loop {
info!("Obtaining edge device provisioning data...");
match settings.auto_reprovisioning_mode() {
AutoReprovisioningMode::AlwaysOnStartup => {
tokio_runtime.block_on(reprovision_device(&client))?
}
AutoReprovisioningMode::Dynamic | AutoReprovisioningMode::OnErrorOnly => {}
}
let result =
tokio_runtime.block_on(
client
.lock()
.unwrap()
.get_device()
.map_err(|err| {
Error::from(err.context(ErrorKind::Initialize(
InitializeErrorReason::GetDeviceInfo,
)))
})
.and_then(|identity| match identity {
aziot_identity_common::Identity::Aziot(spec) => {
debug!("{}:{}", spec.hub_name, spec.device_id.0);
Ok(ProvisioningResult {
device_id: spec.device_id.0,
gateway_host_name: spec.gateway_host,
hub_name: spec.hub_name,
})
}
aziot_identity_common::Identity::Local(_) => Err(Error::from(
ErrorKind::Initialize(InitializeErrorReason::InvalidIdentityType),
)),
}),
);
match result {
Ok(provisioning_result) => {
break provisioning_result;
}
Err(err) => {
log_failure(Level::Warn, &err);
std::thread::sleep(IS_GET_DEVICE_INFO_RETRY_INTERVAL_SECS);
log::warn!("Retrying getting edge device provisioning information.");
}
};
};
info!("Finished provisioning edge device.");
// Normally aziot-edged will stop all modules when it shuts down. But if it crashed,
// modules will continue to run. On Linux systems where aziot-edged is responsible for
// creating/binding the socket (e.g., CentOS 7.5, which uses systemd but does not
// support systemd socket activation), modules will be left holding stale file
// descriptors for the workload and management APIs and calls on these APIs will
// begin to fail. Resilient modules should be able to deal with this, but we'll
// restart all modules to ensure a clean start.
info!("Stopping all modules...");
tokio_runtime
.block_on(runtime.stop_all(Some(STOP_TIME)))
.context(ErrorKind::Initialize(
InitializeErrorReason::StopExistingModules,
))?;
info!("Finished stopping modules.");
// Detect if the device was changed and if the device needs to be reconfigured
check_device_reconfigure::<M>(
&cache_subdir_path,
EDGE_PROVISIONING_STATE_FILENAME,
&provisioning_result,
&runtime,
&mut tokio_runtime,
)?;
settings
.agent_mut()
.parent_hostname_resolve(&provisioning_result.gateway_host_name);
let cfg = WorkloadData::new(
provisioning_result.hub_name,
provisioning_result.device_id,
settings
.edge_ca_cert()
.unwrap_or(AZIOT_EDGED_CA_ALIAS)
.to_string(),
settings
.edge_ca_key()
.unwrap_or(AZIOT_EDGED_CA_ALIAS)
.to_string(),
settings
.trust_bundle_cert()
.unwrap_or(TRUST_BUNDLE_ALIAS)
.to_string(),
settings
.manifest_trust_bundle_cert()
.unwrap_or(MANIFEST_TRUST_BUNDLE_ALIAS)
.to_string(),
AZIOT_EDGE_ID_CERT_MAX_DURATION_SECS,
AZIOT_EDGE_SERVER_CERT_MAX_DURATION_SECS,
);
let should_reprovision = start_api::<_, _, M>(
&settings,
&provisioning_result.gateway_host_name,
&runtime,
cfg,
make_shutdown_signal(),
&mut tokio_runtime,
create_socket_channel_rcv,
)?;
if should_reprovision {
tokio_runtime.block_on(reprovision_device(&client))?;
}
info!("Shutdown complete.");
Ok(())
}
}
fn reprovision_device(
identity_client: &Arc<Mutex<IdentityClient>>,
) -> impl Future<Item = (), Error = Error> {
let id_mgr = identity_client.lock().unwrap();
id_mgr
.reprovision_device()
.map_err(|err| Error::from(err.context(ErrorKind::ReprovisionFailure)))
}
fn check_device_reconfigure<M>(
subdir: &Path,
filename: &str,
provisioning_result: &ProvisioningResult,
runtime: &M::ModuleRuntime,
tokio_runtime: &mut tokio::runtime::Runtime,
) -> Result<(), Error>
where
M: MakeModuleRuntime + 'static,
{
info!("Detecting if device information has changed...");
let path = subdir.join(filename);
let diff = diff_with_cached(provisioning_result, &path);
if diff {
info!("Change to provisioning state detected.");
reconfigure::<M>(
subdir,
filename,
provisioning_result,
runtime,
tokio_runtime,
)?;
}
Ok(())
}
fn compute_provisioning_result_digest(
provisioning_result: &ProvisioningResult,
) -> Result<String, DiffError> {
let s = serde_json::to_string(provisioning_result)?;
Ok(base64::encode(&Sha256::digest_str(&s)))
}
fn diff_with_cached(provisioning_result: &ProvisioningResult, path: &Path) -> bool {
fn diff_with_cached_inner(
provisioning_result: &ProvisioningResult,
path: &Path,
) -> Result<bool, DiffError> {
let mut file = OpenOptions::new().read(true).open(path)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)?;
let encoded = compute_provisioning_result_digest(provisioning_result)?;
if encoded == buffer {
debug!("Provisioning state matches supplied provisioning result.");
Ok(false)
} else {
Ok(true)
}
}
match diff_with_cached_inner(provisioning_result, path) {
Ok(result) => result,
Err(err) => {
log_failure(Level::Debug, &err);
debug!("Error reading config backup.");
true
}
}
}
#[derive(Debug, Fail)]
#[fail(display = "Could not load provisioning result")]
pub struct DiffError(#[cause] Context<Box<dyn std::fmt::Display + Send + Sync>>);
impl From<std::io::Error> for DiffError {
fn from(err: std::io::Error) -> Self {
DiffError(Context::new(Box::new(err)))
}
}
impl From<serde_json::Error> for DiffError {
fn from(err: serde_json::Error) -> Self {
DiffError(Context::new(Box::new(err)))
}
}
fn reconfigure<M>(
subdir: &Path,
filename: &str,
provisioning_result: &ProvisioningResult,
runtime: &M::ModuleRuntime,
tokio_runtime: &mut tokio::runtime::Runtime,
) -> Result<(), Error>
where
M: MakeModuleRuntime + 'static,
{
info!("Removing all modules...");
tokio_runtime
.block_on(runtime.remove_all())
.context(ErrorKind::Initialize(
InitializeErrorReason::RemoveExistingModules,
))?;
info!("Finished removing modules.");
let path = subdir.join(filename);
// store provisioning result
let digest = compute_provisioning_result_digest(provisioning_result).context(
ErrorKind::Initialize(InitializeErrorReason::SaveProvisioning),
)?;
std::fs::write(path, digest.into_bytes()).context(ErrorKind::Initialize(
InitializeErrorReason::SaveProvisioning,
))?;
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn start_api<F, W, M>(
settings: &M::Settings,
parent_hostname: &str,
runtime: &M::ModuleRuntime,
workload_config: W,
shutdown_signal: F,
tokio_runtime: &mut tokio::runtime::Runtime,
create_socket_channel_rcv: UnboundedReceiver<ModuleAction>,
) -> Result<bool, Error>
where
F: Future<Item = (), Error = ()> + Send + 'static,
W: WorkloadConfig + Clone + Send + Sync + 'static,
M::ModuleRuntime: Authenticator<Request = Request<Body>> + Send + Sync + Clone + 'static,
M: MakeModuleRuntime + 'static,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize,
M::Settings: 'static,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
<M::ModuleRuntime as Authenticator>::Error: Fail + Sync,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
let iot_hub_name = workload_config.iot_hub_name().to_string();
let device_id = workload_config.device_id().to_string();
let (mgmt_tx, mgmt_rx) = oneshot::channel();
let (mgmt_stop_and_reprovision_tx, mgmt_stop_and_reprovision_rx) = mpsc::unbounded();
let mgmt = start_management::<M>(settings, runtime, mgmt_rx, mgmt_stop_and_reprovision_tx);
WorkloadManager::start_manager::<M>(
settings,
runtime,
workload_config,
tokio_runtime,
create_socket_channel_rcv,
)?;
let (runt_tx, runt_rx) = oneshot::channel();
let edge_rt = start_runtime::<M>(
runtime.clone(),
&iot_hub_name,
parent_hostname,
&device_id,
&settings,
runt_rx,
)?;
// This mpsc sender/receiver is used for getting notifications from the mgmt service
// indicating that the daemon should shut down and attempt to reprovision the device.
let mgmt_stop_and_reprovision_signaled =
mgmt_stop_and_reprovision_rx
.into_future()
.then(|res| match res {
Ok((Some(()), _)) | Ok((None, _)) => Ok(()),
Err(((), _)) => Err(Error::from(ErrorKind::ManagementService)),
});
let mgmt_stop_and_reprovision_signaled = match settings.auto_reprovisioning_mode() {
AutoReprovisioningMode::Dynamic => {
futures::future::Either::B(mgmt_stop_and_reprovision_signaled)
}
AutoReprovisioningMode::AlwaysOnStartup | AutoReprovisioningMode::OnErrorOnly => {
futures::future::Either::A(future::empty())
}
};
let edge_rt_with_mgmt_signal = edge_rt.select2(mgmt_stop_and_reprovision_signaled).then(
|res| match res {
Ok(Either::A((_edge_rt_ok, _mgmt_stop_and_reprovision_signaled_future))) => {
info!("Edge runtime will stop because of the shutdown signal.");
future::ok(false)
}
Ok(Either::B((_mgmt_stop_and_reprovision_signaled_ok, _edge_rt_future))) => {
info!("Edge runtime will stop because of the device reprovisioning signal.");
future::ok(true)
}
Err(Either::A((edge_rt_err, _mgmt_stop_and_reprovision_signaled_future))) => {
error!("Edge runtime will stop because the shutdown signal raised an error.");
future::err(edge_rt_err)
},
Err(Either::B((mgmt_stop_and_reprovision_signaled_err, _edge_rt_future))) => {
error!("Edge runtime will stop because the device reprovisioning signal raised an error.");
future::err(mgmt_stop_and_reprovision_signaled_err)
}
},
);
// Wait for the watchdog to finish, and then send signal to the workload and management services.
// This way the edgeAgent can finish shutting down all modules.
let edge_rt_with_cleanup = edge_rt_with_mgmt_signal.then(move |res| {
mgmt_tx.send(()).unwrap_or(());
// A -> EdgeRt + Mgmt Stop and Reprovision Signal Future
// B -> Restart Signal Future
match res {
Ok(should_reprovision) => future::ok(should_reprovision),
Err(err) => future::err(err),
}
});
let shutdown = shutdown_signal.map(move |_| {
debug!("shutdown signaled");
// Signal the watchdog to shutdown
runt_tx.send(()).unwrap_or(());
});
tokio_runtime.spawn(shutdown);
let services = mgmt.join(edge_rt_with_cleanup).then(|result| match result {
Ok(((), should_reprovision)) => Ok(should_reprovision),
Err(err) => Err(err),
});
let should_reprovision = tokio_runtime.block_on(services)?;
Ok(should_reprovision)
}
fn init_runtime<M>(
settings: M::Settings,
tokio_runtime: &mut tokio::runtime::Runtime,
create_socket_channel_snd: UnboundedSender<ModuleAction>,
) -> Result<M::ModuleRuntime, Error>
where
M: MakeModuleRuntime + Send + 'static,
M::ModuleRuntime: Send,
M::Future: 'static,
{
info!("Initializing the module runtime...");
let runtime = tokio_runtime
.block_on(M::make_runtime(settings, create_socket_channel_snd))
.context(ErrorKind::Initialize(InitializeErrorReason::ModuleRuntime))?;
info!("Finished initializing the module runtime.");
Ok(runtime)
}
fn start_runtime<M>(
runtime: M::ModuleRuntime,
hostname: &str,
parent_hostname: &str,
device_id: &str,
settings: &M::Settings,
shutdown: Receiver<()>,
) -> Result<impl Future<Item = (), Error = Error>, Error>
where
M: MakeModuleRuntime,
M::ModuleRuntime: Clone + 'static,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
let spec = settings.agent().clone();
let env = build_env(spec.env(), hostname, parent_hostname, device_id, settings);
let spec = ModuleSpec::<<M::ModuleRuntime as ModuleRuntime>::Config>::new(
EDGE_RUNTIME_MODULE_NAME.to_string(),
spec.type_().to_string(),
spec.config().clone(),
env,
spec.image_pull_policy(),
)
.context(ErrorKind::Initialize(InitializeErrorReason::EdgeRuntime))?;
let watchdog = Watchdog::new(
runtime,
settings.watchdog().max_retries(),
settings.endpoints().aziot_identityd_url(),
);
let runtime_future = watchdog
.run_until(spec, EDGE_RUNTIME_MODULEID, shutdown.map_err(|_| ()))
.map_err(Error::from);
Ok(runtime_future)
}
// Add the environment variables needed by the EdgeAgent.
fn build_env<S>(
spec_env: &BTreeMap<String, String>,
hostname: &str,
parent_hostname: &str,
device_id: &str,
settings: &S,
) -> BTreeMap<String, String>
where
S: RuntimeSettings,
{
let mut env = BTreeMap::new();
env.insert(HOSTNAME_KEY.to_string(), hostname.to_string());
env.insert(
EDGEDEVICE_HOSTNAME_KEY.to_string(),
settings.hostname().to_string().to_lowercase(),
);
if parent_hostname.to_lowercase() != hostname.to_lowercase() {
env.insert(
GATEWAY_HOSTNAME_KEY.to_string(),
parent_hostname.to_string(),
);
}
env.insert(DEVICEID_KEY.to_string(), device_id.to_string());
env.insert(MODULEID_KEY.to_string(), EDGE_RUNTIME_MODULEID.to_string());
#[cfg(feature = "runtime-docker")]
let (workload_uri, management_uri, home_dir) = (
settings.connect().workload_uri().to_string(),
settings.connect().management_uri().to_string(),
settings.homedir().to_str().unwrap().to_string(),
);
env.insert(WORKLOAD_URI_KEY.to_string(), workload_uri);
env.insert(MANAGEMENT_URI_KEY.to_string(), management_uri);
env.insert(
WORKLOAD_LISTEN_MNT_URI.to_string(),
Listen::workload_mnt_uri(&home_dir),
);
env.insert(AUTHSCHEME_KEY.to_string(), AUTH_SCHEME.to_string());
env.insert(
EDGE_RUNTIME_MODE_KEY.to_string(),
EDGE_RUNTIME_MODE.to_string(),
);
for (key, val) in spec_env.iter() {
env.insert(key.clone(), val.clone());
}
env.insert(API_VERSION_KEY.to_string(), API_VERSION.to_string());
env
}
fn start_management<M>(
settings: &M::Settings,
runtime: &M::ModuleRuntime,
shutdown: Receiver<()>,
initiate_shutdown_and_reprovision: mpsc::UnboundedSender<()>,
) -> impl Future<Item = (), Error = Error>
where
M: MakeModuleRuntime,
M::ModuleRuntime: Authenticator<Request = Request<Body>> + Send + Sync + Clone + 'static,
<<M::ModuleRuntime as Authenticator>::AuthenticateFuture as Future>::Error: Fail,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config: DeserializeOwned + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
{
info!("Starting management API...");
let label = "mgmt".to_string();
let url = settings.listen().management_uri().clone();
let identity_uri = settings.endpoints().aziot_identityd_url().clone();
let identity_client = Arc::new(Mutex::new(identity_client::IdentityClient::new(
aziot_identity_common_http::ApiVersion::V2020_09_01,
&identity_uri,
)));
ManagementService::new(runtime, identity_client, initiate_shutdown_and_reprovision)
.then(move |service| -> Result<_, Error> {
let service = service.context(ErrorKind::Initialize(
InitializeErrorReason::ManagementService,
))?;
let service = LoggingService::new(label, service);
let run = Http::new()
.bind_url(url.clone(), service, MGMT_SOCKET_DEFAULT_PERMISSION)
.map_err(|err| {
err.context(ErrorKind::Initialize(
InitializeErrorReason::ManagementService,
))
})?
.run_until(shutdown.map_err(|_| ()), ConcurrencyThrottling::NoLimit)
.map_err(|err| Error::from(err.context(ErrorKind::ManagementService)));
info!("Listening on {} with 1 thread for management API.", url);
Ok(run)
})
.flatten()
}
#[cfg(test)]
mod tests {
use std::fmt;
use edgelet_docker::Settings;
use super::{Fail, RuntimeSettings};
static GOOD_SETTINGS_EDGE_CA_CERT_ID: &str = "test/linux/sample_settings.edge.ca.id.toml";
#[derive(Clone, Copy, Debug, Fail)]
pub struct | ;
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Error")
}
}
lazy_static::lazy_static! {
static ref ENV_LOCK: std::sync::Mutex<()> = Default::default();
}
#[test]
fn settings_for_edge_ca_cert() {
let _env_lock = ENV_LOCK.lock().expect("env lock poisoned");
std::env::set_var("AZIOT_EDGED_CONFIG", GOOD_SETTINGS_EDGE_CA_CERT_ID);
let settings = Settings::new().unwrap();
assert_eq!(settings.edge_ca_cert(), Some("iotedge-test-ca"));
}
}
| Error | identifier_name |
lib.rs | // Copyright (c) Microsoft. All rights reserved.
#![deny(rust_2018_idioms, warnings)]
#![deny(clippy::all, clippy::pedantic)]
#![allow(
clippy::default_trait_access,
clippy::doc_markdown, // clippy want the "IoT" of "IoT Hub" in a code fence
clippy::missing_errors_doc,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::shadow_unrelated,
clippy::too_many_arguments,
clippy::too_many_lines,
clippy::type_complexity,
clippy::use_self,
)]
pub mod app;
mod error;
pub mod logging;
pub mod signal;
pub mod watchdog;
pub mod workload;
pub mod workload_manager;
pub mod unix;
use futures::sync::mpsc;
use identity_client::IdentityClient;
use std::fs::DirBuilder;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::{collections::BTreeMap, fs::OpenOptions, io::Read};
use workload_manager::WorkloadManager;
use edgelet_core::{
crypto::{AZIOT_EDGED_CA_ALIAS, MANIFEST_TRUST_BUNDLE_ALIAS, TRUST_BUNDLE_ALIAS},
settings::AutoReprovisioningMode,
};
use edgelet_core::{
Authenticator, Listen, MakeModuleRuntime, Module, ModuleAction, ModuleRuntime,
ModuleRuntimeErrorReason, ModuleSpec, RuntimeSettings, WorkloadConfig,
};
use edgelet_http::logging::LoggingService;
use edgelet_http::{ConcurrencyThrottling, HyperExt, API_VERSION};
use edgelet_http_mgmt::ManagementService;
use edgelet_utils::log_failure;
pub use error::{Error, ErrorKind, InitializeErrorReason};
use failure::{Context, Fail, ResultExt};
use futures::future::Either;
use futures::sync::{
mpsc::{UnboundedReceiver, UnboundedSender},
oneshot::{self, Receiver},
};
use futures::{future, Future, Stream};
use hyper::server::conn::Http;
use hyper::{Body, Request};
use log::{debug, error, info, Level};
use serde::de::DeserializeOwned;
use serde::Serialize;
use sha2::{Digest, Sha256};
use crate::watchdog::Watchdog;
use crate::workload::WorkloadData;
const MGMT_SOCKET_DEFAULT_PERMISSION: u32 = 0o660;
const EDGE_RUNTIME_MODULEID: &str = "$edgeAgent";
const EDGE_RUNTIME_MODULE_NAME: &str = "edgeAgent";
const AUTH_SCHEME: &str = "sasToken";
/// The following constants are all environment variables names injected into
/// the Edge Agent container.
///
/// This variable holds the host name of the IoT Hub instance that edge agent
/// is expected to work with.
const HOSTNAME_KEY: &str = "IOTEDGE_IOTHUBHOSTNAME";
/// This variable holds the host name for the parent edge device. This name is used
/// by the edge agent to connect to parent edge hub for identity and twin operations.
const GATEWAY_HOSTNAME_KEY: &str = "IOTEDGE_GATEWAYHOSTNAME";
/// This variable holds the host name for the edge device. This name is used
/// by the edge agent to provide the edge hub container an alias name in the
/// network so that TLS cert validation works.
const EDGEDEVICE_HOSTNAME_KEY: &str = "EdgeDeviceHostName";
/// This variable holds the IoT Hub device identifier.
const DEVICEID_KEY: &str = "IOTEDGE_DEVICEID";
/// This variable holds the IoT Hub module identifier.
const MODULEID_KEY: &str = "IOTEDGE_MODULEID";
/// This variable holds the URI to use for connecting to the workload endpoint
/// in aziot-edged. This is used by the edge agent to connect to the workload API
/// for its own needs and is also used for volume mounting into module
/// containers when the URI refers to a Unix domain socket.
const WORKLOAD_URI_KEY: &str = "IOTEDGE_WORKLOADURI";
/// This variable holds the URI to use for connecting to the management
/// endpoint in aziot-edged. This is used by the edge agent for managing module
/// lifetimes and module identities.
const MANAGEMENT_URI_KEY: &str = "IOTEDGE_MANAGEMENTURI";
/// This variable holds the authentication scheme that modules are to use when
/// connecting to other server modules (like Edge Hub). The authentication
/// scheme can mean either that we are to use SAS tokens or a TLS client cert.
const AUTHSCHEME_KEY: &str = "IOTEDGE_AUTHSCHEME";
/// This is the key for the edge runtime mode.
const EDGE_RUNTIME_MODE_KEY: &str = "Mode";
/// This is the edge runtime mode - it should be iotedged, when aziot-edged starts edge runtime in single node mode.
const EDGE_RUNTIME_MODE: &str = "iotedged";
/// This is the key for the largest API version that this edgelet supports
const API_VERSION_KEY: &str = "IOTEDGE_APIVERSION";
/// This is the name of the cache subdirectory for settings state
const EDGE_SETTINGS_SUBDIR: &str = "cache";
/// This is the name of the settings backup file
const EDGE_PROVISIONING_STATE_FILENAME: &str = "provisioning_state";
// This is the name of the directory that contains the module folder
// with worlkload sockets inside, on the host
const WORKLOAD_LISTEN_MNT_URI: &str = "IOTEDGE_WORKLOADLISTEN_MNTURI";
// 2 hours
const AZIOT_EDGE_ID_CERT_MAX_DURATION_SECS: i64 = 2 * 3600;
// 90 days
const AZIOT_EDGE_SERVER_CERT_MAX_DURATION_SECS: i64 = 90 * 24 * 3600;
const STOP_TIME: Duration = Duration::from_secs(30);
/// This is the interval at which to poll Identity Service for device information.
const IS_GET_DEVICE_INFO_RETRY_INTERVAL_SECS: Duration = Duration::from_secs(5);
#[derive(Clone, serde::Serialize, serde::Deserialize, Debug)]
pub struct ProvisioningResult {
device_id: String,
gateway_host_name: String,
hub_name: String,
}
pub struct Main<M>
where
M: MakeModuleRuntime,
{
settings: M::Settings,
}
impl<M> Main<M>
where
M: MakeModuleRuntime + Send + 'static,
M::ModuleRuntime: 'static + Authenticator<Request = Request<Body>> + Clone + Send + Sync,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize + edgelet_core::module::NestedEdgeBodge,
M::Settings: 'static + Clone + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
<M::ModuleRuntime as Authenticator>::Error: Fail + Sync,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
pub fn new(settings: M::Settings) -> Self {
Main { settings }
}
// Allowing cognitive complexity errors for now. TODO: Refactor method later.
#[allow(clippy::cognitive_complexity)]
pub fn run_until<F, G>(self, make_shutdown_signal: G) -> Result<(), Error>
where
F: Future<Item = (), Error = ()> + Send + 'static,
G: Fn() -> F,
{
let Main { mut settings } = self;
let mut tokio_runtime = tokio::runtime::Runtime::new()
.context(ErrorKind::Initialize(InitializeErrorReason::Tokio))?;
let cache_subdir_path = Path::new(&settings.homedir()).join(EDGE_SETTINGS_SUBDIR);
// make sure the cache directory exists
DirBuilder::new()
.recursive(true)
.create(&cache_subdir_path)
.context(ErrorKind::Initialize(
InitializeErrorReason::CreateCacheDirectory,
))?;
let (create_socket_channel_snd, create_socket_channel_rcv) =
mpsc::unbounded::<ModuleAction>();
let runtime = init_runtime::<M>(
settings.clone(),
&mut tokio_runtime,
create_socket_channel_snd,
)?;
let url = settings.endpoints().aziot_identityd_url().clone();
let client = Arc::new(Mutex::new(identity_client::IdentityClient::new(
aziot_identity_common_http::ApiVersion::V2020_09_01,
&url,
)));
let provisioning_result = loop {
info!("Obtaining edge device provisioning data...");
match settings.auto_reprovisioning_mode() {
AutoReprovisioningMode::AlwaysOnStartup => {
tokio_runtime.block_on(reprovision_device(&client))?
}
AutoReprovisioningMode::Dynamic | AutoReprovisioningMode::OnErrorOnly => {}
}
let result =
tokio_runtime.block_on(
client
.lock()
.unwrap()
.get_device()
.map_err(|err| {
Error::from(err.context(ErrorKind::Initialize(
InitializeErrorReason::GetDeviceInfo,
)))
})
.and_then(|identity| match identity {
aziot_identity_common::Identity::Aziot(spec) => {
debug!("{}:{}", spec.hub_name, spec.device_id.0);
Ok(ProvisioningResult {
device_id: spec.device_id.0,
gateway_host_name: spec.gateway_host,
hub_name: spec.hub_name,
})
}
aziot_identity_common::Identity::Local(_) => Err(Error::from(
ErrorKind::Initialize(InitializeErrorReason::InvalidIdentityType),
)),
}),
);
match result {
Ok(provisioning_result) => {
break provisioning_result;
}
Err(err) => {
log_failure(Level::Warn, &err);
std::thread::sleep(IS_GET_DEVICE_INFO_RETRY_INTERVAL_SECS);
log::warn!("Retrying getting edge device provisioning information.");
} | // Normally aziot-edged will stop all modules when it shuts down. But if it crashed,
// modules will continue to run. On Linux systems where aziot-edged is responsible for
// creating/binding the socket (e.g., CentOS 7.5, which uses systemd but does not
// support systemd socket activation), modules will be left holding stale file
// descriptors for the workload and management APIs and calls on these APIs will
// begin to fail. Resilient modules should be able to deal with this, but we'll
// restart all modules to ensure a clean start.
info!("Stopping all modules...");
tokio_runtime
.block_on(runtime.stop_all(Some(STOP_TIME)))
.context(ErrorKind::Initialize(
InitializeErrorReason::StopExistingModules,
))?;
info!("Finished stopping modules.");
// Detect if the device was changed and if the device needs to be reconfigured
check_device_reconfigure::<M>(
&cache_subdir_path,
EDGE_PROVISIONING_STATE_FILENAME,
&provisioning_result,
&runtime,
&mut tokio_runtime,
)?;
settings
.agent_mut()
.parent_hostname_resolve(&provisioning_result.gateway_host_name);
let cfg = WorkloadData::new(
provisioning_result.hub_name,
provisioning_result.device_id,
settings
.edge_ca_cert()
.unwrap_or(AZIOT_EDGED_CA_ALIAS)
.to_string(),
settings
.edge_ca_key()
.unwrap_or(AZIOT_EDGED_CA_ALIAS)
.to_string(),
settings
.trust_bundle_cert()
.unwrap_or(TRUST_BUNDLE_ALIAS)
.to_string(),
settings
.manifest_trust_bundle_cert()
.unwrap_or(MANIFEST_TRUST_BUNDLE_ALIAS)
.to_string(),
AZIOT_EDGE_ID_CERT_MAX_DURATION_SECS,
AZIOT_EDGE_SERVER_CERT_MAX_DURATION_SECS,
);
let should_reprovision = start_api::<_, _, M>(
&settings,
&provisioning_result.gateway_host_name,
&runtime,
cfg,
make_shutdown_signal(),
&mut tokio_runtime,
create_socket_channel_rcv,
)?;
if should_reprovision {
tokio_runtime.block_on(reprovision_device(&client))?;
}
info!("Shutdown complete.");
Ok(())
}
}
fn reprovision_device(
identity_client: &Arc<Mutex<IdentityClient>>,
) -> impl Future<Item = (), Error = Error> {
let id_mgr = identity_client.lock().unwrap();
id_mgr
.reprovision_device()
.map_err(|err| Error::from(err.context(ErrorKind::ReprovisionFailure)))
}
fn check_device_reconfigure<M>(
subdir: &Path,
filename: &str,
provisioning_result: &ProvisioningResult,
runtime: &M::ModuleRuntime,
tokio_runtime: &mut tokio::runtime::Runtime,
) -> Result<(), Error>
where
M: MakeModuleRuntime + 'static,
{
info!("Detecting if device information has changed...");
let path = subdir.join(filename);
let diff = diff_with_cached(provisioning_result, &path);
if diff {
info!("Change to provisioning state detected.");
reconfigure::<M>(
subdir,
filename,
provisioning_result,
runtime,
tokio_runtime,
)?;
}
Ok(())
}
fn compute_provisioning_result_digest(
provisioning_result: &ProvisioningResult,
) -> Result<String, DiffError> {
let s = serde_json::to_string(provisioning_result)?;
Ok(base64::encode(&Sha256::digest_str(&s)))
}
fn diff_with_cached(provisioning_result: &ProvisioningResult, path: &Path) -> bool {
fn diff_with_cached_inner(
provisioning_result: &ProvisioningResult,
path: &Path,
) -> Result<bool, DiffError> {
let mut file = OpenOptions::new().read(true).open(path)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)?;
let encoded = compute_provisioning_result_digest(provisioning_result)?;
if encoded == buffer {
debug!("Provisioning state matches supplied provisioning result.");
Ok(false)
} else {
Ok(true)
}
}
match diff_with_cached_inner(provisioning_result, path) {
Ok(result) => result,
Err(err) => {
log_failure(Level::Debug, &err);
debug!("Error reading config backup.");
true
}
}
}
#[derive(Debug, Fail)]
#[fail(display = "Could not load provisioning result")]
pub struct DiffError(#[cause] Context<Box<dyn std::fmt::Display + Send + Sync>>);
impl From<std::io::Error> for DiffError {
fn from(err: std::io::Error) -> Self {
DiffError(Context::new(Box::new(err)))
}
}
impl From<serde_json::Error> for DiffError {
fn from(err: serde_json::Error) -> Self {
DiffError(Context::new(Box::new(err)))
}
}
fn reconfigure<M>(
subdir: &Path,
filename: &str,
provisioning_result: &ProvisioningResult,
runtime: &M::ModuleRuntime,
tokio_runtime: &mut tokio::runtime::Runtime,
) -> Result<(), Error>
where
M: MakeModuleRuntime + 'static,
{
info!("Removing all modules...");
tokio_runtime
.block_on(runtime.remove_all())
.context(ErrorKind::Initialize(
InitializeErrorReason::RemoveExistingModules,
))?;
info!("Finished removing modules.");
let path = subdir.join(filename);
// store provisioning result
let digest = compute_provisioning_result_digest(provisioning_result).context(
ErrorKind::Initialize(InitializeErrorReason::SaveProvisioning),
)?;
std::fs::write(path, digest.into_bytes()).context(ErrorKind::Initialize(
InitializeErrorReason::SaveProvisioning,
))?;
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn start_api<F, W, M>(
settings: &M::Settings,
parent_hostname: &str,
runtime: &M::ModuleRuntime,
workload_config: W,
shutdown_signal: F,
tokio_runtime: &mut tokio::runtime::Runtime,
create_socket_channel_rcv: UnboundedReceiver<ModuleAction>,
) -> Result<bool, Error>
where
F: Future<Item = (), Error = ()> + Send + 'static,
W: WorkloadConfig + Clone + Send + Sync + 'static,
M::ModuleRuntime: Authenticator<Request = Request<Body>> + Send + Sync + Clone + 'static,
M: MakeModuleRuntime + 'static,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize,
M::Settings: 'static,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
<M::ModuleRuntime as Authenticator>::Error: Fail + Sync,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
let iot_hub_name = workload_config.iot_hub_name().to_string();
let device_id = workload_config.device_id().to_string();
let (mgmt_tx, mgmt_rx) = oneshot::channel();
let (mgmt_stop_and_reprovision_tx, mgmt_stop_and_reprovision_rx) = mpsc::unbounded();
let mgmt = start_management::<M>(settings, runtime, mgmt_rx, mgmt_stop_and_reprovision_tx);
WorkloadManager::start_manager::<M>(
settings,
runtime,
workload_config,
tokio_runtime,
create_socket_channel_rcv,
)?;
let (runt_tx, runt_rx) = oneshot::channel();
let edge_rt = start_runtime::<M>(
runtime.clone(),
&iot_hub_name,
parent_hostname,
&device_id,
&settings,
runt_rx,
)?;
// This mpsc sender/receiver is used for getting notifications from the mgmt service
// indicating that the daemon should shut down and attempt to reprovision the device.
let mgmt_stop_and_reprovision_signaled =
mgmt_stop_and_reprovision_rx
.into_future()
.then(|res| match res {
Ok((Some(()), _)) | Ok((None, _)) => Ok(()),
Err(((), _)) => Err(Error::from(ErrorKind::ManagementService)),
});
let mgmt_stop_and_reprovision_signaled = match settings.auto_reprovisioning_mode() {
AutoReprovisioningMode::Dynamic => {
futures::future::Either::B(mgmt_stop_and_reprovision_signaled)
}
AutoReprovisioningMode::AlwaysOnStartup | AutoReprovisioningMode::OnErrorOnly => {
futures::future::Either::A(future::empty())
}
};
let edge_rt_with_mgmt_signal = edge_rt.select2(mgmt_stop_and_reprovision_signaled).then(
|res| match res {
Ok(Either::A((_edge_rt_ok, _mgmt_stop_and_reprovision_signaled_future))) => {
info!("Edge runtime will stop because of the shutdown signal.");
future::ok(false)
}
Ok(Either::B((_mgmt_stop_and_reprovision_signaled_ok, _edge_rt_future))) => {
info!("Edge runtime will stop because of the device reprovisioning signal.");
future::ok(true)
}
Err(Either::A((edge_rt_err, _mgmt_stop_and_reprovision_signaled_future))) => {
error!("Edge runtime will stop because the shutdown signal raised an error.");
future::err(edge_rt_err)
},
Err(Either::B((mgmt_stop_and_reprovision_signaled_err, _edge_rt_future))) => {
error!("Edge runtime will stop because the device reprovisioning signal raised an error.");
future::err(mgmt_stop_and_reprovision_signaled_err)
}
},
);
// Wait for the watchdog to finish, and then send signal to the workload and management services.
// This way the edgeAgent can finish shutting down all modules.
let edge_rt_with_cleanup = edge_rt_with_mgmt_signal.then(move |res| {
mgmt_tx.send(()).unwrap_or(());
// A -> EdgeRt + Mgmt Stop and Reprovision Signal Future
// B -> Restart Signal Future
match res {
Ok(should_reprovision) => future::ok(should_reprovision),
Err(err) => future::err(err),
}
});
let shutdown = shutdown_signal.map(move |_| {
debug!("shutdown signaled");
// Signal the watchdog to shutdown
runt_tx.send(()).unwrap_or(());
});
tokio_runtime.spawn(shutdown);
let services = mgmt.join(edge_rt_with_cleanup).then(|result| match result {
Ok(((), should_reprovision)) => Ok(should_reprovision),
Err(err) => Err(err),
});
let should_reprovision = tokio_runtime.block_on(services)?;
Ok(should_reprovision)
}
fn init_runtime<M>(
settings: M::Settings,
tokio_runtime: &mut tokio::runtime::Runtime,
create_socket_channel_snd: UnboundedSender<ModuleAction>,
) -> Result<M::ModuleRuntime, Error>
where
M: MakeModuleRuntime + Send + 'static,
M::ModuleRuntime: Send,
M::Future: 'static,
{
info!("Initializing the module runtime...");
let runtime = tokio_runtime
.block_on(M::make_runtime(settings, create_socket_channel_snd))
.context(ErrorKind::Initialize(InitializeErrorReason::ModuleRuntime))?;
info!("Finished initializing the module runtime.");
Ok(runtime)
}
fn start_runtime<M>(
runtime: M::ModuleRuntime,
hostname: &str,
parent_hostname: &str,
device_id: &str,
settings: &M::Settings,
shutdown: Receiver<()>,
) -> Result<impl Future<Item = (), Error = Error>, Error>
where
M: MakeModuleRuntime,
M::ModuleRuntime: Clone + 'static,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config:
Clone + DeserializeOwned + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
{
let spec = settings.agent().clone();
let env = build_env(spec.env(), hostname, parent_hostname, device_id, settings);
let spec = ModuleSpec::<<M::ModuleRuntime as ModuleRuntime>::Config>::new(
EDGE_RUNTIME_MODULE_NAME.to_string(),
spec.type_().to_string(),
spec.config().clone(),
env,
spec.image_pull_policy(),
)
.context(ErrorKind::Initialize(InitializeErrorReason::EdgeRuntime))?;
let watchdog = Watchdog::new(
runtime,
settings.watchdog().max_retries(),
settings.endpoints().aziot_identityd_url(),
);
let runtime_future = watchdog
.run_until(spec, EDGE_RUNTIME_MODULEID, shutdown.map_err(|_| ()))
.map_err(Error::from);
Ok(runtime_future)
}
// Add the environment variables needed by the EdgeAgent.
fn build_env<S>(
spec_env: &BTreeMap<String, String>,
hostname: &str,
parent_hostname: &str,
device_id: &str,
settings: &S,
) -> BTreeMap<String, String>
where
S: RuntimeSettings,
{
let mut env = BTreeMap::new();
env.insert(HOSTNAME_KEY.to_string(), hostname.to_string());
env.insert(
EDGEDEVICE_HOSTNAME_KEY.to_string(),
settings.hostname().to_string().to_lowercase(),
);
if parent_hostname.to_lowercase() != hostname.to_lowercase() {
env.insert(
GATEWAY_HOSTNAME_KEY.to_string(),
parent_hostname.to_string(),
);
}
env.insert(DEVICEID_KEY.to_string(), device_id.to_string());
env.insert(MODULEID_KEY.to_string(), EDGE_RUNTIME_MODULEID.to_string());
#[cfg(feature = "runtime-docker")]
let (workload_uri, management_uri, home_dir) = (
settings.connect().workload_uri().to_string(),
settings.connect().management_uri().to_string(),
settings.homedir().to_str().unwrap().to_string(),
);
env.insert(WORKLOAD_URI_KEY.to_string(), workload_uri);
env.insert(MANAGEMENT_URI_KEY.to_string(), management_uri);
env.insert(
WORKLOAD_LISTEN_MNT_URI.to_string(),
Listen::workload_mnt_uri(&home_dir),
);
env.insert(AUTHSCHEME_KEY.to_string(), AUTH_SCHEME.to_string());
env.insert(
EDGE_RUNTIME_MODE_KEY.to_string(),
EDGE_RUNTIME_MODE.to_string(),
);
for (key, val) in spec_env.iter() {
env.insert(key.clone(), val.clone());
}
env.insert(API_VERSION_KEY.to_string(), API_VERSION.to_string());
env
}
fn start_management<M>(
settings: &M::Settings,
runtime: &M::ModuleRuntime,
shutdown: Receiver<()>,
initiate_shutdown_and_reprovision: mpsc::UnboundedSender<()>,
) -> impl Future<Item = (), Error = Error>
where
M: MakeModuleRuntime,
M::ModuleRuntime: Authenticator<Request = Request<Body>> + Send + Sync + Clone + 'static,
<<M::ModuleRuntime as Authenticator>::AuthenticateFuture as Future>::Error: Fail,
for<'r> &'r <M::ModuleRuntime as ModuleRuntime>::Error: Into<ModuleRuntimeErrorReason>,
<<M::ModuleRuntime as ModuleRuntime>::Module as Module>::Config: DeserializeOwned + Serialize,
<M::ModuleRuntime as ModuleRuntime>::Logs: Into<Body>,
{
info!("Starting management API...");
let label = "mgmt".to_string();
let url = settings.listen().management_uri().clone();
let identity_uri = settings.endpoints().aziot_identityd_url().clone();
let identity_client = Arc::new(Mutex::new(identity_client::IdentityClient::new(
aziot_identity_common_http::ApiVersion::V2020_09_01,
&identity_uri,
)));
ManagementService::new(runtime, identity_client, initiate_shutdown_and_reprovision)
.then(move |service| -> Result<_, Error> {
let service = service.context(ErrorKind::Initialize(
InitializeErrorReason::ManagementService,
))?;
let service = LoggingService::new(label, service);
let run = Http::new()
.bind_url(url.clone(), service, MGMT_SOCKET_DEFAULT_PERMISSION)
.map_err(|err| {
err.context(ErrorKind::Initialize(
InitializeErrorReason::ManagementService,
))
})?
.run_until(shutdown.map_err(|_| ()), ConcurrencyThrottling::NoLimit)
.map_err(|err| Error::from(err.context(ErrorKind::ManagementService)));
info!("Listening on {} with 1 thread for management API.", url);
Ok(run)
})
.flatten()
}
#[cfg(test)]
mod tests {
use std::fmt;
use edgelet_docker::Settings;
use super::{Fail, RuntimeSettings};
static GOOD_SETTINGS_EDGE_CA_CERT_ID: &str = "test/linux/sample_settings.edge.ca.id.toml";
#[derive(Clone, Copy, Debug, Fail)]
pub struct Error;
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Error")
}
}
lazy_static::lazy_static! {
static ref ENV_LOCK: std::sync::Mutex<()> = Default::default();
}
#[test]
fn settings_for_edge_ca_cert() {
let _env_lock = ENV_LOCK.lock().expect("env lock poisoned");
std::env::set_var("AZIOT_EDGED_CONFIG", GOOD_SETTINGS_EDGE_CA_CERT_ID);
let settings = Settings::new().unwrap();
assert_eq!(settings.edge_ca_cert(), Some("iotedge-test-ca"));
}
} | };
};
info!("Finished provisioning edge device.");
| random_line_split |
util.go | package util
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"golang.org/x/exp/constraints"
)
const MaxUint = ^uint(0)
const MaxInt = int(MaxUint >> 1)
// Number is a Float or Integer
type Number interface {
constraints.Float | constraints.Integer
}
func LowestTrue(lowFalseStart int, pred func(int) (bool, error)) (int, error) |
func highestTrueRange(lowTrue int, highFalse int, pred func(int) (bool, error)) (int, error) {
if lowTrue >= highFalse {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): want arg1 < arg2", lowTrue, highFalse)
}
lt, err := pred(lowTrue)
if err != nil {
return 0, err
}
if !lt {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==false", lowTrue, highFalse, lowTrue)
}
hf, err := pred(highFalse)
if err != nil {
return 0, err
}
if hf {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==true", lowTrue, highFalse, highFalse)
}
for highFalse-lowTrue > 1 {
mid := (lowTrue + highFalse) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
lowTrue = mid
} else {
highFalse = mid
}
}
return lowTrue, nil
}
// TrimmedLines takes a string, splits it into lines, and trims each line of starting
// and ending whitespace.
func TrimmedLines(s string) []string {
result := strings.Split(strings.TrimSpace(s), "\n")
for i, r := range result {
result[i] = strings.TrimSpace(r)
}
return result
}
// SurroundingNewlineTrimmedLines trimes leading and trailing newlines from the input,
// then splits on newlines.
func SurroundingNewlineTrimmedLines(s string) []string {
return strings.Split(strings.Trim(s, "\r\n"), "\n")
}
// RemoveBlanks returns a slice of strings, but trimmed, and with empty/all-whitespace
// strings removed.
func RemoveBlanks(ss []string) []string {
var r []string
for _, s := range ss {
s = strings.TrimSpace(s)
if s == "" {
continue
}
r = append(r, s)
}
return r
}
// GroupString returns the input string, broken into runs of consecutive characters
func GroupString(s string) []string {
var result []string
last := -1
for i := range s {
c := s[i : i+1]
if len(result) == 0 || result[last][:1] != c {
result = append(result, c)
last++
} else {
result[last] = result[last] + c
}
}
return result
}
// StringsAndInts is a set of parsed strings and ints from a line of input.
type StringsAndInts struct {
Strings []string
Ints []int
}
// ParseStringsAndInts takes a slice of input lines, a slice of string field indexes,
// and a slice of int field indexes. It returns a slice of StringsAndInts structs,
// one per line.
func ParseStringsAndInts(lines []string, fields int, stringFields []int, intFields []int) ([]StringsAndInts, error) {
var result []StringsAndInts
for i, line := range lines {
sai := StringsAndInts{}
parts := strings.Split(line, " ")
if len(parts) != fields {
return nil, fmt.Errorf("want %d fields; got %d at line %d: %q", fields, len(parts), i, line)
}
for _, index := range stringFields {
sai.Strings = append(sai.Strings, parts[index])
}
for _, index := range intFields {
ii, err := strconv.Atoi(parts[index])
if err != nil {
return nil, fmt.Errorf("unparseable field %d at line %d (%q): %v", index, i, line, err)
}
sai.Ints = append(sai.Ints, ii)
}
result = append(result, sai)
}
return result, nil
}
// ParseGrid parses a set of lines of whitespacespace-separated ints into a 2D grid.
func ParseGrid(lines []string) ([][]int, error) {
var result [][]int
var fields int
for i, line := range lines {
parts := strings.Fields(line)
if i == 0 {
fields = len(parts)
} else {
if len(parts) != fields {
return nil, fmt.Errorf("line 0 has %d fields; line %d has %d: %q", fields, i+1, len(parts), line)
}
}
ints := make([]int, 0, len(parts))
for _, part := range parts {
theInt, err := strconv.Atoi(part)
if err != nil {
return nil, fmt.Errorf("error at line %d: %w", i+1, err)
}
ints = append(ints, theInt)
}
result = append(result, ints)
}
return result, nil
}
// LinesByParagraph takes a slice of strings, and returns a slice of slices of
// strings: it separates paragraphs (multiple newlines).
func LinesByParagraph(lines []string) [][]string {
var result [][]string
var chunk []string
for _, line := range lines {
if line == "" {
if chunk != nil {
result = append(result, chunk)
chunk = nil
}
} else {
chunk = append(chunk, line)
}
}
if chunk != nil {
result = append(result, chunk)
}
return result
}
// KeyValuePairs splits a space-separated sequence of colon-separated key:value
// pairs into a map.
func KeyValuePairs(input string) map[string]string {
result := make(map[string]string)
parts := strings.Split(input, " ")
for _, part := range parts {
pieces := strings.SplitN(part, ":", 2)
if len(pieces) == 2 {
result[pieces[0]] = pieces[1]
} else {
result[pieces[0]] = ""
}
}
return result
}
func Transpose(input [][]int) [][]int {
var result [][]int
for col := 0; col < len(input[0]); col++ {
var newRow []int
for row := range input {
newRow = append(newRow, input[row][col])
}
result = append(result, newRow)
}
return result
}
// ReadFile is just ioutil.ReadFile
func ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
// ReadLines reads a file and returns a slice of strings, one per line.
func ReadLines(filename string) ([]string, error) {
s, err := ReadFileString(filename)
if err != nil {
return nil, err
}
return strings.Split(s, "\n"), nil
}
// MustReadLines reads a file and returns a slice of strings, one per line, or dies.
// MustReadFileString reads a string from a file or panics.
func MustReadLines(filename string) []string {
s, err := ReadLines(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileString reads a file and returns it as a string, trimmed.
func ReadFileString(filename string) (string, error) {
bb, err := ReadFile(filename)
if err != nil {
return "", err
}
return strings.TrimRight(string(bb), " \t\r\n"), nil
}
// MustReadFileString reads a string from a file or panics.
func MustReadFileString(filename string) string {
s, err := ReadFileString(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileInts reads a file of ints, one per line
func ReadFileInts(filename string) ([]int, error) {
all, err := ReadFileString(filename)
if err != nil {
return nil, err
}
lines := TrimmedLines(all)
return StringsToInts(lines)
}
// MustReadFileInts reads a file of ints, one per line, or panics.
func MustReadFileInts(filename string) []int {
ints, err := ReadFileInts(filename)
if err != nil {
panic(err)
}
return ints
}
// ParseInts parses a string of separated ints into a slice of ints.
func ParseInts(commaString string, separator string) ([]int, error) {
input := strings.TrimSpace(commaString)
entries := strings.Split(input, separator)
ints := make([]int, len(entries))
for i, v := range entries {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// MustParseInts parses a string of separated ints into a slice of ints,
// or panics.
func MustParseInts(commaString string, separator string) []int {
ints, err := ParseInts(commaString, separator)
if err != nil {
panic(err)
}
return ints
}
// ParseLinesOfInts runs ParseInts on every string in the passed slice, passing
// back a slice of slices of ints.
func ParseLinesOfInts(commaStrings []string, separator string) ([][]int, error) {
result := make([][]int, 0, len(commaStrings))
for _, commaString := range commaStrings {
ints, err := ParseInts(commaString, separator)
if err != nil {
return nil, err
}
result = append(result, ints)
}
return result, nil
}
// MustStringsToInts takes a slice of strings and returns a slice of ints, or panics.
func MustStringsToInts(strings []string) []int {
ints, err := StringsToInts(strings)
if err != nil {
panic(err)
}
return ints
}
// StringsToInts takes a slice of strings and returns a slice of ints
func StringsToInts(strings []string) ([]int, error) {
ints := make([]int, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// StringsToInt32s takes a slice of strings and returns a slice of int32s
func StringsToInt32s(strings []string) ([]int32, error) {
ints := make([]int32, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
ints[i] = int32(i64)
}
return ints, nil
}
// Reverse reverses a string.
func Reverse(s string) string {
chars := []rune(s)
for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 {
chars[i], chars[j] = chars[j], chars[i]
}
return string(chars)
}
// Sum gives the sum of a slice of ints or floats.
func Sum[T Number](items []T) T {
var sum T
for _, item := range items {
sum += item
}
return sum
}
// MapSum returns a slice of the Sums of sublists.
func MapSum[T Number](slicesOfItems [][]T) []T {
result := make([]T, 0, len(slicesOfItems))
for _, items := range slicesOfItems {
result = append(result, Sum(items))
}
return result
}
// Max gives the max of a slice of ints or floats.
func Max[T Number](items []T) T {
var max T
if len(items) > 0 {
max = items[0]
}
for _, item := range items[1:] {
if item > max {
max = item
}
}
return max
}
// SplitBefore takes a slice and a predicate, and returns the slice chunked,
// split before any element where the predicate returns true.
func SplitBefore[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
last := -1
for _, elem := range slice {
if last < 0 || predicate(elem) {
result = append(result, []T{elem})
last++
} else {
result[last] = append(result[last], elem)
}
}
return result
}
// SplitAfter takes a slice and a predicate, and returns the slice chunked,
// split after any element where the predicate returns true.
func SplitAfter[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
var building []T
for _, elem := range slice {
building = append(building, elem)
if predicate(elem) {
result = append(result, building)
building = []T{}
}
}
if len(building) > 0 {
result = append(result, building)
}
return result
}
| {
lf, err := pred(lowFalseStart)
if err != nil {
return 0, err
}
if lf {
return 0, fmt.Errorf("lowestTrue expected pred(lowFalseStart)==false; got pred(%d)==true", lowFalseStart)
}
lowFalse := lowFalseStart
highTrue := 0
for lowFalse < MaxInt/2 {
attempt := lowFalse * 2
st, err := pred(attempt)
if err != nil {
return 0, err
}
if st {
highTrue = attempt
break
}
lowFalse <<= 1
}
if highTrue == 0 {
return 0, fmt.Errorf("cannot find high enough value to make pred(value)==true")
}
for highTrue-lowFalse > 1 {
mid := (lowFalse + highTrue) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
highTrue = mid
} else {
lowFalse = mid
}
}
return highTrue, nil
} | identifier_body |
util.go | package util
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"golang.org/x/exp/constraints"
)
const MaxUint = ^uint(0)
const MaxInt = int(MaxUint >> 1)
// Number is a Float or Integer
type Number interface {
constraints.Float | constraints.Integer
}
func LowestTrue(lowFalseStart int, pred func(int) (bool, error)) (int, error) {
lf, err := pred(lowFalseStart)
if err != nil {
return 0, err
}
if lf {
return 0, fmt.Errorf("lowestTrue expected pred(lowFalseStart)==false; got pred(%d)==true", lowFalseStart)
}
lowFalse := lowFalseStart
highTrue := 0
for lowFalse < MaxInt/2 {
attempt := lowFalse * 2
st, err := pred(attempt)
if err != nil {
return 0, err
}
if st {
highTrue = attempt
break
}
lowFalse <<= 1
}
if highTrue == 0 {
return 0, fmt.Errorf("cannot find high enough value to make pred(value)==true")
}
for highTrue-lowFalse > 1 {
mid := (lowFalse + highTrue) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
highTrue = mid
} else {
lowFalse = mid
}
}
return highTrue, nil
}
func highestTrueRange(lowTrue int, highFalse int, pred func(int) (bool, error)) (int, error) {
if lowTrue >= highFalse {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): want arg1 < arg2", lowTrue, highFalse)
}
lt, err := pred(lowTrue)
if err != nil {
return 0, err
}
if !lt {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==false", lowTrue, highFalse, lowTrue)
}
hf, err := pred(highFalse)
if err != nil {
return 0, err
}
if hf {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==true", lowTrue, highFalse, highFalse)
}
for highFalse-lowTrue > 1 {
mid := (lowTrue + highFalse) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
lowTrue = mid
} else {
highFalse = mid
}
}
return lowTrue, nil
}
// TrimmedLines takes a string, splits it into lines, and trims each line of starting
// and ending whitespace.
func TrimmedLines(s string) []string {
result := strings.Split(strings.TrimSpace(s), "\n")
for i, r := range result {
result[i] = strings.TrimSpace(r)
}
return result
}
// SurroundingNewlineTrimmedLines trimes leading and trailing newlines from the input,
// then splits on newlines.
func SurroundingNewlineTrimmedLines(s string) []string {
return strings.Split(strings.Trim(s, "\r\n"), "\n")
}
// RemoveBlanks returns a slice of strings, but trimmed, and with empty/all-whitespace
// strings removed.
func RemoveBlanks(ss []string) []string {
var r []string
for _, s := range ss {
s = strings.TrimSpace(s)
if s == "" {
continue
}
r = append(r, s)
}
return r
}
// GroupString returns the input string, broken into runs of consecutive characters
func GroupString(s string) []string {
var result []string
last := -1
for i := range s {
c := s[i : i+1]
if len(result) == 0 || result[last][:1] != c {
result = append(result, c)
last++
} else {
result[last] = result[last] + c
}
}
return result
}
// StringsAndInts is a set of parsed strings and ints from a line of input.
type StringsAndInts struct {
Strings []string
Ints []int
}
// ParseStringsAndInts takes a slice of input lines, a slice of string field indexes,
// and a slice of int field indexes. It returns a slice of StringsAndInts structs,
// one per line.
func ParseStringsAndInts(lines []string, fields int, stringFields []int, intFields []int) ([]StringsAndInts, error) {
var result []StringsAndInts
for i, line := range lines {
sai := StringsAndInts{}
parts := strings.Split(line, " ")
if len(parts) != fields {
return nil, fmt.Errorf("want %d fields; got %d at line %d: %q", fields, len(parts), i, line)
}
for _, index := range stringFields {
sai.Strings = append(sai.Strings, parts[index])
}
for _, index := range intFields {
ii, err := strconv.Atoi(parts[index])
if err != nil {
return nil, fmt.Errorf("unparseable field %d at line %d (%q): %v", index, i, line, err)
}
sai.Ints = append(sai.Ints, ii)
}
result = append(result, sai)
}
return result, nil
}
// ParseGrid parses a set of lines of whitespacespace-separated ints into a 2D grid.
func ParseGrid(lines []string) ([][]int, error) {
var result [][]int
var fields int
for i, line := range lines {
parts := strings.Fields(line)
if i == 0 {
fields = len(parts)
} else {
if len(parts) != fields {
return nil, fmt.Errorf("line 0 has %d fields; line %d has %d: %q", fields, i+1, len(parts), line)
}
}
ints := make([]int, 0, len(parts))
for _, part := range parts {
theInt, err := strconv.Atoi(part)
if err != nil {
return nil, fmt.Errorf("error at line %d: %w", i+1, err)
}
ints = append(ints, theInt)
}
result = append(result, ints)
}
return result, nil
}
// LinesByParagraph takes a slice of strings, and returns a slice of slices of
// strings: it separates paragraphs (multiple newlines).
func LinesByParagraph(lines []string) [][]string {
var result [][]string
var chunk []string
for _, line := range lines |
if chunk != nil {
result = append(result, chunk)
}
return result
}
// KeyValuePairs splits a space-separated sequence of colon-separated key:value
// pairs into a map.
func KeyValuePairs(input string) map[string]string {
result := make(map[string]string)
parts := strings.Split(input, " ")
for _, part := range parts {
pieces := strings.SplitN(part, ":", 2)
if len(pieces) == 2 {
result[pieces[0]] = pieces[1]
} else {
result[pieces[0]] = ""
}
}
return result
}
func Transpose(input [][]int) [][]int {
var result [][]int
for col := 0; col < len(input[0]); col++ {
var newRow []int
for row := range input {
newRow = append(newRow, input[row][col])
}
result = append(result, newRow)
}
return result
}
// ReadFile is just ioutil.ReadFile
func ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
// ReadLines reads a file and returns a slice of strings, one per line.
func ReadLines(filename string) ([]string, error) {
s, err := ReadFileString(filename)
if err != nil {
return nil, err
}
return strings.Split(s, "\n"), nil
}
// MustReadLines reads a file and returns a slice of strings, one per line, or dies.
// MustReadFileString reads a string from a file or panics.
func MustReadLines(filename string) []string {
s, err := ReadLines(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileString reads a file and returns it as a string, trimmed.
func ReadFileString(filename string) (string, error) {
bb, err := ReadFile(filename)
if err != nil {
return "", err
}
return strings.TrimRight(string(bb), " \t\r\n"), nil
}
// MustReadFileString reads a string from a file or panics.
func MustReadFileString(filename string) string {
s, err := ReadFileString(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileInts reads a file of ints, one per line
func ReadFileInts(filename string) ([]int, error) {
all, err := ReadFileString(filename)
if err != nil {
return nil, err
}
lines := TrimmedLines(all)
return StringsToInts(lines)
}
// MustReadFileInts reads a file of ints, one per line, or panics.
func MustReadFileInts(filename string) []int {
ints, err := ReadFileInts(filename)
if err != nil {
panic(err)
}
return ints
}
// ParseInts parses a string of separated ints into a slice of ints.
func ParseInts(commaString string, separator string) ([]int, error) {
input := strings.TrimSpace(commaString)
entries := strings.Split(input, separator)
ints := make([]int, len(entries))
for i, v := range entries {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// MustParseInts parses a string of separated ints into a slice of ints,
// or panics.
func MustParseInts(commaString string, separator string) []int {
ints, err := ParseInts(commaString, separator)
if err != nil {
panic(err)
}
return ints
}
// ParseLinesOfInts runs ParseInts on every string in the passed slice, passing
// back a slice of slices of ints.
func ParseLinesOfInts(commaStrings []string, separator string) ([][]int, error) {
result := make([][]int, 0, len(commaStrings))
for _, commaString := range commaStrings {
ints, err := ParseInts(commaString, separator)
if err != nil {
return nil, err
}
result = append(result, ints)
}
return result, nil
}
// MustStringsToInts takes a slice of strings and returns a slice of ints, or panics.
func MustStringsToInts(strings []string) []int {
ints, err := StringsToInts(strings)
if err != nil {
panic(err)
}
return ints
}
// StringsToInts takes a slice of strings and returns a slice of ints
func StringsToInts(strings []string) ([]int, error) {
ints := make([]int, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// StringsToInt32s takes a slice of strings and returns a slice of int32s
func StringsToInt32s(strings []string) ([]int32, error) {
ints := make([]int32, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
ints[i] = int32(i64)
}
return ints, nil
}
// Reverse reverses a string.
func Reverse(s string) string {
chars := []rune(s)
for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 {
chars[i], chars[j] = chars[j], chars[i]
}
return string(chars)
}
// Sum gives the sum of a slice of ints or floats.
func Sum[T Number](items []T) T {
var sum T
for _, item := range items {
sum += item
}
return sum
}
// MapSum returns a slice of the Sums of sublists.
func MapSum[T Number](slicesOfItems [][]T) []T {
result := make([]T, 0, len(slicesOfItems))
for _, items := range slicesOfItems {
result = append(result, Sum(items))
}
return result
}
// Max gives the max of a slice of ints or floats.
func Max[T Number](items []T) T {
var max T
if len(items) > 0 {
max = items[0]
}
for _, item := range items[1:] {
if item > max {
max = item
}
}
return max
}
// SplitBefore takes a slice and a predicate, and returns the slice chunked,
// split before any element where the predicate returns true.
func SplitBefore[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
last := -1
for _, elem := range slice {
if last < 0 || predicate(elem) {
result = append(result, []T{elem})
last++
} else {
result[last] = append(result[last], elem)
}
}
return result
}
// SplitAfter takes a slice and a predicate, and returns the slice chunked,
// split after any element where the predicate returns true.
func SplitAfter[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
var building []T
for _, elem := range slice {
building = append(building, elem)
if predicate(elem) {
result = append(result, building)
building = []T{}
}
}
if len(building) > 0 {
result = append(result, building)
}
return result
}
| {
if line == "" {
if chunk != nil {
result = append(result, chunk)
chunk = nil
}
} else {
chunk = append(chunk, line)
}
} | conditional_block |
util.go | package util
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"golang.org/x/exp/constraints"
)
const MaxUint = ^uint(0)
const MaxInt = int(MaxUint >> 1)
// Number is a Float or Integer
type Number interface {
constraints.Float | constraints.Integer
}
func LowestTrue(lowFalseStart int, pred func(int) (bool, error)) (int, error) {
lf, err := pred(lowFalseStart)
if err != nil {
return 0, err
} | }
lowFalse := lowFalseStart
highTrue := 0
for lowFalse < MaxInt/2 {
attempt := lowFalse * 2
st, err := pred(attempt)
if err != nil {
return 0, err
}
if st {
highTrue = attempt
break
}
lowFalse <<= 1
}
if highTrue == 0 {
return 0, fmt.Errorf("cannot find high enough value to make pred(value)==true")
}
for highTrue-lowFalse > 1 {
mid := (lowFalse + highTrue) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
highTrue = mid
} else {
lowFalse = mid
}
}
return highTrue, nil
}
func highestTrueRange(lowTrue int, highFalse int, pred func(int) (bool, error)) (int, error) {
if lowTrue >= highFalse {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): want arg1 < arg2", lowTrue, highFalse)
}
lt, err := pred(lowTrue)
if err != nil {
return 0, err
}
if !lt {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==false", lowTrue, highFalse, lowTrue)
}
hf, err := pred(highFalse)
if err != nil {
return 0, err
}
if hf {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==true", lowTrue, highFalse, highFalse)
}
for highFalse-lowTrue > 1 {
mid := (lowTrue + highFalse) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
lowTrue = mid
} else {
highFalse = mid
}
}
return lowTrue, nil
}
// TrimmedLines takes a string, splits it into lines, and trims each line of starting
// and ending whitespace.
func TrimmedLines(s string) []string {
result := strings.Split(strings.TrimSpace(s), "\n")
for i, r := range result {
result[i] = strings.TrimSpace(r)
}
return result
}
// SurroundingNewlineTrimmedLines trimes leading and trailing newlines from the input,
// then splits on newlines.
func SurroundingNewlineTrimmedLines(s string) []string {
return strings.Split(strings.Trim(s, "\r\n"), "\n")
}
// RemoveBlanks returns a slice of strings, but trimmed, and with empty/all-whitespace
// strings removed.
func RemoveBlanks(ss []string) []string {
var r []string
for _, s := range ss {
s = strings.TrimSpace(s)
if s == "" {
continue
}
r = append(r, s)
}
return r
}
// GroupString returns the input string, broken into runs of consecutive characters
func GroupString(s string) []string {
var result []string
last := -1
for i := range s {
c := s[i : i+1]
if len(result) == 0 || result[last][:1] != c {
result = append(result, c)
last++
} else {
result[last] = result[last] + c
}
}
return result
}
// StringsAndInts is a set of parsed strings and ints from a line of input.
type StringsAndInts struct {
Strings []string
Ints []int
}
// ParseStringsAndInts takes a slice of input lines, a slice of string field indexes,
// and a slice of int field indexes. It returns a slice of StringsAndInts structs,
// one per line.
func ParseStringsAndInts(lines []string, fields int, stringFields []int, intFields []int) ([]StringsAndInts, error) {
var result []StringsAndInts
for i, line := range lines {
sai := StringsAndInts{}
parts := strings.Split(line, " ")
if len(parts) != fields {
return nil, fmt.Errorf("want %d fields; got %d at line %d: %q", fields, len(parts), i, line)
}
for _, index := range stringFields {
sai.Strings = append(sai.Strings, parts[index])
}
for _, index := range intFields {
ii, err := strconv.Atoi(parts[index])
if err != nil {
return nil, fmt.Errorf("unparseable field %d at line %d (%q): %v", index, i, line, err)
}
sai.Ints = append(sai.Ints, ii)
}
result = append(result, sai)
}
return result, nil
}
// ParseGrid parses a set of lines of whitespacespace-separated ints into a 2D grid.
func ParseGrid(lines []string) ([][]int, error) {
var result [][]int
var fields int
for i, line := range lines {
parts := strings.Fields(line)
if i == 0 {
fields = len(parts)
} else {
if len(parts) != fields {
return nil, fmt.Errorf("line 0 has %d fields; line %d has %d: %q", fields, i+1, len(parts), line)
}
}
ints := make([]int, 0, len(parts))
for _, part := range parts {
theInt, err := strconv.Atoi(part)
if err != nil {
return nil, fmt.Errorf("error at line %d: %w", i+1, err)
}
ints = append(ints, theInt)
}
result = append(result, ints)
}
return result, nil
}
// LinesByParagraph takes a slice of strings, and returns a slice of slices of
// strings: it separates paragraphs (multiple newlines).
func LinesByParagraph(lines []string) [][]string {
var result [][]string
var chunk []string
for _, line := range lines {
if line == "" {
if chunk != nil {
result = append(result, chunk)
chunk = nil
}
} else {
chunk = append(chunk, line)
}
}
if chunk != nil {
result = append(result, chunk)
}
return result
}
// KeyValuePairs splits a space-separated sequence of colon-separated key:value
// pairs into a map.
func KeyValuePairs(input string) map[string]string {
result := make(map[string]string)
parts := strings.Split(input, " ")
for _, part := range parts {
pieces := strings.SplitN(part, ":", 2)
if len(pieces) == 2 {
result[pieces[0]] = pieces[1]
} else {
result[pieces[0]] = ""
}
}
return result
}
func Transpose(input [][]int) [][]int {
var result [][]int
for col := 0; col < len(input[0]); col++ {
var newRow []int
for row := range input {
newRow = append(newRow, input[row][col])
}
result = append(result, newRow)
}
return result
}
// ReadFile is just ioutil.ReadFile
func ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
// ReadLines reads a file and returns a slice of strings, one per line.
func ReadLines(filename string) ([]string, error) {
s, err := ReadFileString(filename)
if err != nil {
return nil, err
}
return strings.Split(s, "\n"), nil
}
// MustReadLines reads a file and returns a slice of strings, one per line, or dies.
// MustReadFileString reads a string from a file or panics.
func MustReadLines(filename string) []string {
s, err := ReadLines(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileString reads a file and returns it as a string, trimmed.
func ReadFileString(filename string) (string, error) {
bb, err := ReadFile(filename)
if err != nil {
return "", err
}
return strings.TrimRight(string(bb), " \t\r\n"), nil
}
// MustReadFileString reads a string from a file or panics.
func MustReadFileString(filename string) string {
s, err := ReadFileString(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileInts reads a file of ints, one per line
func ReadFileInts(filename string) ([]int, error) {
all, err := ReadFileString(filename)
if err != nil {
return nil, err
}
lines := TrimmedLines(all)
return StringsToInts(lines)
}
// MustReadFileInts reads a file of ints, one per line, or panics.
func MustReadFileInts(filename string) []int {
ints, err := ReadFileInts(filename)
if err != nil {
panic(err)
}
return ints
}
// ParseInts parses a string of separated ints into a slice of ints.
func ParseInts(commaString string, separator string) ([]int, error) {
input := strings.TrimSpace(commaString)
entries := strings.Split(input, separator)
ints := make([]int, len(entries))
for i, v := range entries {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// MustParseInts parses a string of separated ints into a slice of ints,
// or panics.
func MustParseInts(commaString string, separator string) []int {
ints, err := ParseInts(commaString, separator)
if err != nil {
panic(err)
}
return ints
}
// ParseLinesOfInts runs ParseInts on every string in the passed slice, passing
// back a slice of slices of ints.
func ParseLinesOfInts(commaStrings []string, separator string) ([][]int, error) {
result := make([][]int, 0, len(commaStrings))
for _, commaString := range commaStrings {
ints, err := ParseInts(commaString, separator)
if err != nil {
return nil, err
}
result = append(result, ints)
}
return result, nil
}
// MustStringsToInts takes a slice of strings and returns a slice of ints, or panics.
func MustStringsToInts(strings []string) []int {
ints, err := StringsToInts(strings)
if err != nil {
panic(err)
}
return ints
}
// StringsToInts takes a slice of strings and returns a slice of ints
func StringsToInts(strings []string) ([]int, error) {
ints := make([]int, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// StringsToInt32s takes a slice of strings and returns a slice of int32s
func StringsToInt32s(strings []string) ([]int32, error) {
ints := make([]int32, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
ints[i] = int32(i64)
}
return ints, nil
}
// Reverse reverses a string.
func Reverse(s string) string {
chars := []rune(s)
for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 {
chars[i], chars[j] = chars[j], chars[i]
}
return string(chars)
}
// Sum gives the sum of a slice of ints or floats.
func Sum[T Number](items []T) T {
var sum T
for _, item := range items {
sum += item
}
return sum
}
// MapSum returns a slice of the Sums of sublists.
func MapSum[T Number](slicesOfItems [][]T) []T {
result := make([]T, 0, len(slicesOfItems))
for _, items := range slicesOfItems {
result = append(result, Sum(items))
}
return result
}
// Max gives the max of a slice of ints or floats.
func Max[T Number](items []T) T {
var max T
if len(items) > 0 {
max = items[0]
}
for _, item := range items[1:] {
if item > max {
max = item
}
}
return max
}
// SplitBefore takes a slice and a predicate, and returns the slice chunked,
// split before any element where the predicate returns true.
func SplitBefore[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
last := -1
for _, elem := range slice {
if last < 0 || predicate(elem) {
result = append(result, []T{elem})
last++
} else {
result[last] = append(result[last], elem)
}
}
return result
}
// SplitAfter takes a slice and a predicate, and returns the slice chunked,
// split after any element where the predicate returns true.
func SplitAfter[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
var building []T
for _, elem := range slice {
building = append(building, elem)
if predicate(elem) {
result = append(result, building)
building = []T{}
}
}
if len(building) > 0 {
result = append(result, building)
}
return result
} | if lf {
return 0, fmt.Errorf("lowestTrue expected pred(lowFalseStart)==false; got pred(%d)==true", lowFalseStart) | random_line_split |
util.go | package util
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"golang.org/x/exp/constraints"
)
const MaxUint = ^uint(0)
const MaxInt = int(MaxUint >> 1)
// Number is a Float or Integer
type Number interface {
constraints.Float | constraints.Integer
}
func LowestTrue(lowFalseStart int, pred func(int) (bool, error)) (int, error) {
lf, err := pred(lowFalseStart)
if err != nil {
return 0, err
}
if lf {
return 0, fmt.Errorf("lowestTrue expected pred(lowFalseStart)==false; got pred(%d)==true", lowFalseStart)
}
lowFalse := lowFalseStart
highTrue := 0
for lowFalse < MaxInt/2 {
attempt := lowFalse * 2
st, err := pred(attempt)
if err != nil {
return 0, err
}
if st {
highTrue = attempt
break
}
lowFalse <<= 1
}
if highTrue == 0 {
return 0, fmt.Errorf("cannot find high enough value to make pred(value)==true")
}
for highTrue-lowFalse > 1 {
mid := (lowFalse + highTrue) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
highTrue = mid
} else {
lowFalse = mid
}
}
return highTrue, nil
}
func highestTrueRange(lowTrue int, highFalse int, pred func(int) (bool, error)) (int, error) {
if lowTrue >= highFalse {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): want arg1 < arg2", lowTrue, highFalse)
}
lt, err := pred(lowTrue)
if err != nil {
return 0, err
}
if !lt {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==false", lowTrue, highFalse, lowTrue)
}
hf, err := pred(highFalse)
if err != nil {
return 0, err
}
if hf {
return 0, fmt.Errorf("highestTrue(%d,%d, pred): pred(%d)==true", lowTrue, highFalse, highFalse)
}
for highFalse-lowTrue > 1 {
mid := (lowTrue + highFalse) / 2
mm, err := pred(mid)
if err != nil {
return 0, err
}
if mm {
lowTrue = mid
} else {
highFalse = mid
}
}
return lowTrue, nil
}
// TrimmedLines takes a string, splits it into lines, and trims each line of starting
// and ending whitespace.
func TrimmedLines(s string) []string {
result := strings.Split(strings.TrimSpace(s), "\n")
for i, r := range result {
result[i] = strings.TrimSpace(r)
}
return result
}
// SurroundingNewlineTrimmedLines trimes leading and trailing newlines from the input,
// then splits on newlines.
func SurroundingNewlineTrimmedLines(s string) []string {
return strings.Split(strings.Trim(s, "\r\n"), "\n")
}
// RemoveBlanks returns a slice of strings, but trimmed, and with empty/all-whitespace
// strings removed.
func RemoveBlanks(ss []string) []string {
var r []string
for _, s := range ss {
s = strings.TrimSpace(s)
if s == "" {
continue
}
r = append(r, s)
}
return r
}
// GroupString returns the input string, broken into runs of consecutive characters
func GroupString(s string) []string {
var result []string
last := -1
for i := range s {
c := s[i : i+1]
if len(result) == 0 || result[last][:1] != c {
result = append(result, c)
last++
} else {
result[last] = result[last] + c
}
}
return result
}
// StringsAndInts is a set of parsed strings and ints from a line of input.
type StringsAndInts struct {
Strings []string
Ints []int
}
// ParseStringsAndInts takes a slice of input lines, a slice of string field indexes,
// and a slice of int field indexes. It returns a slice of StringsAndInts structs,
// one per line.
func ParseStringsAndInts(lines []string, fields int, stringFields []int, intFields []int) ([]StringsAndInts, error) {
var result []StringsAndInts
for i, line := range lines {
sai := StringsAndInts{}
parts := strings.Split(line, " ")
if len(parts) != fields {
return nil, fmt.Errorf("want %d fields; got %d at line %d: %q", fields, len(parts), i, line)
}
for _, index := range stringFields {
sai.Strings = append(sai.Strings, parts[index])
}
for _, index := range intFields {
ii, err := strconv.Atoi(parts[index])
if err != nil {
return nil, fmt.Errorf("unparseable field %d at line %d (%q): %v", index, i, line, err)
}
sai.Ints = append(sai.Ints, ii)
}
result = append(result, sai)
}
return result, nil
}
// ParseGrid parses a set of lines of whitespacespace-separated ints into a 2D grid.
func ParseGrid(lines []string) ([][]int, error) {
var result [][]int
var fields int
for i, line := range lines {
parts := strings.Fields(line)
if i == 0 {
fields = len(parts)
} else {
if len(parts) != fields {
return nil, fmt.Errorf("line 0 has %d fields; line %d has %d: %q", fields, i+1, len(parts), line)
}
}
ints := make([]int, 0, len(parts))
for _, part := range parts {
theInt, err := strconv.Atoi(part)
if err != nil {
return nil, fmt.Errorf("error at line %d: %w", i+1, err)
}
ints = append(ints, theInt)
}
result = append(result, ints)
}
return result, nil
}
// LinesByParagraph takes a slice of strings, and returns a slice of slices of
// strings: it separates paragraphs (multiple newlines).
func | (lines []string) [][]string {
var result [][]string
var chunk []string
for _, line := range lines {
if line == "" {
if chunk != nil {
result = append(result, chunk)
chunk = nil
}
} else {
chunk = append(chunk, line)
}
}
if chunk != nil {
result = append(result, chunk)
}
return result
}
// KeyValuePairs splits a space-separated sequence of colon-separated key:value
// pairs into a map.
func KeyValuePairs(input string) map[string]string {
result := make(map[string]string)
parts := strings.Split(input, " ")
for _, part := range parts {
pieces := strings.SplitN(part, ":", 2)
if len(pieces) == 2 {
result[pieces[0]] = pieces[1]
} else {
result[pieces[0]] = ""
}
}
return result
}
func Transpose(input [][]int) [][]int {
var result [][]int
for col := 0; col < len(input[0]); col++ {
var newRow []int
for row := range input {
newRow = append(newRow, input[row][col])
}
result = append(result, newRow)
}
return result
}
// ReadFile is just ioutil.ReadFile
func ReadFile(filename string) ([]byte, error) {
return ioutil.ReadFile(filename)
}
// ReadLines reads a file and returns a slice of strings, one per line.
func ReadLines(filename string) ([]string, error) {
s, err := ReadFileString(filename)
if err != nil {
return nil, err
}
return strings.Split(s, "\n"), nil
}
// MustReadLines reads a file and returns a slice of strings, one per line, or dies.
// MustReadFileString reads a string from a file or panics.
func MustReadLines(filename string) []string {
s, err := ReadLines(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileString reads a file and returns it as a string, trimmed.
func ReadFileString(filename string) (string, error) {
bb, err := ReadFile(filename)
if err != nil {
return "", err
}
return strings.TrimRight(string(bb), " \t\r\n"), nil
}
// MustReadFileString reads a string from a file or panics.
func MustReadFileString(filename string) string {
s, err := ReadFileString(filename)
if err != nil {
panic(err)
}
return s
}
// ReadFileInts reads a file of ints, one per line
func ReadFileInts(filename string) ([]int, error) {
all, err := ReadFileString(filename)
if err != nil {
return nil, err
}
lines := TrimmedLines(all)
return StringsToInts(lines)
}
// MustReadFileInts reads a file of ints, one per line, or panics.
func MustReadFileInts(filename string) []int {
ints, err := ReadFileInts(filename)
if err != nil {
panic(err)
}
return ints
}
// ParseInts parses a string of separated ints into a slice of ints.
func ParseInts(commaString string, separator string) ([]int, error) {
input := strings.TrimSpace(commaString)
entries := strings.Split(input, separator)
ints := make([]int, len(entries))
for i, v := range entries {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// MustParseInts parses a string of separated ints into a slice of ints,
// or panics.
func MustParseInts(commaString string, separator string) []int {
ints, err := ParseInts(commaString, separator)
if err != nil {
panic(err)
}
return ints
}
// ParseLinesOfInts runs ParseInts on every string in the passed slice, passing
// back a slice of slices of ints.
func ParseLinesOfInts(commaStrings []string, separator string) ([][]int, error) {
result := make([][]int, 0, len(commaStrings))
for _, commaString := range commaStrings {
ints, err := ParseInts(commaString, separator)
if err != nil {
return nil, err
}
result = append(result, ints)
}
return result, nil
}
// MustStringsToInts takes a slice of strings and returns a slice of ints, or panics.
func MustStringsToInts(strings []string) []int {
ints, err := StringsToInts(strings)
if err != nil {
panic(err)
}
return ints
}
// StringsToInts takes a slice of strings and returns a slice of ints
func StringsToInts(strings []string) ([]int, error) {
ints := make([]int, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
ints[i] = int(i64)
}
return ints, nil
}
// StringsToInt32s takes a slice of strings and returns a slice of int32s
func StringsToInt32s(strings []string) ([]int32, error) {
ints := make([]int32, len(strings))
for i, v := range strings {
i64, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
ints[i] = int32(i64)
}
return ints, nil
}
// Reverse reverses a string.
func Reverse(s string) string {
chars := []rune(s)
for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 {
chars[i], chars[j] = chars[j], chars[i]
}
return string(chars)
}
// Sum gives the sum of a slice of ints or floats.
func Sum[T Number](items []T) T {
var sum T
for _, item := range items {
sum += item
}
return sum
}
// MapSum returns a slice of the Sums of sublists.
func MapSum[T Number](slicesOfItems [][]T) []T {
result := make([]T, 0, len(slicesOfItems))
for _, items := range slicesOfItems {
result = append(result, Sum(items))
}
return result
}
// Max gives the max of a slice of ints or floats.
func Max[T Number](items []T) T {
var max T
if len(items) > 0 {
max = items[0]
}
for _, item := range items[1:] {
if item > max {
max = item
}
}
return max
}
// SplitBefore takes a slice and a predicate, and returns the slice chunked,
// split before any element where the predicate returns true.
func SplitBefore[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
last := -1
for _, elem := range slice {
if last < 0 || predicate(elem) {
result = append(result, []T{elem})
last++
} else {
result[last] = append(result[last], elem)
}
}
return result
}
// SplitAfter takes a slice and a predicate, and returns the slice chunked,
// split after any element where the predicate returns true.
func SplitAfter[T any](slice []T, predicate func(T) bool) [][]T {
var result [][]T
var building []T
for _, elem := range slice {
building = append(building, elem)
if predicate(elem) {
result = append(result, building)
building = []T{}
}
}
if len(building) > 0 {
result = append(result, building)
}
return result
}
| LinesByParagraph | identifier_name |
FilterListViewModel.ts | // Copyright 2021 Esri
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// esri.core.accessorSupport
import { property, subclass } from "esri/core/accessorSupport/decorators";
// General esri Imports
import Accessor = require("esri/core/Accessor");
import { fromJSON } from "esri/geometry/support/jsonUtils";
// Config Panel Imports
import { Expression, ExtentSelector, FilterLayers, FilterOutput, LayerExpression } from "./interfaces/interfaces";
let accordionStyle = `
.accordion-item-content { padding: 0!important; }
.accordion-item-header-text { flex-direction: unset!important }`;
@subclass("FilterListViewModel")
class FilterListViewModel extends Accessor {
// ----------------------------------
//
// Public Variables
//
// ----------------------------------
@property()
map: __esri.WebMap;
@property()
layerExpressions: LayerExpression[] = [];
@property()
theme: "dark" | "light" = "light";
@property()
updatingExpression: boolean = false;
@property()
extentSelector: boolean = false;
@property()
extentSelectorConfig: ExtentSelector;
@property()
output: FilterOutput;
// ----------------------------------
//
// Private Variables
//
// ----------------------------------
private _layers: FilterLayers = {};
private _timeout: NodeJS.Timeout;
// ----------------------------------
//
// Lifecycle methods
//
// ----------------------------------
constructor(params?: any) {
super(params);
}
// ----------------------------------
//
// Public methods
//
// ----------------------------------
initExpressions(): void {
this.layerExpressions?.map((layerExpression) => {
let tmpExp = {};
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
if (!expression.checked) {
expression.checked = false;
} else {
tmpExp = {
[expression.id]: expression.definitionExpression
};
}
});
this._layers[id] = {
expressions: tmpExp,
operator: layerExpression?.operator ?? " AND "
};
if (Object.keys(tmpExp).length > 0) {
this._generateOutput(id);
}
});
}
initLayerHeader(accordionItem: HTMLCalciteAccordionItemElement) {
const style = document.createElement("style");
if (this.theme === "dark") {
accordionStyle += ` .accordion-item-header {
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
} else if (this.theme === "light") {
accordionStyle += ` .accordion-item-header {
background: rgb(244, 243, 244)!important;
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
}
style.innerHTML = accordionStyle;
accordionItem.shadowRoot.prepend(style);
}
initCheckbox(id: string, expression: Expression, checkbox: HTMLCalciteCheckboxElement) {
checkbox.addEventListener("calciteCheckboxChange", (event: CustomEvent) => {
const node = event.target as HTMLCalciteCheckboxElement;
expression.checked = node.checked;
if (node.checked) {
this._layers[id].expressions[expression.id] = {
definitionExpression: expression.definitionExpression
};
} else {
delete this._layers[id].expressions[expression.id];
}
this._generateOutput(id);
});
}
handleSelect(expression: Expression, layerId: string, event: Event): void {
const node = event.target as HTMLSelectElement;
if (node.value !== "default") {
const definitionExpression = `${expression.field} = '${node.value}'`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleComboSelectCreate(expression: Expression, layerId: string, comboBox: HTMLCalciteComboboxElement): void {
comboBox.addEventListener("calciteLookupChange", this.handleComboSelect.bind(this, expression, layerId));
}
handleComboSelect(expression: Expression, layerId: string, event: CustomEvent): void {
const items = event.detail as HTMLCalciteComboboxItemElement[];
if (items && items.length) {
const values = items.map((item) => `'${item.value}'`);
const definitionExpression = `${expression.field} IN (${values.join(",")})`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleNumberInputCreate(
expression: Expression,
layerId: string,
type: "min" | "max",
input: HTMLCalciteInputElement
): void {
input.addEventListener("calciteInputInput", this.handleNumberInput.bind(this, expression, layerId, type));
}
handleNumberInput(expression: Expression, layerId: string, type: "min" | "max", event: CustomEvent): void {
const { value } = event.detail;
this._debounceNumberInput(expression, layerId, value, type);
}
handleDatePickerCreate(expression: Expression, layerId: string, datePicker: HTMLCalciteInputDatePickerElement): void {
datePicker.start = this._convertToDate(expression?.start);
datePicker.end = this._convertToDate(expression?.end);
datePicker.min = this._convertToDate(expression?.min);
datePicker.max = this._convertToDate(expression?.max);
datePicker.addEventListener(
"calciteDatePickerRangeChange",
this.handleDatePickerRangeChange.bind(this, expression, layerId)
);
datePicker.addEventListener("input", this.handleDatePickerInputChange.bind(this, expression, layerId));
}
handleDatePickerRangeChange(expression: Expression, layerId: string, event: CustomEvent): void {
this.setExpressionDates(event.detail?.startDate, event.detail?.endDate, expression, layerId);
}
handleDatePickerInputChange(expression: Expression, layerId: string, event: Event) {
setTimeout(() => {
const datePicker = event.target as HTMLCalciteInputDatePickerElement;
this.setExpressionDates(datePicker.startAsDate, datePicker.endAsDate, expression, layerId);
}, 1000);
}
handleResetDatePicker(expression: Expression, layerId: string, event: Event): void {
const datePicker = document.getElementById(expression.id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.start = null;
datePicker.startAsDate = null;
datePicker.end = null;
datePicker.endAsDate = null;
delete this._layers[layerId].expressions[expression.id];
this._generateOutput(layerId);
}
setExpressionDates(startDate: Date, endDate: Date, expression: Expression, layerId: string): void {
const { expressions } = this._layers[layerId];
const start = startDate ? this._convertToDate(Math.floor(startDate.getTime()), true) : null;
const end = endDate ? this._convertToDate(Math.floor(endDate.getTime()), true) : null;
const chevron = end && !start ? "<" : !end && start ? ">" : null;
if (chevron) {
expressions[expression.id] = {
definitionExpression: `${expression.field} ${chevron} '${start ?? end}'`,
type: "date"
};
} else {
expressions[expression.id] = {
definitionExpression: `${expression.field} BETWEEN '${start}' AND '${end}'`,
type: "date"
};
}
this._generateOutput(layerId);
}
handleResetFilter(): void {
this.layerExpressions.map((layerExpression) => {
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
const { id, type, useCombobox } = expression;
if (type) {
if (type === "string" && !useCombobox) {
const select = document.getElementById(id.toString()) as HTMLSelectElement;
select.value = "default";
} else if (type === "string" && useCombobox) {
const combobox = document.getElementById(id.toString()) as HTMLCalciteComboboxElement;
const wrapper = combobox.shadowRoot.querySelector(".wrapper");
for (let i = 0; i < wrapper.children.length; i++) {
const child = wrapper.children[i];
if (child.nodeName === "CALCITE-CHIP") {
const chip = child as HTMLCalciteChipElement;
chip.style.display = "none";
}
}
for (let i = 0; i < combobox.children.length; i++) {
const comboboxItem = combobox.children[i] as HTMLCalciteComboboxItemElement;
comboboxItem.selected = false;
}
} else if (type === "date") {
const datePicker = document.getElementById(id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.startAsDate = new Date(expression?.start);
datePicker.endAsDate = new Date(expression?.end);
}
}
expression.checked = false;
});
this._layers[id].expressions = {};
}); | const query = layer.createQuery();
query.where = layer.definitionExpression ? layer.definitionExpression : "1=1";
if (layer?.capabilities?.query?.supportsCacheHint) {
query.cacheHint = true;
}
if (field) {
query.outFields = [field];
query.returnDistinctValues = true;
query.returnGeometry = false;
if (this.extentSelector && this.extentSelectorConfig) {
query.geometry = this._getExtent(this.extentSelector, this.extentSelectorConfig);
query.spatialRelationship = "intersects";
}
const results = await layer.queryFeatures(query);
return results?.features;
}
}
return [];
}
// ----------------------------------
//
// Private methods
//
// ----------------------------------
private _generateOutput(id: string): void {
const defExpressions = [];
Object.values(this._layers[id].expressions).forEach(({ definitionExpression }) =>
defExpressions.push(definitionExpression)
);
const newOutput = {
id,
definitionExpression: defExpressions.join(this._layers[id].operator)
};
this.updatingExpression = true;
this.set("output", newOutput);
}
private _convertToDate(date: string | number, includeTime: boolean = false): string {
if (date) {
const tmpDate = new Date(date);
const formattedDate = `${tmpDate.getFullYear()}-${tmpDate.getMonth() + 1}-${tmpDate.getDate()}`;
if (includeTime) {
const time = `${tmpDate.getHours()}:${tmpDate.getMinutes()}:${tmpDate.getSeconds()}`;
return `${formattedDate} ${time}`;
} else {
return formattedDate;
}
}
return null;
}
private _getExtent(extentSelector: boolean, extentSelectorConfig: ExtentSelector): __esri.Geometry {
if (extentSelector && extentSelectorConfig) {
const { constraints } = extentSelectorConfig;
let newConstraints = { ...constraints };
const geometry = newConstraints?.geometry;
if (geometry) {
const tmpExtent = fromJSON(geometry);
if (tmpExtent && (tmpExtent?.type === "extent" || tmpExtent?.type === "polygon")) {
return tmpExtent;
}
}
}
return null;
}
private _debounceNumberInput(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
if (this._timeout) {
clearTimeout(this._timeout);
}
this._timeout = setTimeout(() => {
this._updateExpressions(expression, layerId, value, type);
this._generateOutput(layerId);
}, 800);
}
private _updateExpressions(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
const { expressions } = this._layers[layerId];
const { id } = expression;
if (expressions[id]) {
expressions[id] = {
...expressions[id],
type: "number",
[type]: value
};
if (!expressions[id]?.min && !expressions[id]?.max) {
delete expressions[id];
this._generateOutput(layerId);
return;
}
} else {
expressions[id] = {
definitionExpression: null,
type: "number",
[type]: value
};
}
this._setNumberRangeExpression(expression, layerId, value);
}
private _setNumberRangeExpression(expression: Expression, layerId: string, value: string): void {
const { expressions } = this._layers[layerId];
const { field, id } = expression;
const displayName = document.getElementById(`${id}-name`);
const inputMessage = document.getElementById(`${id}-error`) as HTMLCalciteInputMessageElement;
const min = expressions[id]?.min;
const max = expressions[id]?.max;
const chevron = max && !min ? "<" : !max && min ? ">" : null;
if (chevron) {
const exprValue = value ? value : max ? max : min ? min : null;
if (exprValue) {
displayName.style.color = "inherit";
inputMessage.active = false;
expressions[id].definitionExpression = `${field} ${chevron} ${exprValue}`;
} else {
delete expressions[id];
}
} else if (Number(max) < Number(min)) {
displayName.style.color = "red";
inputMessage.active = true;
} else {
displayName.style.color = "inherit";
inputMessage.active = false;
expressions[id].definitionExpression = `${field} BETWEEN ${min} AND ${max}`;
}
}
}
export = FilterListViewModel; | }
async calculateStatistics(layerId: string, field: string): Promise<__esri.Graphic[]> {
const layer = this.map.layers.find(({ id }) => id === layerId) as __esri.FeatureLayer;
if (layer && layer.type === "feature") { | random_line_split |
FilterListViewModel.ts | // Copyright 2021 Esri
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// esri.core.accessorSupport
import { property, subclass } from "esri/core/accessorSupport/decorators";
// General esri Imports
import Accessor = require("esri/core/Accessor");
import { fromJSON } from "esri/geometry/support/jsonUtils";
// Config Panel Imports
import { Expression, ExtentSelector, FilterLayers, FilterOutput, LayerExpression } from "./interfaces/interfaces";
let accordionStyle = `
.accordion-item-content { padding: 0!important; }
.accordion-item-header-text { flex-direction: unset!important }`;
@subclass("FilterListViewModel")
class FilterListViewModel extends Accessor {
// ----------------------------------
//
// Public Variables
//
// ----------------------------------
@property()
map: __esri.WebMap;
@property()
layerExpressions: LayerExpression[] = [];
@property()
theme: "dark" | "light" = "light";
@property()
updatingExpression: boolean = false;
@property()
extentSelector: boolean = false;
@property()
extentSelectorConfig: ExtentSelector;
@property()
output: FilterOutput;
// ----------------------------------
//
// Private Variables
//
// ----------------------------------
private _layers: FilterLayers = {};
private _timeout: NodeJS.Timeout;
// ----------------------------------
//
// Lifecycle methods
//
// ----------------------------------
constructor(params?: any) {
super(params);
}
// ----------------------------------
//
// Public methods
//
// ----------------------------------
initExpressions(): void {
this.layerExpressions?.map((layerExpression) => {
let tmpExp = {};
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
if (!expression.checked) {
expression.checked = false;
} else {
tmpExp = {
[expression.id]: expression.definitionExpression
};
}
});
this._layers[id] = {
expressions: tmpExp,
operator: layerExpression?.operator ?? " AND "
};
if (Object.keys(tmpExp).length > 0) {
this._generateOutput(id);
}
});
}
initLayerHeader(accordionItem: HTMLCalciteAccordionItemElement) {
const style = document.createElement("style");
if (this.theme === "dark") {
accordionStyle += ` .accordion-item-header {
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
} else if (this.theme === "light") {
accordionStyle += ` .accordion-item-header {
background: rgb(244, 243, 244)!important;
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
}
style.innerHTML = accordionStyle;
accordionItem.shadowRoot.prepend(style);
}
initCheckbox(id: string, expression: Expression, checkbox: HTMLCalciteCheckboxElement) {
checkbox.addEventListener("calciteCheckboxChange", (event: CustomEvent) => {
const node = event.target as HTMLCalciteCheckboxElement;
expression.checked = node.checked;
if (node.checked) {
this._layers[id].expressions[expression.id] = {
definitionExpression: expression.definitionExpression
};
} else {
delete this._layers[id].expressions[expression.id];
}
this._generateOutput(id);
});
}
handleSelect(expression: Expression, layerId: string, event: Event): void {
const node = event.target as HTMLSelectElement;
if (node.value !== "default") {
const definitionExpression = `${expression.field} = '${node.value}'`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleComboSelectCreate(expression: Expression, layerId: string, comboBox: HTMLCalciteComboboxElement): void {
comboBox.addEventListener("calciteLookupChange", this.handleComboSelect.bind(this, expression, layerId));
}
handleComboSelect(expression: Expression, layerId: string, event: CustomEvent): void {
const items = event.detail as HTMLCalciteComboboxItemElement[];
if (items && items.length) {
const values = items.map((item) => `'${item.value}'`);
const definitionExpression = `${expression.field} IN (${values.join(",")})`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleNumberInputCreate(
expression: Expression,
layerId: string,
type: "min" | "max",
input: HTMLCalciteInputElement
): void {
input.addEventListener("calciteInputInput", this.handleNumberInput.bind(this, expression, layerId, type));
}
handleNumberInput(expression: Expression, layerId: string, type: "min" | "max", event: CustomEvent): void {
const { value } = event.detail;
this._debounceNumberInput(expression, layerId, value, type);
}
handleDatePickerCreate(expression: Expression, layerId: string, datePicker: HTMLCalciteInputDatePickerElement): void {
datePicker.start = this._convertToDate(expression?.start);
datePicker.end = this._convertToDate(expression?.end);
datePicker.min = this._convertToDate(expression?.min);
datePicker.max = this._convertToDate(expression?.max);
datePicker.addEventListener(
"calciteDatePickerRangeChange",
this.handleDatePickerRangeChange.bind(this, expression, layerId)
);
datePicker.addEventListener("input", this.handleDatePickerInputChange.bind(this, expression, layerId));
}
handleDatePickerRangeChange(expression: Expression, layerId: string, event: CustomEvent): void {
this.setExpressionDates(event.detail?.startDate, event.detail?.endDate, expression, layerId);
}
handleDatePickerInputChange(expression: Expression, layerId: string, event: Event) {
| handleResetDatePicker(expression: Expression, layerId: string, event: Event): void {
const datePicker = document.getElementById(expression.id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.start = null;
datePicker.startAsDate = null;
datePicker.end = null;
datePicker.endAsDate = null;
delete this._layers[layerId].expressions[expression.id];
this._generateOutput(layerId);
}
setExpressionDates(startDate: Date, endDate: Date, expression: Expression, layerId: string): void {
const { expressions } = this._layers[layerId];
const start = startDate ? this._convertToDate(Math.floor(startDate.getTime()), true) : null;
const end = endDate ? this._convertToDate(Math.floor(endDate.getTime()), true) : null;
const chevron = end && !start ? "<" : !end && start ? ">" : null;
if (chevron) {
expressions[expression.id] = {
definitionExpression: `${expression.field} ${chevron} '${start ?? end}'`,
type: "date"
};
} else {
expressions[expression.id] = {
definitionExpression: `${expression.field} BETWEEN '${start}' AND '${end}'`,
type: "date"
};
}
this._generateOutput(layerId);
}
handleResetFilter(): void {
this.layerExpressions.map((layerExpression) => {
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
const { id, type, useCombobox } = expression;
if (type) {
if (type === "string" && !useCombobox) {
const select = document.getElementById(id.toString()) as HTMLSelectElement;
select.value = "default";
} else if (type === "string" && useCombobox) {
const combobox = document.getElementById(id.toString()) as HTMLCalciteComboboxElement;
const wrapper = combobox.shadowRoot.querySelector(".wrapper");
for (let i = 0; i < wrapper.children.length; i++) {
const child = wrapper.children[i];
if (child.nodeName === "CALCITE-CHIP") {
const chip = child as HTMLCalciteChipElement;
chip.style.display = "none";
}
}
for (let i = 0; i < combobox.children.length; i++) {
const comboboxItem = combobox.children[i] as HTMLCalciteComboboxItemElement;
comboboxItem.selected = false;
}
} else if (type === "date") {
const datePicker = document.getElementById(id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.startAsDate = new Date(expression?.start);
datePicker.endAsDate = new Date(expression?.end);
}
}
expression.checked = false;
});
this._layers[id].expressions = {};
});
}
async calculateStatistics(layerId: string, field: string): Promise<__esri.Graphic[]> {
const layer = this.map.layers.find(({ id }) => id === layerId) as __esri.FeatureLayer;
if (layer && layer.type === "feature") {
const query = layer.createQuery();
query.where = layer.definitionExpression ? layer.definitionExpression : "1=1";
if (layer?.capabilities?.query?.supportsCacheHint) {
query.cacheHint = true;
}
if (field) {
query.outFields = [field];
query.returnDistinctValues = true;
query.returnGeometry = false;
if (this.extentSelector && this.extentSelectorConfig) {
query.geometry = this._getExtent(this.extentSelector, this.extentSelectorConfig);
query.spatialRelationship = "intersects";
}
const results = await layer.queryFeatures(query);
return results?.features;
}
}
return [];
}
// ----------------------------------
//
// Private methods
//
// ----------------------------------
private _generateOutput(id: string): void {
const defExpressions = [];
Object.values(this._layers[id].expressions).forEach(({ definitionExpression }) =>
defExpressions.push(definitionExpression)
);
const newOutput = {
id,
definitionExpression: defExpressions.join(this._layers[id].operator)
};
this.updatingExpression = true;
this.set("output", newOutput);
}
private _convertToDate(date: string | number, includeTime: boolean = false): string {
if (date) {
const tmpDate = new Date(date);
const formattedDate = `${tmpDate.getFullYear()}-${tmpDate.getMonth() + 1}-${tmpDate.getDate()}`;
if (includeTime) {
const time = `${tmpDate.getHours()}:${tmpDate.getMinutes()}:${tmpDate.getSeconds()}`;
return `${formattedDate} ${time}`;
} else {
return formattedDate;
}
}
return null;
}
private _getExtent(extentSelector: boolean, extentSelectorConfig: ExtentSelector): __esri.Geometry {
if (extentSelector && extentSelectorConfig) {
const { constraints } = extentSelectorConfig;
let newConstraints = { ...constraints };
const geometry = newConstraints?.geometry;
if (geometry) {
const tmpExtent = fromJSON(geometry);
if (tmpExtent && (tmpExtent?.type === "extent" || tmpExtent?.type === "polygon")) {
return tmpExtent;
}
}
}
return null;
}
private _debounceNumberInput(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
if (this._timeout) {
clearTimeout(this._timeout);
}
this._timeout = setTimeout(() => {
this._updateExpressions(expression, layerId, value, type);
this._generateOutput(layerId);
}, 800);
}
private _updateExpressions(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
const { expressions } = this._layers[layerId];
const { id } = expression;
if (expressions[id]) {
expressions[id] = {
...expressions[id],
type: "number",
[type]: value
};
if (!expressions[id]?.min && !expressions[id]?.max) {
delete expressions[id];
this._generateOutput(layerId);
return;
}
} else {
expressions[id] = {
definitionExpression: null,
type: "number",
[type]: value
};
}
this._setNumberRangeExpression(expression, layerId, value);
}
private _setNumberRangeExpression(expression: Expression, layerId: string, value: string): void {
const { expressions } = this._layers[layerId];
const { field, id } = expression;
const displayName = document.getElementById(`${id}-name`);
const inputMessage = document.getElementById(`${id}-error`) as HTMLCalciteInputMessageElement;
const min = expressions[id]?.min;
const max = expressions[id]?.max;
const chevron = max && !min ? "<" : !max && min ? ">" : null;
if (chevron) {
const exprValue = value ? value : max ? max : min ? min : null;
if (exprValue) {
displayName.style.color = "inherit";
inputMessage.active = false;
expressions[id].definitionExpression = `${field} ${chevron} ${exprValue}`;
} else {
delete expressions[id];
}
} else if (Number(max) < Number(min)) {
displayName.style.color = "red";
inputMessage.active = true;
} else {
displayName.style.color = "inherit";
inputMessage.active = false;
expressions[id].definitionExpression = `${field} BETWEEN ${min} AND ${max}`;
}
}
}
export = FilterListViewModel;
| setTimeout(() => {
const datePicker = event.target as HTMLCalciteInputDatePickerElement;
this.setExpressionDates(datePicker.startAsDate, datePicker.endAsDate, expression, layerId);
}, 1000);
}
| identifier_body |
FilterListViewModel.ts | // Copyright 2021 Esri
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// esri.core.accessorSupport
import { property, subclass } from "esri/core/accessorSupport/decorators";
// General esri Imports
import Accessor = require("esri/core/Accessor");
import { fromJSON } from "esri/geometry/support/jsonUtils";
// Config Panel Imports
import { Expression, ExtentSelector, FilterLayers, FilterOutput, LayerExpression } from "./interfaces/interfaces";
let accordionStyle = `
.accordion-item-content { padding: 0!important; }
.accordion-item-header-text { flex-direction: unset!important }`;
@subclass("FilterListViewModel")
class FilterListViewModel extends Accessor {
// ----------------------------------
//
// Public Variables
//
// ----------------------------------
@property()
map: __esri.WebMap;
@property()
layerExpressions: LayerExpression[] = [];
@property()
theme: "dark" | "light" = "light";
@property()
updatingExpression: boolean = false;
@property()
extentSelector: boolean = false;
@property()
extentSelectorConfig: ExtentSelector;
@property()
output: FilterOutput;
// ----------------------------------
//
// Private Variables
//
// ----------------------------------
private _layers: FilterLayers = {};
private _timeout: NodeJS.Timeout;
// ----------------------------------
//
// Lifecycle methods
//
// ----------------------------------
constructor(params?: any) {
super(params);
}
// ----------------------------------
//
// Public methods
//
// ----------------------------------
initExpressions(): void {
this.layerExpressions?.map((layerExpression) => {
let tmpExp = {};
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
if (!expression.checked) {
expression.checked = false;
} else {
tmpExp = {
[expression.id]: expression.definitionExpression
};
}
});
this._layers[id] = {
expressions: tmpExp,
operator: layerExpression?.operator ?? " AND "
};
if (Object.keys(tmpExp).length > 0) {
this._generateOutput(id);
}
});
}
initLayerHeader(accordionItem: HTMLCalciteAccordionItemElement) {
const style = document.createElement("style");
if (this.theme === "dark") {
accordionStyle += ` .accordion-item-header {
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
} else if (this.theme === "light") {
accordionStyle += ` .accordion-item-header {
background: rgb(244, 243, 244)!important;
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
}
style.innerHTML = accordionStyle;
accordionItem.shadowRoot.prepend(style);
}
initCheckbox(id: string, expression: Expression, checkbox: HTMLCalciteCheckboxElement) {
checkbox.addEventListener("calciteCheckboxChange", (event: CustomEvent) => {
const node = event.target as HTMLCalciteCheckboxElement;
expression.checked = node.checked;
if (node.checked) {
this._layers[id].expressions[expression.id] = {
definitionExpression: expression.definitionExpression
};
} else {
delete this._layers[id].expressions[expression.id];
}
this._generateOutput(id);
});
}
handleSelect(expression: Expression, layerId: string, event: Event): void {
const node = event.target as HTMLSelectElement;
if (node.value !== "default") {
const definitionExpression = `${expression.field} = '${node.value}'`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleComboSelectCreate(expression: Expression, layerId: string, comboBox: HTMLCalciteComboboxElement): void {
comboBox.addEventListener("calciteLookupChange", this.handleComboSelect.bind(this, expression, layerId));
}
handleComboSelect(expression: Expression, layerId: string, event: CustomEvent): void {
const items = event.detail as HTMLCalciteComboboxItemElement[];
if (items && items.length) {
const values = items.map((item) => `'${item.value}'`);
const definitionExpression = `${expression.field} IN (${values.join(",")})`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
| this._generateOutput(layerId);
}
handleNumberInputCreate(
expression: Expression,
layerId: string,
type: "min" | "max",
input: HTMLCalciteInputElement
): void {
input.addEventListener("calciteInputInput", this.handleNumberInput.bind(this, expression, layerId, type));
}
handleNumberInput(expression: Expression, layerId: string, type: "min" | "max", event: CustomEvent): void {
const { value } = event.detail;
this._debounceNumberInput(expression, layerId, value, type);
}
handleDatePickerCreate(expression: Expression, layerId: string, datePicker: HTMLCalciteInputDatePickerElement): void {
datePicker.start = this._convertToDate(expression?.start);
datePicker.end = this._convertToDate(expression?.end);
datePicker.min = this._convertToDate(expression?.min);
datePicker.max = this._convertToDate(expression?.max);
datePicker.addEventListener(
"calciteDatePickerRangeChange",
this.handleDatePickerRangeChange.bind(this, expression, layerId)
);
datePicker.addEventListener("input", this.handleDatePickerInputChange.bind(this, expression, layerId));
}
handleDatePickerRangeChange(expression: Expression, layerId: string, event: CustomEvent): void {
this.setExpressionDates(event.detail?.startDate, event.detail?.endDate, expression, layerId);
}
handleDatePickerInputChange(expression: Expression, layerId: string, event: Event) {
setTimeout(() => {
const datePicker = event.target as HTMLCalciteInputDatePickerElement;
this.setExpressionDates(datePicker.startAsDate, datePicker.endAsDate, expression, layerId);
}, 1000);
}
handleResetDatePicker(expression: Expression, layerId: string, event: Event): void {
const datePicker = document.getElementById(expression.id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.start = null;
datePicker.startAsDate = null;
datePicker.end = null;
datePicker.endAsDate = null;
delete this._layers[layerId].expressions[expression.id];
this._generateOutput(layerId);
}
setExpressionDates(startDate: Date, endDate: Date, expression: Expression, layerId: string): void {
const { expressions } = this._layers[layerId];
const start = startDate ? this._convertToDate(Math.floor(startDate.getTime()), true) : null;
const end = endDate ? this._convertToDate(Math.floor(endDate.getTime()), true) : null;
const chevron = end && !start ? "<" : !end && start ? ">" : null;
if (chevron) {
expressions[expression.id] = {
definitionExpression: `${expression.field} ${chevron} '${start ?? end}'`,
type: "date"
};
} else {
expressions[expression.id] = {
definitionExpression: `${expression.field} BETWEEN '${start}' AND '${end}'`,
type: "date"
};
}
this._generateOutput(layerId);
}
handleResetFilter(): void {
this.layerExpressions.map((layerExpression) => {
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
const { id, type, useCombobox } = expression;
if (type) {
if (type === "string" && !useCombobox) {
const select = document.getElementById(id.toString()) as HTMLSelectElement;
select.value = "default";
} else if (type === "string" && useCombobox) {
const combobox = document.getElementById(id.toString()) as HTMLCalciteComboboxElement;
const wrapper = combobox.shadowRoot.querySelector(".wrapper");
for (let i = 0; i < wrapper.children.length; i++) {
const child = wrapper.children[i];
if (child.nodeName === "CALCITE-CHIP") {
const chip = child as HTMLCalciteChipElement;
chip.style.display = "none";
}
}
for (let i = 0; i < combobox.children.length; i++) {
const comboboxItem = combobox.children[i] as HTMLCalciteComboboxItemElement;
comboboxItem.selected = false;
}
} else if (type === "date") {
const datePicker = document.getElementById(id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.startAsDate = new Date(expression?.start);
datePicker.endAsDate = new Date(expression?.end);
}
}
expression.checked = false;
});
this._layers[id].expressions = {};
});
}
async calculateStatistics(layerId: string, field: string): Promise<__esri.Graphic[]> {
const layer = this.map.layers.find(({ id }) => id === layerId) as __esri.FeatureLayer;
if (layer && layer.type === "feature") {
const query = layer.createQuery();
query.where = layer.definitionExpression ? layer.definitionExpression : "1=1";
if (layer?.capabilities?.query?.supportsCacheHint) {
query.cacheHint = true;
}
if (field) {
query.outFields = [field];
query.returnDistinctValues = true;
query.returnGeometry = false;
if (this.extentSelector && this.extentSelectorConfig) {
query.geometry = this._getExtent(this.extentSelector, this.extentSelectorConfig);
query.spatialRelationship = "intersects";
}
const results = await layer.queryFeatures(query);
return results?.features;
}
}
return [];
}
// ----------------------------------
//
// Private methods
//
// ----------------------------------
private _generateOutput(id: string): void {
const defExpressions = [];
Object.values(this._layers[id].expressions).forEach(({ definitionExpression }) =>
defExpressions.push(definitionExpression)
);
const newOutput = {
id,
definitionExpression: defExpressions.join(this._layers[id].operator)
};
this.updatingExpression = true;
this.set("output", newOutput);
}
private _convertToDate(date: string | number, includeTime: boolean = false): string {
if (date) {
const tmpDate = new Date(date);
const formattedDate = `${tmpDate.getFullYear()}-${tmpDate.getMonth() + 1}-${tmpDate.getDate()}`;
if (includeTime) {
const time = `${tmpDate.getHours()}:${tmpDate.getMinutes()}:${tmpDate.getSeconds()}`;
return `${formattedDate} ${time}`;
} else {
return formattedDate;
}
}
return null;
}
private _getExtent(extentSelector: boolean, extentSelectorConfig: ExtentSelector): __esri.Geometry {
if (extentSelector && extentSelectorConfig) {
const { constraints } = extentSelectorConfig;
let newConstraints = { ...constraints };
const geometry = newConstraints?.geometry;
if (geometry) {
const tmpExtent = fromJSON(geometry);
if (tmpExtent && (tmpExtent?.type === "extent" || tmpExtent?.type === "polygon")) {
return tmpExtent;
}
}
}
return null;
}
private _debounceNumberInput(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
if (this._timeout) {
clearTimeout(this._timeout);
}
this._timeout = setTimeout(() => {
this._updateExpressions(expression, layerId, value, type);
this._generateOutput(layerId);
}, 800);
}
private _updateExpressions(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
const { expressions } = this._layers[layerId];
const { id } = expression;
if (expressions[id]) {
expressions[id] = {
...expressions[id],
type: "number",
[type]: value
};
if (!expressions[id]?.min && !expressions[id]?.max) {
delete expressions[id];
this._generateOutput(layerId);
return;
}
} else {
expressions[id] = {
definitionExpression: null,
type: "number",
[type]: value
};
}
this._setNumberRangeExpression(expression, layerId, value);
}
private _setNumberRangeExpression(expression: Expression, layerId: string, value: string): void {
const { expressions } = this._layers[layerId];
const { field, id } = expression;
const displayName = document.getElementById(`${id}-name`);
const inputMessage = document.getElementById(`${id}-error`) as HTMLCalciteInputMessageElement;
const min = expressions[id]?.min;
const max = expressions[id]?.max;
const chevron = max && !min ? "<" : !max && min ? ">" : null;
if (chevron) {
const exprValue = value ? value : max ? max : min ? min : null;
if (exprValue) {
displayName.style.color = "inherit";
inputMessage.active = false;
expressions[id].definitionExpression = `${field} ${chevron} ${exprValue}`;
} else {
delete expressions[id];
}
} else if (Number(max) < Number(min)) {
displayName.style.color = "red";
inputMessage.active = true;
} else {
displayName.style.color = "inherit";
inputMessage.active = false;
expressions[id].definitionExpression = `${field} BETWEEN ${min} AND ${max}`;
}
}
}
export = FilterListViewModel;
| delete this._layers[layerId].expressions[expression.id];
}
| conditional_block |
FilterListViewModel.ts | // Copyright 2021 Esri
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// esri.core.accessorSupport
import { property, subclass } from "esri/core/accessorSupport/decorators";
// General esri Imports
import Accessor = require("esri/core/Accessor");
import { fromJSON } from "esri/geometry/support/jsonUtils";
// Config Panel Imports
import { Expression, ExtentSelector, FilterLayers, FilterOutput, LayerExpression } from "./interfaces/interfaces";
let accordionStyle = `
.accordion-item-content { padding: 0!important; }
.accordion-item-header-text { flex-direction: unset!important }`;
@subclass("FilterListViewModel")
class FilterListViewModel extends Accessor {
// ----------------------------------
//
// Public Variables
//
// ----------------------------------
@property()
map: __esri.WebMap;
@property()
layerExpressions: LayerExpression[] = [];
@property()
theme: "dark" | "light" = "light";
@property()
updatingExpression: boolean = false;
@property()
extentSelector: boolean = false;
@property()
extentSelectorConfig: ExtentSelector;
@property()
output: FilterOutput;
// ----------------------------------
//
// Private Variables
//
// ----------------------------------
private _layers: FilterLayers = {};
private _timeout: NodeJS.Timeout;
// ----------------------------------
//
// Lifecycle methods
//
// ----------------------------------
constructor(params?: any) {
super(params);
}
// ----------------------------------
//
// Public methods
//
// ----------------------------------
in | : void {
this.layerExpressions?.map((layerExpression) => {
let tmpExp = {};
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
if (!expression.checked) {
expression.checked = false;
} else {
tmpExp = {
[expression.id]: expression.definitionExpression
};
}
});
this._layers[id] = {
expressions: tmpExp,
operator: layerExpression?.operator ?? " AND "
};
if (Object.keys(tmpExp).length > 0) {
this._generateOutput(id);
}
});
}
initLayerHeader(accordionItem: HTMLCalciteAccordionItemElement) {
const style = document.createElement("style");
if (this.theme === "dark") {
accordionStyle += ` .accordion-item-header {
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
} else if (this.theme === "light") {
accordionStyle += ` .accordion-item-header {
background: rgb(244, 243, 244)!important;
border-bottom: 1px solid rgb(217, 218, 218)!important;
padding: 14px 20px!important;
}`;
}
style.innerHTML = accordionStyle;
accordionItem.shadowRoot.prepend(style);
}
initCheckbox(id: string, expression: Expression, checkbox: HTMLCalciteCheckboxElement) {
checkbox.addEventListener("calciteCheckboxChange", (event: CustomEvent) => {
const node = event.target as HTMLCalciteCheckboxElement;
expression.checked = node.checked;
if (node.checked) {
this._layers[id].expressions[expression.id] = {
definitionExpression: expression.definitionExpression
};
} else {
delete this._layers[id].expressions[expression.id];
}
this._generateOutput(id);
});
}
handleSelect(expression: Expression, layerId: string, event: Event): void {
const node = event.target as HTMLSelectElement;
if (node.value !== "default") {
const definitionExpression = `${expression.field} = '${node.value}'`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleComboSelectCreate(expression: Expression, layerId: string, comboBox: HTMLCalciteComboboxElement): void {
comboBox.addEventListener("calciteLookupChange", this.handleComboSelect.bind(this, expression, layerId));
}
handleComboSelect(expression: Expression, layerId: string, event: CustomEvent): void {
const items = event.detail as HTMLCalciteComboboxItemElement[];
if (items && items.length) {
const values = items.map((item) => `'${item.value}'`);
const definitionExpression = `${expression.field} IN (${values.join(",")})`;
this._layers[layerId].expressions[expression.id] = {
definitionExpression
};
} else {
delete this._layers[layerId].expressions[expression.id];
}
this._generateOutput(layerId);
}
handleNumberInputCreate(
expression: Expression,
layerId: string,
type: "min" | "max",
input: HTMLCalciteInputElement
): void {
input.addEventListener("calciteInputInput", this.handleNumberInput.bind(this, expression, layerId, type));
}
handleNumberInput(expression: Expression, layerId: string, type: "min" | "max", event: CustomEvent): void {
const { value } = event.detail;
this._debounceNumberInput(expression, layerId, value, type);
}
handleDatePickerCreate(expression: Expression, layerId: string, datePicker: HTMLCalciteInputDatePickerElement): void {
datePicker.start = this._convertToDate(expression?.start);
datePicker.end = this._convertToDate(expression?.end);
datePicker.min = this._convertToDate(expression?.min);
datePicker.max = this._convertToDate(expression?.max);
datePicker.addEventListener(
"calciteDatePickerRangeChange",
this.handleDatePickerRangeChange.bind(this, expression, layerId)
);
datePicker.addEventListener("input", this.handleDatePickerInputChange.bind(this, expression, layerId));
}
handleDatePickerRangeChange(expression: Expression, layerId: string, event: CustomEvent): void {
this.setExpressionDates(event.detail?.startDate, event.detail?.endDate, expression, layerId);
}
handleDatePickerInputChange(expression: Expression, layerId: string, event: Event) {
setTimeout(() => {
const datePicker = event.target as HTMLCalciteInputDatePickerElement;
this.setExpressionDates(datePicker.startAsDate, datePicker.endAsDate, expression, layerId);
}, 1000);
}
handleResetDatePicker(expression: Expression, layerId: string, event: Event): void {
const datePicker = document.getElementById(expression.id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.start = null;
datePicker.startAsDate = null;
datePicker.end = null;
datePicker.endAsDate = null;
delete this._layers[layerId].expressions[expression.id];
this._generateOutput(layerId);
}
setExpressionDates(startDate: Date, endDate: Date, expression: Expression, layerId: string): void {
const { expressions } = this._layers[layerId];
const start = startDate ? this._convertToDate(Math.floor(startDate.getTime()), true) : null;
const end = endDate ? this._convertToDate(Math.floor(endDate.getTime()), true) : null;
const chevron = end && !start ? "<" : !end && start ? ">" : null;
if (chevron) {
expressions[expression.id] = {
definitionExpression: `${expression.field} ${chevron} '${start ?? end}'`,
type: "date"
};
} else {
expressions[expression.id] = {
definitionExpression: `${expression.field} BETWEEN '${start}' AND '${end}'`,
type: "date"
};
}
this._generateOutput(layerId);
}
handleResetFilter(): void {
this.layerExpressions.map((layerExpression) => {
const { id } = layerExpression;
layerExpression.expressions.map((expression) => {
const { id, type, useCombobox } = expression;
if (type) {
if (type === "string" && !useCombobox) {
const select = document.getElementById(id.toString()) as HTMLSelectElement;
select.value = "default";
} else if (type === "string" && useCombobox) {
const combobox = document.getElementById(id.toString()) as HTMLCalciteComboboxElement;
const wrapper = combobox.shadowRoot.querySelector(".wrapper");
for (let i = 0; i < wrapper.children.length; i++) {
const child = wrapper.children[i];
if (child.nodeName === "CALCITE-CHIP") {
const chip = child as HTMLCalciteChipElement;
chip.style.display = "none";
}
}
for (let i = 0; i < combobox.children.length; i++) {
const comboboxItem = combobox.children[i] as HTMLCalciteComboboxItemElement;
comboboxItem.selected = false;
}
} else if (type === "date") {
const datePicker = document.getElementById(id.toString()) as HTMLCalciteInputDatePickerElement;
datePicker.startAsDate = new Date(expression?.start);
datePicker.endAsDate = new Date(expression?.end);
}
}
expression.checked = false;
});
this._layers[id].expressions = {};
});
}
// Queries the distinct values of `field` from the given feature layer,
// honoring the layer's current definitionExpression and (when configured)
// the extent-selector geometry. Resolves to [] when the layer is missing,
// not a feature layer, or no field is provided.
async calculateStatistics(layerId: string, field: string): Promise<__esri.Graphic[]> {
  const layer = this.map.layers.find(({ id }) => id === layerId) as __esri.FeatureLayer;
  if (layer && layer.type === "feature") {
    const query = layer.createQuery();
    // Fall back to a match-all clause when no definition expression is set.
    query.where = layer.definitionExpression ? layer.definitionExpression : "1=1";
    if (layer?.capabilities?.query?.supportsCacheHint) {
      // Let the service cache the result when it advertises support.
      query.cacheHint = true;
    }
    if (field) {
      query.outFields = [field];
      query.returnDistinctValues = true;
      query.returnGeometry = false;
      if (this.extentSelector && this.extentSelectorConfig) {
        query.geometry = this._getExtent(this.extentSelector, this.extentSelectorConfig);
        query.spatialRelationship = "intersects";
      }
      const results = await layer.queryFeatures(query);
      return results?.features;
    }
  }
  return [];
}
// ----------------------------------
//
// Private methods
//
// ----------------------------------
// Joins every stored per-expression SQL fragment for the layer with the
// layer's configured operator and publishes the result on `output`.
private _generateOutput(id: string): void {
  const layer = this._layers[id];
  const defExpressions = Object.values(layer.expressions).map(
    ({ definitionExpression }) => definitionExpression
  );
  this.updatingExpression = true;
  this.set("output", {
    id,
    definitionExpression: defExpressions.join(layer.operator)
  });
}
// Formats a timestamp as 'YYYY-MM-DD' (optionally 'YYYY-MM-DD HH:MM:SS') for
// use inside SQL date literals in definition expressions. Returns null for a
// falsy input.
//
// Fix: components are now zero-padded. The original emitted e.g. "2021-3-5 9:5:2",
// and SQL DATE literal parsers generally require two-digit fields.
private _convertToDate(date: string | number, includeTime: boolean = false): string {
  if (date) {
    const tmpDate = new Date(date);
    const pad = (n: number): string => n.toString().padStart(2, "0");
    const formattedDate = `${tmpDate.getFullYear()}-${pad(tmpDate.getMonth() + 1)}-${pad(tmpDate.getDate())}`;
    if (includeTime) {
      const time = `${pad(tmpDate.getHours())}:${pad(tmpDate.getMinutes())}:${pad(tmpDate.getSeconds())}`;
      return `${formattedDate} ${time}`;
    }
    return formattedDate;
  }
  return null;
}
// Rebuilds the constraint geometry configured for the extent selector.
// Only "extent" and "polygon" geometries are honored; anything else yields null.
private _getExtent(extentSelector: boolean, extentSelectorConfig: ExtentSelector): __esri.Geometry {
  if (!extentSelector || !extentSelectorConfig) {
    return null;
  }
  const geometry = extentSelectorConfig.constraints?.geometry;
  if (!geometry) {
    return null;
  }
  const tmpExtent = fromJSON(geometry);
  const usable = tmpExtent && (tmpExtent.type === "extent" || tmpExtent.type === "polygon");
  return usable ? tmpExtent : null;
}
// Defers the expression update until the user pauses typing for 800ms,
// cancelling any update already scheduled.
private _debounceNumberInput(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
  if (this._timeout) {
    clearTimeout(this._timeout);
  }
  const applyUpdate = () => {
    this._updateExpressions(expression, layerId, value, type);
    this._generateOutput(layerId);
  };
  this._timeout = setTimeout(applyUpdate, 800);
}
// Records the latest min/max bound for a numeric expression. When both bounds
// end up cleared the entry is removed and the output regenerated immediately;
// otherwise the SQL fragment is (re)built by _setNumberRangeExpression.
private _updateExpressions(expression: Expression, layerId: string, value: string, type: "min" | "max"): void {
  const { expressions } = this._layers[layerId];
  const { id } = expression;
  const existing = expressions[id];
  if (existing) {
    expressions[id] = { ...existing, type: "number", [type]: value };
    const updated = expressions[id];
    if (!updated?.min && !updated?.max) {
      delete expressions[id];
      this._generateOutput(layerId);
      return;
    }
  } else {
    expressions[id] = { definitionExpression: null, type: "number", [type]: value };
  }
  this._setNumberRangeExpression(expression, layerId, value);
}
// Builds the SQL fragment for a numeric range expression and drives the
// min/max validation UI (red label + inline error message when min > max).
private _setNumberRangeExpression(expression: Expression, layerId: string, value: string): void {
  const { expressions } = this._layers[layerId];
  const { field, id } = expression;
  const displayName = document.getElementById(`${id}-name`);
  const inputMessage = document.getElementById(`${id}-error`) as HTMLCalciteInputMessageElement;
  const min = expressions[id]?.min;
  const max = expressions[id]?.max;
  // Exactly one bound set: use a chevron comparison instead of BETWEEN.
  const chevron = max && !min ? "<" : !max && min ? ">" : null;
  if (chevron) {
    const exprValue = value ? value : max ? max : min ? min : null;
    if (exprValue) {
      displayName.style.color = "inherit";
      inputMessage.active = false;
      expressions[id].definitionExpression = `${field} ${chevron} ${exprValue}`;
    } else {
      // Both bounds cleared — drop the expression entirely.
      delete expressions[id];
    }
  } else if (Number(max) < Number(min)) {
    // Invalid range: flag the UI and keep the previous definition expression.
    displayName.style.color = "red";
    inputMessage.active = true;
  } else {
    displayName.style.color = "inherit";
    inputMessage.active = false;
    expressions[id].definitionExpression = `${field} BETWEEN ${min} AND ${max}`;
  }
}
}
export = FilterListViewModel;
| itExpressions() | identifier_name |
a3c_v10_cnn_lstm.py | from A3C.sharedAdam import SharedAdam
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import torch.multiprocessing as mp
import numpy as np
import pommerman
from pommerman import agents
import matplotlib.pyplot as plt
# on windows, multiprocessing: https://pytorch.org/docs/stable/notes/windows.html
# use one thread for parallel as they will block
# each other otherwise (https://github.com/ikostrikov/pytorch-a3c/issues/33)
os.environ["OMP_NUM_THREADS"] = "1"
# define globals
S_statespace = 3
S_actionspace = 6
UPDATE_GLOBAL_ITER = 800
GAMMA = 0.95
LAMBDA = 1
MAX_EP = 2000
LEARNING_RATE = 0.00001
eps = np.finfo(np.float32).eps.item()
def load_checkpoint(filename, model, optimizer):
    """Restore model and optimizer state from `filename` if it exists.

    Returns (model, optimizer) either way; when no file is found the pair is
    returned untouched after logging a message.
    """
    if not os.path.isfile(filename):
        print("# no checkpoint found at '{}'".format(filename))
        return model, optimizer
    print("# loading checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("# loaded checkpoint '{}'".format(filename))
    return model, optimizer
def save_checkpoint(filename, model, optimizer):
    """Serialize the model and optimizer state dicts to `filename`."""
    torch.save(
        {
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        },
        filename,
    )
def test(gnet):
    """Evaluate the global net over 100 FFA games versus three SimpleAgents,
    then plot the win distribution and print the win rate.

    The A3C agent plays greedily (set_train(False)) under torch.no_grad().
    """
    John = A3CAgent(gnet)
    John.set_train(False)  # greedy action selection, no rollout buffers
    agentList = [John, agents.SimpleAgent(), agents.SimpleAgent(), agents.SimpleAgent()]
    env = pommerman.make('PommeFFACompetition-v0', agentList)
    wins = []
    for ii in range(100):
        John.reset_lstm()
        state = env.reset()
        done = False
        while done == False:
            # Render only every 20th game to keep evaluation fast.
            if ii % 20 == 0:
                env.render()
            # time.sleep(1/24)
            with torch.no_grad():
                # NOTE(review): `state` is never advanced to `state_next`, so
                # every act() call sees the initial observation — confirm whether
                # this is intentional or a bug.
                actions = env.act(state)
                state_next, reward, done, info = env.step(actions)
        print(ii, "DONE. Info:", info, "reward:", reward, "You win = ",
              info['winners'][0] == 0 if info['result'].name == 'Win' else False)
        # -1 marks a tie/no-winner; otherwise record the winning agent id (ours is 0).
        wins.append(info['winners'][0] if info['result'].name == 'Win' else -1)
    fig, ax = plt.subplots(num=1, clear=True)
    winrate = wins.count(0) / len(wins)
    # NOTE(review): duplicate of the subplots() call two lines above — confirm
    # whether the first call can be removed.
    fig, ax = plt.subplots(num=1, clear=True)
    # Percentage bars: ties, our agent, then the three SimpleAgents.
    t, p0, p1, p2, p3 = plt.bar([-1, 0, 1, 2, 3], [
        wins.count(-1) / len(wins) * 100,
        wins.count(0) / len(wins) * 100,
        wins.count(1) / len(wins) * 100,
        wins.count(2) / len(wins) * 100,
        wins.count(3) / len(wins) * 100])
    t.set_facecolor('b')
    p0.set_facecolor('r')
    p1.set_facecolor('g')
    p2.set_facecolor('b')
    p3.set_facecolor('c')
    ax.set_xticks([-1, 0, 1, 2, 3])
    ax.set_xticklabels(['Ties', 'Agent\n(A2C)', 'Agent 1\nSimpleAgent', 'Agent 2\nSimpleAgent', 'Agent 3\nSimpleAgent'])
    ax.set_ylim([0, 100])
    ax.set_ylabel('Percent')
    ax.set_title('Bomberman. FFA mode.')
    print("Winrate: ", winrate)
    plt.show()
def clean_up_agents(agents):
    """Shut down every agent and return the list of shutdown() results."""
    results = []
    for agent in agents:
        results.append(agent.shutdown())
    return results
def ensure_shared_grads(lnet, gnet):
    """Point the global (shared) net's gradients at the local net's gradients.

    Standard pytorch-a3c idiom (see the ikostrikov repo referenced at the top
    of this file): once the shared params have grads assigned, ._grad already
    aliases the local grad tensors, so the early return skips re-wiring.
    NOTE(review): this assumes each worker process keeps reusing the same
    local grad tensors — confirm against the optimizer's sharing semantics.
    """
    for param, shared_param in zip(lnet.parameters(), gnet.parameters()):
        if shared_param.grad is not None:
            # Already wired up in this process — nothing more to do.
            return
        shared_param._grad = param.grad
def update_glob_net(opt, lnet, gnet, agent, GAMMA):
    """Run one A3C update from the agent's rollout buffers.

    Computes the value loss from discounted returns and the actor loss from a
    Generalized Advantage Estimator (with a 0.01 entropy bonus), backprops on
    the local net, transfers gradients to the shared global net, steps the
    shared optimizer, then re-syncs the local net and clears the rollout.
    """
    R = 0
    actor_loss = 0
    value_loss = 0
    gae = 0
    # Bootstrap value 0 for the terminal state; needed by the deltaT term below.
    agent.values.append(torch.zeros(1))  # we need to add this for the deltaT equation(below)
    # print(agent.rewards)
    for i in reversed(range(len(agent.rewards))):
        # Discounted return from step i onward.
        R = GAMMA * R + agent.rewards[i]
        advantage = R - agent.values[i]
        value_loss = value_loss + 0.5 * advantage.pow(2)
        # One-step TD residual; .data detaches the value targets.
        deltaT = agent.rewards[i] + GAMMA * agent.values[i + 1].data - agent.values[i].data
        gae = gae * GAMMA * LAMBDA + deltaT  # generalized advantage estimator
        actor_loss = actor_loss - agent.logProbs[i] * gae - 0.01 * agent.entropies[i]
    loss = (actor_loss + 0.5 * value_loss)
    opt.zero_grad()
    loss.backward(retain_graph=True)
    # Hand the local grads to the shared global parameters, then step.
    ensure_shared_grads(lnet, gnet)
    opt.step()
    # Pull updated weights back into the local net and reset its grads/rollout.
    lnet.load_state_dict(gnet.state_dict())
    lnet.zero_grad()
    agent.clear_actions()
def record(global_ep, global_ep_r, ep_r, res_queue, global_nr_steps, nr_steps, name):
    """Publish one finished episode: bump the shared episode counter, update
    the running reward/steps averages, push the reward to the result queue,
    and print a progress line."""
    with global_ep.get_lock():
        global_ep.value += 1
    with global_ep_r.get_lock():
        first_episode = global_ep_r.value == 0.
        if first_episode:
            global_ep_r.value = -1  # ep_r
            global_nr_steps.value = nr_steps
        else:
            # Exponential moving averages over episodes.
            global_ep_r.value = global_ep_r.value * 0.99 + ep_r * 0.01
            global_nr_steps.value = global_nr_steps.value * 0.99 + nr_steps * 0.01
    res_queue.put(ep_r)
    print(
        name,
        "Ep:", global_ep.value,
        "| Avg Ep_r: %.2f" % global_ep_r.value,
        "| Avg Steps: %d" % global_nr_steps.value,
        "| Ep_r / Steps: %.2f" % (ep_r / nr_steps),
    )
def get_reward(state, old_state, agent_nr, start_reward, max_ammo, old_max_ammo, action, last_action,
               action_history_oh):
    """Shaped reward for agent `agent_nr` based on one environment transition.

    Stages:
      0. Penalize ineffective actions: moving without changing position, or
         placing a bomb (action 5) with no ammo.
      1. Reward bombs placed adjacent to breakable walls or enemies.
      2. Reward picking up power-ups (kick, extra ammo, bigger blast).
    The game-outcome reward is passed in via `start_reward` and added at the end.

    Board cell codes: 0 nothing, 1 unbreakable wall, 2 wall, 3 bomb, 4 flames,
    6/7/8 pick-ups, 11/12/13 enemies.
    """
    reward = 0
    # Stage 0: penalize invalid/ineffective actions.
    ammo = old_state[agent_nr]['ammo']
    if action != 5:
        if state[agent_nr]['position'] == old_state[agent_nr]['position']:
            reward -= 0.03
    elif ammo == 0:
        reward -= 0.03
    # Stage 1: reward bombing next to walls (and enemies).
    position = state[agent_nr]['position']
    # The four cardinal neighbour offsets: (-1,0), (0,-1), (0,1), (1,0).
    adj = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not ((i == j) or i + j == 0)]
    # Fix: np.tile replaces np.matlib.repmat — `numpy.matlib` is never imported
    # in this file (only `import numpy as np`), so the original call raised
    # AttributeError; numpy.matlib is also deprecated.
    adjacent = np.tile(np.asarray(position), (4, 1)) - np.asarray(adj)
    # Clamp neighbour coordinates to the 11x11 board.
    adjacent = np.clip(adjacent, 0, 10)
    if action == 5 and ammo > 0:
        board = state[agent_nr]['board']
        for xy in adjacent:
            square_val = board[xy[0]][xy[1]]
            if square_val == 2:
                reward += 0.02
            elif square_val == 11 or square_val == 12 or square_val == 13:
                reward += 0.05
    # Stage 2: reward power-up pickups.
    blast_strength = state[agent_nr]['blast_strength']
    old_blast_strength = old_state[agent_nr]['blast_strength']
    can_kick = int(state[agent_nr]['can_kick'])
    old_can_kick = int(old_state[agent_nr]['can_kick'])
    reward += (can_kick - old_can_kick) * 0.02
    reward += (max_ammo - old_max_ammo) * 0.02
    reward += (blast_strength - old_blast_strength) * 0.02
    # Game-outcome reward supplied by the caller.
    reward += start_reward
    return reward
class A3CNet(nn.Module):
    """Actor-critic network for Pommerman.

    Grouped convolutions process the three padded board planes; the flattened
    conv features are concatenated with the 83-dim raw feature vector and
    encoded down to 50 units feeding a 2-layer LSTM actor head. The critic is
    a single linear layer over the raw features only.
    """

    def __init__(self):
        super(A3CNet, self).__init__()
        # One conv group per input plane (board, bomb blast strength, bomb life).
        self.conv1 = nn.Conv2d(S_statespace, 66, 3, stride=1, groups=3)
        self.conv2 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
        self.conv3 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
        self.conv4 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
        # 11237 = 66 * 13 * 13 conv features (for a 15x15 input) + 83 raw features.
        self.encoder1 = nn.Linear(11237, 1000)
        self.encoder2 = nn.Linear(1000, 200)
        self.encoder3 = nn.Linear(200, 50)
        self.critic_linear = nn.Linear(83, 1)
        self.actor_lstm = nn.LSTM(50, S_actionspace, 2, batch_first=True)
        self.actor_out = nn.Linear(S_actionspace, S_actionspace)
        # Xavier init for the encoders and the critic. Fix: encoder1 was
        # missing from this list, leaving it (inconsistently) at PyTorch's
        # default initialization.
        torch.nn.init.xavier_uniform_(self.encoder1.weight)
        torch.nn.init.xavier_uniform_(self.encoder2.weight)
        torch.nn.init.xavier_uniform_(self.encoder3.weight)
        torch.nn.init.xavier_uniform_(self.critic_linear.weight)
        # torch.nn.init.xavier_uniform_(self.actor_linear.weight)

    def forward(self, x, raw, hx, cx):
        """Return (action logits, state value, new LSTM state).

        x:   (timesteps, batch, C, H, W) board planes
        raw: (timesteps, batch, 83) hand-crafted feature vector
        hx, cx: LSTM state from get_lstm_reset() or a previous step
        """
        timesteps, batch_size, C, H, W = x.size()
        x = x.view(batch_size * timesteps, C, H, W)
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view(timesteps, batch_size, -1)
        x = torch.cat((x, raw), -1)
        x = F.relu(self.encoder1(x))
        x = F.relu(self.encoder2(x))
        x = F.relu(self.encoder3(x))  # .permute(1, 0, 2)
        # Critic sees only the raw features, not the conv encoding.
        value = self.critic_linear(raw)
        # Actor: recurrent head over the encoded features.
        x, (hx, cx) = self.actor_lstm(x, (hx, cx))
        action = self.actor_out(x)
        return action, value, (hx, cx)

    @staticmethod
    def get_lstm_reset():
        """Fresh zero hidden/cell state for the 2-layer, 6-unit actor LSTM."""
        hx = torch.zeros(2, 1, 6)
        cx = torch.zeros(2, 1, 6)
        return hx, cx
class A3CAgent(agents.BaseAgent):
    """Pommerman agent wrapping an A3CNet.

    In training mode it samples actions from the policy and fills rollout
    buffers (rewards, values, log-probs, entropies) consumed by
    update_glob_net(); in eval mode it acts greedily via argmax.
    """

    def __init__(self, model):
        super(A3CAgent, self).__init__()
        self.model = model
        # Recurrent actor state, carried across steps within an episode.
        self.hn, self.cn = self.model.get_lstm_reset()
        # self.hx2, self.cx2 = self.model.get_lstm_reset()
        # Per-episode rollout buffers consumed by update_glob_net().
        self.rewards = []
        self.values = []
        self.logProbs = []
        self.entropies = []
        # Sliding window of the last 6 action ids, appended to the raw features.
        self.action_history = np.zeros(6)
        self.train = True

    def act(self, state, action_space):
        """Choose an action for `state`: sample when training, argmax otherwise."""
        obs, raw = self.observe(state, self.action_history)
        obs_t = torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0)
        raw_t = torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0)
        logit, value, (hn, cn) = self.model(obs_t, raw_t, self.hn, self.cn)
        # Fix: persist the recurrent state so the LSTM actually carries memory
        # across steps (the original discarded hn/cn, so the state never advanced).
        self.hn, self.cn = hn, cn
        if self.train:
            logit, value = logit.squeeze(0), value.squeeze(0)  # remove batch dimension
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1)
            self.entropies.append(entropy)
            try:
                # action = torch.argmax(logit,dim=-1).unsqueeze(0) #JUST TEST!!!
                action = Categorical(prob).sample().unsqueeze(0)
            except Exception:
                # Sampling fails when prob contains NaN/inf; dump tensors for
                # debugging, then re-raise. (Fix: the original swallowed the
                # error and crashed on the unbound `action` right after.)
                print('obs: ', obs.data)
                print('logit: ', logit.data)
                print('value: ', value.data)
                raise
            log_prob = log_prob.gather(1, action)
            self.values.append(value)
            self.logProbs.append(log_prob)
            a = action.item()
        else:
            logit = logit.squeeze(0)  # remove batch dimension
            a = torch.argmax(logit, dim=-1).item()
        # Shift the action history window and append the new action.
        self.action_history[:-1] = self.action_history[1:]
        self.action_history[-1] = a
        return a

    def set_train(self, input):
        """Toggle between sampling (True) and greedy (False) action selection."""
        self.train = input

    def add_reward(self, reward):
        """Clamp the reward to [-1, 1] and append it to the rollout buffer."""
        self.reward = max(min(reward, 1), -1)
        self.rewards.append(self.reward)

    def observe(self, state, action_history):
        """Build the network inputs from a raw Pommerman observation.

        Returns:
          out: (3, 15, 15) float planes — board, bomb blast strength, bomb life,
               each padded by obs_radius (walls outside the board).
          raw: 83-dim vector — 5x5 crops of the three planes centered on the
               agent (75), ammo (1), blast strength (1), last 6 actions (6).
        """
        obs_width = 5  # choose uneven number
        obs_radius = obs_width // 2
        board = state['board']
        blast_strength = state['bomb_blast_strength']
        bomb_life = state['bomb_life']
        pos = np.asarray(state['position'])
        # Pad with 1 (unbreakable wall) for the board, 0 for the bomb planes.
        board_pad = np.pad(board, (obs_radius, obs_radius), 'constant', constant_values=1)
        blast_strength_pad = np.pad(blast_strength, (obs_radius, obs_radius), 'constant', constant_values=0)
        life_pad = np.pad(bomb_life, (obs_radius, obs_radius), 'constant', constant_values=0)
        # Agent-centered crops of each padded plane.
        board_cent = board_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
        bomb_bs_cent = blast_strength_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
        bomb_life_cent = life_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
        ammo = np.asarray([state['ammo']])
        my_bs = np.asarray([state['blast_strength']])
        # Board cell codes: 0 nothing, 1 unbreakable wall, 2 wall, 3 bomb,
        # 4 flames, 6/7/8 pick-ups, 11/12/13 enemies.
        out = np.empty((3, 11 + 2 * obs_radius, 11 + 2 * obs_radius), dtype=np.float32)
        out[0, :, :] = board_pad
        out[1, :, :] = blast_strength_pad
        out[2, :, :] = life_pad
        # Raw feature vector: flattened crops + scalars + action history.
        raw = np.concatenate((board_cent.flatten(), bomb_bs_cent.flatten()), 0)
        raw = np.concatenate((raw, bomb_life_cent.flatten()), 0)
        raw = np.concatenate((raw, ammo), 0)
        raw = np.concatenate((raw, my_bs), 0)
        raw = np.concatenate((raw, action_history), 0)
        return out, raw

    def reset_lstm(self):
        """Reset the recurrent state and action history at episode start.

        Fix for the original TODO: it assigned self.hx/self.cx, which act()
        never reads, so the LSTM state was never actually reset.
        """
        self.hn, self.cn = self.model.get_lstm_reset()
        self.action_history = np.zeros(6)

    def clear_actions(self):
        """Empty the rollout buffers after a global update; returns self."""
        self.values = []
        self.logProbs = []
        self.rewards = []
        self.entropies = []
        return self
class Worker(mp.Process):
    """One A3C worker process.

    Owns a local copy of the network (synced from the shared global net),
    plays Pommerman episodes, and pushes gradients to the global net every
    UPDATE_GLOBAL_ITER steps or at episode end.
    """

    def __init__(self, gnet, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, name):
        super(Worker, self).__init__()
        self.agent_nr = 0  # our agent's index in the environment's agent list
        self.name = 'w%s' % name
        self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        self.gnet, self.opt = gnet, optimizer
        # Local network, initialized from the shared global weights.
        self.lnet = A3CNet()
        self.lnet.load_state_dict(gnet.state_dict())
        self.lnet.train()
        self.lnet.zero_grad()
        self.results = []
        self.global_nr_steps = global_nr_steps
        # One-hot action history buffer: 6 actions x 6 time steps.
        self.empty_oh_action = np.zeros((6, 1))
        self.saved_oh_actions = np.repeat(self.empty_oh_action, 6, 1)

    def run(self):
        # If we move this to "init", we get an error on recursion depth
        self.A3CAgent = A3CAgent(self.lnet)
        self.agentList = [self.A3CAgent, agents.SimpleAgent(), agents.RandomAgent(), agents.RandomAgent()]
        self.env = env = pommerman.make('PommeFFACompetition-v0', self.agentList)
        total_step = 1
        while self.g_ep.value < MAX_EP:
            # Step 2). worker interacts with environment
            s_act = self.env.reset()
            max_ammo = old_max_ammo = 1
            ep_r = 0.
            self.render = False  # self.g_ep.value % 20==0
            self.A3CAgent.reset_lstm()
            if self.name == 'w0':
                # Diagnostic: log |sum(weights)| of every global-net layer each
                # episode to spot vanishing/exploding parameters.
                enc1 = abs(torch.sum(self.gnet.encoder1.weight.data).item())
                enc2 = abs(torch.sum(self.gnet.encoder2.weight.data).item())
                enc3 = abs(torch.sum(self.gnet.encoder3.weight.data).item())
                conv1 = abs(torch.sum(self.gnet.conv1.weight.data).item())
                conv2 = abs(torch.sum(self.gnet.conv2.weight.data).item())
                conv3 = abs(torch.sum(self.gnet.conv3.weight.data).item())
                conv4 = abs(torch.sum(self.gnet.conv4.weight.data).item())
                cl = abs(torch.sum(self.gnet.critic_linear.weight.data).item())
                alstm1 = abs(torch.sum(self.gnet.actor_lstm.weight_ih_l0.data).item())
                alstm2 = abs(torch.sum(self.gnet.actor_lstm.weight_hh_l0.data).item())
                aout = abs(torch.sum(self.gnet.actor_out.weight.data).item())
                f = open("AbsSummedWeights_ActorCritic_v2.txt", "a")
                f.write(
                    '{0:.5f} \t {1:.5f} \t {2:.5f} \t {3:.5f} \t {4:.5f} \t {5:.5f} \t {6:.5f} \t {7:.5f} \t {8:.5f} '
                    '\t {9:.5f} \t {10:.5f} \n'.format(enc1, enc2, enc3, conv1, conv2, conv3, conv4, alstm1, alstm2,
                                                       aout, cl))
                f.close()
            while True:
                # only render worker 0
                if self.name == 'w0' and self.render:
                    self.env.render()
                agent_actions = self.env.act(s_act)
                a = agent_actions[self.agent_nr]
                # Shift the one-hot action history and record the new action.
                self.saved_oh_actions[:, :-1] = self.saved_oh_actions[:, 1:]  # time shift
                self.saved_oh_actions[:, -1] = self.empty_oh_action[:, 0]  # erase last value
                self.saved_oh_actions[a, -1] = 1  # insert new one-hot
                s_new, rewards, done, _ = self.env.step(agent_actions)
                # not(10 in s_new[self.agent_nr]['alive']) #if done or agent 10 is dead
                done = done or rewards[self.agent_nr] == -1
                max_ammo = max(max_ammo, s_act[self.agent_nr]['ammo'])
                # reward and buffer
                r = rewards[self.agent_nr]
                # if (10 in s_act[self.agent_nr]['alive']) and total_step!=1:
                # r = get_reward(s_new,s_act,self.agent_nr,r,max_ammo,old_max_ammo,a,a_old,self.saved_oh_actions)
                ep_r += r
                self.A3CAgent.add_reward(r)
                if total_step % UPDATE_GLOBAL_ITER == 0 or done:  # update global and assign to local net
                    update_glob_net(self.opt, self.lnet, self.gnet, self.A3CAgent, GAMMA)
                    if done:
                        record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.global_nr_steps,
                               s_new[self.agent_nr]['step_count'], self.name)
                        break
                s_act = s_new
                old_max_ammo = max_ammo
                a_old = a
                total_step += 1
        # Sentinel telling the main process this worker is finished.
        self.res_queue.put(None)
def main():
    """Entry point: build the shared global net and optimizer, optionally
    restore a checkpoint, spawn worker processes, collect episode rewards,
    then save a new checkpoint and dump the reward curve to a text file."""
    global_net = A3CNet()  # global network
    global_net.train()  # Set in training mode, only affect BN, Dropout etc.
    filename = './A3C_v10_cnn_lstm_trained_critic.pth'
    global_net.share_memory()  # share the global parameters in multiprocessing
    optimizer = SharedAdam(global_net.parameters(), lr=LEARNING_RATE)  # global optimizer
    load_checkpoint(filename, global_net, optimizer)
    # Override any learning rate restored from the checkpoint.
    for g in optimizer.param_groups:
        g['lr'] = LEARNING_RATE
    global_ep, global_ep_r, global_nr_steps, res_queue = \
        mp.Value('d', 0), mp.Value('d', 0.), mp.Value('d', 0.), mp.Queue()
    # parallel training
    workers = [Worker(global_net, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, i) for i in range(1)]
    # for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = []  # record episode reward to plot
    # Drain episode rewards until a worker sends its None sentinel.
    # NOTE(review): with more than one worker this loop stops at the FIRST
    # None, possibly before the other workers finish — confirm if the worker
    # count is ever raised above 1.
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:
            break
    filename = './A3C_v10_cnn_lstm_trained_critic_actor_1.pth'
    save_checkpoint(filename, global_net, optimizer)
    with open('A3C_v10_cnn_lstm_trained_critic_actor.txt', 'a') as f:
        for item in res:
            f.write("%s\n" % item)
    print('joining workers')
    [w.join() for w in workers]
if __name__ == '__main__':
main() | torch.nn.init.xavier_uniform_(self.encoder1.weight) | random_line_split |
a3c_v10_cnn_lstm.py | from A3C.sharedAdam import SharedAdam
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import torch.multiprocessing as mp
import numpy as np
import pommerman
from pommerman import agents
import matplotlib.pyplot as plt
# on windows, multiprocessing: https://pytorch.org/docs/stable/notes/windows.html
# use one thread for parallel as they will block
# each other otherwise (https://github.com/ikostrikov/pytorch-a3c/issues/33)
os.environ["OMP_NUM_THREADS"] = "1"
# define globals
S_statespace = 3
S_actionspace = 6
UPDATE_GLOBAL_ITER = 800
GAMMA = 0.95
LAMBDA = 1
MAX_EP = 2000
LEARNING_RATE = 0.00001
eps = np.finfo(np.float32).eps.item()
def load_checkpoint(filename, model, optimizer):
if os.path.isfile(filename):
print("# loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("# loaded checkpoint '{}'".format(filename))
else:
print("# no checkpoint found at '{}'".format(filename))
return model, optimizer
def save_checkpoint(filename, model, optimizer):
state = {
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filename)
def test(gnet):
John = A3CAgent(gnet)
John.set_train(False)
agentList = [John, agents.SimpleAgent(), agents.SimpleAgent(), agents.SimpleAgent()]
env = pommerman.make('PommeFFACompetition-v0', agentList)
wins = []
for ii in range(100):
John.reset_lstm()
state = env.reset()
done = False
while done == False:
if ii % 20 == 0:
env.render()
# time.sleep(1/24)
with torch.no_grad():
actions = env.act(state)
state_next, reward, done, info = env.step(actions)
print(ii, "DONE. Info:", info, "reward:", reward, "You win = ",
info['winners'][0] == 0 if info['result'].name == 'Win' else False)
wins.append(info['winners'][0] if info['result'].name == 'Win' else -1)
fig, ax = plt.subplots(num=1, clear=True)
winrate = wins.count(0) / len(wins)
fig, ax = plt.subplots(num=1, clear=True)
t, p0, p1, p2, p3 = plt.bar([-1, 0, 1, 2, 3], [
wins.count(-1) / len(wins) * 100,
wins.count(0) / len(wins) * 100,
wins.count(1) / len(wins) * 100,
wins.count(2) / len(wins) * 100,
wins.count(3) / len(wins) * 100])
t.set_facecolor('b')
p0.set_facecolor('r')
p1.set_facecolor('g')
p2.set_facecolor('b')
p3.set_facecolor('c')
ax.set_xticks([-1, 0, 1, 2, 3])
ax.set_xticklabels(['Ties', 'Agent\n(A2C)', 'Agent 1\nSimpleAgent', 'Agent 2\nSimpleAgent', 'Agent 3\nSimpleAgent'])
ax.set_ylim([0, 100])
ax.set_ylabel('Percent')
ax.set_title('Bomberman. FFA mode.')
print("Winrate: ", winrate)
plt.show()
def clean_up_agents(agents):
|
def ensure_shared_grads(lnet, gnet):
for param, shared_param in zip(lnet.parameters(), gnet.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def update_glob_net(opt, lnet, gnet, agent, GAMMA):
R = 0
actor_loss = 0
value_loss = 0
gae = 0
agent.values.append(torch.zeros(1)) # we need to add this for the deltaT equation(below)
# print(agent.rewards)
for i in reversed(range(len(agent.rewards))):
R = GAMMA * R + agent.rewards[i]
advantage = R - agent.values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
deltaT = agent.rewards[i] + GAMMA * agent.values[i + 1].data - agent.values[i].data
gae = gae * GAMMA * LAMBDA + deltaT # generalized advantage estimator
actor_loss = actor_loss - agent.logProbs[i] * gae - 0.01 * agent.entropies[i]
loss = (actor_loss + 0.5 * value_loss)
opt.zero_grad()
loss.backward(retain_graph=True)
ensure_shared_grads(lnet, gnet)
opt.step()
lnet.load_state_dict(gnet.state_dict())
lnet.zero_grad()
agent.clear_actions()
def record(global_ep, global_ep_r, ep_r, res_queue, global_nr_steps, nr_steps, name):
with global_ep.get_lock():
global_ep.value += 1
with global_ep_r.get_lock():
if global_ep_r.value == 0.:
global_ep_r.value = -1 # ep_r
global_nr_steps.value = nr_steps
else:
global_ep_r.value = global_ep_r.value * 0.99 + ep_r * 0.01
global_nr_steps.value = global_nr_steps.value * 0.99 + nr_steps * 0.01
res_queue.put(ep_r)
print(
name,
"Ep:", global_ep.value,
"| Avg Ep_r: %.2f" % global_ep_r.value,
"| Avg Steps: %d" % global_nr_steps.value,
"| Ep_r / Steps: %.2f" % (ep_r / nr_steps),
# "| Ep_r: %.2f" % ep_r,
)
def get_reward(state, old_state, agent_nr, start_reward, max_ammo, old_max_ammo, action, last_action,
action_history_oh):
# developer note:
# on the board, 0: nothing,
# 1: unbreakable wall,
# 2: wall,
# 3: bomb,
# 4: flames,
# 6,7,8: pick-ups:
# 11,12 and 13: enemies
reward = 0
# reward stage 0: teach the agent to move and make invalid actions
# (move into walls, place bombs when you have no ammo)
ammo = old_state[agent_nr]['ammo']
if action != 5:
if state[agent_nr]['position'] == old_state[agent_nr]['position']:
reward -= 0.03
elif ammo == 0:
reward -= 0.03
# reward stage 1: teach agent to bomb walls (and enemies)
# compute adjacent squares
position = state[agent_nr]['position']
adj = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not ((i == j) or i + j == 0)]
adjacent = np.matlib.repmat(position, 4, 1)
adjacent = adjacent - np.asarray(adj)
# limit adjacent squares to only include inside board
adjacent = np.clip(adjacent, 0, 10)
if action == 5 and ammo > 0:
board = state[agent_nr]['board']
for xy in adjacent:
square_val = board[xy[0]][xy[1]]
if square_val == 2:
reward += 0.02
elif square_val == 11 or square_val == 12 or square_val == 13:
reward += 0.05
# reward stage2: teach agent to not stand on or beside bombs
# reward /= 4
# bomb_life = state[agent_nr]['bomb_life']
# if we stand on a bomb or next to bomb
# just_placed_bomb = np.logical_xor(last_action==5,action==5)
# if bomb_life[position]>0 and not(just_placed_bomb):
# reward-=0.1*(9-bomb_life[position])
# for xy in adjacent:
# if bomb_life[xy[0]][xy[1]]>0:
# reward -=0.05*(9-bomb_life[xy[0]][xy[1]])
# reward agent for picking up power-ups
blast_strength = state[agent_nr]['blast_strength']
old_blast_strength = old_state[agent_nr]['blast_strength']
can_kick = int(state[agent_nr]['can_kick'])
old_can_kick = int(old_state[agent_nr]['can_kick'])
reward += (can_kick - old_can_kick) * 0.02
reward += (max_ammo - old_max_ammo) * 0.02
reward += (blast_strength - old_blast_strength) * 0.02
# discourage action repetition by cross correlation of previous actions
# corr = signal.correlate2d(action_history_oh,action_history_oh)[5:-5][0]
# corr.sort()
# reward -= corr[-2]/100 #the second highest correlation (highest is found at 0-lag and is always 10),
# only reward game play at last stage
reward += start_reward
return reward
class A3CNet(nn.Module):
def __init__(self):
super(A3CNet, self).__init__()
self.conv1 = nn.Conv2d(S_statespace, 66, 3, stride=1, groups=3)
self.conv2 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.conv3 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.conv4 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.encoder1 = nn.Linear(11237, 1000)
self.encoder2 = nn.Linear(1000, 200)
self.encoder3 = nn.Linear(200, 50)
self.critic_linear = nn.Linear(83, 1)
self.actor_lstm = nn.LSTM(50, S_actionspace, 2, batch_first=True)
self.actor_out = nn.Linear(S_actionspace, S_actionspace)
torch.nn.init.xavier_uniform_(self.encoder1.weight)
torch.nn.init.xavier_uniform_(self.encoder2.weight)
torch.nn.init.xavier_uniform_(self.encoder3.weight)
torch.nn.init.xavier_uniform_(self.critic_linear.weight)
# torch.nn.init.xavier_uniform_(self.actor_linear.weight)
def forward(self, x, raw, hx, cx):
timesteps, batch_size, C, H, W = x.size()
x = x.view(batch_size * timesteps, C, H, W)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(timesteps, batch_size, -1)
x = torch.cat((x, raw), -1)
x = F.relu(self.encoder1(x))
x = F.relu(self.encoder2(x))
x = F.relu(self.encoder3(x)) # .permute(1, 0, 2)
# critic
value = self.critic_linear(raw)
# actor
x, (hx, cx) = self.actor_lstm(x, (hx, cx))
action = self.actor_out(x)
return action, value, (hx, cx)
@staticmethod
def get_lstm_reset():
hx = torch.zeros(2, 1, 6)
cx = torch.zeros(2, 1, 6)
return hx, cx
class A3CAgent(agents.BaseAgent):
def __init__(self, model):
super(A3CAgent, self).__init__()
self.model = model
self.hn, self.cn = self.model.get_lstm_reset()
# self.hx2, self.cx2 = self.model.get_lstm_reset()
self.rewards = []
self.values = []
self.logProbs = []
self.entropies = []
self.action_history = np.zeros(6)
self.train = True
def act(self, state, action_space):
if self.train:
obs, raw = self.observe(state, self.action_history)
logit, value, (hn, cn) = self.model(torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0),
torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0), self.hn,
self.cn)
logit, value = logit.squeeze(0), value.squeeze(0) # remove batch dimension
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
entropy = -(log_prob * prob).sum(1)
self.entropies.append(entropy)
try:
# action = torch.argmax(logit,dim=-1).unsqueeze(0) #JUST TEST!!!
action = Categorical(prob).sample().unsqueeze(0)
except:
print('obs: ', obs.data)
print('logit: ', logit.data)
print('value: ', value.data)
log_prob = log_prob.gather(1, action)
self.values.append(value)
self.logProbs.append(log_prob)
a = action.item()
else:
obs, raw = self.observe(state, self.action_history)
logit, value, (hn, cn) = self.model(torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0),
torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0),
self.hn,
self.cn)
logit = logit.squeeze(0) # remove batch dimension
prob = F.softmax(logit, dim=-1)
a = torch.argmax(logit, dim=-1).item()
self.action_history[:-1] = self.action_history[1:]
self.action_history[-1] = a
return a
def set_train(self, input):
self.train = input
def add_reward(self, reward):
self.reward = max(min(reward, 1), -1)
self.rewards.append(self.reward)
def observe(self, state, action_history):
obs_width = 5 # choose uneven number
obs_radius = obs_width // 2
board = state['board']
blast_strength = state['bomb_blast_strength']
bomb_life = state['bomb_life']
pos = np.asarray(state['position'])
board_pad = np.pad(board, (obs_radius, obs_radius), 'constant', constant_values=1)
blast_strength_pad = np.pad(blast_strength, (obs_radius, obs_radius), 'constant', constant_values=0)
life_pad = np.pad(bomb_life, (obs_radius, obs_radius), 'constant', constant_values=0)
# centered, padded board
board_cent = board_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
bomb_bs_cent = blast_strength_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
bomb_life_cent = life_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
ammo = np.asarray([state['ammo']])
my_bs = np.asarray([state['blast_strength']])
# note:
# on the board, 0: nothing,
# 1: unbreakable wall,
# 2: wall,
# 3: bomb,
# 4: flames,
# 6,7,8: pick-ups:
# 11,12 and 13: enemies
out = np.empty((3, 11 + 2 * obs_radius, 11 + 2 * obs_radius), dtype=np.float32)
out[0, :, :] = board_pad
out[1, :, :] = blast_strength_pad
out[2, :, :] = life_pad
# get raw surroundings
raw = np.concatenate((board_cent.flatten(), bomb_bs_cent.flatten()), 0)
raw = np.concatenate((raw, bomb_life_cent.flatten()), 0)
raw = np.concatenate((raw, ammo), 0)
raw = np.concatenate((raw, my_bs), 0)
raw = np.concatenate((raw, action_history), 0)
return out, raw
def reset_lstm(self):
# TODO I believe here should be self.hn, self.cn
self.hx, self.cx = self.model.get_lstm_reset()
self.action_history = np.zeros(6)
def clear_actions(self):
self.values = []
self.logProbs = []
self.rewards = []
self.entropies = []
return self
class Worker(mp.Process):
def __init__(self, gnet, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, name):
super(Worker, self).__init__()
self.agent_nr = 0
self.name = 'w%s' % name
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, optimizer
self.lnet = A3CNet()
self.lnet.load_state_dict(gnet.state_dict())
self.lnet.train()
self.lnet.zero_grad()
self.results = []
self.global_nr_steps = global_nr_steps
self.empty_oh_action = np.zeros((6, 1))
self.saved_oh_actions = np.repeat(self.empty_oh_action, 6, 1)
def run(self):
# If we move this to "init", we get an error on recursion depth
self.A3CAgent = A3CAgent(self.lnet)
self.agentList = [self.A3CAgent, agents.SimpleAgent(), agents.RandomAgent(), agents.RandomAgent()]
self.env = env = pommerman.make('PommeFFACompetition-v0', self.agentList)
total_step = 1
while self.g_ep.value < MAX_EP:
# Step 2). worker interacts with environment
s_act = self.env.reset()
max_ammo = old_max_ammo = 1
ep_r = 0.
self.render = False # self.g_ep.value % 20==0
self.A3CAgent.reset_lstm()
if self.name == 'w0':
enc1 = abs(torch.sum(self.gnet.encoder1.weight.data).item())
enc2 = abs(torch.sum(self.gnet.encoder2.weight.data).item())
enc3 = abs(torch.sum(self.gnet.encoder3.weight.data).item())
conv1 = abs(torch.sum(self.gnet.conv1.weight.data).item())
conv2 = abs(torch.sum(self.gnet.conv2.weight.data).item())
conv3 = abs(torch.sum(self.gnet.conv3.weight.data).item())
conv4 = abs(torch.sum(self.gnet.conv4.weight.data).item())
cl = abs(torch.sum(self.gnet.critic_linear.weight.data).item())
alstm1 = abs(torch.sum(self.gnet.actor_lstm.weight_ih_l0.data).item())
alstm2 = abs(torch.sum(self.gnet.actor_lstm.weight_hh_l0.data).item())
aout = abs(torch.sum(self.gnet.actor_out.weight.data).item())
f = open("AbsSummedWeights_ActorCritic_v2.txt", "a")
f.write(
'{0:.5f} \t {1:.5f} \t {2:.5f} \t {3:.5f} \t {4:.5f} \t {5:.5f} \t {6:.5f} \t {7:.5f} \t {8:.5f} '
'\t {9:.5f} \t {10:.5f} \n'.format(enc1, enc2, enc3, conv1, conv2, conv3, conv4, alstm1, alstm2,
aout, cl))
f.close()
while True:
# only render worker 0
if self.name == 'w0' and self.render:
self.env.render()
agent_actions = self.env.act(s_act)
a = agent_actions[self.agent_nr]
self.saved_oh_actions[:, :-1] = self.saved_oh_actions[:, 1:] # time shift
self.saved_oh_actions[:, -1] = self.empty_oh_action[:, 0] # erase last value
self.saved_oh_actions[a, -1] = 1 # insert new one-hot
s_new, rewards, done, _ = self.env.step(agent_actions)
# not(10 in s_new[self.agent_nr]['alive']) #if done or agent 10 is dead
done = done or rewards[self.agent_nr] == -1
max_ammo = max(max_ammo, s_act[self.agent_nr]['ammo'])
# reward and buffer
r = rewards[self.agent_nr]
# if (10 in s_act[self.agent_nr]['alive']) and total_step!=1:
# r = get_reward(s_new,s_act,self.agent_nr,r,max_ammo,old_max_ammo,a,a_old,self.saved_oh_actions)
ep_r += r
self.A3CAgent.add_reward(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
update_glob_net(self.opt, self.lnet, self.gnet, self.A3CAgent, GAMMA)
if done:
record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.global_nr_steps,
s_new[self.agent_nr]['step_count'], self.name)
break
s_act = s_new
old_max_ammo = max_ammo
a_old = a
total_step += 1
self.res_queue.put(None)
def main():
global_net = A3CNet() # global network
global_net.train() # Set in training mode, only affect BN, Dropout etc.
filename = './A3C_v10_cnn_lstm_trained_critic.pth'
global_net.share_memory() # share the global parameters in multiprocessing
optimizer = SharedAdam(global_net.parameters(), lr=LEARNING_RATE) # global optimizer
load_checkpoint(filename, global_net, optimizer)
for g in optimizer.param_groups:
g['lr'] = LEARNING_RATE
global_ep, global_ep_r, global_nr_steps, res_queue = \
mp.Value('d', 0), mp.Value('d', 0.), mp.Value('d', 0.), mp.Queue()
# parallel training
workers = [Worker(global_net, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, i) for i in range(1)]
# for i in range(mp.cpu_count())]
[w.start() for w in workers]
res = [] # record episode reward to plot
while True:
r = res_queue.get()
if r is not None:
res.append(r)
else:
break
filename = './A3C_v10_cnn_lstm_trained_critic_actor_1.pth'
save_checkpoint(filename, global_net, optimizer)
with open('A3C_v10_cnn_lstm_trained_critic_actor.txt', 'a') as f:
for item in res:
f.write("%s\n" % item)
print('joining workers')
[w.join() for w in workers]
if __name__ == '__main__':
main()
| """Stops all agents"""
return [agent.shutdown() for agent in agents] | identifier_body |
a3c_v10_cnn_lstm.py | from A3C.sharedAdam import SharedAdam
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import torch.multiprocessing as mp
import numpy as np
import pommerman
from pommerman import agents
import matplotlib.pyplot as plt
# on windows, multiprocessing: https://pytorch.org/docs/stable/notes/windows.html
# use one thread for parallel as they will block
# each other otherwise (https://github.com/ikostrikov/pytorch-a3c/issues/33)
os.environ["OMP_NUM_THREADS"] = "1"
# define globals
S_statespace = 3
S_actionspace = 6
UPDATE_GLOBAL_ITER = 800
GAMMA = 0.95
LAMBDA = 1
MAX_EP = 2000
LEARNING_RATE = 0.00001
eps = np.finfo(np.float32).eps.item()
def load_checkpoint(filename, model, optimizer):
if os.path.isfile(filename):
print("# loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("# loaded checkpoint '{}'".format(filename))
else:
print("# no checkpoint found at '{}'".format(filename))
return model, optimizer
def save_checkpoint(filename, model, optimizer):
state = {
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filename)
def test(gnet):
John = A3CAgent(gnet)
John.set_train(False)
agentList = [John, agents.SimpleAgent(), agents.SimpleAgent(), agents.SimpleAgent()]
env = pommerman.make('PommeFFACompetition-v0', agentList)
wins = []
for ii in range(100):
John.reset_lstm()
state = env.reset()
done = False
while done == False:
if ii % 20 == 0:
env.render()
# time.sleep(1/24)
with torch.no_grad():
actions = env.act(state)
state_next, reward, done, info = env.step(actions)
print(ii, "DONE. Info:", info, "reward:", reward, "You win = ",
info['winners'][0] == 0 if info['result'].name == 'Win' else False)
wins.append(info['winners'][0] if info['result'].name == 'Win' else -1)
fig, ax = plt.subplots(num=1, clear=True)
winrate = wins.count(0) / len(wins)
fig, ax = plt.subplots(num=1, clear=True)
t, p0, p1, p2, p3 = plt.bar([-1, 0, 1, 2, 3], [
wins.count(-1) / len(wins) * 100,
wins.count(0) / len(wins) * 100,
wins.count(1) / len(wins) * 100,
wins.count(2) / len(wins) * 100,
wins.count(3) / len(wins) * 100])
t.set_facecolor('b')
p0.set_facecolor('r')
p1.set_facecolor('g')
p2.set_facecolor('b')
p3.set_facecolor('c')
ax.set_xticks([-1, 0, 1, 2, 3])
ax.set_xticklabels(['Ties', 'Agent\n(A2C)', 'Agent 1\nSimpleAgent', 'Agent 2\nSimpleAgent', 'Agent 3\nSimpleAgent'])
ax.set_ylim([0, 100])
ax.set_ylabel('Percent')
ax.set_title('Bomberman. FFA mode.')
print("Winrate: ", winrate)
plt.show()
def clean_up_agents(agents):
"""Stops all agents"""
return [agent.shutdown() for agent in agents]
def ensure_shared_grads(lnet, gnet):
for param, shared_param in zip(lnet.parameters(), gnet.parameters()):
if shared_param.grad is not None:
|
shared_param._grad = param.grad
def update_glob_net(opt, lnet, gnet, agent, GAMMA):
R = 0
actor_loss = 0
value_loss = 0
gae = 0
agent.values.append(torch.zeros(1)) # we need to add this for the deltaT equation(below)
# print(agent.rewards)
for i in reversed(range(len(agent.rewards))):
R = GAMMA * R + agent.rewards[i]
advantage = R - agent.values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
deltaT = agent.rewards[i] + GAMMA * agent.values[i + 1].data - agent.values[i].data
gae = gae * GAMMA * LAMBDA + deltaT # generalized advantage estimator
actor_loss = actor_loss - agent.logProbs[i] * gae - 0.01 * agent.entropies[i]
loss = (actor_loss + 0.5 * value_loss)
opt.zero_grad()
loss.backward(retain_graph=True)
ensure_shared_grads(lnet, gnet)
opt.step()
lnet.load_state_dict(gnet.state_dict())
lnet.zero_grad()
agent.clear_actions()
def record(global_ep, global_ep_r, ep_r, res_queue, global_nr_steps, nr_steps, name):
with global_ep.get_lock():
global_ep.value += 1
with global_ep_r.get_lock():
if global_ep_r.value == 0.:
global_ep_r.value = -1 # ep_r
global_nr_steps.value = nr_steps
else:
global_ep_r.value = global_ep_r.value * 0.99 + ep_r * 0.01
global_nr_steps.value = global_nr_steps.value * 0.99 + nr_steps * 0.01
res_queue.put(ep_r)
print(
name,
"Ep:", global_ep.value,
"| Avg Ep_r: %.2f" % global_ep_r.value,
"| Avg Steps: %d" % global_nr_steps.value,
"| Ep_r / Steps: %.2f" % (ep_r / nr_steps),
# "| Ep_r: %.2f" % ep_r,
)
def get_reward(state, old_state, agent_nr, start_reward, max_ammo, old_max_ammo, action, last_action,
action_history_oh):
# developer note:
# on the board, 0: nothing,
# 1: unbreakable wall,
# 2: wall,
# 3: bomb,
# 4: flames,
# 6,7,8: pick-ups:
# 11,12 and 13: enemies
reward = 0
# reward stage 0: teach the agent to move and make invalid actions
# (move into walls, place bombs when you have no ammo)
ammo = old_state[agent_nr]['ammo']
if action != 5:
if state[agent_nr]['position'] == old_state[agent_nr]['position']:
reward -= 0.03
elif ammo == 0:
reward -= 0.03
# reward stage 1: teach agent to bomb walls (and enemies)
# compute adjacent squares
position = state[agent_nr]['position']
adj = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not ((i == j) or i + j == 0)]
adjacent = np.matlib.repmat(position, 4, 1)
adjacent = adjacent - np.asarray(adj)
# limit adjacent squares to only include inside board
adjacent = np.clip(adjacent, 0, 10)
if action == 5 and ammo > 0:
board = state[agent_nr]['board']
for xy in adjacent:
square_val = board[xy[0]][xy[1]]
if square_val == 2:
reward += 0.02
elif square_val == 11 or square_val == 12 or square_val == 13:
reward += 0.05
# reward stage2: teach agent to not stand on or beside bombs
# reward /= 4
# bomb_life = state[agent_nr]['bomb_life']
# if we stand on a bomb or next to bomb
# just_placed_bomb = np.logical_xor(last_action==5,action==5)
# if bomb_life[position]>0 and not(just_placed_bomb):
# reward-=0.1*(9-bomb_life[position])
# for xy in adjacent:
# if bomb_life[xy[0]][xy[1]]>0:
# reward -=0.05*(9-bomb_life[xy[0]][xy[1]])
# reward agent for picking up power-ups
blast_strength = state[agent_nr]['blast_strength']
old_blast_strength = old_state[agent_nr]['blast_strength']
can_kick = int(state[agent_nr]['can_kick'])
old_can_kick = int(old_state[agent_nr]['can_kick'])
reward += (can_kick - old_can_kick) * 0.02
reward += (max_ammo - old_max_ammo) * 0.02
reward += (blast_strength - old_blast_strength) * 0.02
# discourage action repetition by cross correlation of previous actions
# corr = signal.correlate2d(action_history_oh,action_history_oh)[5:-5][0]
# corr.sort()
# reward -= corr[-2]/100 #the second highest correlation (highest is found at 0-lag and is always 10),
# only reward game play at last stage
reward += start_reward
return reward
class A3CNet(nn.Module):
def __init__(self):
super(A3CNet, self).__init__()
self.conv1 = nn.Conv2d(S_statespace, 66, 3, stride=1, groups=3)
self.conv2 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.conv3 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.conv4 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.encoder1 = nn.Linear(11237, 1000)
self.encoder2 = nn.Linear(1000, 200)
self.encoder3 = nn.Linear(200, 50)
self.critic_linear = nn.Linear(83, 1)
self.actor_lstm = nn.LSTM(50, S_actionspace, 2, batch_first=True)
self.actor_out = nn.Linear(S_actionspace, S_actionspace)
torch.nn.init.xavier_uniform_(self.encoder1.weight)
torch.nn.init.xavier_uniform_(self.encoder2.weight)
torch.nn.init.xavier_uniform_(self.encoder3.weight)
torch.nn.init.xavier_uniform_(self.critic_linear.weight)
# torch.nn.init.xavier_uniform_(self.actor_linear.weight)
def forward(self, x, raw, hx, cx):
timesteps, batch_size, C, H, W = x.size()
x = x.view(batch_size * timesteps, C, H, W)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(timesteps, batch_size, -1)
x = torch.cat((x, raw), -1)
x = F.relu(self.encoder1(x))
x = F.relu(self.encoder2(x))
x = F.relu(self.encoder3(x)) # .permute(1, 0, 2)
# critic
value = self.critic_linear(raw)
# actor
x, (hx, cx) = self.actor_lstm(x, (hx, cx))
action = self.actor_out(x)
return action, value, (hx, cx)
@staticmethod
def get_lstm_reset():
hx = torch.zeros(2, 1, 6)
cx = torch.zeros(2, 1, 6)
return hx, cx
class A3CAgent(agents.BaseAgent):
def __init__(self, model):
super(A3CAgent, self).__init__()
self.model = model
self.hn, self.cn = self.model.get_lstm_reset()
# self.hx2, self.cx2 = self.model.get_lstm_reset()
self.rewards = []
self.values = []
self.logProbs = []
self.entropies = []
self.action_history = np.zeros(6)
self.train = True
def act(self, state, action_space):
if self.train:
obs, raw = self.observe(state, self.action_history)
logit, value, (hn, cn) = self.model(torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0),
torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0), self.hn,
self.cn)
logit, value = logit.squeeze(0), value.squeeze(0) # remove batch dimension
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
entropy = -(log_prob * prob).sum(1)
self.entropies.append(entropy)
try:
# action = torch.argmax(logit,dim=-1).unsqueeze(0) #JUST TEST!!!
action = Categorical(prob).sample().unsqueeze(0)
except:
print('obs: ', obs.data)
print('logit: ', logit.data)
print('value: ', value.data)
log_prob = log_prob.gather(1, action)
self.values.append(value)
self.logProbs.append(log_prob)
a = action.item()
else:
obs, raw = self.observe(state, self.action_history)
logit, value, (hn, cn) = self.model(torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0),
torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0),
self.hn,
self.cn)
logit = logit.squeeze(0) # remove batch dimension
prob = F.softmax(logit, dim=-1)
a = torch.argmax(logit, dim=-1).item()
self.action_history[:-1] = self.action_history[1:]
self.action_history[-1] = a
return a
def set_train(self, input):
self.train = input
def add_reward(self, reward):
self.reward = max(min(reward, 1), -1)
self.rewards.append(self.reward)
def observe(self, state, action_history):
obs_width = 5 # choose uneven number
obs_radius = obs_width // 2
board = state['board']
blast_strength = state['bomb_blast_strength']
bomb_life = state['bomb_life']
pos = np.asarray(state['position'])
board_pad = np.pad(board, (obs_radius, obs_radius), 'constant', constant_values=1)
blast_strength_pad = np.pad(blast_strength, (obs_radius, obs_radius), 'constant', constant_values=0)
life_pad = np.pad(bomb_life, (obs_radius, obs_radius), 'constant', constant_values=0)
# centered, padded board
board_cent = board_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
bomb_bs_cent = blast_strength_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
bomb_life_cent = life_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
ammo = np.asarray([state['ammo']])
my_bs = np.asarray([state['blast_strength']])
# note:
# on the board, 0: nothing,
# 1: unbreakable wall,
# 2: wall,
# 3: bomb,
# 4: flames,
# 6,7,8: pick-ups:
# 11,12 and 13: enemies
out = np.empty((3, 11 + 2 * obs_radius, 11 + 2 * obs_radius), dtype=np.float32)
out[0, :, :] = board_pad
out[1, :, :] = blast_strength_pad
out[2, :, :] = life_pad
# get raw surroundings
raw = np.concatenate((board_cent.flatten(), bomb_bs_cent.flatten()), 0)
raw = np.concatenate((raw, bomb_life_cent.flatten()), 0)
raw = np.concatenate((raw, ammo), 0)
raw = np.concatenate((raw, my_bs), 0)
raw = np.concatenate((raw, action_history), 0)
return out, raw
def reset_lstm(self):
# TODO I believe here should be self.hn, self.cn
self.hx, self.cx = self.model.get_lstm_reset()
self.action_history = np.zeros(6)
def clear_actions(self):
self.values = []
self.logProbs = []
self.rewards = []
self.entropies = []
return self
class Worker(mp.Process):
def __init__(self, gnet, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, name):
super(Worker, self).__init__()
self.agent_nr = 0
self.name = 'w%s' % name
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, optimizer
self.lnet = A3CNet()
self.lnet.load_state_dict(gnet.state_dict())
self.lnet.train()
self.lnet.zero_grad()
self.results = []
self.global_nr_steps = global_nr_steps
self.empty_oh_action = np.zeros((6, 1))
self.saved_oh_actions = np.repeat(self.empty_oh_action, 6, 1)
def run(self):
# If we move this to "init", we get an error on recursion depth
self.A3CAgent = A3CAgent(self.lnet)
self.agentList = [self.A3CAgent, agents.SimpleAgent(), agents.RandomAgent(), agents.RandomAgent()]
self.env = env = pommerman.make('PommeFFACompetition-v0', self.agentList)
total_step = 1
while self.g_ep.value < MAX_EP:
# Step 2). worker interacts with environment
s_act = self.env.reset()
max_ammo = old_max_ammo = 1
ep_r = 0.
self.render = False # self.g_ep.value % 20==0
self.A3CAgent.reset_lstm()
if self.name == 'w0':
enc1 = abs(torch.sum(self.gnet.encoder1.weight.data).item())
enc2 = abs(torch.sum(self.gnet.encoder2.weight.data).item())
enc3 = abs(torch.sum(self.gnet.encoder3.weight.data).item())
conv1 = abs(torch.sum(self.gnet.conv1.weight.data).item())
conv2 = abs(torch.sum(self.gnet.conv2.weight.data).item())
conv3 = abs(torch.sum(self.gnet.conv3.weight.data).item())
conv4 = abs(torch.sum(self.gnet.conv4.weight.data).item())
cl = abs(torch.sum(self.gnet.critic_linear.weight.data).item())
alstm1 = abs(torch.sum(self.gnet.actor_lstm.weight_ih_l0.data).item())
alstm2 = abs(torch.sum(self.gnet.actor_lstm.weight_hh_l0.data).item())
aout = abs(torch.sum(self.gnet.actor_out.weight.data).item())
f = open("AbsSummedWeights_ActorCritic_v2.txt", "a")
f.write(
'{0:.5f} \t {1:.5f} \t {2:.5f} \t {3:.5f} \t {4:.5f} \t {5:.5f} \t {6:.5f} \t {7:.5f} \t {8:.5f} '
'\t {9:.5f} \t {10:.5f} \n'.format(enc1, enc2, enc3, conv1, conv2, conv3, conv4, alstm1, alstm2,
aout, cl))
f.close()
while True:
# only render worker 0
if self.name == 'w0' and self.render:
self.env.render()
agent_actions = self.env.act(s_act)
a = agent_actions[self.agent_nr]
self.saved_oh_actions[:, :-1] = self.saved_oh_actions[:, 1:] # time shift
self.saved_oh_actions[:, -1] = self.empty_oh_action[:, 0] # erase last value
self.saved_oh_actions[a, -1] = 1 # insert new one-hot
s_new, rewards, done, _ = self.env.step(agent_actions)
# not(10 in s_new[self.agent_nr]['alive']) #if done or agent 10 is dead
done = done or rewards[self.agent_nr] == -1
max_ammo = max(max_ammo, s_act[self.agent_nr]['ammo'])
# reward and buffer
r = rewards[self.agent_nr]
# if (10 in s_act[self.agent_nr]['alive']) and total_step!=1:
# r = get_reward(s_new,s_act,self.agent_nr,r,max_ammo,old_max_ammo,a,a_old,self.saved_oh_actions)
ep_r += r
self.A3CAgent.add_reward(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
update_glob_net(self.opt, self.lnet, self.gnet, self.A3CAgent, GAMMA)
if done:
record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.global_nr_steps,
s_new[self.agent_nr]['step_count'], self.name)
break
s_act = s_new
old_max_ammo = max_ammo
a_old = a
total_step += 1
self.res_queue.put(None)
def main():
global_net = A3CNet() # global network
global_net.train() # Set in training mode, only affect BN, Dropout etc.
filename = './A3C_v10_cnn_lstm_trained_critic.pth'
global_net.share_memory() # share the global parameters in multiprocessing
optimizer = SharedAdam(global_net.parameters(), lr=LEARNING_RATE) # global optimizer
load_checkpoint(filename, global_net, optimizer)
for g in optimizer.param_groups:
g['lr'] = LEARNING_RATE
global_ep, global_ep_r, global_nr_steps, res_queue = \
mp.Value('d', 0), mp.Value('d', 0.), mp.Value('d', 0.), mp.Queue()
# parallel training
workers = [Worker(global_net, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, i) for i in range(1)]
# for i in range(mp.cpu_count())]
[w.start() for w in workers]
res = [] # record episode reward to plot
while True:
r = res_queue.get()
if r is not None:
res.append(r)
else:
break
filename = './A3C_v10_cnn_lstm_trained_critic_actor_1.pth'
save_checkpoint(filename, global_net, optimizer)
with open('A3C_v10_cnn_lstm_trained_critic_actor.txt', 'a') as f:
for item in res:
f.write("%s\n" % item)
print('joining workers')
[w.join() for w in workers]
if __name__ == '__main__':
main()
| return | conditional_block |
a3c_v10_cnn_lstm.py | from A3C.sharedAdam import SharedAdam
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import torch.multiprocessing as mp
import numpy as np
import pommerman
from pommerman import agents
import matplotlib.pyplot as plt
# on windows, multiprocessing: https://pytorch.org/docs/stable/notes/windows.html
# use one thread for parallel as they will block
# each other otherwise (https://github.com/ikostrikov/pytorch-a3c/issues/33)
os.environ["OMP_NUM_THREADS"] = "1"
# define globals
S_statespace = 3
S_actionspace = 6
UPDATE_GLOBAL_ITER = 800
GAMMA = 0.95
LAMBDA = 1
MAX_EP = 2000
LEARNING_RATE = 0.00001
eps = np.finfo(np.float32).eps.item()
def load_checkpoint(filename, model, optimizer):
if os.path.isfile(filename):
print("# loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("# loaded checkpoint '{}'".format(filename))
else:
print("# no checkpoint found at '{}'".format(filename))
return model, optimizer
def save_checkpoint(filename, model, optimizer):
state = {
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filename)
def test(gnet):
John = A3CAgent(gnet)
John.set_train(False)
agentList = [John, agents.SimpleAgent(), agents.SimpleAgent(), agents.SimpleAgent()]
env = pommerman.make('PommeFFACompetition-v0', agentList)
wins = []
for ii in range(100):
John.reset_lstm()
state = env.reset()
done = False
while done == False:
if ii % 20 == 0:
env.render()
# time.sleep(1/24)
with torch.no_grad():
actions = env.act(state)
state_next, reward, done, info = env.step(actions)
print(ii, "DONE. Info:", info, "reward:", reward, "You win = ",
info['winners'][0] == 0 if info['result'].name == 'Win' else False)
wins.append(info['winners'][0] if info['result'].name == 'Win' else -1)
fig, ax = plt.subplots(num=1, clear=True)
winrate = wins.count(0) / len(wins)
fig, ax = plt.subplots(num=1, clear=True)
t, p0, p1, p2, p3 = plt.bar([-1, 0, 1, 2, 3], [
wins.count(-1) / len(wins) * 100,
wins.count(0) / len(wins) * 100,
wins.count(1) / len(wins) * 100,
wins.count(2) / len(wins) * 100,
wins.count(3) / len(wins) * 100])
t.set_facecolor('b')
p0.set_facecolor('r')
p1.set_facecolor('g')
p2.set_facecolor('b')
p3.set_facecolor('c')
ax.set_xticks([-1, 0, 1, 2, 3])
ax.set_xticklabels(['Ties', 'Agent\n(A2C)', 'Agent 1\nSimpleAgent', 'Agent 2\nSimpleAgent', 'Agent 3\nSimpleAgent'])
ax.set_ylim([0, 100])
ax.set_ylabel('Percent')
ax.set_title('Bomberman. FFA mode.')
print("Winrate: ", winrate)
plt.show()
def clean_up_agents(agents):
"""Stops all agents"""
return [agent.shutdown() for agent in agents]
def ensure_shared_grads(lnet, gnet):
for param, shared_param in zip(lnet.parameters(), gnet.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
def update_glob_net(opt, lnet, gnet, agent, GAMMA):
R = 0
actor_loss = 0
value_loss = 0
gae = 0
agent.values.append(torch.zeros(1)) # we need to add this for the deltaT equation(below)
# print(agent.rewards)
for i in reversed(range(len(agent.rewards))):
R = GAMMA * R + agent.rewards[i]
advantage = R - agent.values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
deltaT = agent.rewards[i] + GAMMA * agent.values[i + 1].data - agent.values[i].data
gae = gae * GAMMA * LAMBDA + deltaT # generalized advantage estimator
actor_loss = actor_loss - agent.logProbs[i] * gae - 0.01 * agent.entropies[i]
loss = (actor_loss + 0.5 * value_loss)
opt.zero_grad()
loss.backward(retain_graph=True)
ensure_shared_grads(lnet, gnet)
opt.step()
lnet.load_state_dict(gnet.state_dict())
lnet.zero_grad()
agent.clear_actions()
def record(global_ep, global_ep_r, ep_r, res_queue, global_nr_steps, nr_steps, name):
with global_ep.get_lock():
global_ep.value += 1
with global_ep_r.get_lock():
if global_ep_r.value == 0.:
global_ep_r.value = -1 # ep_r
global_nr_steps.value = nr_steps
else:
global_ep_r.value = global_ep_r.value * 0.99 + ep_r * 0.01
global_nr_steps.value = global_nr_steps.value * 0.99 + nr_steps * 0.01
res_queue.put(ep_r)
print(
name,
"Ep:", global_ep.value,
"| Avg Ep_r: %.2f" % global_ep_r.value,
"| Avg Steps: %d" % global_nr_steps.value,
"| Ep_r / Steps: %.2f" % (ep_r / nr_steps),
# "| Ep_r: %.2f" % ep_r,
)
def get_reward(state, old_state, agent_nr, start_reward, max_ammo, old_max_ammo, action, last_action,
action_history_oh):
# developer note:
# on the board, 0: nothing,
# 1: unbreakable wall,
# 2: wall,
# 3: bomb,
# 4: flames,
# 6,7,8: pick-ups:
# 11,12 and 13: enemies
reward = 0
# reward stage 0: teach the agent to move and make invalid actions
# (move into walls, place bombs when you have no ammo)
ammo = old_state[agent_nr]['ammo']
if action != 5:
if state[agent_nr]['position'] == old_state[agent_nr]['position']:
reward -= 0.03
elif ammo == 0:
reward -= 0.03
# reward stage 1: teach agent to bomb walls (and enemies)
# compute adjacent squares
position = state[agent_nr]['position']
adj = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not ((i == j) or i + j == 0)]
adjacent = np.matlib.repmat(position, 4, 1)
adjacent = adjacent - np.asarray(adj)
# limit adjacent squares to only include inside board
adjacent = np.clip(adjacent, 0, 10)
if action == 5 and ammo > 0:
board = state[agent_nr]['board']
for xy in adjacent:
square_val = board[xy[0]][xy[1]]
if square_val == 2:
reward += 0.02
elif square_val == 11 or square_val == 12 or square_val == 13:
reward += 0.05
# reward stage2: teach agent to not stand on or beside bombs
# reward /= 4
# bomb_life = state[agent_nr]['bomb_life']
# if we stand on a bomb or next to bomb
# just_placed_bomb = np.logical_xor(last_action==5,action==5)
# if bomb_life[position]>0 and not(just_placed_bomb):
# reward-=0.1*(9-bomb_life[position])
# for xy in adjacent:
# if bomb_life[xy[0]][xy[1]]>0:
# reward -=0.05*(9-bomb_life[xy[0]][xy[1]])
# reward agent for picking up power-ups
blast_strength = state[agent_nr]['blast_strength']
old_blast_strength = old_state[agent_nr]['blast_strength']
can_kick = int(state[agent_nr]['can_kick'])
old_can_kick = int(old_state[agent_nr]['can_kick'])
reward += (can_kick - old_can_kick) * 0.02
reward += (max_ammo - old_max_ammo) * 0.02
reward += (blast_strength - old_blast_strength) * 0.02
# discourage action repetition by cross correlation of previous actions
# corr = signal.correlate2d(action_history_oh,action_history_oh)[5:-5][0]
# corr.sort()
# reward -= corr[-2]/100 #the second highest correlation (highest is found at 0-lag and is always 10),
# only reward game play at last stage
reward += start_reward
return reward
class A3CNet(nn.Module):
def __init__(self):
super(A3CNet, self).__init__()
self.conv1 = nn.Conv2d(S_statespace, 66, 3, stride=1, groups=3)
self.conv2 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.conv3 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.conv4 = nn.Conv2d(66, 66, 3, stride=1, padding=1, groups=3)
self.encoder1 = nn.Linear(11237, 1000)
self.encoder2 = nn.Linear(1000, 200)
self.encoder3 = nn.Linear(200, 50)
self.critic_linear = nn.Linear(83, 1)
self.actor_lstm = nn.LSTM(50, S_actionspace, 2, batch_first=True)
self.actor_out = nn.Linear(S_actionspace, S_actionspace)
torch.nn.init.xavier_uniform_(self.encoder1.weight)
torch.nn.init.xavier_uniform_(self.encoder2.weight)
torch.nn.init.xavier_uniform_(self.encoder3.weight)
torch.nn.init.xavier_uniform_(self.critic_linear.weight)
# torch.nn.init.xavier_uniform_(self.actor_linear.weight)
def forward(self, x, raw, hx, cx):
timesteps, batch_size, C, H, W = x.size()
x = x.view(batch_size * timesteps, C, H, W)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(timesteps, batch_size, -1)
x = torch.cat((x, raw), -1)
x = F.relu(self.encoder1(x))
x = F.relu(self.encoder2(x))
x = F.relu(self.encoder3(x)) # .permute(1, 0, 2)
# critic
value = self.critic_linear(raw)
# actor
x, (hx, cx) = self.actor_lstm(x, (hx, cx))
action = self.actor_out(x)
return action, value, (hx, cx)
@staticmethod
def get_lstm_reset():
hx = torch.zeros(2, 1, 6)
cx = torch.zeros(2, 1, 6)
return hx, cx
class A3CAgent(agents.BaseAgent):
def __init__(self, model):
super(A3CAgent, self).__init__()
self.model = model
self.hn, self.cn = self.model.get_lstm_reset()
# self.hx2, self.cx2 = self.model.get_lstm_reset()
self.rewards = []
self.values = []
self.logProbs = []
self.entropies = []
self.action_history = np.zeros(6)
self.train = True
def act(self, state, action_space):
if self.train:
obs, raw = self.observe(state, self.action_history)
logit, value, (hn, cn) = self.model(torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0),
torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0), self.hn,
self.cn)
logit, value = logit.squeeze(0), value.squeeze(0) # remove batch dimension
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
entropy = -(log_prob * prob).sum(1)
self.entropies.append(entropy)
try:
# action = torch.argmax(logit,dim=-1).unsqueeze(0) #JUST TEST!!!
action = Categorical(prob).sample().unsqueeze(0)
except:
print('obs: ', obs.data)
print('logit: ', logit.data)
print('value: ', value.data)
log_prob = log_prob.gather(1, action)
self.values.append(value)
self.logProbs.append(log_prob)
a = action.item()
else:
obs, raw = self.observe(state, self.action_history)
logit, value, (hn, cn) = self.model(torch.from_numpy(obs).float().unsqueeze(0).unsqueeze(0),
torch.from_numpy(raw).float().unsqueeze(0).unsqueeze(0),
self.hn,
self.cn)
logit = logit.squeeze(0) # remove batch dimension
prob = F.softmax(logit, dim=-1)
a = torch.argmax(logit, dim=-1).item()
self.action_history[:-1] = self.action_history[1:]
self.action_history[-1] = a
return a
def set_train(self, input):
self.train = input
def add_reward(self, reward):
self.reward = max(min(reward, 1), -1)
self.rewards.append(self.reward)
def observe(self, state, action_history):
obs_width = 5 # choose uneven number
obs_radius = obs_width // 2
board = state['board']
blast_strength = state['bomb_blast_strength']
bomb_life = state['bomb_life']
pos = np.asarray(state['position'])
board_pad = np.pad(board, (obs_radius, obs_radius), 'constant', constant_values=1)
blast_strength_pad = np.pad(blast_strength, (obs_radius, obs_radius), 'constant', constant_values=0)
life_pad = np.pad(bomb_life, (obs_radius, obs_radius), 'constant', constant_values=0)
# centered, padded board
board_cent = board_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
bomb_bs_cent = blast_strength_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
bomb_life_cent = life_pad[pos[0]:pos[0] + 2 * obs_radius + 1, pos[1]:pos[1] + 2 * obs_radius + 1]
ammo = np.asarray([state['ammo']])
my_bs = np.asarray([state['blast_strength']])
# note:
# on the board, 0: nothing,
# 1: unbreakable wall,
# 2: wall,
# 3: bomb,
# 4: flames,
# 6,7,8: pick-ups:
# 11,12 and 13: enemies
out = np.empty((3, 11 + 2 * obs_radius, 11 + 2 * obs_radius), dtype=np.float32)
out[0, :, :] = board_pad
out[1, :, :] = blast_strength_pad
out[2, :, :] = life_pad
# get raw surroundings
raw = np.concatenate((board_cent.flatten(), bomb_bs_cent.flatten()), 0)
raw = np.concatenate((raw, bomb_life_cent.flatten()), 0)
raw = np.concatenate((raw, ammo), 0)
raw = np.concatenate((raw, my_bs), 0)
raw = np.concatenate((raw, action_history), 0)
return out, raw
def | (self):
# TODO I believe here should be self.hn, self.cn
self.hx, self.cx = self.model.get_lstm_reset()
self.action_history = np.zeros(6)
def clear_actions(self):
self.values = []
self.logProbs = []
self.rewards = []
self.entropies = []
return self
class Worker(mp.Process):
def __init__(self, gnet, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, name):
super(Worker, self).__init__()
self.agent_nr = 0
self.name = 'w%s' % name
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, optimizer
self.lnet = A3CNet()
self.lnet.load_state_dict(gnet.state_dict())
self.lnet.train()
self.lnet.zero_grad()
self.results = []
self.global_nr_steps = global_nr_steps
self.empty_oh_action = np.zeros((6, 1))
self.saved_oh_actions = np.repeat(self.empty_oh_action, 6, 1)
def run(self):
# If we move this to "init", we get an error on recursion depth
self.A3CAgent = A3CAgent(self.lnet)
self.agentList = [self.A3CAgent, agents.SimpleAgent(), agents.RandomAgent(), agents.RandomAgent()]
self.env = env = pommerman.make('PommeFFACompetition-v0', self.agentList)
total_step = 1
while self.g_ep.value < MAX_EP:
# Step 2). worker interacts with environment
s_act = self.env.reset()
max_ammo = old_max_ammo = 1
ep_r = 0.
self.render = False # self.g_ep.value % 20==0
self.A3CAgent.reset_lstm()
if self.name == 'w0':
enc1 = abs(torch.sum(self.gnet.encoder1.weight.data).item())
enc2 = abs(torch.sum(self.gnet.encoder2.weight.data).item())
enc3 = abs(torch.sum(self.gnet.encoder3.weight.data).item())
conv1 = abs(torch.sum(self.gnet.conv1.weight.data).item())
conv2 = abs(torch.sum(self.gnet.conv2.weight.data).item())
conv3 = abs(torch.sum(self.gnet.conv3.weight.data).item())
conv4 = abs(torch.sum(self.gnet.conv4.weight.data).item())
cl = abs(torch.sum(self.gnet.critic_linear.weight.data).item())
alstm1 = abs(torch.sum(self.gnet.actor_lstm.weight_ih_l0.data).item())
alstm2 = abs(torch.sum(self.gnet.actor_lstm.weight_hh_l0.data).item())
aout = abs(torch.sum(self.gnet.actor_out.weight.data).item())
f = open("AbsSummedWeights_ActorCritic_v2.txt", "a")
f.write(
'{0:.5f} \t {1:.5f} \t {2:.5f} \t {3:.5f} \t {4:.5f} \t {5:.5f} \t {6:.5f} \t {7:.5f} \t {8:.5f} '
'\t {9:.5f} \t {10:.5f} \n'.format(enc1, enc2, enc3, conv1, conv2, conv3, conv4, alstm1, alstm2,
aout, cl))
f.close()
while True:
# only render worker 0
if self.name == 'w0' and self.render:
self.env.render()
agent_actions = self.env.act(s_act)
a = agent_actions[self.agent_nr]
self.saved_oh_actions[:, :-1] = self.saved_oh_actions[:, 1:] # time shift
self.saved_oh_actions[:, -1] = self.empty_oh_action[:, 0] # erase last value
self.saved_oh_actions[a, -1] = 1 # insert new one-hot
s_new, rewards, done, _ = self.env.step(agent_actions)
# not(10 in s_new[self.agent_nr]['alive']) #if done or agent 10 is dead
done = done or rewards[self.agent_nr] == -1
max_ammo = max(max_ammo, s_act[self.agent_nr]['ammo'])
# reward and buffer
r = rewards[self.agent_nr]
# if (10 in s_act[self.agent_nr]['alive']) and total_step!=1:
# r = get_reward(s_new,s_act,self.agent_nr,r,max_ammo,old_max_ammo,a,a_old,self.saved_oh_actions)
ep_r += r
self.A3CAgent.add_reward(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
update_glob_net(self.opt, self.lnet, self.gnet, self.A3CAgent, GAMMA)
if done:
record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.global_nr_steps,
s_new[self.agent_nr]['step_count'], self.name)
break
s_act = s_new
old_max_ammo = max_ammo
a_old = a
total_step += 1
self.res_queue.put(None)
def main():
global_net = A3CNet() # global network
global_net.train() # Set in training mode, only affect BN, Dropout etc.
filename = './A3C_v10_cnn_lstm_trained_critic.pth'
global_net.share_memory() # share the global parameters in multiprocessing
optimizer = SharedAdam(global_net.parameters(), lr=LEARNING_RATE) # global optimizer
load_checkpoint(filename, global_net, optimizer)
for g in optimizer.param_groups:
g['lr'] = LEARNING_RATE
global_ep, global_ep_r, global_nr_steps, res_queue = \
mp.Value('d', 0), mp.Value('d', 0.), mp.Value('d', 0.), mp.Queue()
# parallel training
workers = [Worker(global_net, optimizer, global_ep, global_ep_r, global_nr_steps, res_queue, i) for i in range(1)]
# for i in range(mp.cpu_count())]
[w.start() for w in workers]
res = [] # record episode reward to plot
while True:
r = res_queue.get()
if r is not None:
res.append(r)
else:
break
filename = './A3C_v10_cnn_lstm_trained_critic_actor_1.pth'
save_checkpoint(filename, global_net, optimizer)
with open('A3C_v10_cnn_lstm_trained_critic_actor.txt', 'a') as f:
for item in res:
f.write("%s\n" % item)
print('joining workers')
[w.join() for w in workers]
if __name__ == '__main__':
main()
| reset_lstm | identifier_name |
LCOGT_submit_requests.py | """
Validate and submit requests made in LCOGT_make_requests.py
"""
###########
# imports #
###########
import pickle, requests, socket
from parse import search
import os
from glob import glob
import numpy as np, pandas as pd
from astropy.time import Time
import astropy.units as u
HOMEDIR = os.path.expanduser('~')
API_FILE = os.path.join(HOMEDIR, '.lcogt_api_token')
if not os.path.exists(API_FILE):
raise NotImplementedError('where to get API file?')
with open(API_FILE, 'r') as f:
l = f.readlines()
token = str(l[0].replace('\n',''))
from cdips_followup import __path__
DATADIR = os.path.join(os.path.dirname(__path__[0]), 'data')
RESULTSDIR = os.path.join(os.path.dirname(__path__[0]), 'results')
#############
# functions #
#############
def validate_single_request(requestgroup, max_duration_error=15,
raise_error=True):
"""
Submit the RequestGroup through the "validate" API, cf.
https://developers.lco.global/#validate-a-requestgroup
max_duration_error: in minutes, is the maximum allowable difference between
the start & end times of the request, and the _billed duration_ of the
request. By design in the API, the billed duration is always shorter than
the (end-start) time. I allotted 1 hour on either side for scheduling, so a
bit of slack on either is fine.
"""
is_modified = False
response = requests.post(
'https://observe.lco.global/api/requestgroups/validate/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
requestgroup_dict = response.json()
# If you get an error because your incorrectly estimated the number of
# exposures, correct it here.
if len(requestgroup_dict['errors']) >= 1:
if 'non_field_errors' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['non_field_errors']))
print(42*'-')
return np.nan, np.nan
if 'requests' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['requests']))
print(42*'-')
try:
errmsg = (
requestgroup_dict['errors']['requests'][0]['non_field_errors'][0]
)
except:
return np.nan, np.nan
if 'the target is visible for a maximum of' in errmsg:
# get the strings of durations, and decrement the requested number
# of exposures by the right multiple!
sr = search("According{}maximum of {} hours "
"within{}your request {} hours. Consider{}",
errmsg)
max_dur = float(sr[1])
req_dur = float(sr[3])
if req_dur == max_dur:
# {:.1f} formatted strings. genius ._.
req_dur += 0.01
if not req_dur > max_dur:
errmsg = (
'ERR! max dur: {}, req dur: {}'.format(max_dur, req_dur)
)
raise ValueError(errmsg)
diff_dur_sec = (req_dur - max_dur)*60*60
# previously, guessed
#
# expcount = np.floor(
# (endtime-starttime).to(u.hr)
# /
# (exptime*u.second + read_time_per_exposure).to(u.hr)
# )
#
# that produced the difference above...
exptime_sec = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_time']
)
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
read_time_per_exposure = 30*u.second # from Bayliss' completed runs
n_exposures_diff = int(
np.ceil(diff_dur_sec/
(exptime_sec + read_time_per_exposure.value)
)
)
new_expcount = expcount - n_exposures_diff
print(42*'-')
print('WRN!: max durn: {} hr, req durn: {} hr. had {} exposures, decrement to {}'.
format(max_dur, req_dur, expcount, new_expcount))
print(42*'-')
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count'] = new_expcount
is_modified = True
return requestgroup, is_modified
else:
if raise_error:
raise NotImplementedError('got new API error: {}'.format(errmsg))
else:
print('WRN!: Got API error: {}'.format(errmsg))
print(requestgroup)
return np.nan, np.nan
billed_durn = (
requestgroup_dict['request_durations']['requests'][0]['duration']
)
start = Time(requestgroup['requests'][0]['windows'][0]['start'])
end = Time(requestgroup['requests'][0]['windows'][0]['end'])
window_durn = (end - start).value*24*60*60
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
if (window_durn - billed_durn)/60 > max_duration_error:
errmsg = (
'ERROR! got a window of {:.2f} min; but tried to bill {:.2f} min.'.
format(window_durn/60, billed_durn/60)
)
print(42*'-')
print(errmsg)
print(42*'-')
#import IPython; IPython.embed()
#raise AssertionError(errmsg) #FIXME
return np.nan, np.nan
else:
print(42*'-')
print('ACCEPTED! window durn: {:.2f} min, billed {:.2f} min. had {:d} exposures'.
format(window_durn/60, billed_durn/60, expcount))
print(42*'-')
return requestgroup, is_modified
def submit_single_request(requestgroup):
# Submit the fully formed RequestGroup
response = requests.post(
'https://observe.lco.global/api/requestgroups/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
# The API returns the newly submitted requestgroup as json
requestgroup_dict = response.json()
# Print out the url on the portal where we can view the submitted request
print('View the observing request: '
'https://observe.lco.global/requestgroups/{}/'.
format(requestgroup_dict['id']))
def submit_all_requests(savstr, validate_all=1, submit_all=0,
max_N_transit_per_object=3, max_duration_error=15,
semesterstr='20A'):
"""
savstr: used for directory management
validate_all: if true, first validates observation requests to ensure that
they can be submitted
submit_all: actually submits them
max_N_transit_per_object:
max_duration_error: in minutes, maximum acceptable difference between
_desired_ observation window, and the window that the LCOGT system accepts.
"""
if submit_all:
assert validate_all
if not 'ephemupdate' in savstr:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_observability/'.format(semesterstr))
)
else:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_updated_requests/'.format(semesterstr))
)
pkl_savpath = (
os.path.join(resultsdir, '{}.pkl'.format(savstr))
)
mult_savpath = (
os.path.join(resultsdir, '{}_summary.csv'.format(savstr))
)
with open(pkl_savpath, 'rb') as f:
r = pickle.load(f)
df = pd.read_csv(mult_savpath)
if submit_all:
print('ATTEMPTING TO SUBMIT THE FOLLOWING')
df['submit_durn'] = (
df['sched_duration'] *
np.minimum(df['n_requests'], max_N_transit_per_object)
)
print(df)
print(42*'=')
print('\nTotal time: {:.1f} hr\n'.format(np.sum(df['submit_durn'])))
print(42*'=')
#
# sort all the available transit windows for each target by time. submit
# the earliest `max_N_transit_per_object' (e.g., 2 transits).
#
starts = []
for _r in r:
starts.append(
[ Time(__r['requests'][0]['windows'][0]['start']) for __r in _r ]
)
time_sort_inds = []
for start in starts:
time_sort_inds.append(
np.argsort(start)
)
#
# iterate over available requests for each target that met the magnitude
# and depth cuts
#
for _r, ind in zip(r, time_sort_inds):
_requests_sorted = np.array(_r)[ind]
_requests_to_submit = _requests_sorted[:max_N_transit_per_object]
for requestgroup in _requests_to_submit:
if validate_all:
if not submit_all:
print(requestgroup)
requestgroup, is_modified = (
validate_single_request(
requestgroup, max_duration_error=max_duration_error
)
)
n_iter = 0
if is_modified and np.isfinite(is_modified):
|
if submit_all:
if isinstance(requestgroup, dict):
print('SUBMITTING...')
print(requestgroup)
submit_single_request(requestgroup)
else:
print('vvv DID NOT SUBMIT B/C FAILED TO VALIDATE vvv')
print(requestgroup)
print('^^^ DID NOT SUBMIT B/C FAILED TO VALIDATE ^^^')
if __name__=="__main__":
validate_all = 1
submit_all = 1
max_N_transit_per_object = 2
max_duration_error = 20
eventclass = 'OIBEO'
savstr = 'request_19B_59859387_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'request_19B_2m_faint_{}'.format(eventclass)
# eventclass = 'OIBE'
# savstr = 'bright_shallow_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'midpartials_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'toppartials_19B_{}'.format(eventclass)
# max_duration_error = 15
# savstr = 'request_TIC29786532_19B'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint_v2'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint'
# max_N_transit_per_object = 4 # actually 3, b/c one fails
# savstr = 'all_requests_19B_easyones'
# max_N_transit_per_object = 3
submit_all_requests(savstr, validate_all=validate_all,
submit_all=submit_all,
max_N_transit_per_object=max_N_transit_per_object,
max_duration_error=max_duration_error)
| while is_modified:
if n_iter >= 10:
raise AssertionError('too many iterations')
requestgroup, is_modified = (
validate_single_request(
requestgroup,
max_duration_error=max_duration_error
)
)
if not isinstance(requestgroup, dict):
if not np.isfinite(requestgroup):
break
n_iter += 1 | conditional_block |
LCOGT_submit_requests.py | """
Validate and submit requests made in LCOGT_make_requests.py
"""
###########
# imports #
###########
import pickle, requests, socket
from parse import search
import os
from glob import glob
import numpy as np, pandas as pd
from astropy.time import Time
import astropy.units as u
HOMEDIR = os.path.expanduser('~')
API_FILE = os.path.join(HOMEDIR, '.lcogt_api_token')
if not os.path.exists(API_FILE):
raise NotImplementedError('where to get API file?')
with open(API_FILE, 'r') as f:
l = f.readlines()
token = str(l[0].replace('\n',''))
from cdips_followup import __path__
DATADIR = os.path.join(os.path.dirname(__path__[0]), 'data')
RESULTSDIR = os.path.join(os.path.dirname(__path__[0]), 'results')
#############
# functions #
#############
def validate_single_request(requestgroup, max_duration_error=15,
raise_error=True):
"""
Submit the RequestGroup through the "validate" API, cf.
https://developers.lco.global/#validate-a-requestgroup
max_duration_error: in minutes, is the maximum allowable difference between
the start & end times of the request, and the _billed duration_ of the
request. By design in the API, the billed duration is always shorter than
the (end-start) time. I allotted 1 hour on either side for scheduling, so a
bit of slack on either is fine.
"""
is_modified = False
response = requests.post(
'https://observe.lco.global/api/requestgroups/validate/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
requestgroup_dict = response.json()
# If you get an error because your incorrectly estimated the number of
# exposures, correct it here.
if len(requestgroup_dict['errors']) >= 1:
if 'non_field_errors' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['non_field_errors']))
print(42*'-')
return np.nan, np.nan
if 'requests' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['requests']))
print(42*'-')
try:
errmsg = (
requestgroup_dict['errors']['requests'][0]['non_field_errors'][0]
)
except:
return np.nan, np.nan
if 'the target is visible for a maximum of' in errmsg:
# get the strings of durations, and decrement the requested number
# of exposures by the right multiple!
sr = search("According{}maximum of {} hours "
"within{}your request {} hours. Consider{}",
errmsg)
max_dur = float(sr[1])
req_dur = float(sr[3])
if req_dur == max_dur:
# {:.1f} formatted strings. genius ._.
req_dur += 0.01
if not req_dur > max_dur:
errmsg = (
'ERR! max dur: {}, req dur: {}'.format(max_dur, req_dur)
)
raise ValueError(errmsg)
diff_dur_sec = (req_dur - max_dur)*60*60
# previously, guessed
#
# expcount = np.floor(
# (endtime-starttime).to(u.hr)
# /
# (exptime*u.second + read_time_per_exposure).to(u.hr)
# )
#
# that produced the difference above...
exptime_sec = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_time']
)
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
read_time_per_exposure = 30*u.second # from Bayliss' completed runs
n_exposures_diff = int(
np.ceil(diff_dur_sec/
(exptime_sec + read_time_per_exposure.value)
)
)
new_expcount = expcount - n_exposures_diff
print(42*'-')
print('WRN!: max durn: {} hr, req durn: {} hr. had {} exposures, decrement to {}'.
format(max_dur, req_dur, expcount, new_expcount))
print(42*'-')
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count'] = new_expcount
is_modified = True
return requestgroup, is_modified
else:
if raise_error:
raise NotImplementedError('got new API error: {}'.format(errmsg))
else:
print('WRN!: Got API error: {}'.format(errmsg))
print(requestgroup)
return np.nan, np.nan
billed_durn = (
requestgroup_dict['request_durations']['requests'][0]['duration']
)
start = Time(requestgroup['requests'][0]['windows'][0]['start'])
end = Time(requestgroup['requests'][0]['windows'][0]['end'])
window_durn = (end - start).value*24*60*60
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
if (window_durn - billed_durn)/60 > max_duration_error:
errmsg = (
'ERROR! got a window of {:.2f} min; but tried to bill {:.2f} min.'.
format(window_durn/60, billed_durn/60)
)
print(42*'-')
print(errmsg)
print(42*'-')
#import IPython; IPython.embed()
#raise AssertionError(errmsg) #FIXME
return np.nan, np.nan
else:
print(42*'-')
print('ACCEPTED! window durn: {:.2f} min, billed {:.2f} min. had {:d} exposures'. |
def submit_single_request(requestgroup):
# Submit the fully formed RequestGroup
response = requests.post(
'https://observe.lco.global/api/requestgroups/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
# The API returns the newly submitted requestgroup as json
requestgroup_dict = response.json()
# Print out the url on the portal where we can view the submitted request
print('View the observing request: '
'https://observe.lco.global/requestgroups/{}/'.
format(requestgroup_dict['id']))
def submit_all_requests(savstr, validate_all=1, submit_all=0,
max_N_transit_per_object=3, max_duration_error=15,
semesterstr='20A'):
"""
savstr: used for directory management
validate_all: if true, first validates observation requests to ensure that
they can be submitted
submit_all: actually submits them
max_N_transit_per_object:
max_duration_error: in minutes, maximum acceptable difference between
_desired_ observation window, and the window that the LCOGT system accepts.
"""
if submit_all:
assert validate_all
if not 'ephemupdate' in savstr:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_observability/'.format(semesterstr))
)
else:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_updated_requests/'.format(semesterstr))
)
pkl_savpath = (
os.path.join(resultsdir, '{}.pkl'.format(savstr))
)
mult_savpath = (
os.path.join(resultsdir, '{}_summary.csv'.format(savstr))
)
with open(pkl_savpath, 'rb') as f:
r = pickle.load(f)
df = pd.read_csv(mult_savpath)
if submit_all:
print('ATTEMPTING TO SUBMIT THE FOLLOWING')
df['submit_durn'] = (
df['sched_duration'] *
np.minimum(df['n_requests'], max_N_transit_per_object)
)
print(df)
print(42*'=')
print('\nTotal time: {:.1f} hr\n'.format(np.sum(df['submit_durn'])))
print(42*'=')
#
# sort all the available transit windows for each target by time. submit
# the earliest `max_N_transit_per_object' (e.g., 2 transits).
#
starts = []
for _r in r:
starts.append(
[ Time(__r['requests'][0]['windows'][0]['start']) for __r in _r ]
)
time_sort_inds = []
for start in starts:
time_sort_inds.append(
np.argsort(start)
)
#
# iterate over available requests for each target that met the magnitude
# and depth cuts
#
for _r, ind in zip(r, time_sort_inds):
_requests_sorted = np.array(_r)[ind]
_requests_to_submit = _requests_sorted[:max_N_transit_per_object]
for requestgroup in _requests_to_submit:
if validate_all:
if not submit_all:
print(requestgroup)
requestgroup, is_modified = (
validate_single_request(
requestgroup, max_duration_error=max_duration_error
)
)
n_iter = 0
if is_modified and np.isfinite(is_modified):
while is_modified:
if n_iter >= 10:
raise AssertionError('too many iterations')
requestgroup, is_modified = (
validate_single_request(
requestgroup,
max_duration_error=max_duration_error
)
)
if not isinstance(requestgroup, dict):
if not np.isfinite(requestgroup):
break
n_iter += 1
if submit_all:
if isinstance(requestgroup, dict):
print('SUBMITTING...')
print(requestgroup)
submit_single_request(requestgroup)
else:
print('vvv DID NOT SUBMIT B/C FAILED TO VALIDATE vvv')
print(requestgroup)
print('^^^ DID NOT SUBMIT B/C FAILED TO VALIDATE ^^^')
if __name__=="__main__":
validate_all = 1
submit_all = 1
max_N_transit_per_object = 2
max_duration_error = 20
eventclass = 'OIBEO'
savstr = 'request_19B_59859387_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'request_19B_2m_faint_{}'.format(eventclass)
# eventclass = 'OIBE'
# savstr = 'bright_shallow_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'midpartials_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'toppartials_19B_{}'.format(eventclass)
# max_duration_error = 15
# savstr = 'request_TIC29786532_19B'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint_v2'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint'
# max_N_transit_per_object = 4 # actually 3, b/c one fails
# savstr = 'all_requests_19B_easyones'
# max_N_transit_per_object = 3
submit_all_requests(savstr, validate_all=validate_all,
submit_all=submit_all,
max_N_transit_per_object=max_N_transit_per_object,
max_duration_error=max_duration_error) | format(window_durn/60, billed_durn/60, expcount))
print(42*'-')
return requestgroup, is_modified
| random_line_split |
LCOGT_submit_requests.py | """
Validate and submit requests made in LCOGT_make_requests.py
"""
###########
# imports #
###########
import pickle, requests, socket
from parse import search
import os
from glob import glob
import numpy as np, pandas as pd
from astropy.time import Time
import astropy.units as u
HOMEDIR = os.path.expanduser('~')
API_FILE = os.path.join(HOMEDIR, '.lcogt_api_token')
if not os.path.exists(API_FILE):
raise NotImplementedError('where to get API file?')
with open(API_FILE, 'r') as f:
l = f.readlines()
token = str(l[0].replace('\n',''))
from cdips_followup import __path__
DATADIR = os.path.join(os.path.dirname(__path__[0]), 'data')
RESULTSDIR = os.path.join(os.path.dirname(__path__[0]), 'results')
#############
# functions #
#############
def | (requestgroup, max_duration_error=15,
raise_error=True):
"""
Submit the RequestGroup through the "validate" API, cf.
https://developers.lco.global/#validate-a-requestgroup
max_duration_error: in minutes, is the maximum allowable difference between
the start & end times of the request, and the _billed duration_ of the
request. By design in the API, the billed duration is always shorter than
the (end-start) time. I allotted 1 hour on either side for scheduling, so a
bit of slack on either is fine.
"""
is_modified = False
response = requests.post(
'https://observe.lco.global/api/requestgroups/validate/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
requestgroup_dict = response.json()
# If you get an error because your incorrectly estimated the number of
# exposures, correct it here.
if len(requestgroup_dict['errors']) >= 1:
if 'non_field_errors' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['non_field_errors']))
print(42*'-')
return np.nan, np.nan
if 'requests' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['requests']))
print(42*'-')
try:
errmsg = (
requestgroup_dict['errors']['requests'][0]['non_field_errors'][0]
)
except:
return np.nan, np.nan
if 'the target is visible for a maximum of' in errmsg:
# get the strings of durations, and decrement the requested number
# of exposures by the right multiple!
sr = search("According{}maximum of {} hours "
"within{}your request {} hours. Consider{}",
errmsg)
max_dur = float(sr[1])
req_dur = float(sr[3])
if req_dur == max_dur:
# {:.1f} formatted strings. genius ._.
req_dur += 0.01
if not req_dur > max_dur:
errmsg = (
'ERR! max dur: {}, req dur: {}'.format(max_dur, req_dur)
)
raise ValueError(errmsg)
diff_dur_sec = (req_dur - max_dur)*60*60
# previously, guessed
#
# expcount = np.floor(
# (endtime-starttime).to(u.hr)
# /
# (exptime*u.second + read_time_per_exposure).to(u.hr)
# )
#
# that produced the difference above...
exptime_sec = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_time']
)
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
read_time_per_exposure = 30*u.second # from Bayliss' completed runs
n_exposures_diff = int(
np.ceil(diff_dur_sec/
(exptime_sec + read_time_per_exposure.value)
)
)
new_expcount = expcount - n_exposures_diff
print(42*'-')
print('WRN!: max durn: {} hr, req durn: {} hr. had {} exposures, decrement to {}'.
format(max_dur, req_dur, expcount, new_expcount))
print(42*'-')
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count'] = new_expcount
is_modified = True
return requestgroup, is_modified
else:
if raise_error:
raise NotImplementedError('got new API error: {}'.format(errmsg))
else:
print('WRN!: Got API error: {}'.format(errmsg))
print(requestgroup)
return np.nan, np.nan
billed_durn = (
requestgroup_dict['request_durations']['requests'][0]['duration']
)
start = Time(requestgroup['requests'][0]['windows'][0]['start'])
end = Time(requestgroup['requests'][0]['windows'][0]['end'])
window_durn = (end - start).value*24*60*60
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
if (window_durn - billed_durn)/60 > max_duration_error:
errmsg = (
'ERROR! got a window of {:.2f} min; but tried to bill {:.2f} min.'.
format(window_durn/60, billed_durn/60)
)
print(42*'-')
print(errmsg)
print(42*'-')
#import IPython; IPython.embed()
#raise AssertionError(errmsg) #FIXME
return np.nan, np.nan
else:
print(42*'-')
print('ACCEPTED! window durn: {:.2f} min, billed {:.2f} min. had {:d} exposures'.
format(window_durn/60, billed_durn/60, expcount))
print(42*'-')
return requestgroup, is_modified
def submit_single_request(requestgroup):
# Submit the fully formed RequestGroup
response = requests.post(
'https://observe.lco.global/api/requestgroups/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
# The API returns the newly submitted requestgroup as json
requestgroup_dict = response.json()
# Print out the url on the portal where we can view the submitted request
print('View the observing request: '
'https://observe.lco.global/requestgroups/{}/'.
format(requestgroup_dict['id']))
def submit_all_requests(savstr, validate_all=1, submit_all=0,
max_N_transit_per_object=3, max_duration_error=15,
semesterstr='20A'):
"""
savstr: used for directory management
validate_all: if true, first validates observation requests to ensure that
they can be submitted
submit_all: actually submits them
max_N_transit_per_object:
max_duration_error: in minutes, maximum acceptable difference between
_desired_ observation window, and the window that the LCOGT system accepts.
"""
if submit_all:
assert validate_all
if not 'ephemupdate' in savstr:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_observability/'.format(semesterstr))
)
else:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_updated_requests/'.format(semesterstr))
)
pkl_savpath = (
os.path.join(resultsdir, '{}.pkl'.format(savstr))
)
mult_savpath = (
os.path.join(resultsdir, '{}_summary.csv'.format(savstr))
)
with open(pkl_savpath, 'rb') as f:
r = pickle.load(f)
df = pd.read_csv(mult_savpath)
if submit_all:
print('ATTEMPTING TO SUBMIT THE FOLLOWING')
df['submit_durn'] = (
df['sched_duration'] *
np.minimum(df['n_requests'], max_N_transit_per_object)
)
print(df)
print(42*'=')
print('\nTotal time: {:.1f} hr\n'.format(np.sum(df['submit_durn'])))
print(42*'=')
#
# sort all the available transit windows for each target by time. submit
# the earliest `max_N_transit_per_object' (e.g., 2 transits).
#
starts = []
for _r in r:
starts.append(
[ Time(__r['requests'][0]['windows'][0]['start']) for __r in _r ]
)
time_sort_inds = []
for start in starts:
time_sort_inds.append(
np.argsort(start)
)
#
# iterate over available requests for each target that met the magnitude
# and depth cuts
#
for _r, ind in zip(r, time_sort_inds):
_requests_sorted = np.array(_r)[ind]
_requests_to_submit = _requests_sorted[:max_N_transit_per_object]
for requestgroup in _requests_to_submit:
if validate_all:
if not submit_all:
print(requestgroup)
requestgroup, is_modified = (
validate_single_request(
requestgroup, max_duration_error=max_duration_error
)
)
n_iter = 0
if is_modified and np.isfinite(is_modified):
while is_modified:
if n_iter >= 10:
raise AssertionError('too many iterations')
requestgroup, is_modified = (
validate_single_request(
requestgroup,
max_duration_error=max_duration_error
)
)
if not isinstance(requestgroup, dict):
if not np.isfinite(requestgroup):
break
n_iter += 1
if submit_all:
if isinstance(requestgroup, dict):
print('SUBMITTING...')
print(requestgroup)
submit_single_request(requestgroup)
else:
print('vvv DID NOT SUBMIT B/C FAILED TO VALIDATE vvv')
print(requestgroup)
print('^^^ DID NOT SUBMIT B/C FAILED TO VALIDATE ^^^')
if __name__=="__main__":
validate_all = 1
submit_all = 1
max_N_transit_per_object = 2
max_duration_error = 20
eventclass = 'OIBEO'
savstr = 'request_19B_59859387_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'request_19B_2m_faint_{}'.format(eventclass)
# eventclass = 'OIBE'
# savstr = 'bright_shallow_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'midpartials_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'toppartials_19B_{}'.format(eventclass)
# max_duration_error = 15
# savstr = 'request_TIC29786532_19B'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint_v2'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint'
# max_N_transit_per_object = 4 # actually 3, b/c one fails
# savstr = 'all_requests_19B_easyones'
# max_N_transit_per_object = 3
submit_all_requests(savstr, validate_all=validate_all,
submit_all=submit_all,
max_N_transit_per_object=max_N_transit_per_object,
max_duration_error=max_duration_error)
| validate_single_request | identifier_name |
LCOGT_submit_requests.py | """
Validate and submit requests made in LCOGT_make_requests.py
"""
###########
# imports #
###########
import pickle, requests, socket
from parse import search
import os
from glob import glob
import numpy as np, pandas as pd
from astropy.time import Time
import astropy.units as u
HOMEDIR = os.path.expanduser('~')
API_FILE = os.path.join(HOMEDIR, '.lcogt_api_token')
if not os.path.exists(API_FILE):
raise NotImplementedError('where to get API file?')
with open(API_FILE, 'r') as f:
l = f.readlines()
token = str(l[0].replace('\n',''))
from cdips_followup import __path__
DATADIR = os.path.join(os.path.dirname(__path__[0]), 'data')
RESULTSDIR = os.path.join(os.path.dirname(__path__[0]), 'results')
#############
# functions #
#############
def validate_single_request(requestgroup, max_duration_error=15,
raise_error=True):
"""
Submit the RequestGroup through the "validate" API, cf.
https://developers.lco.global/#validate-a-requestgroup
max_duration_error: in minutes, is the maximum allowable difference between
the start & end times of the request, and the _billed duration_ of the
request. By design in the API, the billed duration is always shorter than
the (end-start) time. I allotted 1 hour on either side for scheduling, so a
bit of slack on either is fine.
"""
is_modified = False
response = requests.post(
'https://observe.lco.global/api/requestgroups/validate/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
requestgroup_dict = response.json()
# If you get an error because your incorrectly estimated the number of
# exposures, correct it here.
if len(requestgroup_dict['errors']) >= 1:
if 'non_field_errors' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['non_field_errors']))
print(42*'-')
return np.nan, np.nan
if 'requests' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['requests']))
print(42*'-')
try:
errmsg = (
requestgroup_dict['errors']['requests'][0]['non_field_errors'][0]
)
except:
return np.nan, np.nan
if 'the target is visible for a maximum of' in errmsg:
# get the strings of durations, and decrement the requested number
# of exposures by the right multiple!
sr = search("According{}maximum of {} hours "
"within{}your request {} hours. Consider{}",
errmsg)
max_dur = float(sr[1])
req_dur = float(sr[3])
if req_dur == max_dur:
# {:.1f} formatted strings. genius ._.
req_dur += 0.01
if not req_dur > max_dur:
errmsg = (
'ERR! max dur: {}, req dur: {}'.format(max_dur, req_dur)
)
raise ValueError(errmsg)
diff_dur_sec = (req_dur - max_dur)*60*60
# previously, guessed
#
# expcount = np.floor(
# (endtime-starttime).to(u.hr)
# /
# (exptime*u.second + read_time_per_exposure).to(u.hr)
# )
#
# that produced the difference above...
exptime_sec = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_time']
)
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
read_time_per_exposure = 30*u.second # from Bayliss' completed runs
n_exposures_diff = int(
np.ceil(diff_dur_sec/
(exptime_sec + read_time_per_exposure.value)
)
)
new_expcount = expcount - n_exposures_diff
print(42*'-')
print('WRN!: max durn: {} hr, req durn: {} hr. had {} exposures, decrement to {}'.
format(max_dur, req_dur, expcount, new_expcount))
print(42*'-')
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count'] = new_expcount
is_modified = True
return requestgroup, is_modified
else:
if raise_error:
raise NotImplementedError('got new API error: {}'.format(errmsg))
else:
print('WRN!: Got API error: {}'.format(errmsg))
print(requestgroup)
return np.nan, np.nan
billed_durn = (
requestgroup_dict['request_durations']['requests'][0]['duration']
)
start = Time(requestgroup['requests'][0]['windows'][0]['start'])
end = Time(requestgroup['requests'][0]['windows'][0]['end'])
window_durn = (end - start).value*24*60*60
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
if (window_durn - billed_durn)/60 > max_duration_error:
errmsg = (
'ERROR! got a window of {:.2f} min; but tried to bill {:.2f} min.'.
format(window_durn/60, billed_durn/60)
)
print(42*'-')
print(errmsg)
print(42*'-')
#import IPython; IPython.embed()
#raise AssertionError(errmsg) #FIXME
return np.nan, np.nan
else:
print(42*'-')
print('ACCEPTED! window durn: {:.2f} min, billed {:.2f} min. had {:d} exposures'.
format(window_durn/60, billed_durn/60, expcount))
print(42*'-')
return requestgroup, is_modified
def submit_single_request(requestgroup):
# Submit the fully formed RequestGroup
response = requests.post(
'https://observe.lco.global/api/requestgroups/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
# The API returns the newly submitted requestgroup as json
requestgroup_dict = response.json()
# Print out the url on the portal where we can view the submitted request
print('View the observing request: '
'https://observe.lco.global/requestgroups/{}/'.
format(requestgroup_dict['id']))
def submit_all_requests(savstr, validate_all=1, submit_all=0,
max_N_transit_per_object=3, max_duration_error=15,
semesterstr='20A'):
|
if __name__=="__main__":
validate_all = 1
submit_all = 1
max_N_transit_per_object = 2
max_duration_error = 20
eventclass = 'OIBEO'
savstr = 'request_19B_59859387_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'request_19B_2m_faint_{}'.format(eventclass)
# eventclass = 'OIBE'
# savstr = 'bright_shallow_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'midpartials_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'toppartials_19B_{}'.format(eventclass)
# max_duration_error = 15
# savstr = 'request_TIC29786532_19B'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint_v2'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint'
# max_N_transit_per_object = 4 # actually 3, b/c one fails
# savstr = 'all_requests_19B_easyones'
# max_N_transit_per_object = 3
submit_all_requests(savstr, validate_all=validate_all,
submit_all=submit_all,
max_N_transit_per_object=max_N_transit_per_object,
max_duration_error=max_duration_error)
| """
savstr: used for directory management
validate_all: if true, first validates observation requests to ensure that
they can be submitted
submit_all: actually submits them
max_N_transit_per_object:
max_duration_error: in minutes, maximum acceptable difference between
_desired_ observation window, and the window that the LCOGT system accepts.
"""
if submit_all:
assert validate_all
if not 'ephemupdate' in savstr:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_observability/'.format(semesterstr))
)
else:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_updated_requests/'.format(semesterstr))
)
pkl_savpath = (
os.path.join(resultsdir, '{}.pkl'.format(savstr))
)
mult_savpath = (
os.path.join(resultsdir, '{}_summary.csv'.format(savstr))
)
with open(pkl_savpath, 'rb') as f:
r = pickle.load(f)
df = pd.read_csv(mult_savpath)
if submit_all:
print('ATTEMPTING TO SUBMIT THE FOLLOWING')
df['submit_durn'] = (
df['sched_duration'] *
np.minimum(df['n_requests'], max_N_transit_per_object)
)
print(df)
print(42*'=')
print('\nTotal time: {:.1f} hr\n'.format(np.sum(df['submit_durn'])))
print(42*'=')
#
# sort all the available transit windows for each target by time. submit
# the earliest `max_N_transit_per_object' (e.g., 2 transits).
#
starts = []
for _r in r:
starts.append(
[ Time(__r['requests'][0]['windows'][0]['start']) for __r in _r ]
)
time_sort_inds = []
for start in starts:
time_sort_inds.append(
np.argsort(start)
)
#
# iterate over available requests for each target that met the magnitude
# and depth cuts
#
for _r, ind in zip(r, time_sort_inds):
_requests_sorted = np.array(_r)[ind]
_requests_to_submit = _requests_sorted[:max_N_transit_per_object]
for requestgroup in _requests_to_submit:
if validate_all:
if not submit_all:
print(requestgroup)
requestgroup, is_modified = (
validate_single_request(
requestgroup, max_duration_error=max_duration_error
)
)
n_iter = 0
if is_modified and np.isfinite(is_modified):
while is_modified:
if n_iter >= 10:
raise AssertionError('too many iterations')
requestgroup, is_modified = (
validate_single_request(
requestgroup,
max_duration_error=max_duration_error
)
)
if not isinstance(requestgroup, dict):
if not np.isfinite(requestgroup):
break
n_iter += 1
if submit_all:
if isinstance(requestgroup, dict):
print('SUBMITTING...')
print(requestgroup)
submit_single_request(requestgroup)
else:
print('vvv DID NOT SUBMIT B/C FAILED TO VALIDATE vvv')
print(requestgroup)
print('^^^ DID NOT SUBMIT B/C FAILED TO VALIDATE ^^^') | identifier_body |
base_plugin.go | package transformation
import (
envoyapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoyroute "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
envoyhttp "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
"fmt"
"regexp"
"strings"
"github.com/gogo/protobuf/types"
"github.com/mitchellh/hashstructure"
"github.com/pkg/errors"
"github.com/envoyproxy/go-control-plane/pkg/util"
"github.com/solo-io/gloo/pkg/api/types/v1"
"github.com/solo-io/gloo/pkg/coreplugins/common"
"github.com/solo-io/gloo/pkg/log"
"github.com/solo-io/gloo/pkg/plugins"
)
//go:generate protoc -I=./envoy/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=. envoy/transformation_filter.proto
//go:generate protoc -I=./ -I=${GOPATH}/src/github.com/gogo/protobuf/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:${GOPATH}/src spec.proto
const (
filterName = "io.solo.transformation"
metadataRequestKey = "request-transformation"
metadataResponseKey = "response-transformation"
ServiceTypeTransformation = "HTTP-Functions"
pluginStage = plugins.PostInAuth
)
type GetTransformationFunction func(destination *v1.Destination_Function) (*TransformationTemplate, error)
type Plugin interface {
ActivateFilterForCluster(out *envoyapi.Cluster)
AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error
AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error
GetTransformationFilter() *plugins.StagedHttpFilter
}
func NewTransformationPlugin() Plugin {
return &transformationPlugin{
cachedTransformations: make(map[string]*Transformation),
}
}
type transformationPlugin struct {
cachedTransformations map[string]*Transformation
}
func (p *transformationPlugin) ActivateFilterForCluster(out *envoyapi.Cluster) {
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
common.InitFilterMetadata(filterName, out.Metadata)
out.Metadata.FilterMetadata[filterName] = &types.Struct{
Fields: make(map[string]*types.Value),
}
}
func (p *transformationPlugin) AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error {
var extractors map[string]*Extraction
// if no parameters specified, the only extraction will be a json body
if in.Extensions != nil {
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
extractors, err = createRequestExtractors(extension.Parameters)
if err != nil {
return err
}
}
// calculate the templates for all these transformations
if err := p.setTransformationsForRoute(getTemplate, in, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func createRequestExtractors(params *Parameters) (map[string]*Extraction, error) {
extractors := make(map[string]*Extraction)
if params == nil {
return extractors, nil
}
// special http2 headers, get the whole thing for free
// as a convenience to the user
// TODO: add more
for _, header := range []string{
"path",
"method",
} {
addHeaderExtractorFromParam(":"+header, "{"+header+"}", extractors)
}
// headers we support submatching on
// custom as well as the path and authority/host header
if params.Path != nil {
if err := addHeaderExtractorFromParam(":path", params.Path.Value, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
for headerName, headerValue := range params.Headers {
if err := addHeaderExtractorFromParam(headerName, headerValue, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
return extractors, nil
}
// TODO: clean up the response transformation
// params should live on the source (upstream/function)
func (p *transformationPlugin) AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error {
if in.Extensions == nil {
return nil
}
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
if extension.ResponseTransformation == nil {
return nil
}
extractors := make(map[string]*Extraction)
if extension.ResponseParams != nil {
for headerName, headerValue := range extension.ResponseParams.Headers {
addHeaderExtractorFromParam(headerName, headerValue, extractors)
}
}
// calculate the templates for all these transformations
if err := p.setResponseTransformationForRoute(*extension.ResponseTransformation, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func addHeaderExtractorFromParam(header, parameter string, extractors map[string]*Extraction) error {
if parameter == "" {
return nil
}
// remember that the order of the param names correlates with their order in the regex
paramNames, regexMatcher := getNamesAndRegexFromParamString(parameter)
log.Debugf("transformation pluginN: extraction for header %v: parameters: %v regex matcher: %v", header, paramNames, regexMatcher)
// if no regex, this is a "default variable" that the user gets for free
if len(paramNames) == 0 {
// extract everything
// TODO(yuval): create a special extractor that doesn't use regex when we just want the whole thing
extract := &Extraction{
Header: header,
Regex: "(.*)",
Subgroup: uint32(1),
}
extractors[strings.TrimPrefix(header, ":")] = extract
}
// count the number of open braces,
// if they are not equal to the # of counted params,
// the user gave us bad variable names or unterminated braces and we should error
expectedParameterCount := strings.Count(parameter, "{")
if len(paramNames) != expectedParameterCount {
return errors.Errorf("%v is not valid syntax. {} braces must be closed and variable names must satisfy regex "+
`([\-._[:alnum:]]+)`, parameter)
}
// otherwise it's regex, and we need to create an extraction for each variable name they defined
for i, name := range paramNames {
extract := &Extraction{
Header: header,
Regex: regexMatcher,
Subgroup: uint32(i + 1),
}
extractors[name] = extract
}
return nil
}
func getNamesAndRegexFromParamString(paramString string) ([]string, string) {
// escape regex
// TODO: make sure all envoy regex is being escaped here
rxp := regexp.MustCompile(`\{([\.\-_[:word:]]+)\}`)
parameterNames := rxp.FindAllString(paramString, -1)
for i, name := range parameterNames {
parameterNames[i] = strings.TrimSuffix(strings.TrimPrefix(name, "{"), "}")
}
return parameterNames, buildRegexString(rxp, paramString)
}
func buildRegexString(rxp *regexp.Regexp, paramString string) string {
var regexString string
var prevEnd int
for _, startStop := range rxp.FindAllStringIndex(paramString, -1) {
start := startStop[0]
end := startStop[1]
subStr := regexp.QuoteMeta(paramString[prevEnd:start]) + `([\-._[:alnum:]]+)`
regexString += subStr
prevEnd = end
}
return regexString + regexp.QuoteMeta(paramString[prevEnd:])
}
// sets all transformations a route may need
// if single destination, just one transformation
// if multi destination, one transformation for each functional
// that specifies a transformation spec
func (p *transformationPlugin) | (getTemplate GetTransformationFunction, in *v1.Route, extractors map[string]*Extraction, out *envoyroute.Route) error {
switch {
case in.MultipleDestinations != nil:
for _, dest := range in.MultipleDestinations {
err := p.setTransformationForRoute(getTemplate, dest.Destination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
case in.SingleDestination != nil:
err := p.setTransformationForRoute(getTemplate, in.SingleDestination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
return nil
}
func (p *transformationPlugin) setTransformationForRoute(getTemplateForDestination GetTransformationFunction, dest *v1.Destination, extractors map[string]*Extraction, out *envoyroute.Route) error {
fnDestination, ok := dest.DestinationType.(*v1.Destination_Function)
if !ok {
// not a functional route, nothing to do
return nil
}
template, err := getTemplateForDestination(fnDestination)
if err != nil {
return errors.Wrap(err, "getting transformation for function")
}
// no transformations for this destination
if template == nil {
return nil
}
template.Extractors = extractors
t := Transformation{
TransformationType: &Transformation_TransformationTemplate{
TransformationTemplate: template,
},
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return err
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataRequestKey, out.Metadata)
if filterMetadata.Kind == nil {
filterMetadata.Kind = &types.Value_StructValue{}
}
if _, ok := filterMetadata.Kind.(*types.Value_StructValue); !ok {
return errors.Errorf("needed filter metadta to be kind *types.Value_StructValue, but was: %v", filterMetadata.Kind)
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue = &types.Struct{}
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields = make(map[string]*types.Value)
}
upstreamName := fnDestination.Function.UpstreamName
functionName := fnDestination.Function.FunctionName
fields := filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields
if fields[upstreamName] == nil {
var funcVal types.Value
funcVal.Kind = &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
}
fields[upstreamName] = &funcVal
}
funcFields := fields[upstreamName].Kind.(*types.Value_StructValue).StructValue.Fields
if funcFields[functionName] == nil {
funcFields[functionName] = &types.Value{
Kind: &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
},
}
}
funcFields[functionName].Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) setResponseTransformationForRoute(template TransformationSpec, extractors map[string]*Extraction, out *envoyroute.Route) error {
// create templates
// right now it's just a no-op, user writes inja directly
headerTemplates := make(map[string]*InjaTemplate)
for k, v := range template.Headers {
headerTemplates[k] = &InjaTemplate{Text: v}
}
tt := &Transformation_TransformationTemplate{
TransformationTemplate: &TransformationTemplate{
Extractors: extractors,
Headers: headerTemplates,
},
}
if template.Body != nil {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Body{
Body: &InjaTemplate{
Text: template.Body.Value,
},
}
} else {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Passthrough{
Passthrough: &Passthrough{},
}
}
t := Transformation{
TransformationType: tt,
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return errors.Wrap(err, "generating hash")
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataResponseKey, out.Metadata)
filterMetadata.Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) GetTransformationFilter() *plugins.StagedHttpFilter {
if len(p.cachedTransformations) == 0 {
return nil
}
defer func() {
// clear cache
p.cachedTransformations = make(map[string]*Transformation)
}()
filterConfig, err := util.MessageToStruct(&Transformations{
Transformations: p.cachedTransformations,
})
if err != nil {
log.Warnf("error in transformation plugin: %v", err)
return nil
}
return &plugins.StagedHttpFilter{
HttpFilter: &envoyhttp.HttpFilter{
Name: filterName,
Config: filterConfig,
}, Stage: pluginStage,
}
}
| setTransformationsForRoute | identifier_name |
base_plugin.go | package transformation
import (
envoyapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoyroute "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
envoyhttp "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
"fmt"
"regexp"
"strings"
"github.com/gogo/protobuf/types"
"github.com/mitchellh/hashstructure"
"github.com/pkg/errors"
"github.com/envoyproxy/go-control-plane/pkg/util"
"github.com/solo-io/gloo/pkg/api/types/v1"
"github.com/solo-io/gloo/pkg/coreplugins/common"
"github.com/solo-io/gloo/pkg/log"
"github.com/solo-io/gloo/pkg/plugins"
)
//go:generate protoc -I=./envoy/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=. envoy/transformation_filter.proto
//go:generate protoc -I=./ -I=${GOPATH}/src/github.com/gogo/protobuf/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:${GOPATH}/src spec.proto
const (
filterName = "io.solo.transformation"
metadataRequestKey = "request-transformation"
metadataResponseKey = "response-transformation"
ServiceTypeTransformation = "HTTP-Functions"
pluginStage = plugins.PostInAuth
)
type GetTransformationFunction func(destination *v1.Destination_Function) (*TransformationTemplate, error)
type Plugin interface {
ActivateFilterForCluster(out *envoyapi.Cluster)
AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error
AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error
GetTransformationFilter() *plugins.StagedHttpFilter
}
func NewTransformationPlugin() Plugin {
return &transformationPlugin{
cachedTransformations: make(map[string]*Transformation),
}
}
type transformationPlugin struct {
cachedTransformations map[string]*Transformation
}
func (p *transformationPlugin) ActivateFilterForCluster(out *envoyapi.Cluster) {
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
common.InitFilterMetadata(filterName, out.Metadata)
out.Metadata.FilterMetadata[filterName] = &types.Struct{
Fields: make(map[string]*types.Value),
}
}
func (p *transformationPlugin) AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error {
var extractors map[string]*Extraction
// if no parameters specified, the only extraction will be a json body
if in.Extensions != nil {
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
extractors, err = createRequestExtractors(extension.Parameters)
if err != nil {
return err
}
}
// calculate the templates for all these transformations
if err := p.setTransformationsForRoute(getTemplate, in, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func createRequestExtractors(params *Parameters) (map[string]*Extraction, error) {
extractors := make(map[string]*Extraction)
if params == nil {
return extractors, nil
}
// special http2 headers, get the whole thing for free
// as a convenience to the user
// TODO: add more
for _, header := range []string{
"path",
"method",
} {
addHeaderExtractorFromParam(":"+header, "{"+header+"}", extractors)
}
// headers we support submatching on
// custom as well as the path and authority/host header
if params.Path != nil {
if err := addHeaderExtractorFromParam(":path", params.Path.Value, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
for headerName, headerValue := range params.Headers {
if err := addHeaderExtractorFromParam(headerName, headerValue, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
return extractors, nil
}
// TODO: clean up the response transformation
// params should live on the source (upstream/function)
func (p *transformationPlugin) AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error {
if in.Extensions == nil {
return nil
}
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
if extension.ResponseTransformation == nil {
return nil
}
extractors := make(map[string]*Extraction)
if extension.ResponseParams != nil {
for headerName, headerValue := range extension.ResponseParams.Headers {
addHeaderExtractorFromParam(headerName, headerValue, extractors)
}
}
// calculate the templates for all these transformations
if err := p.setResponseTransformationForRoute(*extension.ResponseTransformation, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func addHeaderExtractorFromParam(header, parameter string, extractors map[string]*Extraction) error {
if parameter == "" {
return nil
}
// remember that the order of the param names correlates with their order in the regex
paramNames, regexMatcher := getNamesAndRegexFromParamString(parameter)
log.Debugf("transformation pluginN: extraction for header %v: parameters: %v regex matcher: %v", header, paramNames, regexMatcher)
// if no regex, this is a "default variable" that the user gets for free
if len(paramNames) == 0 {
// extract everything
// TODO(yuval): create a special extractor that doesn't use regex when we just want the whole thing
extract := &Extraction{
Header: header,
Regex: "(.*)",
Subgroup: uint32(1),
}
extractors[strings.TrimPrefix(header, ":")] = extract
}
// count the number of open braces,
// if they are not equal to the # of counted params,
// the user gave us bad variable names or unterminated braces and we should error
expectedParameterCount := strings.Count(parameter, "{")
if len(paramNames) != expectedParameterCount {
return errors.Errorf("%v is not valid syntax. {} braces must be closed and variable names must satisfy regex "+
`([\-._[:alnum:]]+)`, parameter)
}
// otherwise it's regex, and we need to create an extraction for each variable name they defined
for i, name := range paramNames {
extract := &Extraction{
Header: header,
Regex: regexMatcher,
Subgroup: uint32(i + 1),
}
extractors[name] = extract
}
return nil
}
func getNamesAndRegexFromParamString(paramString string) ([]string, string) {
// escape regex
// TODO: make sure all envoy regex is being escaped here
rxp := regexp.MustCompile(`\{([\.\-_[:word:]]+)\}`)
parameterNames := rxp.FindAllString(paramString, -1)
for i, name := range parameterNames {
parameterNames[i] = strings.TrimSuffix(strings.TrimPrefix(name, "{"), "}")
}
return parameterNames, buildRegexString(rxp, paramString)
}
func buildRegexString(rxp *regexp.Regexp, paramString string) string {
var regexString string
var prevEnd int
for _, startStop := range rxp.FindAllStringIndex(paramString, -1) {
start := startStop[0]
end := startStop[1]
subStr := regexp.QuoteMeta(paramString[prevEnd:start]) + `([\-._[:alnum:]]+)`
regexString += subStr
prevEnd = end
}
return regexString + regexp.QuoteMeta(paramString[prevEnd:])
}
// sets all transformations a route may need
// if single destination, just one transformation
// if multi destination, one transformation for each functional
// that specifies a transformation spec
func (p *transformationPlugin) setTransformationsForRoute(getTemplate GetTransformationFunction, in *v1.Route, extractors map[string]*Extraction, out *envoyroute.Route) error {
switch {
case in.MultipleDestinations != nil:
for _, dest := range in.MultipleDestinations {
err := p.setTransformationForRoute(getTemplate, dest.Destination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
case in.SingleDestination != nil:
err := p.setTransformationForRoute(getTemplate, in.SingleDestination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
return nil
}
func (p *transformationPlugin) setTransformationForRoute(getTemplateForDestination GetTransformationFunction, dest *v1.Destination, extractors map[string]*Extraction, out *envoyroute.Route) error {
fnDestination, ok := dest.DestinationType.(*v1.Destination_Function)
if !ok {
// not a functional route, nothing to do
return nil
}
template, err := getTemplateForDestination(fnDestination)
if err != nil {
return errors.Wrap(err, "getting transformation for function")
}
// no transformations for this destination
if template == nil {
return nil
}
template.Extractors = extractors
t := Transformation{
TransformationType: &Transformation_TransformationTemplate{
TransformationTemplate: template,
},
}
intHash, err := hashstructure.Hash(t, nil) | if err != nil {
return err
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataRequestKey, out.Metadata)
if filterMetadata.Kind == nil {
filterMetadata.Kind = &types.Value_StructValue{}
}
if _, ok := filterMetadata.Kind.(*types.Value_StructValue); !ok {
return errors.Errorf("needed filter metadta to be kind *types.Value_StructValue, but was: %v", filterMetadata.Kind)
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue = &types.Struct{}
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields = make(map[string]*types.Value)
}
upstreamName := fnDestination.Function.UpstreamName
functionName := fnDestination.Function.FunctionName
fields := filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields
if fields[upstreamName] == nil {
var funcVal types.Value
funcVal.Kind = &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
}
fields[upstreamName] = &funcVal
}
funcFields := fields[upstreamName].Kind.(*types.Value_StructValue).StructValue.Fields
if funcFields[functionName] == nil {
funcFields[functionName] = &types.Value{
Kind: &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
},
}
}
funcFields[functionName].Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) setResponseTransformationForRoute(template TransformationSpec, extractors map[string]*Extraction, out *envoyroute.Route) error {
// create templates
// right now it's just a no-op, user writes inja directly
headerTemplates := make(map[string]*InjaTemplate)
for k, v := range template.Headers {
headerTemplates[k] = &InjaTemplate{Text: v}
}
tt := &Transformation_TransformationTemplate{
TransformationTemplate: &TransformationTemplate{
Extractors: extractors,
Headers: headerTemplates,
},
}
if template.Body != nil {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Body{
Body: &InjaTemplate{
Text: template.Body.Value,
},
}
} else {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Passthrough{
Passthrough: &Passthrough{},
}
}
t := Transformation{
TransformationType: tt,
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return errors.Wrap(err, "generating hash")
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataResponseKey, out.Metadata)
filterMetadata.Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) GetTransformationFilter() *plugins.StagedHttpFilter {
if len(p.cachedTransformations) == 0 {
return nil
}
defer func() {
// clear cache
p.cachedTransformations = make(map[string]*Transformation)
}()
filterConfig, err := util.MessageToStruct(&Transformations{
Transformations: p.cachedTransformations,
})
if err != nil {
log.Warnf("error in transformation plugin: %v", err)
return nil
}
return &plugins.StagedHttpFilter{
HttpFilter: &envoyhttp.HttpFilter{
Name: filterName,
Config: filterConfig,
}, Stage: pluginStage,
}
} | random_line_split | |
base_plugin.go | package transformation
import (
envoyapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoyroute "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
envoyhttp "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
"fmt"
"regexp"
"strings"
"github.com/gogo/protobuf/types"
"github.com/mitchellh/hashstructure"
"github.com/pkg/errors"
"github.com/envoyproxy/go-control-plane/pkg/util"
"github.com/solo-io/gloo/pkg/api/types/v1"
"github.com/solo-io/gloo/pkg/coreplugins/common"
"github.com/solo-io/gloo/pkg/log"
"github.com/solo-io/gloo/pkg/plugins"
)
//go:generate protoc -I=./envoy/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=. envoy/transformation_filter.proto
//go:generate protoc -I=./ -I=${GOPATH}/src/github.com/gogo/protobuf/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:${GOPATH}/src spec.proto
const (
filterName = "io.solo.transformation"
metadataRequestKey = "request-transformation"
metadataResponseKey = "response-transformation"
ServiceTypeTransformation = "HTTP-Functions"
pluginStage = plugins.PostInAuth
)
type GetTransformationFunction func(destination *v1.Destination_Function) (*TransformationTemplate, error)
type Plugin interface {
ActivateFilterForCluster(out *envoyapi.Cluster)
AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error
AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error
GetTransformationFilter() *plugins.StagedHttpFilter
}
func NewTransformationPlugin() Plugin {
return &transformationPlugin{
cachedTransformations: make(map[string]*Transformation),
}
}
type transformationPlugin struct {
cachedTransformations map[string]*Transformation
}
func (p *transformationPlugin) ActivateFilterForCluster(out *envoyapi.Cluster) {
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
common.InitFilterMetadata(filterName, out.Metadata)
out.Metadata.FilterMetadata[filterName] = &types.Struct{
Fields: make(map[string]*types.Value),
}
}
func (p *transformationPlugin) AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error {
var extractors map[string]*Extraction
// if no parameters specified, the only extraction will be a json body
if in.Extensions != nil {
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
extractors, err = createRequestExtractors(extension.Parameters)
if err != nil {
return err
}
}
// calculate the templates for all these transformations
if err := p.setTransformationsForRoute(getTemplate, in, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func createRequestExtractors(params *Parameters) (map[string]*Extraction, error) {
extractors := make(map[string]*Extraction)
if params == nil {
return extractors, nil
}
// special http2 headers, get the whole thing for free
// as a convenience to the user
// TODO: add more
for _, header := range []string{
"path",
"method",
} {
addHeaderExtractorFromParam(":"+header, "{"+header+"}", extractors)
}
// headers we support submatching on
// custom as well as the path and authority/host header
if params.Path != nil {
if err := addHeaderExtractorFromParam(":path", params.Path.Value, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
for headerName, headerValue := range params.Headers {
if err := addHeaderExtractorFromParam(headerName, headerValue, extractors); err != nil |
}
return extractors, nil
}
// TODO: clean up the response transformation
// params should live on the source (upstream/function)
func (p *transformationPlugin) AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error {
if in.Extensions == nil {
return nil
}
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
if extension.ResponseTransformation == nil {
return nil
}
extractors := make(map[string]*Extraction)
if extension.ResponseParams != nil {
for headerName, headerValue := range extension.ResponseParams.Headers {
addHeaderExtractorFromParam(headerName, headerValue, extractors)
}
}
// calculate the templates for all these transformations
if err := p.setResponseTransformationForRoute(*extension.ResponseTransformation, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func addHeaderExtractorFromParam(header, parameter string, extractors map[string]*Extraction) error {
if parameter == "" {
return nil
}
// remember that the order of the param names correlates with their order in the regex
paramNames, regexMatcher := getNamesAndRegexFromParamString(parameter)
log.Debugf("transformation pluginN: extraction for header %v: parameters: %v regex matcher: %v", header, paramNames, regexMatcher)
// if no regex, this is a "default variable" that the user gets for free
if len(paramNames) == 0 {
// extract everything
// TODO(yuval): create a special extractor that doesn't use regex when we just want the whole thing
extract := &Extraction{
Header: header,
Regex: "(.*)",
Subgroup: uint32(1),
}
extractors[strings.TrimPrefix(header, ":")] = extract
}
// count the number of open braces,
// if they are not equal to the # of counted params,
// the user gave us bad variable names or unterminated braces and we should error
expectedParameterCount := strings.Count(parameter, "{")
if len(paramNames) != expectedParameterCount {
return errors.Errorf("%v is not valid syntax. {} braces must be closed and variable names must satisfy regex "+
`([\-._[:alnum:]]+)`, parameter)
}
// otherwise it's regex, and we need to create an extraction for each variable name they defined
for i, name := range paramNames {
extract := &Extraction{
Header: header,
Regex: regexMatcher,
Subgroup: uint32(i + 1),
}
extractors[name] = extract
}
return nil
}
func getNamesAndRegexFromParamString(paramString string) ([]string, string) {
// escape regex
// TODO: make sure all envoy regex is being escaped here
rxp := regexp.MustCompile(`\{([\.\-_[:word:]]+)\}`)
parameterNames := rxp.FindAllString(paramString, -1)
for i, name := range parameterNames {
parameterNames[i] = strings.TrimSuffix(strings.TrimPrefix(name, "{"), "}")
}
return parameterNames, buildRegexString(rxp, paramString)
}
func buildRegexString(rxp *regexp.Regexp, paramString string) string {
var regexString string
var prevEnd int
for _, startStop := range rxp.FindAllStringIndex(paramString, -1) {
start := startStop[0]
end := startStop[1]
subStr := regexp.QuoteMeta(paramString[prevEnd:start]) + `([\-._[:alnum:]]+)`
regexString += subStr
prevEnd = end
}
return regexString + regexp.QuoteMeta(paramString[prevEnd:])
}
// sets all transformations a route may need
// if single destination, just one transformation
// if multi destination, one transformation for each functional
// that specifies a transformation spec
func (p *transformationPlugin) setTransformationsForRoute(getTemplate GetTransformationFunction, in *v1.Route, extractors map[string]*Extraction, out *envoyroute.Route) error {
switch {
case in.MultipleDestinations != nil:
for _, dest := range in.MultipleDestinations {
err := p.setTransformationForRoute(getTemplate, dest.Destination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
case in.SingleDestination != nil:
err := p.setTransformationForRoute(getTemplate, in.SingleDestination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
return nil
}
func (p *transformationPlugin) setTransformationForRoute(getTemplateForDestination GetTransformationFunction, dest *v1.Destination, extractors map[string]*Extraction, out *envoyroute.Route) error {
fnDestination, ok := dest.DestinationType.(*v1.Destination_Function)
if !ok {
// not a functional route, nothing to do
return nil
}
template, err := getTemplateForDestination(fnDestination)
if err != nil {
return errors.Wrap(err, "getting transformation for function")
}
// no transformations for this destination
if template == nil {
return nil
}
template.Extractors = extractors
t := Transformation{
TransformationType: &Transformation_TransformationTemplate{
TransformationTemplate: template,
},
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return err
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataRequestKey, out.Metadata)
if filterMetadata.Kind == nil {
filterMetadata.Kind = &types.Value_StructValue{}
}
if _, ok := filterMetadata.Kind.(*types.Value_StructValue); !ok {
return errors.Errorf("needed filter metadta to be kind *types.Value_StructValue, but was: %v", filterMetadata.Kind)
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue = &types.Struct{}
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields = make(map[string]*types.Value)
}
upstreamName := fnDestination.Function.UpstreamName
functionName := fnDestination.Function.FunctionName
fields := filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields
if fields[upstreamName] == nil {
var funcVal types.Value
funcVal.Kind = &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
}
fields[upstreamName] = &funcVal
}
funcFields := fields[upstreamName].Kind.(*types.Value_StructValue).StructValue.Fields
if funcFields[functionName] == nil {
funcFields[functionName] = &types.Value{
Kind: &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
},
}
}
funcFields[functionName].Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) setResponseTransformationForRoute(template TransformationSpec, extractors map[string]*Extraction, out *envoyroute.Route) error {
// create templates
// right now it's just a no-op, user writes inja directly
headerTemplates := make(map[string]*InjaTemplate)
for k, v := range template.Headers {
headerTemplates[k] = &InjaTemplate{Text: v}
}
tt := &Transformation_TransformationTemplate{
TransformationTemplate: &TransformationTemplate{
Extractors: extractors,
Headers: headerTemplates,
},
}
if template.Body != nil {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Body{
Body: &InjaTemplate{
Text: template.Body.Value,
},
}
} else {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Passthrough{
Passthrough: &Passthrough{},
}
}
t := Transformation{
TransformationType: tt,
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return errors.Wrap(err, "generating hash")
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataResponseKey, out.Metadata)
filterMetadata.Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) GetTransformationFilter() *plugins.StagedHttpFilter {
if len(p.cachedTransformations) == 0 {
return nil
}
defer func() {
// clear cache
p.cachedTransformations = make(map[string]*Transformation)
}()
filterConfig, err := util.MessageToStruct(&Transformations{
Transformations: p.cachedTransformations,
})
if err != nil {
log.Warnf("error in transformation plugin: %v", err)
return nil
}
return &plugins.StagedHttpFilter{
HttpFilter: &envoyhttp.HttpFilter{
Name: filterName,
Config: filterConfig,
}, Stage: pluginStage,
}
}
| {
return nil, errors.Wrap(err, "error processing parameter")
} | conditional_block |
base_plugin.go | package transformation
import (
envoyapi "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoyroute "github.com/envoyproxy/go-control-plane/envoy/api/v2/route"
envoyhttp "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2"
"fmt"
"regexp"
"strings"
"github.com/gogo/protobuf/types"
"github.com/mitchellh/hashstructure"
"github.com/pkg/errors"
"github.com/envoyproxy/go-control-plane/pkg/util"
"github.com/solo-io/gloo/pkg/api/types/v1"
"github.com/solo-io/gloo/pkg/coreplugins/common"
"github.com/solo-io/gloo/pkg/log"
"github.com/solo-io/gloo/pkg/plugins"
)
//go:generate protoc -I=./envoy/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=. envoy/transformation_filter.proto
//go:generate protoc -I=./ -I=${GOPATH}/src/github.com/gogo/protobuf/ -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf/ --gogo_out=Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:${GOPATH}/src spec.proto
const (
filterName = "io.solo.transformation"
metadataRequestKey = "request-transformation"
metadataResponseKey = "response-transformation"
ServiceTypeTransformation = "HTTP-Functions"
pluginStage = plugins.PostInAuth
)
type GetTransformationFunction func(destination *v1.Destination_Function) (*TransformationTemplate, error)
type Plugin interface {
ActivateFilterForCluster(out *envoyapi.Cluster)
AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error
AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error
GetTransformationFilter() *plugins.StagedHttpFilter
}
func NewTransformationPlugin() Plugin {
return &transformationPlugin{
cachedTransformations: make(map[string]*Transformation),
}
}
type transformationPlugin struct {
cachedTransformations map[string]*Transformation
}
func (p *transformationPlugin) ActivateFilterForCluster(out *envoyapi.Cluster) {
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
common.InitFilterMetadata(filterName, out.Metadata)
out.Metadata.FilterMetadata[filterName] = &types.Struct{
Fields: make(map[string]*types.Value),
}
}
func (p *transformationPlugin) AddRequestTransformationsToRoute(getTemplate GetTransformationFunction, in *v1.Route, out *envoyroute.Route) error {
var extractors map[string]*Extraction
// if no parameters specified, the only extraction will be a json body
if in.Extensions != nil {
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
extractors, err = createRequestExtractors(extension.Parameters)
if err != nil {
return err
}
}
// calculate the templates for all these transformations
if err := p.setTransformationsForRoute(getTemplate, in, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func createRequestExtractors(params *Parameters) (map[string]*Extraction, error) {
extractors := make(map[string]*Extraction)
if params == nil {
return extractors, nil
}
// special http2 headers, get the whole thing for free
// as a convenience to the user
// TODO: add more
for _, header := range []string{
"path",
"method",
} {
addHeaderExtractorFromParam(":"+header, "{"+header+"}", extractors)
}
// headers we support submatching on
// custom as well as the path and authority/host header
if params.Path != nil {
if err := addHeaderExtractorFromParam(":path", params.Path.Value, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
for headerName, headerValue := range params.Headers {
if err := addHeaderExtractorFromParam(headerName, headerValue, extractors); err != nil {
return nil, errors.Wrap(err, "error processing parameter")
}
}
return extractors, nil
}
// TODO: clean up the response transformation
// params should live on the source (upstream/function)
func (p *transformationPlugin) AddResponseTransformationsToRoute(in *v1.Route, out *envoyroute.Route) error {
if in.Extensions == nil {
return nil
}
extension, err := DecodeRouteExtension(in.Extensions)
if err != nil {
return err
}
if extension.ResponseTransformation == nil {
return nil
}
extractors := make(map[string]*Extraction)
if extension.ResponseParams != nil {
for headerName, headerValue := range extension.ResponseParams.Headers {
addHeaderExtractorFromParam(headerName, headerValue, extractors)
}
}
// calculate the templates for all these transformations
if err := p.setResponseTransformationForRoute(*extension.ResponseTransformation, extractors, out); err != nil {
return errors.Wrap(err, "resolving request transformations for route")
}
return nil
}
func addHeaderExtractorFromParam(header, parameter string, extractors map[string]*Extraction) error {
if parameter == "" {
return nil
}
// remember that the order of the param names correlates with their order in the regex
paramNames, regexMatcher := getNamesAndRegexFromParamString(parameter)
log.Debugf("transformation pluginN: extraction for header %v: parameters: %v regex matcher: %v", header, paramNames, regexMatcher)
// if no regex, this is a "default variable" that the user gets for free
if len(paramNames) == 0 {
// extract everything
// TODO(yuval): create a special extractor that doesn't use regex when we just want the whole thing
extract := &Extraction{
Header: header,
Regex: "(.*)",
Subgroup: uint32(1),
}
extractors[strings.TrimPrefix(header, ":")] = extract
}
// count the number of open braces,
// if they are not equal to the # of counted params,
// the user gave us bad variable names or unterminated braces and we should error
expectedParameterCount := strings.Count(parameter, "{")
if len(paramNames) != expectedParameterCount {
return errors.Errorf("%v is not valid syntax. {} braces must be closed and variable names must satisfy regex "+
`([\-._[:alnum:]]+)`, parameter)
}
// otherwise it's regex, and we need to create an extraction for each variable name they defined
for i, name := range paramNames {
extract := &Extraction{
Header: header,
Regex: regexMatcher,
Subgroup: uint32(i + 1),
}
extractors[name] = extract
}
return nil
}
func getNamesAndRegexFromParamString(paramString string) ([]string, string) {
// escape regex
// TODO: make sure all envoy regex is being escaped here
rxp := regexp.MustCompile(`\{([\.\-_[:word:]]+)\}`)
parameterNames := rxp.FindAllString(paramString, -1)
for i, name := range parameterNames {
parameterNames[i] = strings.TrimSuffix(strings.TrimPrefix(name, "{"), "}")
}
return parameterNames, buildRegexString(rxp, paramString)
}
func buildRegexString(rxp *regexp.Regexp, paramString string) string |
// sets all transformations a route may need
// if single destination, just one transformation
// if multi destination, one transformation for each functional
// that specifies a transformation spec
func (p *transformationPlugin) setTransformationsForRoute(getTemplate GetTransformationFunction, in *v1.Route, extractors map[string]*Extraction, out *envoyroute.Route) error {
switch {
case in.MultipleDestinations != nil:
for _, dest := range in.MultipleDestinations {
err := p.setTransformationForRoute(getTemplate, dest.Destination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
case in.SingleDestination != nil:
err := p.setTransformationForRoute(getTemplate, in.SingleDestination, extractors, out)
if err != nil {
return errors.Wrap(err, "setting transformation for route")
}
}
return nil
}
func (p *transformationPlugin) setTransformationForRoute(getTemplateForDestination GetTransformationFunction, dest *v1.Destination, extractors map[string]*Extraction, out *envoyroute.Route) error {
fnDestination, ok := dest.DestinationType.(*v1.Destination_Function)
if !ok {
// not a functional route, nothing to do
return nil
}
template, err := getTemplateForDestination(fnDestination)
if err != nil {
return errors.Wrap(err, "getting transformation for function")
}
// no transformations for this destination
if template == nil {
return nil
}
template.Extractors = extractors
t := Transformation{
TransformationType: &Transformation_TransformationTemplate{
TransformationTemplate: template,
},
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return err
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataRequestKey, out.Metadata)
if filterMetadata.Kind == nil {
filterMetadata.Kind = &types.Value_StructValue{}
}
if _, ok := filterMetadata.Kind.(*types.Value_StructValue); !ok {
return errors.Errorf("needed filter metadta to be kind *types.Value_StructValue, but was: %v", filterMetadata.Kind)
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue = &types.Struct{}
}
if filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields == nil {
filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields = make(map[string]*types.Value)
}
upstreamName := fnDestination.Function.UpstreamName
functionName := fnDestination.Function.FunctionName
fields := filterMetadata.Kind.(*types.Value_StructValue).StructValue.Fields
if fields[upstreamName] == nil {
var funcVal types.Value
funcVal.Kind = &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
}
fields[upstreamName] = &funcVal
}
funcFields := fields[upstreamName].Kind.(*types.Value_StructValue).StructValue.Fields
if funcFields[functionName] == nil {
funcFields[functionName] = &types.Value{
Kind: &types.Value_StructValue{
StructValue: &types.Struct{
Fields: make(map[string]*types.Value),
},
},
}
}
funcFields[functionName].Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) setResponseTransformationForRoute(template TransformationSpec, extractors map[string]*Extraction, out *envoyroute.Route) error {
// create templates
// right now it's just a no-op, user writes inja directly
headerTemplates := make(map[string]*InjaTemplate)
for k, v := range template.Headers {
headerTemplates[k] = &InjaTemplate{Text: v}
}
tt := &Transformation_TransformationTemplate{
TransformationTemplate: &TransformationTemplate{
Extractors: extractors,
Headers: headerTemplates,
},
}
if template.Body != nil {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Body{
Body: &InjaTemplate{
Text: template.Body.Value,
},
}
} else {
tt.TransformationTemplate.BodyTransformation = &TransformationTemplate_Passthrough{
Passthrough: &Passthrough{},
}
}
t := Transformation{
TransformationType: tt,
}
intHash, err := hashstructure.Hash(t, nil)
if err != nil {
return errors.Wrap(err, "generating hash")
}
hash := fmt.Sprintf("%v", intHash)
// cache the transformation, the filter config needs to contain all of them
p.cachedTransformations[hash] = &t
// set the filter metadata on the route
if out.Metadata == nil {
out.Metadata = &envoycore.Metadata{}
}
filterMetadata := common.InitFilterMetadataField(filterName, metadataResponseKey, out.Metadata)
filterMetadata.Kind = &types.Value_StringValue{StringValue: hash}
return nil
}
func (p *transformationPlugin) GetTransformationFilter() *plugins.StagedHttpFilter {
if len(p.cachedTransformations) == 0 {
return nil
}
defer func() {
// clear cache
p.cachedTransformations = make(map[string]*Transformation)
}()
filterConfig, err := util.MessageToStruct(&Transformations{
Transformations: p.cachedTransformations,
})
if err != nil {
log.Warnf("error in transformation plugin: %v", err)
return nil
}
return &plugins.StagedHttpFilter{
HttpFilter: &envoyhttp.HttpFilter{
Name: filterName,
Config: filterConfig,
}, Stage: pluginStage,
}
}
| {
var regexString string
var prevEnd int
for _, startStop := range rxp.FindAllStringIndex(paramString, -1) {
start := startStop[0]
end := startStop[1]
subStr := regexp.QuoteMeta(paramString[prevEnd:start]) + `([\-._[:alnum:]]+)`
regexString += subStr
prevEnd = end
}
return regexString + regexp.QuoteMeta(paramString[prevEnd:])
} | identifier_body |
tasks.py | import logging
import os
import random
from datetime import timedelta
from urllib.parse import urlparse
from celery import shared_task
from django.conf import settings
from django.db import transaction
from django.db.models import Count, Q
from django.utils.timezone import now
from requests import HTTPError
from rest_framework_simplejwt.token_blacklist.management.commands import flushexpiredtokens
from bot.models import Weibo
from bot.services.download_service import DownloadService
from bot.services.info_service import AtwikiInfoService, ASDBCopyrightInfoService, ANNArtistInfoService, \
GoogleKGSArtistInfoService, MALCopyrightInfoService, BangumiCopyrightInfoService, GoogleKGSCopyrightInfoService
from bot.services.media_service import MediaService
from bot.services.sakugabooru_service import SakugabooruService
from bot.services.weiboV2_service import WeiboService
from hub.models import Post, Tag, Node
logger = logging.getLogger('bot.tasks')
TIME_LIMIT = settings.TASK_TIME_LIMIT
class TagInfoUpdateTask(object):
def __init__(self, tag, overwrite=False):
assert isinstance(tag, Tag)
self.tag = tag
self.overwrite = overwrite
self.info = dict()
def _save_info_to_tag(self):
for k, v in self.info.items():
if v:
try:
logger.info("Info [{}: {}] is being added "
"to Tag[{}]. Overwrite: {}".format(k,
v,
self.tag.name,
self.overwrite))
self.tag.save_to_detail(k, v, self.overwrite)
except AttributeError:
pass
def _get_and_save_info(self, service, *names, overwrite_keys=(), **kwargs):
logger.info("Tag[{}]: Getting result from {} with names {}".format(self.tag.name, service.__name__, names))
service_instance = service()
info = service_instance.get_info(*names, **kwargs)
for k, v in info.items():
if k in overwrite_keys:
self.info[k] = v
continue
self.info.setdefault(k, v)
def get_values_from_info(self, *keys):
return [self.info.get(key, None) for key in keys if self.info.get(key, None)]
def translate_artist(self):
if self.tag.type != Tag.ARTIST:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(ANNArtistInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(GoogleKGSArtistInfoService,
*names,
overwrite_keys=('description',))
def translate_copyright(self):
if self.tag.type != Tag.COPYRIGHT:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(MALCopyrightInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(BangumiCopyrightInfoService,
*names,
overwrite_keys=("name_ja",))
try:
source = self.tag.post_set.latest('id').source
except Post.DoesNotExist:
source = ''
if len(self.tag.name) > 6 and (len(source) < 10 or not bool(urlparse(source).netloc)):
names = [name] + self.get_values_from_info('name_ja', 'name_zh')
self._get_and_save_info(GoogleKGSCopyrightInfoService,
*names,
overwrite_keys=('description',))
def get_additional_info(self):
if self.tag.type not in (Tag.ARTIST, Tag.COPYRIGHT):
return
ja_names = []
if self.tag.ja_name:
|
ja_names.extend(self.get_values_from_info('name_ja'))
if not ja_names:
ja_names = [self.tag.name.replace("_", " ")]
self._get_and_save_info(AtwikiInfoService, *ja_names)
if self.tag.type == Tag.COPYRIGHT:
self._get_and_save_info(ASDBCopyrightInfoService, *ja_names)
@transaction.atomic
def save(self):
self.tag.refresh_from_db()
self._save_info_to_tag()
return self.tag.save()
def process(self):
try:
self.translate_artist()
self.translate_copyright()
self.get_additional_info()
finally:
return self.save()
def update_tags_info(*tags, update_tag_type=False, overwrite=False):
if update_tag_type:
tags = SakugabooruService().update_tags([tag.name for tag in tags], force_update=True)
for tag in tags:
TagInfoUpdateTask(tag, overwrite).process()
@shared_task(soft_time_limit=TIME_LIMIT)
def update_tags_info_task(*tag_pks, update_tag_type=False, overwrite=False):
tags = Tag.objects.filter(pk__in=tag_pks)
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
@shared_task
def update_all_tags_info(update_tag_type=True, overwrite=True):
tags = Tag.objects.filter(type__in=[Tag.ARTIST, Tag.COPYRIGHT])
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
def update_posts(*posts):
booru = SakugabooruService()
try:
logger.info("Updating posts {} from sakugabooru.".format([post.id for post in posts]))
booru.update_posts(*[post.id for post in posts])
finally:
update_tags_info(*booru.created_tags)
@shared_task(soft_time_limit=TIME_LIMIT)
def update_posts_task(*post_pks):
update_posts(*Post.objects.filter(pk__in=post_pks))
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_update_posts():
booru = SakugabooruService()
try:
logger.info("Updating posts from sakugabooru.")
last_post = Post.objects.latest('id')
page = 1
posts = booru.update_posts_by_page(page=page)
while posts[-1].id > last_post.id + 1 or page < 3:
page += 1
posts = booru.update_posts_by_page(page=page)
except Post.DoesNotExist:
booru.update_posts_by_page()
except:
logger.exception("Auto_update_posts failed.")
finally:
update_tags_info(*booru.created_tags)
def post_weibo(*posts):
weibo_service = WeiboService()
for post in posts:
try:
logger.info("Post[{}]: Downloading media.".format(post.id))
media_path = DownloadService().download_post_media(post)
logger.info("Post[{}]: Transcoding media.".format(post.id))
media_path = MediaService().transcoding_media(post, media_path)
logger.info("Post[{}]: Sending weibo.".format(post.id))
post.posted = True
post.weibo = weibo_service.post_weibo(post, media_path)
post.save()
logger.info("Post[{}]: Posting Weibo Success. weibo_id[{}]".format(post.id, post.weibo.weibo_id))
except HTTPError as e:
if '404' in str(e):
post.posted = True
post.save()
continue
except RuntimeError as e:
if '[SKIP]' in str(e):
post.posted = True
post.save()
continue
break
except:
post.posted = True
post.save()
logger.exception("Something went wrong while posting Post[{}].".format(post.id))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def post_weibo_task(*post_pks):
post_weibo(*Post.objects.filter(pk__in=post_pks).order_by('id'))
def check_status():
last_weibo = Weibo.objects.last()
fails = Post.objects.filter(id__gt=last_weibo.post.id,
update_time__gt=last_weibo.create_time - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=True,
weibo__isnull=True)
if len(fails) >= 5:
return False
return True
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_post_weibo():
if not check_status():
logger.error("More than 5 fails. Posting has Stopped!")
return
try:
last_posted_post = Post.objects.filter(posted=True).latest('id')
posts = Post.objects.filter(
created_at__gt=last_posted_post.created_at - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=False,
is_shown=True).exclude(
Q(uploader__in_blacklist=True) | Q(uploader__in_whitelist=False, is_pending=True)).order_by('id')
except Post.DoesNotExist:
posts = list(reversed(Post.objects.filter(posted=False,
is_shown=True).order_by('-id')[:20]))
if not posts:
logger.info("There's no need to post weibo.")
post_weibo(*posts[:random.randint(1, 2)])
@shared_task()
def auto_post_weibo_with_random_delay():
if random.random() < 0.8:
waiting_sec = random.randint(0, 300)
logger.info("Try to post weibo in {} seconds.".format(waiting_sec))
auto_post_weibo.apply_async(eta=now() + timedelta(seconds=waiting_sec))
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_media():
file_list = list()
total_size = 0
for start_path in (DownloadService.ROOT, MediaService.ROOT):
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
fp_stat = os.stat(fp)
total_size += fp_stat.st_size
file_list.append((fp, fp_stat.st_size, fp_stat.st_ctime))
file_list.sort(key=lambda x: x[-1])
for fp, size, dummy in file_list:
if total_size <= settings.MEDIA_MAX_SIZE:
break
try:
os.remove(fp)
total_size -= size
logger.info("File[{}] has been removed.".format(fp))
except FileNotFoundError:
total_size -= size
except OSError:
logger.exception("Error occurred while deleting file[{}].".format(fp))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_nodes():
annotated_nodes = Node.objects.annotate(n_histories=Count('histories'))
orphans = annotated_nodes.filter(n_histories=0)
nodes = [{'attribute': node.attribute.code, 'value': node._value} for node in orphans]
orphans.delete()
if nodes:
logger.info("Following Nodes have been deleted.: {}".format(nodes))
@shared_task(soft_time_limit=TIME_LIMIT)
def bot_auto_task():
try:
auto_update_posts()
auto_post_weibo()
finally:
clean_media()
@shared_task
def clean_expired_tokens():
flushexpiredtokens.Command().handle()
| ja_names.append(self.tag.ja_name) | conditional_block |
tasks.py | import logging
import os
import random
from datetime import timedelta
from urllib.parse import urlparse
from celery import shared_task
from django.conf import settings
from django.db import transaction
from django.db.models import Count, Q
from django.utils.timezone import now
from requests import HTTPError
from rest_framework_simplejwt.token_blacklist.management.commands import flushexpiredtokens
from bot.models import Weibo
from bot.services.download_service import DownloadService
from bot.services.info_service import AtwikiInfoService, ASDBCopyrightInfoService, ANNArtistInfoService, \
GoogleKGSArtistInfoService, MALCopyrightInfoService, BangumiCopyrightInfoService, GoogleKGSCopyrightInfoService
from bot.services.media_service import MediaService
from bot.services.sakugabooru_service import SakugabooruService
from bot.services.weiboV2_service import WeiboService
from hub.models import Post, Tag, Node
logger = logging.getLogger('bot.tasks')
TIME_LIMIT = settings.TASK_TIME_LIMIT
class TagInfoUpdateTask(object):
def __init__(self, tag, overwrite=False):
|
def _save_info_to_tag(self):
for k, v in self.info.items():
if v:
try:
logger.info("Info [{}: {}] is being added "
"to Tag[{}]. Overwrite: {}".format(k,
v,
self.tag.name,
self.overwrite))
self.tag.save_to_detail(k, v, self.overwrite)
except AttributeError:
pass
def _get_and_save_info(self, service, *names, overwrite_keys=(), **kwargs):
logger.info("Tag[{}]: Getting result from {} with names {}".format(self.tag.name, service.__name__, names))
service_instance = service()
info = service_instance.get_info(*names, **kwargs)
for k, v in info.items():
if k in overwrite_keys:
self.info[k] = v
continue
self.info.setdefault(k, v)
def get_values_from_info(self, *keys):
return [self.info.get(key, None) for key in keys if self.info.get(key, None)]
def translate_artist(self):
if self.tag.type != Tag.ARTIST:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(ANNArtistInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(GoogleKGSArtistInfoService,
*names,
overwrite_keys=('description',))
def translate_copyright(self):
if self.tag.type != Tag.COPYRIGHT:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(MALCopyrightInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(BangumiCopyrightInfoService,
*names,
overwrite_keys=("name_ja",))
try:
source = self.tag.post_set.latest('id').source
except Post.DoesNotExist:
source = ''
if len(self.tag.name) > 6 and (len(source) < 10 or not bool(urlparse(source).netloc)):
names = [name] + self.get_values_from_info('name_ja', 'name_zh')
self._get_and_save_info(GoogleKGSCopyrightInfoService,
*names,
overwrite_keys=('description',))
def get_additional_info(self):
if self.tag.type not in (Tag.ARTIST, Tag.COPYRIGHT):
return
ja_names = []
if self.tag.ja_name:
ja_names.append(self.tag.ja_name)
ja_names.extend(self.get_values_from_info('name_ja'))
if not ja_names:
ja_names = [self.tag.name.replace("_", " ")]
self._get_and_save_info(AtwikiInfoService, *ja_names)
if self.tag.type == Tag.COPYRIGHT:
self._get_and_save_info(ASDBCopyrightInfoService, *ja_names)
@transaction.atomic
def save(self):
self.tag.refresh_from_db()
self._save_info_to_tag()
return self.tag.save()
def process(self):
try:
self.translate_artist()
self.translate_copyright()
self.get_additional_info()
finally:
return self.save()
def update_tags_info(*tags, update_tag_type=False, overwrite=False):
if update_tag_type:
tags = SakugabooruService().update_tags([tag.name for tag in tags], force_update=True)
for tag in tags:
TagInfoUpdateTask(tag, overwrite).process()
@shared_task(soft_time_limit=TIME_LIMIT)
def update_tags_info_task(*tag_pks, update_tag_type=False, overwrite=False):
tags = Tag.objects.filter(pk__in=tag_pks)
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
@shared_task
def update_all_tags_info(update_tag_type=True, overwrite=True):
tags = Tag.objects.filter(type__in=[Tag.ARTIST, Tag.COPYRIGHT])
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
def update_posts(*posts):
booru = SakugabooruService()
try:
logger.info("Updating posts {} from sakugabooru.".format([post.id for post in posts]))
booru.update_posts(*[post.id for post in posts])
finally:
update_tags_info(*booru.created_tags)
@shared_task(soft_time_limit=TIME_LIMIT)
def update_posts_task(*post_pks):
update_posts(*Post.objects.filter(pk__in=post_pks))
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_update_posts():
booru = SakugabooruService()
try:
logger.info("Updating posts from sakugabooru.")
last_post = Post.objects.latest('id')
page = 1
posts = booru.update_posts_by_page(page=page)
while posts[-1].id > last_post.id + 1 or page < 3:
page += 1
posts = booru.update_posts_by_page(page=page)
except Post.DoesNotExist:
booru.update_posts_by_page()
except:
logger.exception("Auto_update_posts failed.")
finally:
update_tags_info(*booru.created_tags)
def post_weibo(*posts):
weibo_service = WeiboService()
for post in posts:
try:
logger.info("Post[{}]: Downloading media.".format(post.id))
media_path = DownloadService().download_post_media(post)
logger.info("Post[{}]: Transcoding media.".format(post.id))
media_path = MediaService().transcoding_media(post, media_path)
logger.info("Post[{}]: Sending weibo.".format(post.id))
post.posted = True
post.weibo = weibo_service.post_weibo(post, media_path)
post.save()
logger.info("Post[{}]: Posting Weibo Success. weibo_id[{}]".format(post.id, post.weibo.weibo_id))
except HTTPError as e:
if '404' in str(e):
post.posted = True
post.save()
continue
except RuntimeError as e:
if '[SKIP]' in str(e):
post.posted = True
post.save()
continue
break
except:
post.posted = True
post.save()
logger.exception("Something went wrong while posting Post[{}].".format(post.id))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def post_weibo_task(*post_pks):
post_weibo(*Post.objects.filter(pk__in=post_pks).order_by('id'))
def check_status():
last_weibo = Weibo.objects.last()
fails = Post.objects.filter(id__gt=last_weibo.post.id,
update_time__gt=last_weibo.create_time - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=True,
weibo__isnull=True)
if len(fails) >= 5:
return False
return True
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_post_weibo():
if not check_status():
logger.error("More than 5 fails. Posting has Stopped!")
return
try:
last_posted_post = Post.objects.filter(posted=True).latest('id')
posts = Post.objects.filter(
created_at__gt=last_posted_post.created_at - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=False,
is_shown=True).exclude(
Q(uploader__in_blacklist=True) | Q(uploader__in_whitelist=False, is_pending=True)).order_by('id')
except Post.DoesNotExist:
posts = list(reversed(Post.objects.filter(posted=False,
is_shown=True).order_by('-id')[:20]))
if not posts:
logger.info("There's no need to post weibo.")
post_weibo(*posts[:random.randint(1, 2)])
@shared_task()
def auto_post_weibo_with_random_delay():
if random.random() < 0.8:
waiting_sec = random.randint(0, 300)
logger.info("Try to post weibo in {} seconds.".format(waiting_sec))
auto_post_weibo.apply_async(eta=now() + timedelta(seconds=waiting_sec))
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_media():
file_list = list()
total_size = 0
for start_path in (DownloadService.ROOT, MediaService.ROOT):
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
fp_stat = os.stat(fp)
total_size += fp_stat.st_size
file_list.append((fp, fp_stat.st_size, fp_stat.st_ctime))
file_list.sort(key=lambda x: x[-1])
for fp, size, dummy in file_list:
if total_size <= settings.MEDIA_MAX_SIZE:
break
try:
os.remove(fp)
total_size -= size
logger.info("File[{}] has been removed.".format(fp))
except FileNotFoundError:
total_size -= size
except OSError:
logger.exception("Error occurred while deleting file[{}].".format(fp))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_nodes():
annotated_nodes = Node.objects.annotate(n_histories=Count('histories'))
orphans = annotated_nodes.filter(n_histories=0)
nodes = [{'attribute': node.attribute.code, 'value': node._value} for node in orphans]
orphans.delete()
if nodes:
logger.info("Following Nodes have been deleted.: {}".format(nodes))
@shared_task(soft_time_limit=TIME_LIMIT)
def bot_auto_task():
try:
auto_update_posts()
auto_post_weibo()
finally:
clean_media()
@shared_task
def clean_expired_tokens():
flushexpiredtokens.Command().handle()
| assert isinstance(tag, Tag)
self.tag = tag
self.overwrite = overwrite
self.info = dict() | identifier_body |
tasks.py | import logging
import os
import random
from datetime import timedelta
from urllib.parse import urlparse
from celery import shared_task
from django.conf import settings
from django.db import transaction
from django.db.models import Count, Q
from django.utils.timezone import now
from requests import HTTPError
from rest_framework_simplejwt.token_blacklist.management.commands import flushexpiredtokens
from bot.models import Weibo
from bot.services.download_service import DownloadService
from bot.services.info_service import AtwikiInfoService, ASDBCopyrightInfoService, ANNArtistInfoService, \
GoogleKGSArtistInfoService, MALCopyrightInfoService, BangumiCopyrightInfoService, GoogleKGSCopyrightInfoService
from bot.services.media_service import MediaService
from bot.services.sakugabooru_service import SakugabooruService
from bot.services.weiboV2_service import WeiboService
from hub.models import Post, Tag, Node
logger = logging.getLogger('bot.tasks')
TIME_LIMIT = settings.TASK_TIME_LIMIT
class TagInfoUpdateTask(object):
def __init__(self, tag, overwrite=False):
assert isinstance(tag, Tag)
self.tag = tag
self.overwrite = overwrite
self.info = dict()
def _save_info_to_tag(self):
for k, v in self.info.items():
if v:
try:
logger.info("Info [{}: {}] is being added "
"to Tag[{}]. Overwrite: {}".format(k,
v,
self.tag.name,
self.overwrite))
self.tag.save_to_detail(k, v, self.overwrite)
except AttributeError:
pass
def _get_and_save_info(self, service, *names, overwrite_keys=(), **kwargs):
logger.info("Tag[{}]: Getting result from {} with names {}".format(self.tag.name, service.__name__, names))
service_instance = service()
info = service_instance.get_info(*names, **kwargs) | self.info.setdefault(k, v)
def get_values_from_info(self, *keys):
return [self.info.get(key, None) for key in keys if self.info.get(key, None)]
def translate_artist(self):
if self.tag.type != Tag.ARTIST:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(ANNArtistInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(GoogleKGSArtistInfoService,
*names,
overwrite_keys=('description',))
def translate_copyright(self):
if self.tag.type != Tag.COPYRIGHT:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(MALCopyrightInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(BangumiCopyrightInfoService,
*names,
overwrite_keys=("name_ja",))
try:
source = self.tag.post_set.latest('id').source
except Post.DoesNotExist:
source = ''
if len(self.tag.name) > 6 and (len(source) < 10 or not bool(urlparse(source).netloc)):
names = [name] + self.get_values_from_info('name_ja', 'name_zh')
self._get_and_save_info(GoogleKGSCopyrightInfoService,
*names,
overwrite_keys=('description',))
def get_additional_info(self):
if self.tag.type not in (Tag.ARTIST, Tag.COPYRIGHT):
return
ja_names = []
if self.tag.ja_name:
ja_names.append(self.tag.ja_name)
ja_names.extend(self.get_values_from_info('name_ja'))
if not ja_names:
ja_names = [self.tag.name.replace("_", " ")]
self._get_and_save_info(AtwikiInfoService, *ja_names)
if self.tag.type == Tag.COPYRIGHT:
self._get_and_save_info(ASDBCopyrightInfoService, *ja_names)
@transaction.atomic
def save(self):
self.tag.refresh_from_db()
self._save_info_to_tag()
return self.tag.save()
def process(self):
try:
self.translate_artist()
self.translate_copyright()
self.get_additional_info()
finally:
return self.save()
def update_tags_info(*tags, update_tag_type=False, overwrite=False):
if update_tag_type:
tags = SakugabooruService().update_tags([tag.name for tag in tags], force_update=True)
for tag in tags:
TagInfoUpdateTask(tag, overwrite).process()
@shared_task(soft_time_limit=TIME_LIMIT)
def update_tags_info_task(*tag_pks, update_tag_type=False, overwrite=False):
tags = Tag.objects.filter(pk__in=tag_pks)
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
@shared_task
def update_all_tags_info(update_tag_type=True, overwrite=True):
tags = Tag.objects.filter(type__in=[Tag.ARTIST, Tag.COPYRIGHT])
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
def update_posts(*posts):
booru = SakugabooruService()
try:
logger.info("Updating posts {} from sakugabooru.".format([post.id for post in posts]))
booru.update_posts(*[post.id for post in posts])
finally:
update_tags_info(*booru.created_tags)
@shared_task(soft_time_limit=TIME_LIMIT)
def update_posts_task(*post_pks):
update_posts(*Post.objects.filter(pk__in=post_pks))
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_update_posts():
booru = SakugabooruService()
try:
logger.info("Updating posts from sakugabooru.")
last_post = Post.objects.latest('id')
page = 1
posts = booru.update_posts_by_page(page=page)
while posts[-1].id > last_post.id + 1 or page < 3:
page += 1
posts = booru.update_posts_by_page(page=page)
except Post.DoesNotExist:
booru.update_posts_by_page()
except:
logger.exception("Auto_update_posts failed.")
finally:
update_tags_info(*booru.created_tags)
def post_weibo(*posts):
weibo_service = WeiboService()
for post in posts:
try:
logger.info("Post[{}]: Downloading media.".format(post.id))
media_path = DownloadService().download_post_media(post)
logger.info("Post[{}]: Transcoding media.".format(post.id))
media_path = MediaService().transcoding_media(post, media_path)
logger.info("Post[{}]: Sending weibo.".format(post.id))
post.posted = True
post.weibo = weibo_service.post_weibo(post, media_path)
post.save()
logger.info("Post[{}]: Posting Weibo Success. weibo_id[{}]".format(post.id, post.weibo.weibo_id))
except HTTPError as e:
if '404' in str(e):
post.posted = True
post.save()
continue
except RuntimeError as e:
if '[SKIP]' in str(e):
post.posted = True
post.save()
continue
break
except:
post.posted = True
post.save()
logger.exception("Something went wrong while posting Post[{}].".format(post.id))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def post_weibo_task(*post_pks):
post_weibo(*Post.objects.filter(pk__in=post_pks).order_by('id'))
def check_status():
last_weibo = Weibo.objects.last()
fails = Post.objects.filter(id__gt=last_weibo.post.id,
update_time__gt=last_weibo.create_time - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=True,
weibo__isnull=True)
if len(fails) >= 5:
return False
return True
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_post_weibo():
if not check_status():
logger.error("More than 5 fails. Posting has Stopped!")
return
try:
last_posted_post = Post.objects.filter(posted=True).latest('id')
posts = Post.objects.filter(
created_at__gt=last_posted_post.created_at - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=False,
is_shown=True).exclude(
Q(uploader__in_blacklist=True) | Q(uploader__in_whitelist=False, is_pending=True)).order_by('id')
except Post.DoesNotExist:
posts = list(reversed(Post.objects.filter(posted=False,
is_shown=True).order_by('-id')[:20]))
if not posts:
logger.info("There's no need to post weibo.")
post_weibo(*posts[:random.randint(1, 2)])
@shared_task()
def auto_post_weibo_with_random_delay():
if random.random() < 0.8:
waiting_sec = random.randint(0, 300)
logger.info("Try to post weibo in {} seconds.".format(waiting_sec))
auto_post_weibo.apply_async(eta=now() + timedelta(seconds=waiting_sec))
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_media():
file_list = list()
total_size = 0
for start_path in (DownloadService.ROOT, MediaService.ROOT):
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
fp_stat = os.stat(fp)
total_size += fp_stat.st_size
file_list.append((fp, fp_stat.st_size, fp_stat.st_ctime))
file_list.sort(key=lambda x: x[-1])
for fp, size, dummy in file_list:
if total_size <= settings.MEDIA_MAX_SIZE:
break
try:
os.remove(fp)
total_size -= size
logger.info("File[{}] has been removed.".format(fp))
except FileNotFoundError:
total_size -= size
except OSError:
logger.exception("Error occurred while deleting file[{}].".format(fp))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_nodes():
annotated_nodes = Node.objects.annotate(n_histories=Count('histories'))
orphans = annotated_nodes.filter(n_histories=0)
nodes = [{'attribute': node.attribute.code, 'value': node._value} for node in orphans]
orphans.delete()
if nodes:
logger.info("Following Nodes have been deleted.: {}".format(nodes))
@shared_task(soft_time_limit=TIME_LIMIT)
def bot_auto_task():
try:
auto_update_posts()
auto_post_weibo()
finally:
clean_media()
@shared_task
def clean_expired_tokens():
flushexpiredtokens.Command().handle() | for k, v in info.items():
if k in overwrite_keys:
self.info[k] = v
continue | random_line_split |
tasks.py | import logging
import os
import random
from datetime import timedelta
from urllib.parse import urlparse
from celery import shared_task
from django.conf import settings
from django.db import transaction
from django.db.models import Count, Q
from django.utils.timezone import now
from requests import HTTPError
from rest_framework_simplejwt.token_blacklist.management.commands import flushexpiredtokens
from bot.models import Weibo
from bot.services.download_service import DownloadService
from bot.services.info_service import AtwikiInfoService, ASDBCopyrightInfoService, ANNArtistInfoService, \
GoogleKGSArtistInfoService, MALCopyrightInfoService, BangumiCopyrightInfoService, GoogleKGSCopyrightInfoService
from bot.services.media_service import MediaService
from bot.services.sakugabooru_service import SakugabooruService
from bot.services.weiboV2_service import WeiboService
from hub.models import Post, Tag, Node
logger = logging.getLogger('bot.tasks')
TIME_LIMIT = settings.TASK_TIME_LIMIT
class TagInfoUpdateTask(object):
def __init__(self, tag, overwrite=False):
assert isinstance(tag, Tag)
self.tag = tag
self.overwrite = overwrite
self.info = dict()
def _save_info_to_tag(self):
for k, v in self.info.items():
if v:
try:
logger.info("Info [{}: {}] is being added "
"to Tag[{}]. Overwrite: {}".format(k,
v,
self.tag.name,
self.overwrite))
self.tag.save_to_detail(k, v, self.overwrite)
except AttributeError:
pass
def _get_and_save_info(self, service, *names, overwrite_keys=(), **kwargs):
logger.info("Tag[{}]: Getting result from {} with names {}".format(self.tag.name, service.__name__, names))
service_instance = service()
info = service_instance.get_info(*names, **kwargs)
for k, v in info.items():
if k in overwrite_keys:
self.info[k] = v
continue
self.info.setdefault(k, v)
def get_values_from_info(self, *keys):
return [self.info.get(key, None) for key in keys if self.info.get(key, None)]
def translate_artist(self):
if self.tag.type != Tag.ARTIST:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(ANNArtistInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(GoogleKGSArtistInfoService,
*names,
overwrite_keys=('description',))
def | (self):
if self.tag.type != Tag.COPYRIGHT:
return
name = self.tag.name.replace("_", " ")
self._get_and_save_info(MALCopyrightInfoService, name)
names = [name] + self.get_values_from_info('name_ja')
self._get_and_save_info(BangumiCopyrightInfoService,
*names,
overwrite_keys=("name_ja",))
try:
source = self.tag.post_set.latest('id').source
except Post.DoesNotExist:
source = ''
if len(self.tag.name) > 6 and (len(source) < 10 or not bool(urlparse(source).netloc)):
names = [name] + self.get_values_from_info('name_ja', 'name_zh')
self._get_and_save_info(GoogleKGSCopyrightInfoService,
*names,
overwrite_keys=('description',))
def get_additional_info(self):
if self.tag.type not in (Tag.ARTIST, Tag.COPYRIGHT):
return
ja_names = []
if self.tag.ja_name:
ja_names.append(self.tag.ja_name)
ja_names.extend(self.get_values_from_info('name_ja'))
if not ja_names:
ja_names = [self.tag.name.replace("_", " ")]
self._get_and_save_info(AtwikiInfoService, *ja_names)
if self.tag.type == Tag.COPYRIGHT:
self._get_and_save_info(ASDBCopyrightInfoService, *ja_names)
@transaction.atomic
def save(self):
self.tag.refresh_from_db()
self._save_info_to_tag()
return self.tag.save()
def process(self):
try:
self.translate_artist()
self.translate_copyright()
self.get_additional_info()
finally:
return self.save()
def update_tags_info(*tags, update_tag_type=False, overwrite=False):
if update_tag_type:
tags = SakugabooruService().update_tags([tag.name for tag in tags], force_update=True)
for tag in tags:
TagInfoUpdateTask(tag, overwrite).process()
@shared_task(soft_time_limit=TIME_LIMIT)
def update_tags_info_task(*tag_pks, update_tag_type=False, overwrite=False):
tags = Tag.objects.filter(pk__in=tag_pks)
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
@shared_task
def update_all_tags_info(update_tag_type=True, overwrite=True):
tags = Tag.objects.filter(type__in=[Tag.ARTIST, Tag.COPYRIGHT])
update_tags_info(*tags, update_tag_type=update_tag_type, overwrite=overwrite)
def update_posts(*posts):
booru = SakugabooruService()
try:
logger.info("Updating posts {} from sakugabooru.".format([post.id for post in posts]))
booru.update_posts(*[post.id for post in posts])
finally:
update_tags_info(*booru.created_tags)
@shared_task(soft_time_limit=TIME_LIMIT)
def update_posts_task(*post_pks):
update_posts(*Post.objects.filter(pk__in=post_pks))
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_update_posts():
booru = SakugabooruService()
try:
logger.info("Updating posts from sakugabooru.")
last_post = Post.objects.latest('id')
page = 1
posts = booru.update_posts_by_page(page=page)
while posts[-1].id > last_post.id + 1 or page < 3:
page += 1
posts = booru.update_posts_by_page(page=page)
except Post.DoesNotExist:
booru.update_posts_by_page()
except:
logger.exception("Auto_update_posts failed.")
finally:
update_tags_info(*booru.created_tags)
def post_weibo(*posts):
weibo_service = WeiboService()
for post in posts:
try:
logger.info("Post[{}]: Downloading media.".format(post.id))
media_path = DownloadService().download_post_media(post)
logger.info("Post[{}]: Transcoding media.".format(post.id))
media_path = MediaService().transcoding_media(post, media_path)
logger.info("Post[{}]: Sending weibo.".format(post.id))
post.posted = True
post.weibo = weibo_service.post_weibo(post, media_path)
post.save()
logger.info("Post[{}]: Posting Weibo Success. weibo_id[{}]".format(post.id, post.weibo.weibo_id))
except HTTPError as e:
if '404' in str(e):
post.posted = True
post.save()
continue
except RuntimeError as e:
if '[SKIP]' in str(e):
post.posted = True
post.save()
continue
break
except:
post.posted = True
post.save()
logger.exception("Something went wrong while posting Post[{}].".format(post.id))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def post_weibo_task(*post_pks):
post_weibo(*Post.objects.filter(pk__in=post_pks).order_by('id'))
def check_status():
last_weibo = Weibo.objects.last()
fails = Post.objects.filter(id__gt=last_weibo.post.id,
update_time__gt=last_weibo.create_time - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=True,
weibo__isnull=True)
if len(fails) >= 5:
return False
return True
@shared_task(soft_time_limit=TIME_LIMIT)
def auto_post_weibo():
if not check_status():
logger.error("More than 5 fails. Posting has Stopped!")
return
try:
last_posted_post = Post.objects.filter(posted=True).latest('id')
posts = Post.objects.filter(
created_at__gt=last_posted_post.created_at - timedelta(hours=settings.MAX_PENDING_HOURS),
posted=False,
is_shown=True).exclude(
Q(uploader__in_blacklist=True) | Q(uploader__in_whitelist=False, is_pending=True)).order_by('id')
except Post.DoesNotExist:
posts = list(reversed(Post.objects.filter(posted=False,
is_shown=True).order_by('-id')[:20]))
if not posts:
logger.info("There's no need to post weibo.")
post_weibo(*posts[:random.randint(1, 2)])
@shared_task()
def auto_post_weibo_with_random_delay():
if random.random() < 0.8:
waiting_sec = random.randint(0, 300)
logger.info("Try to post weibo in {} seconds.".format(waiting_sec))
auto_post_weibo.apply_async(eta=now() + timedelta(seconds=waiting_sec))
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_media():
file_list = list()
total_size = 0
for start_path in (DownloadService.ROOT, MediaService.ROOT):
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
fp_stat = os.stat(fp)
total_size += fp_stat.st_size
file_list.append((fp, fp_stat.st_size, fp_stat.st_ctime))
file_list.sort(key=lambda x: x[-1])
for fp, size, dummy in file_list:
if total_size <= settings.MEDIA_MAX_SIZE:
break
try:
os.remove(fp)
total_size -= size
logger.info("File[{}] has been removed.".format(fp))
except FileNotFoundError:
total_size -= size
except OSError:
logger.exception("Error occurred while deleting file[{}].".format(fp))
raise
@shared_task(soft_time_limit=TIME_LIMIT)
def clean_nodes():
annotated_nodes = Node.objects.annotate(n_histories=Count('histories'))
orphans = annotated_nodes.filter(n_histories=0)
nodes = [{'attribute': node.attribute.code, 'value': node._value} for node in orphans]
orphans.delete()
if nodes:
logger.info("Following Nodes have been deleted.: {}".format(nodes))
@shared_task(soft_time_limit=TIME_LIMIT)
def bot_auto_task():
try:
auto_update_posts()
auto_post_weibo()
finally:
clean_media()
@shared_task
def clean_expired_tokens():
flushexpiredtokens.Command().handle()
| translate_copyright | identifier_name |
day04.rs | //! # --- Day 4: Passport Processing ---
//!
//! You arrive at the airport only to realize that you grabbed your North Pole
//! Credentials instead of your passport. While these documents are extremely
//! similar, North Pole Credentials aren't issued by a country and therefore
//! aren't actually valid documentation for travel in most of the world.
//!
//! It seems like you're not the only one having problems, though; a very long
//! line has formed for the automatic passport scanners, and the delay could
//! upset your travel itinerary.
//!
//! Due to some questionable network security, you realize you might be able to
//! solve both of these problems at the same time.
//!
//! The automatic passport scanners are slow because they're having trouble
//! **detecting which passports have all required fields**. The expected fields
//! are as follows:
//!
//! - `byr` (Birth Year)
//! - `iyr` (Issue Year)
//! - `eyr` (Expiration Year)
//! - `hgt` (Height)
//! - `hcl` (Hair Color)
//! - `ecl` (Eye Color)
//! - `pid` (Passport ID)
//! - `cid` (Country ID)
//!
//! Passport data is validated in batch files (your puzzle input). Each passport
//! is represented as a sequence of `key:value` pairs separated by spaces or
//! newlines. Passports are separated by blank lines.
//!
//! Here is an example batch file containing four passports:
//!
//! ```
//! ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
//! byr:1937 iyr:2017 cid:147 hgt:183cm
//!
//! iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
//! hcl:#cfa07d byr:1929
//!
//! hcl:#ae17e1 iyr:2013
//! eyr:2024
//! ecl:brn pid:760753108 byr:1931
//! hgt:179cm
//!
//! hcl:#cfa07d eyr:2025 pid:166559648
//! iyr:2011 ecl:brn hgt:59in
//! ```
//!
//! The first passport is **valid** - all eight fields are present. The second
//! passport is **invalid** - it is missing `hgt` (the Height field).
//!
//! The third passport is interesting; the **only missing field** is `cid`, so
//! it looks like data from North Pole Credentials, not a passport at all!
//! Surely, nobody would mind if you made the system temporarily ignore missing
//! `cid` fields. Treat this "passport" as valid.
//!
//! The fourth passport is missing two fields, `cid` and `byr`. Missing `cid` is
//! fine, but missing any other field is not, so this passport is **invalid**.
//!
//! According to the above rules, your improved system would report `2` valid
//! passports.
//!
//! Count the number of **valid** passports - those that have all required
//! fields. Treat `cid` as optional.
//! **In your batch file, how many passports are valid?**
//!
//! ## --- Part Two ---
//!
//! The line is moving more quickly now, but you overhear airport security
//! talking about how passports with invalid data are getting through. Better
//! add some data validation, quick!
//!
//! You can continue to ignore the `cid` field, but each other field has strict
//! rules about what values are valid for automatic validation:
//!
//! - `byr` (Birth Year) - four digits; at least `1920` and at most `2002`.
//! - `iyr` (Issue Year) - four digits; at least `2010` and at most `2020`.
//! - `eyr` (Expiration Year) - four digits; at least `2020` and at most `2030`.
//! - `hgt` (Height) - a number followed by either `cm` or `in`:
//! - If `cm`, the number must be at least `150` and at most `193`.
//! - If `in`, the number must be at least `59` and at most `76`.
//! - `hcl` (Hair Color) - a `#` followed by exactly six characters `0-9` or `a-f`.
//! - `ecl` (Eye Color) - exactly one of: `amb` `blu` `brn` `gry` `grn` `hzl` `oth`.
//! - `pid` (Passport ID) - a nine-digit number, including leading zeroes.
//! - `cid` (Country ID) - ignored, missing or not.
//!
//! Your job is to count the passports where all required fields are both
//! **present** and **valid** according to the above rules. Here are some
//! example values:
//!
//! ```
//! byr valid: 2002
//! byr invalid: 2003
//!
//! hgt valid: 60in
//! hgt valid: 190cm
//! hgt invalid: 190in
//! hgt invalid: 190
//!
//! hcl valid: #123abc
//! hcl invalid: #123abz
//! hcl invalid: 123abc
//!
//! ecl valid: brn
//! ecl invalid: wat
//!
//! pid valid: 000000001
//! pid invalid: 0123456789
//! ```
//!
//! Here are some invalid passports:
//!
//! ```
//! eyr:1972 cid:100
//! hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
//!
//! iyr:2019
//! hcl:#602927 eyr:1967 hgt:170cm
//! ecl:grn pid:012533040 byr:1946
//!
//! hcl:dab227 iyr:2012
//! ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
//!
//! hgt:59cm ecl:zzz
//! eyr:2038 hcl:74454a iyr:2023
//! pid:3556412378 byr:2007
//! ```
//!
//! Here are some valid passports:
//!
//! ```
//! pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
//! hcl:#623a2f
//!
//! eyr:2029 ecl:blu cid:129 byr:1989
//! iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
//!
//! hcl:#888785
//! hgt:164cm byr:2001 iyr:2015 cid:88
//! pid:545766238 ecl:hzl
//! eyr:2022
//!
//! iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
//! ```
//!
//! Count the number of **valid** passports - those that have all required
//! fields **and valid values**. Continue to treat cid as optional.
//! **In your batch file, how many passports are valid?**
#[macro_use]
extern crate lazy_static;
use std::collections::{HashMap, HashSet};
use std::env;
use std::fs;
use nom::{
branch::alt, bytes::complete::tag, bytes::complete::take_while,
character::complete::multispace1, multi::separated_list0, sequence::tuple, IResult,
};
lazy_static! {
static ref MUST_FIELDS: HashSet<&'static str> =
vec!["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
.into_iter()
.collect();
static ref HAIR_COLORS: HashSet<&'static str> =
vec!["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
.into_iter()
.collect();
}
#[derive(Debug, Eq, PartialEq)]
enum HeightUnit {
Cm,
In,
}
#[derive(Debug, Eq, PartialEq)]
struct Height {
hight: usize,
unit: HeightUnit,
}
#[derive(Debug, Eq, PartialEq)]
struct Passport {
byr: usize, // birth year
iyr: usize, // issue year
eyr: usize, // expiration year
hgt: Height, // height
hcl: String, // hair color
ecl: String, // eye color
pid: String, // passport id
cid: Option<usize>, // country id
}
impl Passport {
fn from_hashmap(kvs: &HashMap<&str, &str>) -> Option<Passport> {
// `byr` (Birth Year) - four digits; at least `1920` and at most `2002`.
let byr = match kvs.get("byr").unwrap().parse::<usize>() {
Ok(i @ 1920..=2002) => i,
Err(_) => return None,
_ => return None,
};
// `iyr` (Issue Year) - four digits; at least `2010` and at most `2020`.
let iyr = match kvs.get("iyr").unwrap().parse::<usize>() {
Ok(i @ 2010..=2020) => i,
Err(_) => return None,
_ => return None,
};
// `eyr` (Expiration Year) - four digits; at least `2020` and at most `2030`.
let eyr = match kvs.get("eyr").unwrap().parse::<usize>() {
Ok(i @ 2020..=2030) => i,
Err(_) => return None,
_ => return None,
};
// - `hgt` (Height) - a number followed by either `cm` or `in`:
// - If `cm`, the number must be at least `150` and at most `193`.
// - If `in`, the number must be at least `59` and at most `76`.
let hgt_str = kvs.get("hgt").unwrap();
let hgt = if hgt_str.ends_with("cm") {
let high = hgt_str.strip_suffix("cm").unwrap().parse::<usize>();
match high {
Ok(i @ 150..=193) => Height {
hight: i,
unit: HeightUnit::Cm,
},
_ => return None,
}
} else if hgt_str.ends_with("in") {
let high = hgt_str.strip_suffix("in").unwrap().parse::<usize>();
match high {
Ok(i @ 59..=76) => Height {
hight: i,
unit: HeightUnit::In,
},
_ => return None,
}
} else {
return None;
};
// `hcl` (Hair Color) - a `#` followed by exactly six characters `0-9` or `a-f`.
let hcl_str = kvs.get("hcl").unwrap();
let hcl = if hcl_str.starts_with('#')
&& hcl_str.len() == 7
&& hcl_str[1..].chars().all(|x| x.is_digit(16))
{
(*hcl_str).into()
} else {
return None;
};
// `ecl` (Eye Color) - exactly one of: `amb` `blu` `brn` `gry` `grn` `hzl` `oth`.
let ecl_str = kvs.get("ecl").unwrap();
let ecl = if HAIR_COLORS.contains(ecl_str) {
(*ecl_str).into()
} else {
return None;
};
// `pid` (Passport ID) - a nine-digit number, including leading zeroes.
let pid_str = kvs.get("pid").unwrap();
let pid = if (*pid_str).parse::<usize>().is_ok() && (*pid_str).trim().len() == 9 {
(*pid_str).into()
} else {
return None;
};
// `cid` (Country ID) - ignored, missing or not.
let cid = match kvs.get("cid") {
Some(x) => match x.parse::<usize>() {
Ok(i) => Some(i),
Err(_) => return None,
},
None => None,
};
Some(Passport {
byr,
iyr,
eyr,
hgt,
hcl,
ecl,
pid,
cid,
})
}
}
fn is_valid_char(c: char) -> bool {
c.is_alphanumeric() || c == '#'
}
fn kv_parser(input: &str) -> IResult<&str, (&str, &str)> {
let keys = alt((
tag("byr"),
tag("iyr"),
tag("eyr"),
tag("hgt"),
tag("hcl"),
tag("ecl"),
tag("pid"),
tag("cid"),
));
let (input, (k, _, v)) = tuple((keys, tag(":"), take_while(is_valid_char)))(input)?;
Ok((input, (k, v)))
}
fn kvlist_parser(input: &str) -> IResult<&str, Vec<(&str, &str)>> {
let (input, kv_group) = separated_list0(multispace1, kv_parser)(input)?;
// println!("{:?}", kv_group);
Ok((input, kv_group))
}
fn parse_input(input: &str) -> Vec<HashMap<&str, &str>> {
let group = input.split("\n\n");
group
.filter_map(|x| {
let (_, kv_list) = kvlist_parser(x).unwrap();
let kv_map = kv_list.into_iter().collect::<HashMap<&str, &str>>();
let keys_set = kv_map.keys().cloned().collect::<HashSet<&str>>();
if MUST_FIELDS.is_subset(&keys_set) {
Some(kv_map)
} else {
None
}
})
.collect::<Vec<HashMap<&str, &str>>>()
}
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file");
let valid_kvs = parse_input(&input);
println!(
"The number of valid passports have all required fields is {}.",
valid_kvs.len()
);
let passports: Vec<Passport> = valid_kvs
.iter()
.filter_map(|x| Passport::from_hashmap(x))
.collect();
println!(
"The number of passports satisfy all restrictions is {}.",
passports.len()
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_kv_parser() {
let (left, (k, v)) = kv_parser("ecl:gry").unwrap();
assert_eq!(left, "");
assert_eq!((k, v), ("ecl", "gry"));
let (_, (k, v)) = kv_parser("hcl:#fffffd").unwrap();
assert_eq!((k, v), ("hcl", "#fffffd"));
let (left, (k, v)) = kv_parser("pid:860033327~rust").unwrap();
assert_eq!(left, "~rust");
assert_eq!((k, v), ("pid", "860033327"));
}
#[test]
fn test_kvlist_parser() {
let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm";
let (_, kvs) = kvlist_parser(input).unwrap();
assert_eq!(
kvs,
vec![
("ecl", "gry"),
("pid", "860033327"),
("eyr", "2020"),
("hcl", "#fffffd"),
("byr", "1937"),
("iyr", "2017"),
("cid", "147"),
("hgt", "183cm"),
],
)
}
#[test]
fn test_parse_input() {
let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
";
let passports_dict = parse_input(input);
assert_eq!(
passports_dict,
vec![
vec![
("ecl", "gry"),
("pid", "860033327"),
("eyr", "2020"),
("hcl", "#fffffd"),
("byr", "1937"),
("iyr", "2017"),
("cid", "147"),
("hgt", "183cm")
]
.into_iter()
.collect(),
vec![
("hcl", "#ae17e1"),
("iyr", "2013"),
("eyr", "2024"),
("ecl", "brn"),
("pid", "760753108"),
("byr", "1931"),
("hgt", "179cm")
]
.into_iter()
.collect(),
]
)
}
#[test]
fn test_passport_valid() {
let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
";
let kvs = parse_input(&input);
let passports: Vec<Passport> = kvs
.iter() | .filter_map(|x| Passport::from_hashmap(x))
.collect();
assert_eq!(
passports,
vec![
Passport {
ecl: "gry".into(),
pid: "860033327".into(),
eyr: 2020,
hcl: "#fffffd".into(),
byr: 1937,
iyr: 2017,
cid: Some(147),
hgt: Height {
hight: 183,
unit: HeightUnit::Cm,
},
},
Passport {
hcl: "#ae17e1".into(),
iyr: 2013,
eyr: 2024,
ecl: "brn".into(),
pid: "760753108".into(),
byr: 1931,
hgt: Height {
hight: 179,
unit: HeightUnit::Cm,
},
cid: None,
},
]
);
}
#[test]
fn test_passport_invalid() {
let input = "eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007";
let kvs = parse_input(&input);
let passports: Vec<Passport> = kvs
.iter()
.filter_map(|x| Passport::from_hashmap(x))
.collect();
assert_eq!(passports, vec![]);
}
} | random_line_split | |
day04.rs | //! # --- Day 4: Passport Processing ---
//!
//! You arrive at the airport only to realize that you grabbed your North Pole
//! Credentials instead of your passport. While these documents are extremely
//! similar, North Pole Credentials aren't issued by a country and therefore
//! aren't actually valid documentation for travel in most of the world.
//!
//! It seems like you're not the only one having problems, though; a very long
//! line has formed for the automatic passport scanners, and the delay could
//! upset your travel itinerary.
//!
//! Due to some questionable network security, you realize you might be able to
//! solve both of these problems at the same time.
//!
//! The automatic passport scanners are slow because they're having trouble
//! **detecting which passports have all required fields**. The expected fields
//! are as follows:
//!
//! - `byr` (Birth Year)
//! - `iyr` (Issue Year)
//! - `eyr` (Expiration Year)
//! - `hgt` (Height)
//! - `hcl` (Hair Color)
//! - `ecl` (Eye Color)
//! - `pid` (Passport ID)
//! - `cid` (Country ID)
//!
//! Passport data is validated in batch files (your puzzle input). Each passport
//! is represented as a sequence of `key:value` pairs separated by spaces or
//! newlines. Passports are separated by blank lines.
//!
//! Here is an example batch file containing four passports:
//!
//! ```
//! ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
//! byr:1937 iyr:2017 cid:147 hgt:183cm
//!
//! iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
//! hcl:#cfa07d byr:1929
//!
//! hcl:#ae17e1 iyr:2013
//! eyr:2024
//! ecl:brn pid:760753108 byr:1931
//! hgt:179cm
//!
//! hcl:#cfa07d eyr:2025 pid:166559648
//! iyr:2011 ecl:brn hgt:59in
//! ```
//!
//! The first passport is **valid** - all eight fields are present. The second
//! passport is **invalid** - it is missing `hgt` (the Height field).
//!
//! The third passport is interesting; the **only missing field** is `cid`, so
//! it looks like data from North Pole Credentials, not a passport at all!
//! Surely, nobody would mind if you made the system temporarily ignore missing
//! `cid` fields. Treat this "passport" as valid.
//!
//! The fourth passport is missing two fields, `cid` and `byr`. Missing `cid` is
//! fine, but missing any other field is not, so this passport is **invalid**.
//!
//! According to the above rules, your improved system would report `2` valid
//! passports.
//!
//! Count the number of **valid** passports - those that have all required
//! fields. Treat `cid` as optional.
//! **In your batch file, how many passports are valid?**
//!
//! ## --- Part Two ---
//!
//! The line is moving more quickly now, but you overhear airport security
//! talking about how passports with invalid data are getting through. Better
//! add some data validation, quick!
//!
//! You can continue to ignore the `cid` field, but each other field has strict
//! rules about what values are valid for automatic validation:
//!
//! - `byr` (Birth Year) - four digits; at least `1920` and at most `2002`.
//! - `iyr` (Issue Year) - four digits; at least `2010` and at most `2020`.
//! - `eyr` (Expiration Year) - four digits; at least `2020` and at most `2030`.
//! - `hgt` (Height) - a number followed by either `cm` or `in`:
//! - If `cm`, the number must be at least `150` and at most `193`.
//! - If `in`, the number must be at least `59` and at most `76`.
//! - `hcl` (Hair Color) - a `#` followed by exactly six characters `0-9` or `a-f`.
//! - `ecl` (Eye Color) - exactly one of: `amb` `blu` `brn` `gry` `grn` `hzl` `oth`.
//! - `pid` (Passport ID) - a nine-digit number, including leading zeroes.
//! - `cid` (Country ID) - ignored, missing or not.
//!
//! Your job is to count the passports where all required fields are both
//! **present** and **valid** according to the above rules. Here are some
//! example values:
//!
//! ```
//! byr valid: 2002
//! byr invalid: 2003
//!
//! hgt valid: 60in
//! hgt valid: 190cm
//! hgt invalid: 190in
//! hgt invalid: 190
//!
//! hcl valid: #123abc
//! hcl invalid: #123abz
//! hcl invalid: 123abc
//!
//! ecl valid: brn
//! ecl invalid: wat
//!
//! pid valid: 000000001
//! pid invalid: 0123456789
//! ```
//!
//! Here are some invalid passports:
//!
//! ```
//! eyr:1972 cid:100
//! hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
//!
//! iyr:2019
//! hcl:#602927 eyr:1967 hgt:170cm
//! ecl:grn pid:012533040 byr:1946
//!
//! hcl:dab227 iyr:2012
//! ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
//!
//! hgt:59cm ecl:zzz
//! eyr:2038 hcl:74454a iyr:2023
//! pid:3556412378 byr:2007
//! ```
//!
//! Here are some valid passports:
//!
//! ```
//! pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
//! hcl:#623a2f
//!
//! eyr:2029 ecl:blu cid:129 byr:1989
//! iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
//!
//! hcl:#888785
//! hgt:164cm byr:2001 iyr:2015 cid:88
//! pid:545766238 ecl:hzl
//! eyr:2022
//!
//! iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
//! ```
//!
//! Count the number of **valid** passports - those that have all required
//! fields **and valid values**. Continue to treat cid as optional.
//! **In your batch file, how many passports are valid?**
#[macro_use]
extern crate lazy_static;
use std::collections::{HashMap, HashSet};
use std::env;
use std::fs;
use nom::{
branch::alt, bytes::complete::tag, bytes::complete::take_while,
character::complete::multispace1, multi::separated_list0, sequence::tuple, IResult,
};
lazy_static! {
static ref MUST_FIELDS: HashSet<&'static str> =
vec!["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
.into_iter()
.collect();
static ref HAIR_COLORS: HashSet<&'static str> =
vec!["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
.into_iter()
.collect();
}
#[derive(Debug, Eq, PartialEq)]
enum HeightUnit {
Cm,
In,
}
#[derive(Debug, Eq, PartialEq)]
struct Height {
hight: usize,
unit: HeightUnit,
}
#[derive(Debug, Eq, PartialEq)]
struct Passport {
byr: usize, // birth year
iyr: usize, // issue year
eyr: usize, // expiration year
hgt: Height, // height
hcl: String, // hair color
ecl: String, // eye color
pid: String, // passport id
cid: Option<usize>, // country id
}
impl Passport {
fn from_hashmap(kvs: &HashMap<&str, &str>) -> Option<Passport> {
// `byr` (Birth Year) - four digits; at least `1920` and at most `2002`.
let byr = match kvs.get("byr").unwrap().parse::<usize>() {
Ok(i @ 1920..=2002) => i,
Err(_) => return None,
_ => return None,
};
// `iyr` (Issue Year) - four digits; at least `2010` and at most `2020`.
let iyr = match kvs.get("iyr").unwrap().parse::<usize>() {
Ok(i @ 2010..=2020) => i,
Err(_) => return None,
_ => return None,
};
// `eyr` (Expiration Year) - four digits; at least `2020` and at most `2030`.
let eyr = match kvs.get("eyr").unwrap().parse::<usize>() {
Ok(i @ 2020..=2030) => i,
Err(_) => return None,
_ => return None,
};
// - `hgt` (Height) - a number followed by either `cm` or `in`:
// - If `cm`, the number must be at least `150` and at most `193`.
// - If `in`, the number must be at least `59` and at most `76`.
let hgt_str = kvs.get("hgt").unwrap();
let hgt = if hgt_str.ends_with("cm") {
let high = hgt_str.strip_suffix("cm").unwrap().parse::<usize>();
match high {
Ok(i @ 150..=193) => Height {
hight: i,
unit: HeightUnit::Cm,
},
_ => return None,
}
} else if hgt_str.ends_with("in") {
let high = hgt_str.strip_suffix("in").unwrap().parse::<usize>();
match high {
Ok(i @ 59..=76) => Height {
hight: i,
unit: HeightUnit::In,
},
_ => return None,
}
} else {
return None;
};
// `hcl` (Hair Color) - a `#` followed by exactly six characters `0-9` or `a-f`.
let hcl_str = kvs.get("hcl").unwrap();
let hcl = if hcl_str.starts_with('#')
&& hcl_str.len() == 7
&& hcl_str[1..].chars().all(|x| x.is_digit(16))
{
(*hcl_str).into()
} else {
return None;
};
// `ecl` (Eye Color) - exactly one of: `amb` `blu` `brn` `gry` `grn` `hzl` `oth`.
let ecl_str = kvs.get("ecl").unwrap();
let ecl = if HAIR_COLORS.contains(ecl_str) {
(*ecl_str).into()
} else {
return None;
};
// `pid` (Passport ID) - a nine-digit number, including leading zeroes.
let pid_str = kvs.get("pid").unwrap();
let pid = if (*pid_str).parse::<usize>().is_ok() && (*pid_str).trim().len() == 9 {
(*pid_str).into()
} else {
return None;
};
// `cid` (Country ID) - ignored, missing or not.
let cid = match kvs.get("cid") {
Some(x) => match x.parse::<usize>() {
Ok(i) => Some(i),
Err(_) => return None,
},
None => None,
};
Some(Passport {
byr,
iyr,
eyr,
hgt,
hcl,
ecl,
pid,
cid,
})
}
}
fn is_valid_char(c: char) -> bool {
c.is_alphanumeric() || c == '#'
}
fn kv_parser(input: &str) -> IResult<&str, (&str, &str)> {
let keys = alt((
tag("byr"),
tag("iyr"),
tag("eyr"),
tag("hgt"),
tag("hcl"),
tag("ecl"),
tag("pid"),
tag("cid"),
));
let (input, (k, _, v)) = tuple((keys, tag(":"), take_while(is_valid_char)))(input)?;
Ok((input, (k, v)))
}
fn kvlist_parser(input: &str) -> IResult<&str, Vec<(&str, &str)>> {
let (input, kv_group) = separated_list0(multispace1, kv_parser)(input)?;
// println!("{:?}", kv_group);
Ok((input, kv_group))
}
fn parse_input(input: &str) -> Vec<HashMap<&str, &str>> {
let group = input.split("\n\n");
group
.filter_map(|x| {
let (_, kv_list) = kvlist_parser(x).unwrap();
let kv_map = kv_list.into_iter().collect::<HashMap<&str, &str>>();
let keys_set = kv_map.keys().cloned().collect::<HashSet<&str>>();
if MUST_FIELDS.is_subset(&keys_set) {
Some(kv_map)
} else {
None
}
})
.collect::<Vec<HashMap<&str, &str>>>()
}
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file");
let valid_kvs = parse_input(&input);
println!(
"The number of valid passports have all required fields is {}.",
valid_kvs.len()
);
let passports: Vec<Passport> = valid_kvs
.iter()
.filter_map(|x| Passport::from_hashmap(x))
.collect();
println!(
"The number of passports satisfy all restrictions is {}.",
passports.len()
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_kv_parser() {
let (left, (k, v)) = kv_parser("ecl:gry").unwrap();
assert_eq!(left, "");
assert_eq!((k, v), ("ecl", "gry"));
let (_, (k, v)) = kv_parser("hcl:#fffffd").unwrap();
assert_eq!((k, v), ("hcl", "#fffffd"));
let (left, (k, v)) = kv_parser("pid:860033327~rust").unwrap();
assert_eq!(left, "~rust");
assert_eq!((k, v), ("pid", "860033327"));
}
#[test]
fn | () {
let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm";
let (_, kvs) = kvlist_parser(input).unwrap();
assert_eq!(
kvs,
vec![
("ecl", "gry"),
("pid", "860033327"),
("eyr", "2020"),
("hcl", "#fffffd"),
("byr", "1937"),
("iyr", "2017"),
("cid", "147"),
("hgt", "183cm"),
],
)
}
#[test]
fn test_parse_input() {
let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
";
let passports_dict = parse_input(input);
assert_eq!(
passports_dict,
vec![
vec![
("ecl", "gry"),
("pid", "860033327"),
("eyr", "2020"),
("hcl", "#fffffd"),
("byr", "1937"),
("iyr", "2017"),
("cid", "147"),
("hgt", "183cm")
]
.into_iter()
.collect(),
vec![
("hcl", "#ae17e1"),
("iyr", "2013"),
("eyr", "2024"),
("ecl", "brn"),
("pid", "760753108"),
("byr", "1931"),
("hgt", "179cm")
]
.into_iter()
.collect(),
]
)
}
#[test]
fn test_passport_valid() {
let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
";
let kvs = parse_input(&input);
let passports: Vec<Passport> = kvs
.iter()
.filter_map(|x| Passport::from_hashmap(x))
.collect();
assert_eq!(
passports,
vec![
Passport {
ecl: "gry".into(),
pid: "860033327".into(),
eyr: 2020,
hcl: "#fffffd".into(),
byr: 1937,
iyr: 2017,
cid: Some(147),
hgt: Height {
hight: 183,
unit: HeightUnit::Cm,
},
},
Passport {
hcl: "#ae17e1".into(),
iyr: 2013,
eyr: 2024,
ecl: "brn".into(),
pid: "760753108".into(),
byr: 1931,
hgt: Height {
hight: 179,
unit: HeightUnit::Cm,
},
cid: None,
},
]
);
}
#[test]
fn test_passport_invalid() {
let input = "eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007";
let kvs = parse_input(&input);
let passports: Vec<Passport> = kvs
.iter()
.filter_map(|x| Passport::from_hashmap(x))
.collect();
assert_eq!(passports, vec![]);
}
}
| test_kvlist_parser | identifier_name |
AtivContex 5.py | ######################################################################################################
# ESTE PROGRAMA É REFERENTE AO NÚMERO DE CASOS DE CORONAVIRUS POR REGIÃO #
######################################################################################################
import math
#Lista de estados do Brasil (a)
estados=["Acre ","Amapá","Amazonas","Rondônia","Roraima","Tocantins","Pará","Alagoas ","Bahia","Ceará ","Maranhão","Paraíba","Pernambuco",
"Piauí","Rio Grande do Norte","Sergipe","Rio de Janeiro","São Paulo ","Espírito Santo","Minas Gerais",
"Rio Grande do Sul","Santa Catarina","Paraná","Distrito Federal","Goiás","Mato Grosso","Mato Grosso do Sul"]
populacao=[881935,845731,4144597,1777225,605761,1572866,8602865,3337357,14873064,9132078,7075181,40181127,9557071,3273227,3506853,2298696,
17264943,45919049,4018650,21168791,11377239,7164788,11433957,3015268,7018354,3484466,2778986]
CasosAcumulado=[22605,39703,112263,48232,39665,37856,178375,72938,217115,137408,198202,96223,113183,66315,57720,68313,
194651,702665,100859,175715,98007,121666,1275,138735,102665,74509,37425]
casosnovos=[57,185,351,42,365,867,406,204,1085,555,583,190,225,237,178,316,372,3172,1279,1331,130,786,
1275,2268,1073,1240,589]
obitosAcumulados=[582,617,3505,568,1017,516,5945,1763,4475,3277,8163,2183,7210,1619,2081,1717,14566,26899,2908,4223,
2744,1839,2733,2042,2336,2368,640]
NovosObitos=[6,4,22,0,0,5,5,10,69,13,30,21,22,11,14,12,4,47,33,91,55,42,42,66,14,28,14]
Estado=[estados,populacao,CasosAcumulado,casosnovos,obitosAcumulados,NovosObitos]##LISTA
##TUPLA (A)-Estados
estadosT=("Acre ","Amapá","Amazonas","Rondônia","Roraima","Tocantins","Pará","Alagoas ","Bahia","Ceará ","Maranhão","Paraíba","Pernambuco",
"Piauí","Rio Grande do Norte","Sergipe","Rio de Janeiro","São Paulo ","Espírito Santo","Minas Gerais",
"Rio Grande do Sul","Santa Catarina","Paraná","Distrito Federal","Goiás","Mato Grosso","Mato Grosso do Sul")
populacaoT=(881935,845731,4144597,1777225,605761,1572866,8602865,3337357,14873064,9132078,7075181,40181127,9557071,3273227,3506853,2298696,
17264943,45919049,4018650,21168791,11377239,7164788,11433957,3015268,7018354,3484466,2778986)
CasosAcumuladoT=(22605,39703,112263,48232,39665,37856,178375,72938,217115,137408,198202,96223,113183,66315,57720,68313,
194651,702665,100859,175715,98007,121666,1275,138735,102665,74509,37425)
casosnovosT=(57,185,351,42,365,867,406,204,1085,555,583,190,225,237,178,316,372,3172,1279,1331,130,786,
1275,2268,1073,1240,589)
obitosAcumuladosT=(582,617,3505,568,1017,516,5945,1763,4475,3277,8163,2183,7210,1619,2081,1717,14566,26899,2908,4223,
2744,1839,2733,2042,2336,2368,640)
NovosObitosT=(6,4,22,0,0,5,5,10,69,13,30,21,22,11,14,12,4,47,33,91,55,42,42,66,14,28,14)
EstadoTupla=(estadosT,populacaoT,CasosAcumuladoT,casosnovosT,obitosAcumuladosT,NovosObitosT)##TUPLA
## inserindo o número certo de novos óbitos do estado da Paraíba (d)
x=Estado[5][11]-10
Estado[5].insert(11,x)
##REGIÕES DO NORDESTE
municipio=["Maceió","Salvador","Fortaleza","São Luis","João Pessoa","Recife","Teresina","Natal",
"Aracaju","Parnamirim"]
regiao=["1 região","SALVADOR","1 região Fortaleza","SÃO LUIS","1 região da Mata Atlantica","RECIFE","Entre rios",
"7 região de saúde-Metropolitana","Aracaju","7 região de saúde-Metropolitana"]
populacaoR=[1018948,2872347,2669342,1101884,809015,1645727,864845,884122,657013,261469]
CasosAcumuladoR=[24909,67994,44971,17658,24597,29718,21803,21458,31777,5408]
casosnovosR=[77,153,41,73,1,57,31,24,180,19]
obitosAcumuladosR=[803,2153,3755,1183,762,2220,826,893,640,136]
NovosObitosR=[3,31,-1,6,6,4,5,6,5,0]
Regioes=[municipio,regiao,populacaoR,CasosAcumuladoR,casosnovosR,obitosAcumuladosR,NovosObitosR]
##TUPLA DAS REGIÕES (a)
municipioT=("Maceió","Salvador","Fortaleza","São Luis","João Pessoa","Recife","Teresina","Natal",
"Aracaju","Parnamirim")
regiaoT=("1 região","SALVADOR","1 região Fortaleza","SÃO LUIS","1 região da Mata Atlantica","RECIFE","Entre rios",
"7 região de saúde-Metropolitana","Aracaju","7 região de saúde-Metropolitana")
populacaoRT=(1018948,2872347,2669342,1101884,809015,1645727,864845,884122,657013,261469)
CasosAcumuladoRT=(24909,67994,44971,17658,24597,29718,21803,21458,31777,5408)
casosnovosRT=(77,153,41,73,1,57,31,24,180,19)
obitosAcumuladosRT=(803,2153,3755,1183,762,2220,826,893,640,136)
NovosObitosRT=(3,31,-1,6,6,4,5,6,5,0)
RegioesT=(municipioT,regiaoT,populacaoRT,CasosAcumuladoRT,casosnovosRT,obitosAcumuladosRT,NovosObitosRT)
#Estado e municipios de AC (f)
EstadoAC=[605761,40183,518,574,6]#LISTA AC
municipioAC=["Acrelândia","Assis Brasil","Brasiléia","Bujari","Capixaba","Cruzeiro do Sul","Feijó","Jordão","Mancio Lima",
"Manoel Urbano","Porto Acre","Rio Branco","Tarauaca","Xapuri","Sena Madureira"]
populacaoAC=[15256,7417,26278,10266,11733,88876, 34780, 8317,18977,9459,18504, 407319,42567,19323,45848 ]
CasosAcumuladoAC=[432,506,1119,367, 247, 3163, 1139, 160, 743,274, 495,10071, 422,1581,979, 1435]
Casos=[283,527,31106,]
casosnovosAC=[31,24,93,23,11,377,57,14,76,30,67,85,59,43]
obitosAcumuladosAC=[9,9,19,6,7, 57,18,1,9,2,2, 15,387,14,13,11]
NovosObitosAC=[1,2,2,2,1,4,2,1,1,1,2,13,2,1,1]
regiaoAC=["Baixo Acre","Alto Acre","Alto Acre","Baixo Acre","Baixo Acre",
"Jurua"," Jurua","Baixo Acre","Baixo Acre","Baixo Acre","Baixo Acre","Baixo Acre","Jurua","Alto Acre", "Baixo Acre"]
municipio.append(municipioAC)
populacaoR.append(populacaoAC)
CasosAcumuladoR.append(CasosAcumuladoAC)
NovosObitosR.append(NovosObitosAC)
AC=[municipioAC,regiaoAC,populacaoAC,CasosAcumuladoAC,casosnovosAC,obitosAcumuladosAC,NovosObitosAC]
##ADD em lista existente (f)
Estado[1].insert(4,EstadoAC[0])
Estado[3].insert(4,EstadoAC[2])
Estado[2].insert(4,EstadoAC[1])
Estado[4].insert(4,EstadoAC[3])
Estado[5].insert(4,EstadoAC[4])
Regioes[0].append(AC[0]) | Regioes[5].append(AC[5])
Regioes[6].append(AC[6])
## Remove os dados das regiões
AC.remove(regiaoAC)
##Soma dos dados do municipio de AC (h)
somaAC=[sum(AC[1]),sum(AC[2]),sum(AC[3]),sum(AC[4]),sum(AC[5])]
##DICIONÁRIO
Nordeste=["Alagoas ","Bahia","Ceará ","Maranhão","Paraíba","Pernambuco",
"Piauí","Rio Grande do Norte","Sergipe"]
print("***Regiões de saúde***\n")
print("\n0-Maceió/AL\n1-Salvador/BA\n2-Fortaleza/CE\n3-São Luis/MA\n4-João Pessoa/PB\n5-Recife/PE\n6-Teresina/PI\n7-Natal/RN\n8-Aracaju/SE")
dadosCovid={"0":[Nordeste[0],Regioes[0][0],Regioes[4][0]],"1":[Nordeste[1],Regioes[0][1],Regioes[4][1]],"2":[Nordeste[2],Regioes[0][2],Regioes[4][2]],
"3":[Nordeste[3],Regioes[0][3],Regioes[4][3]],"4":[Nordeste[4],Regioes[0][4],Regioes[4][4]],"5":[Nordeste[5],Regioes[0][5],Regioes[4][5]],
"6":[Nordeste[6],Regioes[0][6],Regioes[4][6]],"7":[Nordeste[7],Regioes[0][7],Regioes[4][7]],"8":[Nordeste[8],Regioes[0][8],Regioes[4][8]]}
comando=input('Escolha e aperte o número equivalente à cidade buscada')
print("Município: ",dadosCovid[comando][1],"\nEstado:",dadosCovid[comando][0],"\nCasos Novos: ",dadosCovid[comando][2])
#######################################################################
print("\n############################################################")
print("*******Seja bem vindo ao COVID Saúde Brasil**************")
choice=int(input("Caso você deseje saber sobre como está a Covid 19 por estados aperte 3,\npelas principais regiões de sáude do nordeste aperte 2,e 3 para os municipios do RR: "))
## Aqui pode printar o número de casos acumulados para o estado do rio de janeiro (b)
if(choice==1):
e1=int(input("\nPara saber os dados por estado aperte 1 e para os obitos acumulado aperte em 2:"))
if(e1==1):
print("***Wiki:estados do Brasil**\n")
print("\nNORTE:\n0-Acre\n1-Amapá\n2-Amazonas\n3-Rondônia\n4-Roraima\n5-Tocantins\n6-Pará")
print("\nNORDESTE:\n7-Alagoas\n8-Bahia\n9-Ceará\n10-Maranhão\n11-Paraíba\nPernanbuco\n13-Piauí\n14-Rio Grande do Norte\n15-Sergipe")
print("\nSUDESTE:\n16-Rio de Janeira\n17-São Paulo\n18-Espírito Santo\n19-Minas Gerais")
print("\nSUL:\n20-Rio Grande do Sul\n21-Santa Catarina\n22-Paraná")
print("\nCENTRO-OESTE:\n23-Distrito Federal\n24-Goiás\n25-Mato Grosso\n26-Mato Grosso do Sul")
opc=int(input("Selecione o número do estado escolhido: "))
print("\nRESPOSTAS DA LISTA")
print("\n****",Estado[0][opc],"****\nPopulação:",Estado[1][opc],"\nCasos acumulados:",Estado[2][opc],"\nCasos novos:",Estado[3][opc],"\nObitos acumulados",Estado[4][opc],"\nObitosnovos:",Estado[5][opc])
print("\nRESPOSTAS RETIRADAS DA TUPLA")
print("\n****",EstadoTupla[0][opc],"****\nPopulação:",EstadoTupla[1][opc],"\nCasos acumulados:",EstadoTupla[2][opc],"\nCasos novos:",EstadoTupla[3][opc],"\nObitos acumulados",EstadoTupla[4][opc],"\nObitosnovos:",EstadoTupla[5][opc])
## Obito acumulado de todos os estados (c)
else:
print("***Obito Acumulado por estatos***")
print(estados[0],":",obitosAcumulados[0],"\n"+estados[1],":",obitosAcumulados[1],"\n"+estados[2],":",obitosAcumulados[2])
print(estados[3],":",obitosAcumulados[3],"\n"+estados[4],":",obitosAcumulados[4],"\n"+estados[5],":",obitosAcumulados[5])
print(estados[6],":",obitosAcumulados[6],"\n"+estados[7],":",obitosAcumulados[7],"\n"+estados[8],":",obitosAcumulados[8])
print(estados[9],":",obitosAcumulados[9],"\n"+estados[10],":",obitosAcumulados[10],"\n"+estados[11],":",obitosAcumulados[11])
print(estados[14],":",obitosAcumulados[14],"\n"+estados[13],":",obitosAcumulados[13],"\n"+estados[12],":",obitosAcumulados[12])
print(estados[15],":",obitosAcumulados[15],"\n"+estados[16],":",obitosAcumulados[16],"\n"+estados[17],":",obitosAcumulados[17])
print(estados[20],":",obitosAcumulados[20],"\n"+estados[19],":",obitosAcumulados[19],"\n"+estados[18],":",obitosAcumulados[18])
print(estados[21],":",obitosAcumulados[21],"\n"+estados[22],":",obitosAcumulados[22],"\n"+estados[23],":",obitosAcumulados[23])
print(estados[26],":",obitosAcumulados[26],"\n"+estados[25],":",obitosAcumulados[7],"\n"+estados[24],":",obitosAcumulados[24])
## Max e Min dos valores númericos de óbitos novos (j)
print("Menor valor numérico de óbitos novos:",min(Estado[5]))
print("Maior valor numérico de óbitos novos:",max(Estado[5]))
print("***Total:108536***")
##Imprimindo dados sobre as 10 regiões de Saúde
if(choice==2):
print("***Wiki: principais regiões de saúde***\n")
print("\n0-Maceió\n1-Salvador\n2-Fortaleza\n3-São Luis\n4-João Pessoa\n5-Recife\n6-Teresina\n7-Natal\n8-Aracaju\n9-Parnamirim")
## Max e Min dos valores númericos de óbitos novos (j)
print("Menor valor numérico de óbitos novos:",min(Regioes[6]))
print("Maior valor numérico de óbitos novos:",max(Regioes[6]))
opc=int(input("Selecione o número do município escolhido: "))
if(opc!=9):
print("\n***"+Regioes[0][opc]+"***")
print(estados[opc+7],"\n*Região de Saúde:",Regioes[1][opc])
print("População:",Regioes[2][opc],"\nCasos acumulados:",Regioes[3][opc],"\nCasos novos:",Regioes[4][opc],"\nObitos acumulados",Regioes[5][opc],"\nObitosnovos:",Regioes[6][opc])
else:
print("\n***"+Regioes[0][opc]+"***")
print("Rio Grande do Norte","\n*Região de Saúde:",Regioes[1][opc])
print("População:",Regioes[2][opc],"\nCasos acumulados:",Regioes[3][opc],"\nCasos novos:",Regioes[4][opc],"\nObitos acumulados",Regioes[5][opc],"\nObitosnovos:",Regioes[6][opc])
#Imprimindo os dados do municípios de AC
if(choice==3):
print("***Bem vindo Covid Saúde AC***\n")
print("\nRegião de saúde:CENTRO NORTE:\n0-Acrelândia\n1-Assis Brasil\n2-Brasiléia\n3-Bujari\n4-Capixaba\n5-Cruzeiro do Sul\n6-Feijó\n7-Mancio Lima\n8-Manoel Urbano")
print("\nRegião de saúde:SUL:\n9-Porto Acre\n10-Rio Branco\n11-Tarauaca\n12-Xapuri\n13-Sena Madureira")
## Max e Min dos valores númericos de óbitos novos (j)
print("Menor valor numérico de óbitos novos:",min(AC[6]))
print("Maior valor numérico de óbitos novos:",max(AC[6]))
opc=int(input("Selecione o número do município escolhido: "))
if(opc<9):
print("\n***"+municipioAC[opc]+"***")
print("Acre","\n*Região de Saúde: Alto Acre")
print("População:",populacaoAC[opc],"\nCasos acumulados:",CasosAcumuladoAC[opc],"\nCasos novos:",casosnovosAC[opc],"\nObitos acumulados",obitosAcumuladosAC[opc],"\nObitosnovos:",NovosObitosAC[opc])
else:
print("\n***"+municipioAC[opc]+"***")
print("Acre","\n*Região de Saúde: Baixo Acre")
print("População:",populacaoAC[opc],"\nCasos acumulados:",CasosAcumuladoAC[opc],"\nCasos novos:",casosnovosAC[opc],"\nObitos acumulados",obitosAcumuladosAC[opc],"\nObitosnovos:",NovosObitosAC[opc])
## Se conferir, imprima os resultados (h)
if (somaAC==EstadoAC):
print("População:",EstadoAC[0],"\nCasos acumulados:",EstadoAC[1],"\nCasos novos:",EstadoAC[2])
print("\nObitos acumulados",EstadoAC[3],"\nObitosnovos:",EstadoAC[4]) | Regioes[1].append(AC[1])
Regioes[2].append(AC[2])
Regioes[3].append(AC[3])
Regioes[4].append(AC[4]) | random_line_split |
AtivContex 5.py | ######################################################################################################
# ESTE PROGRAMA É REFERENTE AO NÚMERO DE CASOS DE CORONAVIRUS POR REGIÃO #
######################################################################################################
import math
#Lista de estados do Brasil (a)
estados=["Acre ","Amapá","Amazonas","Rondônia","Roraima","Tocantins","Pará","Alagoas ","Bahia","Ceará ","Maranhão","Paraíba","Pernambuco",
"Piauí","Rio Grande do Norte","Sergipe","Rio de Janeiro","São Paulo ","Espírito Santo","Minas Gerais",
"Rio Grande do Sul","Santa Catarina","Paraná","Distrito Federal","Goiás","Mato Grosso","Mato Grosso do Sul"]
populacao=[881935,845731,4144597,1777225,605761,1572866,8602865,3337357,14873064,9132078,7075181,40181127,9557071,3273227,3506853,2298696,
17264943,45919049,4018650,21168791,11377239,7164788,11433957,3015268,7018354,3484466,2778986]
CasosAcumulado=[22605,39703,112263,48232,39665,37856,178375,72938,217115,137408,198202,96223,113183,66315,57720,68313,
194651,702665,100859,175715,98007,121666,1275,138735,102665,74509,37425]
casosnovos=[57,185,351,42,365,867,406,204,1085,555,583,190,225,237,178,316,372,3172,1279,1331,130,786,
1275,2268,1073,1240,589]
obitosAcumulados=[582,617,3505,568,1017,516,5945,1763,4475,3277,8163,2183,7210,1619,2081,1717,14566,26899,2908,4223,
2744,1839,2733,2042,2336,2368,640]
NovosObitos=[6,4,22,0,0,5,5,10,69,13,30,21,22,11,14,12,4,47,33,91,55,42,42,66,14,28,14]
Estado=[estados,populacao,CasosAcumulado,casosnovos,obitosAcumulados,NovosObitos]##LISTA
##TUPLA (A)-Estados
estadosT=("Acre ","Amapá","Amazonas","Rondônia","Roraima","Tocantins","Pará","Alagoas ","Bahia","Ceará ","Maranhão","Paraíba","Pernambuco",
"Piauí","Rio Grande do Norte","Sergipe","Rio de Janeiro","São Paulo ","Espírito Santo","Minas Gerais",
"Rio Grande do Sul","Santa Catarina","Paraná","Distrito Federal","Goiás","Mato Grosso","Mato Grosso do Sul")
populacaoT=(881935,845731,4144597,1777225,605761,1572866,8602865,3337357,14873064,9132078,7075181,40181127,9557071,3273227,3506853,2298696,
17264943,45919049,4018650,21168791,11377239,7164788,11433957,3015268,7018354,3484466,2778986)
CasosAcumuladoT=(22605,39703,112263,48232,39665,37856,178375,72938,217115,137408,198202,96223,113183,66315,57720,68313,
194651,702665,100859,175715,98007,121666,1275,138735,102665,74509,37425)
casosnovosT=(57,185,351,42,365,867,406,204,1085,555,583,190,225,237,178,316,372,3172,1279,1331,130,786,
1275,2268,1073,1240,589)
obitosAcumuladosT=(582,617,3505,568,1017,516,5945,1763,4475,3277,8163,2183,7210,1619,2081,1717,14566,26899,2908,4223,
2744,1839,2733,2042,2336,2368,640)
NovosObitosT=(6,4,22,0,0,5,5,10,69,13,30,21,22,11,14,12,4,47,33,91,55,42,42,66,14,28,14)
EstadoTupla=(estadosT,populacaoT,CasosAcumuladoT,casosnovosT,obitosAcumuladosT,NovosObitosT)##TUPLA
## inserindo o número certo de novos óbitos do estado da Paraíba (d)
x=Estado[5][11]-10
Estado[5].insert(11,x)
##REGIÕES DO NORDESTE
municipio=["Maceió","Salvador","Fortaleza","São Luis","João Pessoa","Recife","Teresina","Natal",
"Aracaju","Parnamirim"]
regiao=["1 região","SALVADOR","1 região Fortaleza","SÃO LUIS","1 região da Mata Atlantica","RECIFE","Entre rios",
"7 região de saúde-Metropolitana","Aracaju","7 região de saúde-Metropolitana"]
populacaoR=[1018948,2872347,2669342,1101884,809015,1645727,864845,884122,657013,261469]
CasosAcumuladoR=[24909,67994,44971,17658,24597,29718,21803,21458,31777,5408]
casosnovosR=[77,153,41,73,1,57,31,24,180,19]
obitosAcumuladosR=[803,2153,3755,1183,762,2220,826,893,640,136]
NovosObitosR=[3,31,-1,6,6,4,5,6,5,0]
Regioes=[municipio,regiao,populacaoR,CasosAcumuladoR,casosnovosR,obitosAcumuladosR,NovosObitosR]
##TUPLA DAS REGIÕES (a)
municipioT=("Maceió","Salvador","Fortaleza","São Luis","João Pessoa","Recife","Teresina","Natal",
"Aracaju","Parnamirim")
regiaoT=("1 região","SALVADOR","1 região Fortaleza","SÃO LUIS","1 região da Mata Atlantica","RECIFE","Entre rios",
"7 região de saúde-Metropolitana","Aracaju","7 região de saúde-Metropolitana")
populacaoRT=(1018948,2872347,2669342,1101884,809015,1645727,864845,884122,657013,261469)
CasosAcumuladoRT=(24909,67994,44971,17658,24597,29718,21803,21458,31777,5408)
casosnovosRT=(77,153,41,73,1,57,31,24,180,19)
obitosAcumuladosRT=(803,2153,3755,1183,762,2220,826,893,640,136)
NovosObitosRT=(3,31,-1,6,6,4,5,6,5,0)
RegioesT=(municipioT,regiaoT,populacaoRT,CasosAcumuladoRT,casosnovosRT,obitosAcumuladosRT,NovosObitosRT)
#Estado e municipios de AC (f)
EstadoAC=[605761,40183,518,574,6]#LISTA AC
municipioAC=["Acrelândia","Assis Brasil","Brasiléia","Bujari","Capixaba","Cruzeiro do Sul","Feijó","Jordão","Mancio Lima",
"Manoel Urbano","Porto Acre","Rio Branco","Tarauaca","Xapuri","Sena Madureira"]
populacaoAC=[15256,7417,26278,10266,11733,88876, 34780, 8317,18977,9459,18504, 407319,42567,19323,45848 ]
CasosAcumuladoAC=[432,506,1119,367, 247, 3163, 1139, 160, 743,274, 495,10071, 422,1581,979, 1435]
Casos=[283,527,31106,]
casosnovosAC=[31,24,93,23,11,377,57,14,76,30,67,85,59,43]
obitosAcumuladosAC=[9,9,19,6,7, 57,18,1,9,2,2, 15,387,14,13,11]
NovosObitosAC=[1,2,2,2,1,4,2,1,1,1,2,13,2,1,1]
regiaoAC=["Baixo Acre","Alto Acre","Alto Acre","Baixo Acre","Baixo Acre",
"Jurua"," Jurua","Baixo Acre","Baixo Acre","Baixo Acre","Baixo Acre","Baixo Acre","Jurua","Alto Acre", "Baixo Acre"]
municipio.append(municipioAC)
populacaoR.append(populacaoAC)
CasosAcumuladoR.append(CasosAcumuladoAC)
NovosObitosR.append(NovosObitosAC)
AC=[municipioAC,regiaoAC,populacaoAC,CasosAcumuladoAC,casosnovosAC,obitosAcumuladosAC,NovosObitosAC]
##ADD em lista existente (f)
Estado[1].insert(4,EstadoAC[0])
Estado[3].insert(4,EstadoAC[2])
Estado[2].insert(4,EstadoAC[1])
Estado[4].insert(4,EstadoAC[3])
Estado[5].insert(4,EstadoAC[4])
Regioes[0].append(AC[0])
Regioes[1].append(AC[1])
Regioes[2].append(AC[2])
Regioes[3].append(AC[3])
Regioes[4].append(AC[4])
Regioes[5].append(AC[5])
Regioes[6].append(AC[6])
## Remove os dados das regiões
AC.remove(regiaoAC)
##Soma dos dados do municipio de AC (h)
somaAC=[sum(AC[1]),sum(AC[2]),sum(AC[3]),sum(AC[4]),sum(AC[5])]
##DICIONÁRIO
Nordeste=["Alagoas ","Bahia","Ceará ","Maranhão","Paraíba","Pernambuco",
"Piauí","Rio Grande do Norte","Sergipe"]
print("***Regiões de saúde***\n")
print("\n0-Maceió/AL\n1-Salvador/BA\n2-Fortaleza/CE\n3-São Luis/MA\n4-João Pessoa/PB\n5-Recife/PE\n6-Teresina/PI\n7-Natal/RN\n8-Aracaju/SE")
dadosCovid={"0":[Nordeste[0],Regioes[0][0],Regioes[4][0]],"1":[Nordeste[1],Regioes[0][1],Regioes[4][1]],"2":[Nordeste[2],Regioes[0][2],Regioes[4][2]],
"3":[Nordeste[3],Regioes[0][3],Regioes[4][3]],"4":[Nordeste[4],Regioes[0][4],Regioes[4][4]],"5":[Nordeste[5],Regioes[0][5],Regioes[4][5]],
"6":[Nordeste[6],Regioes[0][6],Regioes[4][6]],"7":[Nordeste[7],Regioes[0][7],Regioes[4][7]],"8":[Nordeste[8],Regioes[0][8],Regioes[4][8]]}
comando=input('Escolha e aperte o número equivalente à cidade buscada')
print("Município: ",dadosCovid[comando][1],"\nEstado:",dadosCovid[comando][0],"\nCasos Novos: ",dadosCovid[comando][2])
#######################################################################
print("\n############################################################")
print("*******Seja bem vindo ao COVID Saúde Brasil**************")
choice=int(input("Caso você deseje saber sobre como está a Covid 19 por estados aperte 3,\npelas principais regiões de sáude do nordeste aperte 2,e 3 para os municipios do RR: "))
## Aqui pode printar o número de casos acumulados para o estado do rio de janeiro (b)
if(choice==1):
e1=int(input("\nPara saber os dados por estado aperte 1 e para os obitos acu | cipais regiões de saúde***\n")
print("\n0-Maceió\n1-Salvador\n2-Fortaleza\n3-São Luis\n4-João Pessoa\n5-Recife\n6-Teresina\n7-Natal\n8-Aracaju\n9-Parnamirim")
## Max e Min dos valores númericos de óbitos novos (j)
print("Menor valor numérico de óbitos novos:",min(Regioes[6]))
print("Maior valor numérico de óbitos novos:",max(Regioes[6]))
opc=int(input("Selecione o número do município escolhido: "))
if(opc!=9):
print("\n***"+Regioes[0][opc]+"***")
print(estados[opc+7],"\n*Região de Saúde:",Regioes[1][opc])
print("População:",Regioes[2][opc],"\nCasos acumulados:",Regioes[3][opc],"\nCasos novos:",Regioes[4][opc],"\nObitos acumulados",Regioes[5][opc],"\nObitosnovos:",Regioes[6][opc])
else:
print("\n***"+Regioes[0][opc]+"***")
print("Rio Grande do Norte","\n*Região de Saúde:",Regioes[1][opc])
print("População:",Regioes[2][opc],"\nCasos acumulados:",Regioes[3][opc],"\nCasos novos:",Regioes[4][opc],"\nObitos acumulados",Regioes[5][opc],"\nObitosnovos:",Regioes[6][opc])
#Imprimindo os dados do municípios de AC
if(choice==3):
print("***Bem vindo Covid Saúde AC***\n")
print("\nRegião de saúde:CENTRO NORTE:\n0-Acrelândia\n1-Assis Brasil\n2-Brasiléia\n3-Bujari\n4-Capixaba\n5-Cruzeiro do Sul\n6-Feijó\n7-Mancio Lima\n8-Manoel Urbano")
print("\nRegião de saúde:SUL:\n9-Porto Acre\n10-Rio Branco\n11-Tarauaca\n12-Xapuri\n13-Sena Madureira")
## Max e Min dos valores númericos de óbitos novos (j)
print("Menor valor numérico de óbitos novos:",min(AC[6]))
print("Maior valor numérico de óbitos novos:",max(AC[6]))
opc=int(input("Selecione o número do município escolhido: "))
if(opc<9):
print("\n***"+municipioAC[opc]+"***")
print("Acre","\n*Região de Saúde: Alto Acre")
print("População:",populacaoAC[opc],"\nCasos acumulados:",CasosAcumuladoAC[opc],"\nCasos novos:",casosnovosAC[opc],"\nObitos acumulados",obitosAcumuladosAC[opc],"\nObitosnovos:",NovosObitosAC[opc])
else:
print("\n***"+municipioAC[opc]+"***")
print("Acre","\n*Região de Saúde: Baixo Acre")
print("População:",populacaoAC[opc],"\nCasos acumulados:",CasosAcumuladoAC[opc],"\nCasos novos:",casosnovosAC[opc],"\nObitos acumulados",obitosAcumuladosAC[opc],"\nObitosnovos:",NovosObitosAC[opc])
## Se conferir, imprima os resultados (h)
if (somaAC==EstadoAC):
print("População:",EstadoAC[0],"\nCasos acumulados:",EstadoAC[1],"\nCasos novos:",EstadoAC[2])
print("\nObitos acumulados",EstadoAC[3],"\nObitosnovos:",EstadoAC[4])
| mulado aperte em 2:"))
if(e1==1):
print("***Wiki:estados do Brasil**\n")
print("\nNORTE:\n0-Acre\n1-Amapá\n2-Amazonas\n3-Rondônia\n4-Roraima\n5-Tocantins\n6-Pará")
print("\nNORDESTE:\n7-Alagoas\n8-Bahia\n9-Ceará\n10-Maranhão\n11-Paraíba\nPernanbuco\n13-Piauí\n14-Rio Grande do Norte\n15-Sergipe")
print("\nSUDESTE:\n16-Rio de Janeira\n17-São Paulo\n18-Espírito Santo\n19-Minas Gerais")
print("\nSUL:\n20-Rio Grande do Sul\n21-Santa Catarina\n22-Paraná")
print("\nCENTRO-OESTE:\n23-Distrito Federal\n24-Goiás\n25-Mato Grosso\n26-Mato Grosso do Sul")
opc=int(input("Selecione o número do estado escolhido: "))
print("\nRESPOSTAS DA LISTA")
print("\n****",Estado[0][opc],"****\nPopulação:",Estado[1][opc],"\nCasos acumulados:",Estado[2][opc],"\nCasos novos:",Estado[3][opc],"\nObitos acumulados",Estado[4][opc],"\nObitosnovos:",Estado[5][opc])
print("\nRESPOSTAS RETIRADAS DA TUPLA")
print("\n****",EstadoTupla[0][opc],"****\nPopulação:",EstadoTupla[1][opc],"\nCasos acumulados:",EstadoTupla[2][opc],"\nCasos novos:",EstadoTupla[3][opc],"\nObitos acumulados",EstadoTupla[4][opc],"\nObitosnovos:",EstadoTupla[5][opc])
## Obito acumulado de todos os estados (c)
else:
print("***Obito Acumulado por estatos***")
print(estados[0],":",obitosAcumulados[0],"\n"+estados[1],":",obitosAcumulados[1],"\n"+estados[2],":",obitosAcumulados[2])
print(estados[3],":",obitosAcumulados[3],"\n"+estados[4],":",obitosAcumulados[4],"\n"+estados[5],":",obitosAcumulados[5])
print(estados[6],":",obitosAcumulados[6],"\n"+estados[7],":",obitosAcumulados[7],"\n"+estados[8],":",obitosAcumulados[8])
print(estados[9],":",obitosAcumulados[9],"\n"+estados[10],":",obitosAcumulados[10],"\n"+estados[11],":",obitosAcumulados[11])
print(estados[14],":",obitosAcumulados[14],"\n"+estados[13],":",obitosAcumulados[13],"\n"+estados[12],":",obitosAcumulados[12])
print(estados[15],":",obitosAcumulados[15],"\n"+estados[16],":",obitosAcumulados[16],"\n"+estados[17],":",obitosAcumulados[17])
print(estados[20],":",obitosAcumulados[20],"\n"+estados[19],":",obitosAcumulados[19],"\n"+estados[18],":",obitosAcumulados[18])
print(estados[21],":",obitosAcumulados[21],"\n"+estados[22],":",obitosAcumulados[22],"\n"+estados[23],":",obitosAcumulados[23])
print(estados[26],":",obitosAcumulados[26],"\n"+estados[25],":",obitosAcumulados[7],"\n"+estados[24],":",obitosAcumulados[24])
## Max e Min dos valores númericos de óbitos novos (j)
print("Menor valor numérico de óbitos novos:",min(Estado[5]))
print("Maior valor numérico de óbitos novos:",max(Estado[5]))
print("***Total:108536***")
##Imprimindo dados sobre as 10 regiões de Saúde
if(choice==2):
print("***Wiki: prin | conditional_block |
sync.go | package logsync
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/mongo"
"golang.org/x/net/html"
)
var dburl = "mongodb://127.0.0.1:27017"
//CburnFile defines the cburn file type
type CburnFile struct {
contentType string
data []byte
}
//URLEntry is the URL to be explored
type URLEntry struct {
url *url.URL
lastUpdateTime time.Time
}
// URLQueue is a basic FIFO queue based on a circular list that resizes as needed.
type urlQueue struct {
nodes []*URLEntry
size int
head int
tail int
count int
}
//NewURLQueue returns a new queue with the given initial size.
func newURLQueue(size int) *urlQueue {
return &urlQueue{
nodes: make([]*URLEntry, size),
size: size,
}
}
// Push adds a node to the queue.
func (q *urlQueue) Push(n *URLEntry) {
if q.head == q.tail && q.count > 0 {
nodes := make([]*URLEntry, len(q.nodes)+q.size)
copy(nodes, q.nodes[q.head:])
copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.head])
q.head = 0
q.tail = len(q.nodes)
q.nodes = nodes
}
q.nodes[q.tail] = n
q.tail = (q.tail + 1) % len(q.nodes)
q.count++
}
// Pop removes and returns a node from the queue in first to last order.
func (q *urlQueue) Pop() *URLEntry {
if q.count == 0 {
return nil
}
node := q.nodes[q.head]
q.head = (q.head + 1) % len(q.nodes)
q.count--
return node
}
//Fetcher is the base class for *Fetcher
type Fetcher struct {
id int
client *clientConn
urlEntry *URLEntry
ctx context.Context
done chan *Fetcher
timeout time.Duration
//resp *http.Response
//err error
}
//URLFetcher is url fetcher
type URLFetcher struct {
Fetcher
subTree chan *URLEntry
record chan *URLEntry
}
//RecordFetcher is Record fetcher
type RecordFetcher struct {
Fetcher
subUrls *urlQueue
files map[string]*CburnFile
recordAttrs map[string]string
}
type routineStat struct {
routineLauched int
routineReturned int
routineDone chan *Fetcher
routineMax int
}
//Controller is the global data
type Controller struct {
ctx context.Context
wg sync.WaitGroup
explorerClient *clientConn
recordClient *clientConn
subTree chan *URLEntry
record chan *URLEntry
urlCache map[string]*URLEntry
explorerStat *routineStat
recordStat *routineStat
}
type clientConn struct {
client *http.Client
maxPoolSize int
cSemaphore chan int
reqPerSecond int
rateLimiter *time.Ticker
}
func (c *clientConn) Do(req *http.Request) (*http.Response, error) {
if c.maxPoolSize > 0 {
c.cSemaphore <- 1 // Grab a connection from our pool
defer func() {
<-c.cSemaphore // Defer release our connection back to the pool
}()
}
if c.reqPerSecond > 0 {
<-c.rateLimiter.C // Block until a signal is emitted from the rateLimiter
}
resp, err := c.client.Do(req)
return resp, err
}
func newClientConn(maxPoolSize int, reqPerSecond int) *clientConn {
var cSemaphore chan int
var rateLimiter *time.Ticker
if maxPoolSize > 0 {
cSemaphore = make(chan int, maxPoolSize)
}
if reqPerSecond > 0 {
rateLimiter = time.NewTicker(time.Second / time.Duration(reqPerSecond))
}
return &clientConn{
client: &http.Client{},
maxPoolSize: maxPoolSize,
cSemaphore: cSemaphore,
reqPerSecond: reqPerSecond,
rateLimiter: rateLimiter,
}
}
func newRoutineStat(max int) *routineStat {
return &routineStat{
routineDone: make(chan *Fetcher),
routineMax: max,
}
}
//NewController returns new controller
func NewController(ctx context.Context) *Controller {
return &Controller{
ctx: ctx,
explorerClient: newClientConn(1000, 10000),
recordClient: newClientConn(1000, 10000),
subTree: make(chan *URLEntry, 1000),
record: make(chan *URLEntry, 1000),
urlCache: make(map[string]*URLEntry),
explorerStat: newRoutineStat(0),
recordStat: newRoutineStat(0),
}
}
//NewURLEntry create new URL entry
func NewURLEntry(rawurl string) *URLEntry {
url, _ := url.Parse(rawurl)
return &URLEntry{
url: url,
lastUpdateTime: time.Now(),
}
}
//AddNewURLEntry adds new URL entry to the queue
func (c *Controller) AddNewURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.subTree <- urlEntry:
}
}
//AddNewRecordURLEntry adds new URL entry to the queue
func (c *Controller) AddNewRecordURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.record <- urlEntry:
}
}
//WaitJobDone waits for all child go routines exit
func (c *Controller) WaitJobDone() {
//fmt.Println("wait for job done")
c.wg.Wait()
}
func (c *Controller) startURL(urlFetcher *URLFetcher) {
go urlFetcher.run()
}
func (c *Controller) startRecord(recordFether *RecordFetcher) {
go recordFether.run()
}
//StartExplorer starts explorer
func (c *Controller) StartExplorer() {
c.wg.Add(1)
go c.startExplorer()
}
func (c *Controller) startExplorer() {
defer c.wg.Done()
s := c.explorerStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("-Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
if s.routineReturned >= s.routineLauched {
fmt.Printf("Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
break loop
}
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case urlEntry := <-c.subTree:
if c.urlNewerThanCache(urlEntry) {
fmt.Printf("Explorer|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, urlEntry.url.String(), urlEntry.lastUpdateTime)
s.routineLauched++
p := c.newURLFetcher(urlEntry)
c.startURL(p)
}
default:
}
}
}
}
fmt.Printf("\nCburn Explorer Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
//StartCburnProcessor starts record process
func (c *Controller) StartCburnProcessor() {
c.wg.Add(1)
go c.startCburnProcessor()
}
func (c *Controller) startCburnProcessor() {
defer c.wg.Done()
s := c.recordStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
fmt.Printf("-Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched |
}
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case record, ok := <-c.record:
if ok {
s.routineLauched++
fmt.Printf("Processor|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, record.url.String(), record.lastUpdateTime)
r := c.newRecordFetcher(record)
c.startRecord(r)
} else {
if s.routineReturned >= s.routineLauched {
break loop
}
select {
case <-s.routineDone:
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
}
default:
}
}
}
}
fmt.Printf("\nCburn Proccessor Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
func (c *Controller) urlNewerThanCache(urlEntry *URLEntry) bool {
//TBD
urlCache, ok := c.urlCache[urlEntry.url.String()]
if ok {
if urlEntry.lastUpdateTime.Before(urlCache.lastUpdateTime) {
return true
}
return false
} else {
c.urlCache[urlEntry.url.String()] = urlEntry
return true
}
}
func (c *Controller) newURLFetcher(urlEntry *URLEntry) *URLFetcher {
//ctx, cancel := context.WithCancel(c.ctx)
return &URLFetcher{
Fetcher: Fetcher{
id: c.explorerStat.routineLauched,
client: c.explorerClient,
ctx: c.ctx,
done: c.explorerStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subTree: c.subTree,
record: c.record,
}
}
func (c *Controller) newRecordFetcher(urlEntry *URLEntry) *RecordFetcher {
r := &RecordFetcher{
Fetcher: Fetcher{
id: c.recordStat.routineLauched,
client: c.recordClient,
ctx: c.ctx,
done: c.recordStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subUrls: newURLQueue(100),
files: make(map[string]*CburnFile),
recordAttrs: make(map[string]string),
}
r.genNewRecordEntry(urlEntry)
return r
}
func (p *URLFetcher) genNewURLEntry(urlEntry *URLEntry) {
if urlEntry.url.Scheme == "http" || urlEntry.url.Scheme == "https" {
select {
case <-p.ctx.Done():
return
case p.subTree <- urlEntry:
return
}
}
}
func createURLEntryFromLink(t html.Token) (string, error) {
for _, attr := range t.Attr {
if attr.Key == "href" {
return attr.Val, nil
}
}
return "", errors.New("No Href attribute in the link")
}
//CreateTime builds url time
func CreateTime(ts string) time.Time {
regex, _ := regexp.Compile("([0-9]+)-([A-Za-z]+)-[0-9]{2}([0-9]{2})[\t ]*([0-9]{2}:[0-9]{2})")
tss := regex.FindStringSubmatch(ts)
if len(tss) == 5 {
ts = fmt.Sprintf("%s %s %s %s PDT", tss[1], tss[2], tss[3], tss[4])
tm, _ := time.Parse(time.RFC822, ts)
return tm
} else {
tm := time.Now()
return tm
}
}
func getURLTime(z *html.Tokenizer) (time.Time, html.TokenType) {
tt := z.Token().Type
for tt != html.EndTagToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
for tt != html.TextToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
tm := CreateTime(string(z.Text()))
return tm, tt
}
func isCburnFolder(urlEntries []*URLEntry) bool {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "stage1.conf") ||
strings.HasSuffix(urlEntry.url.String(), "stage2.conf") {
return true
}
}
return false
}
func (p *URLFetcher) dispatchURLs(urlEntries []*URLEntry) {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "/") {
p.genNewURLEntry(urlEntry)
}
}
}
func (p *URLFetcher) dispatchCburn() {
fmt.Println("Dispatch cburn : ", p.urlEntry.url.String())
select {
case <-p.ctx.Done():
return
case p.record <- p.urlEntry:
return
}
}
func (p *URLFetcher) processPage(resp *http.Response) {
var urls []*URLEntry
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
resp.Body.Close()
return
}
z := html.NewTokenizer(resp.Body)
func() {
defer resp.Body.Close()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
} else {
if len(u.Path) > 0 && !strings.Contains(p.urlEntry.url.Path, u.Path) {
u = p.urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
//fmt.Println(u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}()
if isCburnFolder(urls) {
p.dispatchCburn()
} else {
p.dispatchURLs(urls)
}
}
func (p *URLFetcher) onCompletion() {
p.done <- &p.Fetcher
}
//Run starts the real work
func (p *URLFetcher) run() {
defer p.onCompletion()
req, err := http.NewRequest("GET", p.urlEntry.url.String(), nil)
if err != nil {
//wrong format, don't have to reinsert the URL for retry
//p.err = err
return
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.genNewURLEntry(p.urlEntry)
//p.err = err
return
}
p.processPage(resp)
}
}
func (p *RecordFetcher) genNewRecordEntry(urlEntry *URLEntry) {
p.subUrls.Push(urlEntry)
}
func (p *RecordFetcher) processPage(urlEntry *URLEntry, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
return
}
z := html.NewTokenizer(resp.Body)
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
} else {
//fmt.Printf("Checking %s under %s\n", u.String(), urlEntry.url.Path)
if len(u.Path) > 0 && !strings.Contains(urlEntry.url.Path, u.Path) {
u = urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
//fmt.Println("Push :", u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}
func (p *RecordFetcher) processFile(filepath string, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
data, _ := ioutil.ReadAll(resp.Body)
p.files[filepath] = &CburnFile{ct, data}
}
func (p *RecordFetcher) onCompletion() {
p.done <- &p.Fetcher
}
func (p *RecordFetcher) runSingle() (bool, bool) {
urlEntry := p.subUrls.Pop()
if urlEntry == nil {
fmt.Println("Finished :", p.urlEntry.url.String())
return true, true
}
if !strings.HasPrefix(urlEntry.url.String(), p.urlEntry.url.String()) {
return false, false
}
req, err := http.NewRequest("GET", urlEntry.url.String(), nil)
if err != nil {
//p.err = err
return false, false
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return false, true
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.subUrls.Push(urlEntry)
//p.err = err
return false, false
}
if strings.HasSuffix(urlEntry.url.String(), "/") {
fmt.Println("Parse Page ", urlEntry.url.String())
p.processPage(urlEntry, resp)
} else {
filepath := strings.TrimPrefix(urlEntry.url.String(), p.urlEntry.url.String())
p.processFile(filepath, resp)
fmt.Println("Downloaded File ", filepath, " ", p.urlEntry.url.Path)
}
}
return false, false
}
func (p *RecordFetcher) getIns() {
r, _ := regexp.Compile(`([\w-]+)=\"([\w-]+)\"`)
for name, file := range p.files {
if strings.HasPrefix(name, "ins-") {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
}
}
file, ok := p.files["sysconf.cfg"]
if ok {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
fmt.Printf("Found : %s/%s\n", p.recordAttrs["BOARD_NAME"], p.recordAttrs["SYSTEM_PRODUCT_NAME"])
}
return
}
func (p *RecordFetcher) saveToDB() {
dbclient, _ := mongo.NewClient(dburl)
db := dbclient.Database("sburn")
coll := db.Collection("cburn_record")
var attrElems []*bson.Element
for name, attr := range p.recordAttrs {
attrElems = append(attrElems, bson.EC.String(name, string(attr)))
}
var fileElems []*bson.Element
fileElems = append(fileElems, bson.EC.SubDocumentFromElements("attrib", attrElems...))
for name, file := range p.files {
if strings.Contains(file.contentType, "text/plain") {
fileElems = append(fileElems, bson.EC.String(name, string(file.data)))
} else {
fileElems = append(fileElems, bson.EC.Binary(name, file.data))
}
}
coll.InsertOne(
context.Background(),
bson.NewDocument(fileElems...))
dbclient.Disconnect(context.Background())
}
func (p *RecordFetcher) run() {
defer p.onCompletion()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
if finished, exit := p.runSingle(); exit {
if finished {
p.getIns()
fmt.Println("DB :", p.Fetcher.id, " ", p.Fetcher.urlEntry.url)
p.saveToDB()
}
break loop
}
}
}
}
| {
break loop
} | conditional_block |
sync.go | package logsync
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/mongo"
"golang.org/x/net/html"
)
var dburl = "mongodb://127.0.0.1:27017"
//CburnFile defines the cburn file type
type CburnFile struct {
contentType string
data []byte
}
//URLEntry is the URL to be explored
type URLEntry struct {
url *url.URL
lastUpdateTime time.Time
}
// URLQueue is a basic FIFO queue based on a circular list that resizes as needed.
type urlQueue struct {
nodes []*URLEntry
size int
head int
tail int
count int
}
//NewURLQueue returns a new queue with the given initial size.
func newURLQueue(size int) *urlQueue {
return &urlQueue{
nodes: make([]*URLEntry, size),
size: size,
}
}
// Push adds a node to the queue.
func (q *urlQueue) Push(n *URLEntry) {
if q.head == q.tail && q.count > 0 {
nodes := make([]*URLEntry, len(q.nodes)+q.size)
copy(nodes, q.nodes[q.head:])
copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.head])
q.head = 0
q.tail = len(q.nodes)
q.nodes = nodes
}
q.nodes[q.tail] = n
q.tail = (q.tail + 1) % len(q.nodes)
q.count++
}
// Pop removes and returns a node from the queue in first to last order.
func (q *urlQueue) Pop() *URLEntry {
if q.count == 0 {
return nil
}
node := q.nodes[q.head]
q.head = (q.head + 1) % len(q.nodes)
q.count--
return node
}
//Fetcher is the base class for *Fetcher
type Fetcher struct {
id int
client *clientConn
urlEntry *URLEntry
ctx context.Context
done chan *Fetcher
timeout time.Duration
//resp *http.Response
//err error
}
//URLFetcher is url fetcher
type URLFetcher struct {
Fetcher
subTree chan *URLEntry
record chan *URLEntry
}
//RecordFetcher is Record fetcher
type RecordFetcher struct {
Fetcher
subUrls *urlQueue
files map[string]*CburnFile
recordAttrs map[string]string
}
type routineStat struct {
routineLauched int
routineReturned int
routineDone chan *Fetcher
routineMax int
}
//Controller is the global data
type Controller struct {
ctx context.Context
wg sync.WaitGroup
explorerClient *clientConn
recordClient *clientConn
subTree chan *URLEntry
record chan *URLEntry
urlCache map[string]*URLEntry
explorerStat *routineStat
recordStat *routineStat
}
type clientConn struct {
client *http.Client
maxPoolSize int
cSemaphore chan int
reqPerSecond int
rateLimiter *time.Ticker
}
func (c *clientConn) Do(req *http.Request) (*http.Response, error) {
if c.maxPoolSize > 0 {
c.cSemaphore <- 1 // Grab a connection from our pool
defer func() {
<-c.cSemaphore // Defer release our connection back to the pool
}()
}
if c.reqPerSecond > 0 {
<-c.rateLimiter.C // Block until a signal is emitted from the rateLimiter
}
resp, err := c.client.Do(req)
return resp, err
}
func newClientConn(maxPoolSize int, reqPerSecond int) *clientConn {
var cSemaphore chan int
var rateLimiter *time.Ticker
if maxPoolSize > 0 {
cSemaphore = make(chan int, maxPoolSize)
}
if reqPerSecond > 0 {
rateLimiter = time.NewTicker(time.Second / time.Duration(reqPerSecond))
}
return &clientConn{
client: &http.Client{},
maxPoolSize: maxPoolSize,
cSemaphore: cSemaphore,
reqPerSecond: reqPerSecond,
rateLimiter: rateLimiter,
}
}
func newRoutineStat(max int) *routineStat {
return &routineStat{
routineDone: make(chan *Fetcher),
routineMax: max,
}
}
//NewController returns new controller
func NewController(ctx context.Context) *Controller {
return &Controller{
ctx: ctx,
explorerClient: newClientConn(1000, 10000),
recordClient: newClientConn(1000, 10000),
subTree: make(chan *URLEntry, 1000),
record: make(chan *URLEntry, 1000),
urlCache: make(map[string]*URLEntry),
explorerStat: newRoutineStat(0),
recordStat: newRoutineStat(0),
}
}
//NewURLEntry create new URL entry
func NewURLEntry(rawurl string) *URLEntry {
url, _ := url.Parse(rawurl)
return &URLEntry{
url: url,
lastUpdateTime: time.Now(),
}
}
//AddNewURLEntry adds new URL entry to the queue
func (c *Controller) AddNewURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.subTree <- urlEntry:
}
}
//AddNewRecordURLEntry adds new URL entry to the queue
func (c *Controller) AddNewRecordURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.record <- urlEntry:
}
}
//WaitJobDone waits for all child go routines exit
func (c *Controller) WaitJobDone() {
//fmt.Println("wait for job done")
c.wg.Wait()
}
func (c *Controller) startURL(urlFetcher *URLFetcher) {
go urlFetcher.run()
}
func (c *Controller) startRecord(recordFether *RecordFetcher) {
go recordFether.run()
}
//StartExplorer starts explorer
func (c *Controller) StartExplorer() {
c.wg.Add(1)
go c.startExplorer()
}
func (c *Controller) startExplorer() {
defer c.wg.Done()
s := c.explorerStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("-Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
if s.routineReturned >= s.routineLauched {
fmt.Printf("Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
break loop
}
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case urlEntry := <-c.subTree:
if c.urlNewerThanCache(urlEntry) {
fmt.Printf("Explorer|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, urlEntry.url.String(), urlEntry.lastUpdateTime)
s.routineLauched++
p := c.newURLFetcher(urlEntry)
c.startURL(p)
}
default:
}
}
}
}
fmt.Printf("\nCburn Explorer Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
//StartCburnProcessor starts record process
func (c *Controller) StartCburnProcessor() {
c.wg.Add(1)
go c.startCburnProcessor()
}
func (c *Controller) startCburnProcessor() {
defer c.wg.Done()
s := c.recordStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
fmt.Printf("-Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case record, ok := <-c.record:
if ok {
s.routineLauched++
fmt.Printf("Processor|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, record.url.String(), record.lastUpdateTime)
r := c.newRecordFetcher(record)
c.startRecord(r)
} else {
if s.routineReturned >= s.routineLauched {
break loop
}
select {
case <-s.routineDone:
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
}
default:
}
}
}
}
fmt.Printf("\nCburn Proccessor Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
func (c *Controller) urlNewerThanCache(urlEntry *URLEntry) bool {
//TBD
urlCache, ok := c.urlCache[urlEntry.url.String()]
if ok {
if urlEntry.lastUpdateTime.Before(urlCache.lastUpdateTime) {
return true
}
return false
} else {
c.urlCache[urlEntry.url.String()] = urlEntry
return true
}
}
func (c *Controller) newURLFetcher(urlEntry *URLEntry) *URLFetcher {
//ctx, cancel := context.WithCancel(c.ctx)
return &URLFetcher{
Fetcher: Fetcher{
id: c.explorerStat.routineLauched,
client: c.explorerClient,
ctx: c.ctx,
done: c.explorerStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subTree: c.subTree,
record: c.record,
}
}
func (c *Controller) | (urlEntry *URLEntry) *RecordFetcher {
r := &RecordFetcher{
Fetcher: Fetcher{
id: c.recordStat.routineLauched,
client: c.recordClient,
ctx: c.ctx,
done: c.recordStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subUrls: newURLQueue(100),
files: make(map[string]*CburnFile),
recordAttrs: make(map[string]string),
}
r.genNewRecordEntry(urlEntry)
return r
}
func (p *URLFetcher) genNewURLEntry(urlEntry *URLEntry) {
if urlEntry.url.Scheme == "http" || urlEntry.url.Scheme == "https" {
select {
case <-p.ctx.Done():
return
case p.subTree <- urlEntry:
return
}
}
}
func createURLEntryFromLink(t html.Token) (string, error) {
for _, attr := range t.Attr {
if attr.Key == "href" {
return attr.Val, nil
}
}
return "", errors.New("No Href attribute in the link")
}
//CreateTime builds url time
func CreateTime(ts string) time.Time {
regex, _ := regexp.Compile("([0-9]+)-([A-Za-z]+)-[0-9]{2}([0-9]{2})[\t ]*([0-9]{2}:[0-9]{2})")
tss := regex.FindStringSubmatch(ts)
if len(tss) == 5 {
ts = fmt.Sprintf("%s %s %s %s PDT", tss[1], tss[2], tss[3], tss[4])
tm, _ := time.Parse(time.RFC822, ts)
return tm
} else {
tm := time.Now()
return tm
}
}
func getURLTime(z *html.Tokenizer) (time.Time, html.TokenType) {
tt := z.Token().Type
for tt != html.EndTagToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
for tt != html.TextToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
tm := CreateTime(string(z.Text()))
return tm, tt
}
func isCburnFolder(urlEntries []*URLEntry) bool {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "stage1.conf") ||
strings.HasSuffix(urlEntry.url.String(), "stage2.conf") {
return true
}
}
return false
}
func (p *URLFetcher) dispatchURLs(urlEntries []*URLEntry) {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "/") {
p.genNewURLEntry(urlEntry)
}
}
}
func (p *URLFetcher) dispatchCburn() {
fmt.Println("Dispatch cburn : ", p.urlEntry.url.String())
select {
case <-p.ctx.Done():
return
case p.record <- p.urlEntry:
return
}
}
func (p *URLFetcher) processPage(resp *http.Response) {
var urls []*URLEntry
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
resp.Body.Close()
return
}
z := html.NewTokenizer(resp.Body)
func() {
defer resp.Body.Close()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
} else {
if len(u.Path) > 0 && !strings.Contains(p.urlEntry.url.Path, u.Path) {
u = p.urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
//fmt.Println(u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}()
if isCburnFolder(urls) {
p.dispatchCburn()
} else {
p.dispatchURLs(urls)
}
}
func (p *URLFetcher) onCompletion() {
p.done <- &p.Fetcher
}
//Run starts the real work
func (p *URLFetcher) run() {
defer p.onCompletion()
req, err := http.NewRequest("GET", p.urlEntry.url.String(), nil)
if err != nil {
//wrong format, don't have to reinsert the URL for retry
//p.err = err
return
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.genNewURLEntry(p.urlEntry)
//p.err = err
return
}
p.processPage(resp)
}
}
func (p *RecordFetcher) genNewRecordEntry(urlEntry *URLEntry) {
p.subUrls.Push(urlEntry)
}
func (p *RecordFetcher) processPage(urlEntry *URLEntry, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
return
}
z := html.NewTokenizer(resp.Body)
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
} else {
//fmt.Printf("Checking %s under %s\n", u.String(), urlEntry.url.Path)
if len(u.Path) > 0 && !strings.Contains(urlEntry.url.Path, u.Path) {
u = urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
//fmt.Println("Push :", u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}
func (p *RecordFetcher) processFile(filepath string, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
data, _ := ioutil.ReadAll(resp.Body)
p.files[filepath] = &CburnFile{ct, data}
}
func (p *RecordFetcher) onCompletion() {
p.done <- &p.Fetcher
}
func (p *RecordFetcher) runSingle() (bool, bool) {
urlEntry := p.subUrls.Pop()
if urlEntry == nil {
fmt.Println("Finished :", p.urlEntry.url.String())
return true, true
}
if !strings.HasPrefix(urlEntry.url.String(), p.urlEntry.url.String()) {
return false, false
}
req, err := http.NewRequest("GET", urlEntry.url.String(), nil)
if err != nil {
//p.err = err
return false, false
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return false, true
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.subUrls.Push(urlEntry)
//p.err = err
return false, false
}
if strings.HasSuffix(urlEntry.url.String(), "/") {
fmt.Println("Parse Page ", urlEntry.url.String())
p.processPage(urlEntry, resp)
} else {
filepath := strings.TrimPrefix(urlEntry.url.String(), p.urlEntry.url.String())
p.processFile(filepath, resp)
fmt.Println("Downloaded File ", filepath, " ", p.urlEntry.url.Path)
}
}
return false, false
}
func (p *RecordFetcher) getIns() {
r, _ := regexp.Compile(`([\w-]+)=\"([\w-]+)\"`)
for name, file := range p.files {
if strings.HasPrefix(name, "ins-") {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
}
}
file, ok := p.files["sysconf.cfg"]
if ok {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
fmt.Printf("Found : %s/%s\n", p.recordAttrs["BOARD_NAME"], p.recordAttrs["SYSTEM_PRODUCT_NAME"])
}
return
}
func (p *RecordFetcher) saveToDB() {
dbclient, _ := mongo.NewClient(dburl)
db := dbclient.Database("sburn")
coll := db.Collection("cburn_record")
var attrElems []*bson.Element
for name, attr := range p.recordAttrs {
attrElems = append(attrElems, bson.EC.String(name, string(attr)))
}
var fileElems []*bson.Element
fileElems = append(fileElems, bson.EC.SubDocumentFromElements("attrib", attrElems...))
for name, file := range p.files {
if strings.Contains(file.contentType, "text/plain") {
fileElems = append(fileElems, bson.EC.String(name, string(file.data)))
} else {
fileElems = append(fileElems, bson.EC.Binary(name, file.data))
}
}
coll.InsertOne(
context.Background(),
bson.NewDocument(fileElems...))
dbclient.Disconnect(context.Background())
}
func (p *RecordFetcher) run() {
defer p.onCompletion()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
if finished, exit := p.runSingle(); exit {
if finished {
p.getIns()
fmt.Println("DB :", p.Fetcher.id, " ", p.Fetcher.urlEntry.url)
p.saveToDB()
}
break loop
}
}
}
}
| newRecordFetcher | identifier_name |
sync.go | package logsync
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/mongo"
"golang.org/x/net/html"
)
var dburl = "mongodb://127.0.0.1:27017"
//CburnFile defines the cburn file type
type CburnFile struct {
contentType string
data []byte
}
//URLEntry is the URL to be explored
type URLEntry struct {
url *url.URL
lastUpdateTime time.Time
}
// URLQueue is a basic FIFO queue based on a circular list that resizes as needed.
type urlQueue struct {
nodes []*URLEntry
size int
head int
tail int
count int
}
//NewURLQueue returns a new queue with the given initial size.
func newURLQueue(size int) *urlQueue {
return &urlQueue{
nodes: make([]*URLEntry, size),
size: size,
}
}
// Push adds a node to the queue.
func (q *urlQueue) Push(n *URLEntry) {
if q.head == q.tail && q.count > 0 {
nodes := make([]*URLEntry, len(q.nodes)+q.size)
copy(nodes, q.nodes[q.head:])
copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.head])
q.head = 0
q.tail = len(q.nodes)
q.nodes = nodes
}
q.nodes[q.tail] = n
q.tail = (q.tail + 1) % len(q.nodes)
q.count++
}
// Pop removes and returns a node from the queue in first to last order.
func (q *urlQueue) Pop() *URLEntry {
if q.count == 0 {
return nil
}
node := q.nodes[q.head]
q.head = (q.head + 1) % len(q.nodes)
q.count--
return node
}
//Fetcher is the base class for *Fetcher
type Fetcher struct {
id int
client *clientConn
urlEntry *URLEntry
ctx context.Context
done chan *Fetcher
timeout time.Duration
//resp *http.Response
//err error
}
//URLFetcher is url fetcher
type URLFetcher struct {
Fetcher
subTree chan *URLEntry
record chan *URLEntry
}
//RecordFetcher is Record fetcher
type RecordFetcher struct {
Fetcher
subUrls *urlQueue
files map[string]*CburnFile
recordAttrs map[string]string
}
type routineStat struct {
routineLauched int
routineReturned int | routineDone chan *Fetcher
routineMax int
}
//Controller is the global data
type Controller struct {
ctx context.Context
wg sync.WaitGroup
explorerClient *clientConn
recordClient *clientConn
subTree chan *URLEntry
record chan *URLEntry
urlCache map[string]*URLEntry
explorerStat *routineStat
recordStat *routineStat
}
type clientConn struct {
client *http.Client
maxPoolSize int
cSemaphore chan int
reqPerSecond int
rateLimiter *time.Ticker
}
func (c *clientConn) Do(req *http.Request) (*http.Response, error) {
if c.maxPoolSize > 0 {
c.cSemaphore <- 1 // Grab a connection from our pool
defer func() {
<-c.cSemaphore // Defer release our connection back to the pool
}()
}
if c.reqPerSecond > 0 {
<-c.rateLimiter.C // Block until a signal is emitted from the rateLimiter
}
resp, err := c.client.Do(req)
return resp, err
}
func newClientConn(maxPoolSize int, reqPerSecond int) *clientConn {
var cSemaphore chan int
var rateLimiter *time.Ticker
if maxPoolSize > 0 {
cSemaphore = make(chan int, maxPoolSize)
}
if reqPerSecond > 0 {
rateLimiter = time.NewTicker(time.Second / time.Duration(reqPerSecond))
}
return &clientConn{
client: &http.Client{},
maxPoolSize: maxPoolSize,
cSemaphore: cSemaphore,
reqPerSecond: reqPerSecond,
rateLimiter: rateLimiter,
}
}
func newRoutineStat(max int) *routineStat {
return &routineStat{
routineDone: make(chan *Fetcher),
routineMax: max,
}
}
//NewController returns new controller
func NewController(ctx context.Context) *Controller {
return &Controller{
ctx: ctx,
explorerClient: newClientConn(1000, 10000),
recordClient: newClientConn(1000, 10000),
subTree: make(chan *URLEntry, 1000),
record: make(chan *URLEntry, 1000),
urlCache: make(map[string]*URLEntry),
explorerStat: newRoutineStat(0),
recordStat: newRoutineStat(0),
}
}
//NewURLEntry create new URL entry
func NewURLEntry(rawurl string) *URLEntry {
url, _ := url.Parse(rawurl)
return &URLEntry{
url: url,
lastUpdateTime: time.Now(),
}
}
//AddNewURLEntry adds new URL entry to the queue
func (c *Controller) AddNewURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.subTree <- urlEntry:
}
}
//AddNewRecordURLEntry adds new URL entry to the queue
func (c *Controller) AddNewRecordURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.record <- urlEntry:
}
}
//WaitJobDone waits for all child go routines exit
func (c *Controller) WaitJobDone() {
//fmt.Println("wait for job done")
c.wg.Wait()
}
func (c *Controller) startURL(urlFetcher *URLFetcher) {
go urlFetcher.run()
}
func (c *Controller) startRecord(recordFether *RecordFetcher) {
go recordFether.run()
}
//StartExplorer starts explorer
func (c *Controller) StartExplorer() {
c.wg.Add(1)
go c.startExplorer()
}
func (c *Controller) startExplorer() {
defer c.wg.Done()
s := c.explorerStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("-Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
if s.routineReturned >= s.routineLauched {
fmt.Printf("Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
break loop
}
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case urlEntry := <-c.subTree:
if c.urlNewerThanCache(urlEntry) {
fmt.Printf("Explorer|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, urlEntry.url.String(), urlEntry.lastUpdateTime)
s.routineLauched++
p := c.newURLFetcher(urlEntry)
c.startURL(p)
}
default:
}
}
}
}
fmt.Printf("\nCburn Explorer Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
//StartCburnProcessor starts record process
func (c *Controller) StartCburnProcessor() {
c.wg.Add(1)
go c.startCburnProcessor()
}
func (c *Controller) startCburnProcessor() {
defer c.wg.Done()
s := c.recordStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
fmt.Printf("-Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case record, ok := <-c.record:
if ok {
s.routineLauched++
fmt.Printf("Processor|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, record.url.String(), record.lastUpdateTime)
r := c.newRecordFetcher(record)
c.startRecord(r)
} else {
if s.routineReturned >= s.routineLauched {
break loop
}
select {
case <-s.routineDone:
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
}
default:
}
}
}
}
fmt.Printf("\nCburn Proccessor Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
func (c *Controller) urlNewerThanCache(urlEntry *URLEntry) bool {
//TBD
urlCache, ok := c.urlCache[urlEntry.url.String()]
if ok {
if urlEntry.lastUpdateTime.Before(urlCache.lastUpdateTime) {
return true
}
return false
} else {
c.urlCache[urlEntry.url.String()] = urlEntry
return true
}
}
func (c *Controller) newURLFetcher(urlEntry *URLEntry) *URLFetcher {
//ctx, cancel := context.WithCancel(c.ctx)
return &URLFetcher{
Fetcher: Fetcher{
id: c.explorerStat.routineLauched,
client: c.explorerClient,
ctx: c.ctx,
done: c.explorerStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subTree: c.subTree,
record: c.record,
}
}
func (c *Controller) newRecordFetcher(urlEntry *URLEntry) *RecordFetcher {
r := &RecordFetcher{
Fetcher: Fetcher{
id: c.recordStat.routineLauched,
client: c.recordClient,
ctx: c.ctx,
done: c.recordStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subUrls: newURLQueue(100),
files: make(map[string]*CburnFile),
recordAttrs: make(map[string]string),
}
r.genNewRecordEntry(urlEntry)
return r
}
func (p *URLFetcher) genNewURLEntry(urlEntry *URLEntry) {
if urlEntry.url.Scheme == "http" || urlEntry.url.Scheme == "https" {
select {
case <-p.ctx.Done():
return
case p.subTree <- urlEntry:
return
}
}
}
func createURLEntryFromLink(t html.Token) (string, error) {
for _, attr := range t.Attr {
if attr.Key == "href" {
return attr.Val, nil
}
}
return "", errors.New("No Href attribute in the link")
}
//CreateTime builds url time
func CreateTime(ts string) time.Time {
regex, _ := regexp.Compile("([0-9]+)-([A-Za-z]+)-[0-9]{2}([0-9]{2})[\t ]*([0-9]{2}:[0-9]{2})")
tss := regex.FindStringSubmatch(ts)
if len(tss) == 5 {
ts = fmt.Sprintf("%s %s %s %s PDT", tss[1], tss[2], tss[3], tss[4])
tm, _ := time.Parse(time.RFC822, ts)
return tm
} else {
tm := time.Now()
return tm
}
}
func getURLTime(z *html.Tokenizer) (time.Time, html.TokenType) {
tt := z.Token().Type
for tt != html.EndTagToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
for tt != html.TextToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
tm := CreateTime(string(z.Text()))
return tm, tt
}
func isCburnFolder(urlEntries []*URLEntry) bool {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "stage1.conf") ||
strings.HasSuffix(urlEntry.url.String(), "stage2.conf") {
return true
}
}
return false
}
func (p *URLFetcher) dispatchURLs(urlEntries []*URLEntry) {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "/") {
p.genNewURLEntry(urlEntry)
}
}
}
func (p *URLFetcher) dispatchCburn() {
fmt.Println("Dispatch cburn : ", p.urlEntry.url.String())
select {
case <-p.ctx.Done():
return
case p.record <- p.urlEntry:
return
}
}
func (p *URLFetcher) processPage(resp *http.Response) {
var urls []*URLEntry
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
resp.Body.Close()
return
}
z := html.NewTokenizer(resp.Body)
func() {
defer resp.Body.Close()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
} else {
if len(u.Path) > 0 && !strings.Contains(p.urlEntry.url.Path, u.Path) {
u = p.urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
//fmt.Println(u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}()
if isCburnFolder(urls) {
p.dispatchCburn()
} else {
p.dispatchURLs(urls)
}
}
func (p *URLFetcher) onCompletion() {
p.done <- &p.Fetcher
}
//Run starts the real work
func (p *URLFetcher) run() {
defer p.onCompletion()
req, err := http.NewRequest("GET", p.urlEntry.url.String(), nil)
if err != nil {
//wrong format, don't have to reinsert the URL for retry
//p.err = err
return
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.genNewURLEntry(p.urlEntry)
//p.err = err
return
}
p.processPage(resp)
}
}
func (p *RecordFetcher) genNewRecordEntry(urlEntry *URLEntry) {
p.subUrls.Push(urlEntry)
}
func (p *RecordFetcher) processPage(urlEntry *URLEntry, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
return
}
z := html.NewTokenizer(resp.Body)
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
} else {
//fmt.Printf("Checking %s under %s\n", u.String(), urlEntry.url.Path)
if len(u.Path) > 0 && !strings.Contains(urlEntry.url.Path, u.Path) {
u = urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
//fmt.Println("Push :", u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}
func (p *RecordFetcher) processFile(filepath string, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
data, _ := ioutil.ReadAll(resp.Body)
p.files[filepath] = &CburnFile{ct, data}
}
func (p *RecordFetcher) onCompletion() {
p.done <- &p.Fetcher
}
func (p *RecordFetcher) runSingle() (bool, bool) {
urlEntry := p.subUrls.Pop()
if urlEntry == nil {
fmt.Println("Finished :", p.urlEntry.url.String())
return true, true
}
if !strings.HasPrefix(urlEntry.url.String(), p.urlEntry.url.String()) {
return false, false
}
req, err := http.NewRequest("GET", urlEntry.url.String(), nil)
if err != nil {
//p.err = err
return false, false
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return false, true
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.subUrls.Push(urlEntry)
//p.err = err
return false, false
}
if strings.HasSuffix(urlEntry.url.String(), "/") {
fmt.Println("Parse Page ", urlEntry.url.String())
p.processPage(urlEntry, resp)
} else {
filepath := strings.TrimPrefix(urlEntry.url.String(), p.urlEntry.url.String())
p.processFile(filepath, resp)
fmt.Println("Downloaded File ", filepath, " ", p.urlEntry.url.Path)
}
}
return false, false
}
func (p *RecordFetcher) getIns() {
r, _ := regexp.Compile(`([\w-]+)=\"([\w-]+)\"`)
for name, file := range p.files {
if strings.HasPrefix(name, "ins-") {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
}
}
file, ok := p.files["sysconf.cfg"]
if ok {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
fmt.Printf("Found : %s/%s\n", p.recordAttrs["BOARD_NAME"], p.recordAttrs["SYSTEM_PRODUCT_NAME"])
}
return
}
func (p *RecordFetcher) saveToDB() {
dbclient, _ := mongo.NewClient(dburl)
db := dbclient.Database("sburn")
coll := db.Collection("cburn_record")
var attrElems []*bson.Element
for name, attr := range p.recordAttrs {
attrElems = append(attrElems, bson.EC.String(name, string(attr)))
}
var fileElems []*bson.Element
fileElems = append(fileElems, bson.EC.SubDocumentFromElements("attrib", attrElems...))
for name, file := range p.files {
if strings.Contains(file.contentType, "text/plain") {
fileElems = append(fileElems, bson.EC.String(name, string(file.data)))
} else {
fileElems = append(fileElems, bson.EC.Binary(name, file.data))
}
}
coll.InsertOne(
context.Background(),
bson.NewDocument(fileElems...))
dbclient.Disconnect(context.Background())
}
func (p *RecordFetcher) run() {
defer p.onCompletion()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
if finished, exit := p.runSingle(); exit {
if finished {
p.getIns()
fmt.Println("DB :", p.Fetcher.id, " ", p.Fetcher.urlEntry.url)
p.saveToDB()
}
break loop
}
}
}
} | random_line_split | |
sync.go | package logsync
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/mongo"
"golang.org/x/net/html"
)
var dburl = "mongodb://127.0.0.1:27017"
//CburnFile defines the cburn file type
type CburnFile struct {
contentType string
data []byte
}
//URLEntry is the URL to be explored
type URLEntry struct {
url *url.URL
lastUpdateTime time.Time
}
// URLQueue is a basic FIFO queue based on a circular list that resizes as needed.
type urlQueue struct {
nodes []*URLEntry
size int
head int
tail int
count int
}
//NewURLQueue returns a new queue with the given initial size.
func newURLQueue(size int) *urlQueue {
return &urlQueue{
nodes: make([]*URLEntry, size),
size: size,
}
}
// Push adds a node to the queue.
func (q *urlQueue) Push(n *URLEntry) {
if q.head == q.tail && q.count > 0 {
nodes := make([]*URLEntry, len(q.nodes)+q.size)
copy(nodes, q.nodes[q.head:])
copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.head])
q.head = 0
q.tail = len(q.nodes)
q.nodes = nodes
}
q.nodes[q.tail] = n
q.tail = (q.tail + 1) % len(q.nodes)
q.count++
}
// Pop removes and returns a node from the queue in first to last order.
func (q *urlQueue) Pop() *URLEntry {
if q.count == 0 {
return nil
}
node := q.nodes[q.head]
q.head = (q.head + 1) % len(q.nodes)
q.count--
return node
}
//Fetcher is the base class for *Fetcher
type Fetcher struct {
id int
client *clientConn
urlEntry *URLEntry
ctx context.Context
done chan *Fetcher
timeout time.Duration
//resp *http.Response
//err error
}
//URLFetcher is url fetcher
type URLFetcher struct {
Fetcher
subTree chan *URLEntry
record chan *URLEntry
}
//RecordFetcher is Record fetcher
type RecordFetcher struct {
Fetcher
subUrls *urlQueue
files map[string]*CburnFile
recordAttrs map[string]string
}
type routineStat struct {
routineLauched int
routineReturned int
routineDone chan *Fetcher
routineMax int
}
//Controller is the global data
type Controller struct {
ctx context.Context
wg sync.WaitGroup
explorerClient *clientConn
recordClient *clientConn
subTree chan *URLEntry
record chan *URLEntry
urlCache map[string]*URLEntry
explorerStat *routineStat
recordStat *routineStat
}
type clientConn struct {
client *http.Client
maxPoolSize int
cSemaphore chan int
reqPerSecond int
rateLimiter *time.Ticker
}
func (c *clientConn) Do(req *http.Request) (*http.Response, error) {
if c.maxPoolSize > 0 {
c.cSemaphore <- 1 // Grab a connection from our pool
defer func() {
<-c.cSemaphore // Defer release our connection back to the pool
}()
}
if c.reqPerSecond > 0 {
<-c.rateLimiter.C // Block until a signal is emitted from the rateLimiter
}
resp, err := c.client.Do(req)
return resp, err
}
func newClientConn(maxPoolSize int, reqPerSecond int) *clientConn {
var cSemaphore chan int
var rateLimiter *time.Ticker
if maxPoolSize > 0 {
cSemaphore = make(chan int, maxPoolSize)
}
if reqPerSecond > 0 {
rateLimiter = time.NewTicker(time.Second / time.Duration(reqPerSecond))
}
return &clientConn{
client: &http.Client{},
maxPoolSize: maxPoolSize,
cSemaphore: cSemaphore,
reqPerSecond: reqPerSecond,
rateLimiter: rateLimiter,
}
}
func newRoutineStat(max int) *routineStat {
return &routineStat{
routineDone: make(chan *Fetcher),
routineMax: max,
}
}
//NewController returns new controller
func NewController(ctx context.Context) *Controller {
return &Controller{
ctx: ctx,
explorerClient: newClientConn(1000, 10000),
recordClient: newClientConn(1000, 10000),
subTree: make(chan *URLEntry, 1000),
record: make(chan *URLEntry, 1000),
urlCache: make(map[string]*URLEntry),
explorerStat: newRoutineStat(0),
recordStat: newRoutineStat(0),
}
}
//NewURLEntry create new URL entry
func NewURLEntry(rawurl string) *URLEntry {
url, _ := url.Parse(rawurl)
return &URLEntry{
url: url,
lastUpdateTime: time.Now(),
}
}
//AddNewURLEntry adds new URL entry to the queue
func (c *Controller) AddNewURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.subTree <- urlEntry:
}
}
//AddNewRecordURLEntry adds new URL entry to the queue
func (c *Controller) AddNewRecordURLEntry(urlEntry *URLEntry) {
select {
case <-c.ctx.Done():
return
case c.record <- urlEntry:
}
}
//WaitJobDone waits for all child go routines exit
func (c *Controller) WaitJobDone() {
//fmt.Println("wait for job done")
c.wg.Wait()
}
func (c *Controller) startURL(urlFetcher *URLFetcher) {
go urlFetcher.run()
}
func (c *Controller) startRecord(recordFether *RecordFetcher) {
go recordFether.run()
}
//StartExplorer starts explorer
func (c *Controller) StartExplorer() {
c.wg.Add(1)
go c.startExplorer()
}
func (c *Controller) startExplorer() {
defer c.wg.Done()
s := c.explorerStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("-Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
if s.routineReturned >= s.routineLauched {
fmt.Printf("Explorer|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
break loop
}
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case urlEntry := <-c.subTree:
if c.urlNewerThanCache(urlEntry) {
fmt.Printf("Explorer|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, urlEntry.url.String(), urlEntry.lastUpdateTime)
s.routineLauched++
p := c.newURLFetcher(urlEntry)
c.startURL(p)
}
default:
}
}
}
}
fmt.Printf("\nCburn Explorer Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
//StartCburnProcessor starts record process
func (c *Controller) StartCburnProcessor() {
c.wg.Add(1)
go c.startCburnProcessor()
}
func (c *Controller) startCburnProcessor() {
defer c.wg.Done()
s := c.recordStat
loop:
for {
select {
case <-c.ctx.Done():
select {
case fetcher := <-s.routineDone:
fmt.Printf("-Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
case fetcher := <-s.routineDone:
s.routineReturned++
fmt.Printf("Processor|End[ %d| %d] : %s\n", fetcher.id, s.routineLauched-s.routineReturned, fetcher.urlEntry.url.String())
default:
if s.routineMax == 0 || s.routineLauched-s.routineReturned < s.routineMax {
select {
case record, ok := <-c.record:
if ok {
s.routineLauched++
fmt.Printf("Processor|Start[ %d| %d] : %s %s\n", s.routineLauched, s.routineLauched-s.routineReturned, record.url.String(), record.lastUpdateTime)
r := c.newRecordFetcher(record)
c.startRecord(r)
} else {
if s.routineReturned >= s.routineLauched {
break loop
}
select {
case <-s.routineDone:
s.routineReturned++
default:
if s.routineReturned >= s.routineLauched {
break loop
}
}
}
default:
}
}
}
}
fmt.Printf("\nCburn Proccessor Summary : %d routine launched %d routine returned\n", s.routineLauched, s.routineReturned)
}
func (c *Controller) urlNewerThanCache(urlEntry *URLEntry) bool {
//TBD
urlCache, ok := c.urlCache[urlEntry.url.String()]
if ok {
if urlEntry.lastUpdateTime.Before(urlCache.lastUpdateTime) {
return true
}
return false
} else {
c.urlCache[urlEntry.url.String()] = urlEntry
return true
}
}
func (c *Controller) newURLFetcher(urlEntry *URLEntry) *URLFetcher {
//ctx, cancel := context.WithCancel(c.ctx)
return &URLFetcher{
Fetcher: Fetcher{
id: c.explorerStat.routineLauched,
client: c.explorerClient,
ctx: c.ctx,
done: c.explorerStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subTree: c.subTree,
record: c.record,
}
}
func (c *Controller) newRecordFetcher(urlEntry *URLEntry) *RecordFetcher {
r := &RecordFetcher{
Fetcher: Fetcher{
id: c.recordStat.routineLauched,
client: c.recordClient,
ctx: c.ctx,
done: c.recordStat.routineDone,
urlEntry: urlEntry,
timeout: time.Second * 30,
},
subUrls: newURLQueue(100),
files: make(map[string]*CburnFile),
recordAttrs: make(map[string]string),
}
r.genNewRecordEntry(urlEntry)
return r
}
func (p *URLFetcher) genNewURLEntry(urlEntry *URLEntry) {
if urlEntry.url.Scheme == "http" || urlEntry.url.Scheme == "https" {
select {
case <-p.ctx.Done():
return
case p.subTree <- urlEntry:
return
}
}
}
func createURLEntryFromLink(t html.Token) (string, error) |
//CreateTime builds url time
func CreateTime(ts string) time.Time {
regex, _ := regexp.Compile("([0-9]+)-([A-Za-z]+)-[0-9]{2}([0-9]{2})[\t ]*([0-9]{2}:[0-9]{2})")
tss := regex.FindStringSubmatch(ts)
if len(tss) == 5 {
ts = fmt.Sprintf("%s %s %s %s PDT", tss[1], tss[2], tss[3], tss[4])
tm, _ := time.Parse(time.RFC822, ts)
return tm
} else {
tm := time.Now()
return tm
}
}
func getURLTime(z *html.Tokenizer) (time.Time, html.TokenType) {
tt := z.Token().Type
for tt != html.EndTagToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
for tt != html.TextToken {
if tt == html.ErrorToken {
return time.Now(), tt
}
tt = z.Next()
}
tm := CreateTime(string(z.Text()))
return tm, tt
}
func isCburnFolder(urlEntries []*URLEntry) bool {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "stage1.conf") ||
strings.HasSuffix(urlEntry.url.String(), "stage2.conf") {
return true
}
}
return false
}
func (p *URLFetcher) dispatchURLs(urlEntries []*URLEntry) {
for _, urlEntry := range urlEntries {
if strings.HasSuffix(urlEntry.url.String(), "/") {
p.genNewURLEntry(urlEntry)
}
}
}
func (p *URLFetcher) dispatchCburn() {
fmt.Println("Dispatch cburn : ", p.urlEntry.url.String())
select {
case <-p.ctx.Done():
return
case p.record <- p.urlEntry:
return
}
}
func (p *URLFetcher) processPage(resp *http.Response) {
var urls []*URLEntry
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
resp.Body.Close()
return
}
z := html.NewTokenizer(resp.Body)
func() {
defer resp.Body.Close()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
} else {
if len(u.Path) > 0 && !strings.Contains(p.urlEntry.url.Path, u.Path) {
u = p.urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
urls = append(urls, &URLEntry{u, tm})
//fmt.Println(u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}()
if isCburnFolder(urls) {
p.dispatchCburn()
} else {
p.dispatchURLs(urls)
}
}
func (p *URLFetcher) onCompletion() {
p.done <- &p.Fetcher
}
//Run starts the real work
func (p *URLFetcher) run() {
defer p.onCompletion()
req, err := http.NewRequest("GET", p.urlEntry.url.String(), nil)
if err != nil {
//wrong format, don't have to reinsert the URL for retry
//p.err = err
return
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.genNewURLEntry(p.urlEntry)
//p.err = err
return
}
p.processPage(resp)
}
}
func (p *RecordFetcher) genNewRecordEntry(urlEntry *URLEntry) {
p.subUrls.Push(urlEntry)
}
func (p *RecordFetcher) processPage(urlEntry *URLEntry, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
if strings.Contains(ct, "UTF-8") == false {
return
}
z := html.NewTokenizer(resp.Body)
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
tt := z.Next()
switch {
case tt == html.ErrorToken:
// End of the document, we're done
break loop
case tt == html.StartTagToken:
t := z.Token()
isAnchor := t.Data == "a"
if isAnchor {
l, err := createURLEntryFromLink(t)
if err == nil {
u, err := url.Parse(l)
if err == nil {
var tm time.Time
if u.IsAbs() {
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
} else {
//fmt.Printf("Checking %s under %s\n", u.String(), urlEntry.url.Path)
if len(u.Path) > 0 && !strings.Contains(urlEntry.url.Path, u.Path) {
u = urlEntry.url.ResolveReference(u)
tm, tt = getURLTime(z)
p.subUrls.Push(&URLEntry{u, tm})
//fmt.Println("Push :", u.String())
}
}
}
}
}
if tt == html.ErrorToken {
break loop
}
}
}
}
}
func (p *RecordFetcher) processFile(filepath string, resp *http.Response) {
defer resp.Body.Close()
ct := resp.Header.Get("Content-Type")
data, _ := ioutil.ReadAll(resp.Body)
p.files[filepath] = &CburnFile{ct, data}
}
func (p *RecordFetcher) onCompletion() {
p.done <- &p.Fetcher
}
func (p *RecordFetcher) runSingle() (bool, bool) {
urlEntry := p.subUrls.Pop()
if urlEntry == nil {
fmt.Println("Finished :", p.urlEntry.url.String())
return true, true
}
if !strings.HasPrefix(urlEntry.url.String(), p.urlEntry.url.String()) {
return false, false
}
req, err := http.NewRequest("GET", urlEntry.url.String(), nil)
if err != nil {
//p.err = err
return false, false
}
//req.Header.Set("Connection", "close")
ctx, cancel := context.WithTimeout(p.ctx, p.timeout)
defer cancel()
req.WithContext(ctx)
select {
case <-ctx.Done():
return false, true
default:
resp, err := p.client.Do(req)
if err != nil {
fmt.Println("Http Connection Error (Retry Scheduled) : ", err)
p.subUrls.Push(urlEntry)
//p.err = err
return false, false
}
if strings.HasSuffix(urlEntry.url.String(), "/") {
fmt.Println("Parse Page ", urlEntry.url.String())
p.processPage(urlEntry, resp)
} else {
filepath := strings.TrimPrefix(urlEntry.url.String(), p.urlEntry.url.String())
p.processFile(filepath, resp)
fmt.Println("Downloaded File ", filepath, " ", p.urlEntry.url.Path)
}
}
return false, false
}
func (p *RecordFetcher) getIns() {
r, _ := regexp.Compile(`([\w-]+)=\"([\w-]+)\"`)
for name, file := range p.files {
if strings.HasPrefix(name, "ins-") {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
}
}
file, ok := p.files["sysconf.cfg"]
if ok {
attrs := r.FindAllString(string(file.data), -1)
for _, attr := range attrs {
ri := r.FindStringSubmatch(attr)
p.recordAttrs[ri[1]] = ri[2]
}
fmt.Printf("Found : %s/%s\n", p.recordAttrs["BOARD_NAME"], p.recordAttrs["SYSTEM_PRODUCT_NAME"])
}
return
}
func (p *RecordFetcher) saveToDB() {
dbclient, _ := mongo.NewClient(dburl)
db := dbclient.Database("sburn")
coll := db.Collection("cburn_record")
var attrElems []*bson.Element
for name, attr := range p.recordAttrs {
attrElems = append(attrElems, bson.EC.String(name, string(attr)))
}
var fileElems []*bson.Element
fileElems = append(fileElems, bson.EC.SubDocumentFromElements("attrib", attrElems...))
for name, file := range p.files {
if strings.Contains(file.contentType, "text/plain") {
fileElems = append(fileElems, bson.EC.String(name, string(file.data)))
} else {
fileElems = append(fileElems, bson.EC.Binary(name, file.data))
}
}
coll.InsertOne(
context.Background(),
bson.NewDocument(fileElems...))
dbclient.Disconnect(context.Background())
}
func (p *RecordFetcher) run() {
defer p.onCompletion()
loop:
for {
select {
case <-p.ctx.Done():
break loop
default:
if finished, exit := p.runSingle(); exit {
if finished {
p.getIns()
fmt.Println("DB :", p.Fetcher.id, " ", p.Fetcher.urlEntry.url)
p.saveToDB()
}
break loop
}
}
}
}
| {
for _, attr := range t.Attr {
if attr.Key == "href" {
return attr.Val, nil
}
}
return "", errors.New("No Href attribute in the link")
} | identifier_body |
auth.go | package server
import (
"crypto/md5"
"crypto/rand"
"crypto/x509"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
)
type Authenticator struct {
nonces map[string]Nonce
mutex sync.Mutex
}
type AuthDomain struct {
Realm string
Entry Entry
}
type Nonce struct {
Time time.Time
Validity time.Duration
Opaque string
Domain string
Realm string
}
func CreateAuthenticator() Authenticator {
return Authenticator{
nonces: make(map[string]Nonce),
}
}
var nonceValidity = time.Minute * 5
var AnonymousAuthDomain string = "anonymous"
func RandomKey() (string, error) {
var data [12]byte
_, err := io.ReadFull(rand.Reader, data[:])
return base64.StdEncoding.EncodeToString(data[:]), err
}
func escape(s string) string {
return strings.Replace(strings.Replace(s, "\\", "\\\\", -1), "\"", "\\\"", -1)
}
/* h function for MD5 algorithm (returns a lower-case hex MD5 digest)
*/
func h(data string) string {
digest := md5.New()
digest.Write([]byte(data))
return fmt.Sprintf("%x", digest.Sum(nil))
}
func getAuthParams(req *http.Request) []AuthMethod {
var methods []AuthMethod
for _, h := range req.Header["Authorization"] {
parser := HeaderParser{Buffer: h}
parser.Init()
if err := parser.Parse(); err != nil {
log.Println(err)
} else {
parser.Execute()
}
for _, m := range parser.Methods {
methods = append(methods, m)
}
}
return methods
}
func (auth *Authenticator) purge(now time.Time) {
for k, nonce := range auth.nonces {
if nonce.Time.Add(nonce.Validity).Before(now) {
//log.Printf("Purge nonce %s\n", k)
delete(auth.nonces, k)
}
}
}
func (auth *Authenticator) generateNonce(domain string, realm string, time time.Time, valid time.Duration) (string, string, error) {
key, err := RandomKey()
if err != nil {
return "", "", err
}
opaque, err := RandomKey()
if err != nil {
return "", "", err
}
//log.Printf("Generate nonce %s\n", key)
auth.nonces[key] = Nonce{
Time: time,
Validity: valid,
Opaque: opaque,
Domain: domain,
Realm: realm,
}
return key, opaque, nil
}
func (auth *Authenticator) checkNonce(noncekey string, nonce_count string, now time.Time) (bool, string, string, string) {
// FIXME do something with nonce_count
nonce, ok := auth.nonces[noncekey]
if !ok {
return false, "", "", ""
}
opaque := nonce.Opaque
auth_domain := nonce.Domain
auth_realm := nonce.Realm
valid := nonce.Time.Add(nonce.Validity).After(now)
if !valid {
delete(auth.nonces, noncekey)
//log.Printf("Purge stale nonce %s\n", noncekey)
}
return valid, opaque, auth_domain, auth_realm
}
func (Auth *Authenticator) checkRequest(entry Entry, req *http.Request, now time.Time) (bool, bool, bool, []error) {
var errors []error
var stale = false
var found_auth_all = false
if req.TLS != nil {
certs := req.TLS.PeerCertificates;
for _, cert := range certs {
authList, ers := authList(entry);
if len(ers) > 0 {
for _, e := range ers { errors = append(errors, e); }
}
for _, authDomainName := range authList {
found, _, err := getAuthCredsCert(entry, authDomainName, *cert)
if err != nil {
errors = append(errors, err)
continue
} else if !found {
continue
}
allow, found_auth, err := getAuthPerms(entry, authDomainName, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(*cert)))
log.Printf("TLS Authentication failure for certificate %s.sha256\n", fingerprint)
}
}
auth_params := getAuthParams(req)
for _, auth := range auth_params {
if auth.Name == "Digest" {
algorithm := auth.Params["algorithm"]
qop := auth.Params["qop"]
opaque := auth.Params["opaque"]
nonce := auth.Params["nonce"]
response := auth.Params["response"]
username := auth.Params["username"]
digest_uri := auth.Params["uri"]
cnonce := auth.Params["cnonce"]
nonce_count := auth.Params["nc"]
if algorithm != "MD5" || qop != "auth" {
//log.Println("Invalid algorithm or qop")
continue
}
ok, nonce_opaque, auth_domain, auth_realm := Auth.checkNonce(nonce, nonce_count, now)
if !ok {
stale = true
//log.Println("Stale nonce " + nonce + " count: " + nonce_count)
continue
} else if opaque != nonce_opaque {
//log.Printf("Opaque data invalid %s %s\n", opaque, nonce_opaque)
continue
}
var password string
if found, pass, err := getAuthCreds(entry, auth_domain, auth.Name, username); err != nil {
errors = append(errors, err)
continue
} else if !found {
log.Printf("Digest Authentication failure, %s no username %s\n", req.RequestURI, username)
continue
} else {
password = string(pass)
}
HA1 := h(username + ":" + auth_realm + ":" + password)
HA2 := h(req.Method + ":" + digest_uri)
KD := h(strings.Join([]string{
HA1, nonce, nonce_count, cnonce, qop, HA2}, ":"))
if KD != response {
log.Printf("Digest Authentication failure %s %s != %s\n", req.RequestURI, KD, response)
continue
}
allow, found_auth, err := getAuthPerms(entry, auth_domain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
}
allow, found_auth, err := getAuthPerms(entry, AnonymousAuthDomain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
return allow, found_auth_all, stale, errors
}
func (Auth *Authenticator) | (entry Entry, res http.ResponseWriter, req *http.Request) bool {
Auth.mutex.Lock()
defer Auth.mutex.Unlock()
now := time.Now()
Auth.purge(now)
authorized, found_auth, stale, errors := Auth.checkRequest(entry, req, now)
if len(errors) > 0 {
for err := range errors {
fmt.Println(err)
}
}
auths, auth_errs := authList(entry)
if len(auth_errs) > 0 {
for err := range auth_errs {
log.Println(err)
}
}
for _, auth := range auths {
domain, err := getAuthDomain(entry, auth)
if err != nil {
log.Println(err)
}
staleStr := ""
if stale {
staleStr = `, stale="true"`
}
if nonce, opaque, nonce_err := Auth.generateNonce(auth, domain.Realm, now, nonceValidity); nonce_err != nil {
log.Println(nonce_err)
} else {
res.Header().Add("WWW-Authenticate",
fmt.Sprintf(`Digest realm="%s", domain="%s", nonce="%s", opaque="%s"%s, algorithm="MD5", qop="auth"`,
escape(domain.Realm), escape(getPath(domain.Entry)), nonce, opaque, staleStr))
}
}
if found_auth {
return authorized
} else {
return len(auths) == 0
}
}
func authList(entry Entry) ([]string, []error) {
var errors []error
var res_auths []string
for ent := entry; ent != nil; ent = ent.Parent(true) {
auths := entry.Parameters().Child("auth")
auth_list, err := auths.Children()
if err != nil && os.IsExist(err) {
errors = append(errors, err)
continue
}
for _, auth := range auth_list {
found := false
if auth.Name() == AnonymousAuthDomain {
continue
}
for _, v := range res_auths {
if v == auth.Name() {
found = true
break
}
}
if !found {
res_auths = append(res_auths, auth.Name())
}
}
if !auths.Child("inherit").Exists() && auths.Exists() {
break
}
}
return res_auths, errors
}
func getAuthDomain(entry Entry, authname string) (AuthDomain, error) {
var domain AuthDomain
var ent Entry
var err error
var haveRealm = false
for ent = entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(authname)
if !auth.Exists() {
continue
}
if !haveRealm {
data, e := auth.Child("realm").Read()
if e == nil {
domain.Realm = string(data)
haveRealm = true
} else if e != nil && os.IsExist(err) {
domain.Realm = authname
err = e
haveRealm = true
}
}
if !auth.Child("inherit").Exists() {
break
}
}
domain.Entry = ent
return domain, nil
}
func getAuthCreds(entry Entry, auth string, method string, username string) (bool, []byte, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child(method + ".users")
user := users.Child(username)
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthCredsCert(entry Entry, auth string, cert x509.Certificate) (bool, []byte, error) {
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(cert)))
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child("certificates")
user := users.Child(fingerprint + ".sha256")
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthPerms(entry Entry, auth string, method string) (bool, bool, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := ent.Parameters().Child("auth").Child(auth)
if data, err := auth.Child(method + ".perm").Read(); err == nil {
//log.Printf("%v %v %v\n", auth, string(data), strings.TrimSpace(string(data)) == "allow")
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if data, err := auth.Child("default.perm").Read(); err == nil {
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if auth.DirExists() && !auth.Child("inherit").Exists() {
break
}
}
return false, false, nil
}
func getPath(entry Entry) string {
p := ""
e := entry
for e != nil {
p = e.Name() + "/" + p
e = e.Parent(false)
parent := e.Parent(true)
if e == nil && parent != nil {
p = ""
e = parent
}
}
return path.Clean("/" + p)
}
func Fingerprint(cert x509.Certificate) []byte {
h := sha256.New()
h.Write(cert.RawSubjectPublicKeyInfo)
return h.Sum(nil)
} | Authenticate | identifier_name |
auth.go | package server
import (
"crypto/md5"
"crypto/rand"
"crypto/x509"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
)
type Authenticator struct {
nonces map[string]Nonce
mutex sync.Mutex
}
type AuthDomain struct {
Realm string
Entry Entry
}
type Nonce struct {
Time time.Time
Validity time.Duration
Opaque string
Domain string
Realm string
}
func CreateAuthenticator() Authenticator {
return Authenticator{
nonces: make(map[string]Nonce),
}
}
var nonceValidity = time.Minute * 5
var AnonymousAuthDomain string = "anonymous"
func RandomKey() (string, error) {
var data [12]byte
_, err := io.ReadFull(rand.Reader, data[:])
return base64.StdEncoding.EncodeToString(data[:]), err
}
func escape(s string) string {
return strings.Replace(strings.Replace(s, "\\", "\\\\", -1), "\"", "\\\"", -1)
}
/* h function for MD5 algorithm (returns a lower-case hex MD5 digest)
*/
func h(data string) string {
digest := md5.New()
digest.Write([]byte(data))
return fmt.Sprintf("%x", digest.Sum(nil))
}
func getAuthParams(req *http.Request) []AuthMethod {
var methods []AuthMethod
for _, h := range req.Header["Authorization"] {
parser := HeaderParser{Buffer: h}
parser.Init()
if err := parser.Parse(); err != nil {
log.Println(err)
} else {
parser.Execute()
}
for _, m := range parser.Methods {
methods = append(methods, m)
}
}
return methods
}
func (auth *Authenticator) purge(now time.Time) {
for k, nonce := range auth.nonces {
if nonce.Time.Add(nonce.Validity).Before(now) {
//log.Printf("Purge nonce %s\n", k)
delete(auth.nonces, k)
}
}
}
func (auth *Authenticator) generateNonce(domain string, realm string, time time.Time, valid time.Duration) (string, string, error) {
key, err := RandomKey()
if err != nil {
return "", "", err
}
opaque, err := RandomKey()
if err != nil {
return "", "", err
}
//log.Printf("Generate nonce %s\n", key)
auth.nonces[key] = Nonce{
Time: time,
Validity: valid,
Opaque: opaque,
Domain: domain,
Realm: realm,
}
return key, opaque, nil
}
func (auth *Authenticator) checkNonce(noncekey string, nonce_count string, now time.Time) (bool, string, string, string) |
func (Auth *Authenticator) checkRequest(entry Entry, req *http.Request, now time.Time) (bool, bool, bool, []error) {
var errors []error
var stale = false
var found_auth_all = false
if req.TLS != nil {
certs := req.TLS.PeerCertificates;
for _, cert := range certs {
authList, ers := authList(entry);
if len(ers) > 0 {
for _, e := range ers { errors = append(errors, e); }
}
for _, authDomainName := range authList {
found, _, err := getAuthCredsCert(entry, authDomainName, *cert)
if err != nil {
errors = append(errors, err)
continue
} else if !found {
continue
}
allow, found_auth, err := getAuthPerms(entry, authDomainName, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(*cert)))
log.Printf("TLS Authentication failure for certificate %s.sha256\n", fingerprint)
}
}
auth_params := getAuthParams(req)
for _, auth := range auth_params {
if auth.Name == "Digest" {
algorithm := auth.Params["algorithm"]
qop := auth.Params["qop"]
opaque := auth.Params["opaque"]
nonce := auth.Params["nonce"]
response := auth.Params["response"]
username := auth.Params["username"]
digest_uri := auth.Params["uri"]
cnonce := auth.Params["cnonce"]
nonce_count := auth.Params["nc"]
if algorithm != "MD5" || qop != "auth" {
//log.Println("Invalid algorithm or qop")
continue
}
ok, nonce_opaque, auth_domain, auth_realm := Auth.checkNonce(nonce, nonce_count, now)
if !ok {
stale = true
//log.Println("Stale nonce " + nonce + " count: " + nonce_count)
continue
} else if opaque != nonce_opaque {
//log.Printf("Opaque data invalid %s %s\n", opaque, nonce_opaque)
continue
}
var password string
if found, pass, err := getAuthCreds(entry, auth_domain, auth.Name, username); err != nil {
errors = append(errors, err)
continue
} else if !found {
log.Printf("Digest Authentication failure, %s no username %s\n", req.RequestURI, username)
continue
} else {
password = string(pass)
}
HA1 := h(username + ":" + auth_realm + ":" + password)
HA2 := h(req.Method + ":" + digest_uri)
KD := h(strings.Join([]string{
HA1, nonce, nonce_count, cnonce, qop, HA2}, ":"))
if KD != response {
log.Printf("Digest Authentication failure %s %s != %s\n", req.RequestURI, KD, response)
continue
}
allow, found_auth, err := getAuthPerms(entry, auth_domain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
}
allow, found_auth, err := getAuthPerms(entry, AnonymousAuthDomain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
return allow, found_auth_all, stale, errors
}
func (Auth *Authenticator) Authenticate(entry Entry, res http.ResponseWriter, req *http.Request) bool {
Auth.mutex.Lock()
defer Auth.mutex.Unlock()
now := time.Now()
Auth.purge(now)
authorized, found_auth, stale, errors := Auth.checkRequest(entry, req, now)
if len(errors) > 0 {
for err := range errors {
fmt.Println(err)
}
}
auths, auth_errs := authList(entry)
if len(auth_errs) > 0 {
for err := range auth_errs {
log.Println(err)
}
}
for _, auth := range auths {
domain, err := getAuthDomain(entry, auth)
if err != nil {
log.Println(err)
}
staleStr := ""
if stale {
staleStr = `, stale="true"`
}
if nonce, opaque, nonce_err := Auth.generateNonce(auth, domain.Realm, now, nonceValidity); nonce_err != nil {
log.Println(nonce_err)
} else {
res.Header().Add("WWW-Authenticate",
fmt.Sprintf(`Digest realm="%s", domain="%s", nonce="%s", opaque="%s"%s, algorithm="MD5", qop="auth"`,
escape(domain.Realm), escape(getPath(domain.Entry)), nonce, opaque, staleStr))
}
}
if found_auth {
return authorized
} else {
return len(auths) == 0
}
}
func authList(entry Entry) ([]string, []error) {
var errors []error
var res_auths []string
for ent := entry; ent != nil; ent = ent.Parent(true) {
auths := entry.Parameters().Child("auth")
auth_list, err := auths.Children()
if err != nil && os.IsExist(err) {
errors = append(errors, err)
continue
}
for _, auth := range auth_list {
found := false
if auth.Name() == AnonymousAuthDomain {
continue
}
for _, v := range res_auths {
if v == auth.Name() {
found = true
break
}
}
if !found {
res_auths = append(res_auths, auth.Name())
}
}
if !auths.Child("inherit").Exists() && auths.Exists() {
break
}
}
return res_auths, errors
}
func getAuthDomain(entry Entry, authname string) (AuthDomain, error) {
var domain AuthDomain
var ent Entry
var err error
var haveRealm = false
for ent = entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(authname)
if !auth.Exists() {
continue
}
if !haveRealm {
data, e := auth.Child("realm").Read()
if e == nil {
domain.Realm = string(data)
haveRealm = true
} else if e != nil && os.IsExist(err) {
domain.Realm = authname
err = e
haveRealm = true
}
}
if !auth.Child("inherit").Exists() {
break
}
}
domain.Entry = ent
return domain, nil
}
func getAuthCreds(entry Entry, auth string, method string, username string) (bool, []byte, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child(method + ".users")
user := users.Child(username)
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthCredsCert(entry Entry, auth string, cert x509.Certificate) (bool, []byte, error) {
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(cert)))
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child("certificates")
user := users.Child(fingerprint + ".sha256")
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthPerms(entry Entry, auth string, method string) (bool, bool, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := ent.Parameters().Child("auth").Child(auth)
if data, err := auth.Child(method + ".perm").Read(); err == nil {
//log.Printf("%v %v %v\n", auth, string(data), strings.TrimSpace(string(data)) == "allow")
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if data, err := auth.Child("default.perm").Read(); err == nil {
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if auth.DirExists() && !auth.Child("inherit").Exists() {
break
}
}
return false, false, nil
}
func getPath(entry Entry) string {
p := ""
e := entry
for e != nil {
p = e.Name() + "/" + p
e = e.Parent(false)
parent := e.Parent(true)
if e == nil && parent != nil {
p = ""
e = parent
}
}
return path.Clean("/" + p)
}
func Fingerprint(cert x509.Certificate) []byte {
h := sha256.New()
h.Write(cert.RawSubjectPublicKeyInfo)
return h.Sum(nil)
} | {
// FIXME do something with nonce_count
nonce, ok := auth.nonces[noncekey]
if !ok {
return false, "", "", ""
}
opaque := nonce.Opaque
auth_domain := nonce.Domain
auth_realm := nonce.Realm
valid := nonce.Time.Add(nonce.Validity).After(now)
if !valid {
delete(auth.nonces, noncekey)
//log.Printf("Purge stale nonce %s\n", noncekey)
}
return valid, opaque, auth_domain, auth_realm
} | identifier_body |
auth.go | package server
import (
"crypto/md5"
"crypto/rand"
"crypto/x509"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
)
type Authenticator struct {
nonces map[string]Nonce
mutex sync.Mutex
}
type AuthDomain struct {
Realm string
Entry Entry
}
type Nonce struct {
Time time.Time
Validity time.Duration
Opaque string
Domain string
Realm string
}
func CreateAuthenticator() Authenticator {
return Authenticator{
nonces: make(map[string]Nonce),
}
}
var nonceValidity = time.Minute * 5
var AnonymousAuthDomain string = "anonymous"
func RandomKey() (string, error) {
var data [12]byte
_, err := io.ReadFull(rand.Reader, data[:])
return base64.StdEncoding.EncodeToString(data[:]), err
}
func escape(s string) string {
return strings.Replace(strings.Replace(s, "\\", "\\\\", -1), "\"", "\\\"", -1)
}
/* h function for MD5 algorithm (returns a lower-case hex MD5 digest)
*/
func h(data string) string {
digest := md5.New()
digest.Write([]byte(data))
return fmt.Sprintf("%x", digest.Sum(nil))
}
func getAuthParams(req *http.Request) []AuthMethod {
var methods []AuthMethod
for _, h := range req.Header["Authorization"] {
parser := HeaderParser{Buffer: h}
parser.Init()
if err := parser.Parse(); err != nil {
log.Println(err)
} else {
parser.Execute()
}
for _, m := range parser.Methods {
methods = append(methods, m)
}
}
return methods
}
func (auth *Authenticator) purge(now time.Time) {
for k, nonce := range auth.nonces {
if nonce.Time.Add(nonce.Validity).Before(now) {
//log.Printf("Purge nonce %s\n", k)
delete(auth.nonces, k)
}
}
}
func (auth *Authenticator) generateNonce(domain string, realm string, time time.Time, valid time.Duration) (string, string, error) {
key, err := RandomKey()
if err != nil {
return "", "", err
}
opaque, err := RandomKey()
if err != nil {
return "", "", err
}
//log.Printf("Generate nonce %s\n", key)
auth.nonces[key] = Nonce{
Time: time,
Validity: valid,
Opaque: opaque,
Domain: domain,
Realm: realm,
}
return key, opaque, nil
}
func (auth *Authenticator) checkNonce(noncekey string, nonce_count string, now time.Time) (bool, string, string, string) {
// FIXME do something with nonce_count
nonce, ok := auth.nonces[noncekey]
if !ok {
return false, "", "", ""
}
opaque := nonce.Opaque
auth_domain := nonce.Domain
auth_realm := nonce.Realm
valid := nonce.Time.Add(nonce.Validity).After(now)
if !valid {
delete(auth.nonces, noncekey)
//log.Printf("Purge stale nonce %s\n", noncekey)
}
return valid, opaque, auth_domain, auth_realm
}
func (Auth *Authenticator) checkRequest(entry Entry, req *http.Request, now time.Time) (bool, bool, bool, []error) {
var errors []error
var stale = false
var found_auth_all = false
if req.TLS != nil {
certs := req.TLS.PeerCertificates;
for _, cert := range certs {
authList, ers := authList(entry);
if len(ers) > 0 {
for _, e := range ers { errors = append(errors, e); }
}
for _, authDomainName := range authList {
found, _, err := getAuthCredsCert(entry, authDomainName, *cert)
if err != nil {
errors = append(errors, err)
continue
} else if !found {
continue
}
allow, found_auth, err := getAuthPerms(entry, authDomainName, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(*cert)))
log.Printf("TLS Authentication failure for certificate %s.sha256\n", fingerprint)
}
}
auth_params := getAuthParams(req)
for _, auth := range auth_params {
if auth.Name == "Digest" {
algorithm := auth.Params["algorithm"]
qop := auth.Params["qop"]
opaque := auth.Params["opaque"]
nonce := auth.Params["nonce"]
response := auth.Params["response"]
username := auth.Params["username"]
digest_uri := auth.Params["uri"]
cnonce := auth.Params["cnonce"]
nonce_count := auth.Params["nc"]
if algorithm != "MD5" || qop != "auth" {
//log.Println("Invalid algorithm or qop")
continue
}
ok, nonce_opaque, auth_domain, auth_realm := Auth.checkNonce(nonce, nonce_count, now)
if !ok {
stale = true
//log.Println("Stale nonce " + nonce + " count: " + nonce_count)
continue
} else if opaque != nonce_opaque {
//log.Printf("Opaque data invalid %s %s\n", opaque, nonce_opaque)
continue
}
var password string
if found, pass, err := getAuthCreds(entry, auth_domain, auth.Name, username); err != nil {
errors = append(errors, err)
continue
} else if !found {
log.Printf("Digest Authentication failure, %s no username %s\n", req.RequestURI, username)
continue
} else {
password = string(pass)
}
HA1 := h(username + ":" + auth_realm + ":" + password)
HA2 := h(req.Method + ":" + digest_uri)
KD := h(strings.Join([]string{
HA1, nonce, nonce_count, cnonce, qop, HA2}, ":"))
if KD != response {
log.Printf("Digest Authentication failure %s %s != %s\n", req.RequestURI, KD, response)
continue
}
allow, found_auth, err := getAuthPerms(entry, auth_domain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
}
allow, found_auth, err := getAuthPerms(entry, AnonymousAuthDomain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
return allow, found_auth_all, stale, errors
}
func (Auth *Authenticator) Authenticate(entry Entry, res http.ResponseWriter, req *http.Request) bool {
Auth.mutex.Lock()
defer Auth.mutex.Unlock()
now := time.Now()
Auth.purge(now)
authorized, found_auth, stale, errors := Auth.checkRequest(entry, req, now)
if len(errors) > 0 {
for err := range errors {
fmt.Println(err)
}
}
auths, auth_errs := authList(entry)
if len(auth_errs) > 0 {
for err := range auth_errs {
log.Println(err)
}
}
for _, auth := range auths {
domain, err := getAuthDomain(entry, auth)
if err != nil {
log.Println(err)
}
staleStr := ""
if stale {
staleStr = `, stale="true"`
}
if nonce, opaque, nonce_err := Auth.generateNonce(auth, domain.Realm, now, nonceValidity); nonce_err != nil {
log.Println(nonce_err)
} else {
res.Header().Add("WWW-Authenticate",
fmt.Sprintf(`Digest realm="%s", domain="%s", nonce="%s", opaque="%s"%s, algorithm="MD5", qop="auth"`,
escape(domain.Realm), escape(getPath(domain.Entry)), nonce, opaque, staleStr))
}
}
if found_auth {
return authorized
} else {
return len(auths) == 0
}
}
func authList(entry Entry) ([]string, []error) {
var errors []error
var res_auths []string | for ent := entry; ent != nil; ent = ent.Parent(true) {
auths := entry.Parameters().Child("auth")
auth_list, err := auths.Children()
if err != nil && os.IsExist(err) {
errors = append(errors, err)
continue
}
for _, auth := range auth_list {
found := false
if auth.Name() == AnonymousAuthDomain {
continue
}
for _, v := range res_auths {
if v == auth.Name() {
found = true
break
}
}
if !found {
res_auths = append(res_auths, auth.Name())
}
}
if !auths.Child("inherit").Exists() && auths.Exists() {
break
}
}
return res_auths, errors
}
func getAuthDomain(entry Entry, authname string) (AuthDomain, error) {
var domain AuthDomain
var ent Entry
var err error
var haveRealm = false
for ent = entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(authname)
if !auth.Exists() {
continue
}
if !haveRealm {
data, e := auth.Child("realm").Read()
if e == nil {
domain.Realm = string(data)
haveRealm = true
} else if e != nil && os.IsExist(err) {
domain.Realm = authname
err = e
haveRealm = true
}
}
if !auth.Child("inherit").Exists() {
break
}
}
domain.Entry = ent
return domain, nil
}
func getAuthCreds(entry Entry, auth string, method string, username string) (bool, []byte, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child(method + ".users")
user := users.Child(username)
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthCredsCert(entry Entry, auth string, cert x509.Certificate) (bool, []byte, error) {
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(cert)))
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child("certificates")
user := users.Child(fingerprint + ".sha256")
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthPerms(entry Entry, auth string, method string) (bool, bool, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := ent.Parameters().Child("auth").Child(auth)
if data, err := auth.Child(method + ".perm").Read(); err == nil {
//log.Printf("%v %v %v\n", auth, string(data), strings.TrimSpace(string(data)) == "allow")
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if data, err := auth.Child("default.perm").Read(); err == nil {
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if auth.DirExists() && !auth.Child("inherit").Exists() {
break
}
}
return false, false, nil
}
func getPath(entry Entry) string {
p := ""
e := entry
for e != nil {
p = e.Name() + "/" + p
e = e.Parent(false)
parent := e.Parent(true)
if e == nil && parent != nil {
p = ""
e = parent
}
}
return path.Clean("/" + p)
}
func Fingerprint(cert x509.Certificate) []byte {
h := sha256.New()
h.Write(cert.RawSubjectPublicKeyInfo)
return h.Sum(nil)
} | random_line_split | |
auth.go | package server
import (
"crypto/md5"
"crypto/rand"
"crypto/x509"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
"strings"
"sync"
"time"
)
type Authenticator struct {
nonces map[string]Nonce
mutex sync.Mutex
}
type AuthDomain struct {
Realm string
Entry Entry
}
type Nonce struct {
Time time.Time
Validity time.Duration
Opaque string
Domain string
Realm string
}
func CreateAuthenticator() Authenticator {
return Authenticator{
nonces: make(map[string]Nonce),
}
}
var nonceValidity = time.Minute * 5
var AnonymousAuthDomain string = "anonymous"
func RandomKey() (string, error) {
var data [12]byte
_, err := io.ReadFull(rand.Reader, data[:])
return base64.StdEncoding.EncodeToString(data[:]), err
}
func escape(s string) string {
return strings.Replace(strings.Replace(s, "\\", "\\\\", -1), "\"", "\\\"", -1)
}
/* h function for MD5 algorithm (returns a lower-case hex MD5 digest)
*/
func h(data string) string {
digest := md5.New()
digest.Write([]byte(data))
return fmt.Sprintf("%x", digest.Sum(nil))
}
func getAuthParams(req *http.Request) []AuthMethod {
var methods []AuthMethod
for _, h := range req.Header["Authorization"] {
parser := HeaderParser{Buffer: h}
parser.Init()
if err := parser.Parse(); err != nil {
log.Println(err)
} else {
parser.Execute()
}
for _, m := range parser.Methods {
methods = append(methods, m)
}
}
return methods
}
func (auth *Authenticator) purge(now time.Time) {
for k, nonce := range auth.nonces {
if nonce.Time.Add(nonce.Validity).Before(now) {
//log.Printf("Purge nonce %s\n", k)
delete(auth.nonces, k)
}
}
}
func (auth *Authenticator) generateNonce(domain string, realm string, time time.Time, valid time.Duration) (string, string, error) {
key, err := RandomKey()
if err != nil {
return "", "", err
}
opaque, err := RandomKey()
if err != nil {
return "", "", err
}
//log.Printf("Generate nonce %s\n", key)
auth.nonces[key] = Nonce{
Time: time,
Validity: valid,
Opaque: opaque,
Domain: domain,
Realm: realm,
}
return key, opaque, nil
}
func (auth *Authenticator) checkNonce(noncekey string, nonce_count string, now time.Time) (bool, string, string, string) {
// FIXME do something with nonce_count
nonce, ok := auth.nonces[noncekey]
if !ok {
return false, "", "", ""
}
opaque := nonce.Opaque
auth_domain := nonce.Domain
auth_realm := nonce.Realm
valid := nonce.Time.Add(nonce.Validity).After(now)
if !valid {
delete(auth.nonces, noncekey)
//log.Printf("Purge stale nonce %s\n", noncekey)
}
return valid, opaque, auth_domain, auth_realm
}
func (Auth *Authenticator) checkRequest(entry Entry, req *http.Request, now time.Time) (bool, bool, bool, []error) {
var errors []error
var stale = false
var found_auth_all = false
if req.TLS != nil {
certs := req.TLS.PeerCertificates;
for _, cert := range certs {
authList, ers := authList(entry);
if len(ers) > 0 {
for _, e := range ers { errors = append(errors, e); }
}
for _, authDomainName := range authList {
found, _, err := getAuthCredsCert(entry, authDomainName, *cert)
if err != nil {
errors = append(errors, err)
continue
} else if !found {
continue
}
allow, found_auth, err := getAuthPerms(entry, authDomainName, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(*cert)))
log.Printf("TLS Authentication failure for certificate %s.sha256\n", fingerprint)
}
}
auth_params := getAuthParams(req)
for _, auth := range auth_params {
if auth.Name == "Digest" {
algorithm := auth.Params["algorithm"]
qop := auth.Params["qop"]
opaque := auth.Params["opaque"]
nonce := auth.Params["nonce"]
response := auth.Params["response"]
username := auth.Params["username"]
digest_uri := auth.Params["uri"]
cnonce := auth.Params["cnonce"]
nonce_count := auth.Params["nc"]
if algorithm != "MD5" || qop != "auth" {
//log.Println("Invalid algorithm or qop")
continue
}
ok, nonce_opaque, auth_domain, auth_realm := Auth.checkNonce(nonce, nonce_count, now)
if !ok {
stale = true
//log.Println("Stale nonce " + nonce + " count: " + nonce_count)
continue
} else if opaque != nonce_opaque {
//log.Printf("Opaque data invalid %s %s\n", opaque, nonce_opaque)
continue
}
var password string
if found, pass, err := getAuthCreds(entry, auth_domain, auth.Name, username); err != nil {
errors = append(errors, err)
continue
} else if !found {
log.Printf("Digest Authentication failure, %s no username %s\n", req.RequestURI, username)
continue
} else {
password = string(pass)
}
HA1 := h(username + ":" + auth_realm + ":" + password)
HA2 := h(req.Method + ":" + digest_uri)
KD := h(strings.Join([]string{
HA1, nonce, nonce_count, cnonce, qop, HA2}, ":"))
if KD != response {
log.Printf("Digest Authentication failure %s %s != %s\n", req.RequestURI, KD, response)
continue
}
allow, found_auth, err := getAuthPerms(entry, auth_domain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
if allow {
return true, found_auth_all, false, errors
}
}
}
allow, found_auth, err := getAuthPerms(entry, AnonymousAuthDomain, req.Method)
found_auth_all = found_auth_all || found_auth
if err != nil {
errors = append(errors, err)
}
return allow, found_auth_all, stale, errors
}
func (Auth *Authenticator) Authenticate(entry Entry, res http.ResponseWriter, req *http.Request) bool {
Auth.mutex.Lock()
defer Auth.mutex.Unlock()
now := time.Now()
Auth.purge(now)
authorized, found_auth, stale, errors := Auth.checkRequest(entry, req, now)
if len(errors) > 0 {
for err := range errors {
fmt.Println(err)
}
}
auths, auth_errs := authList(entry)
if len(auth_errs) > 0 {
for err := range auth_errs {
log.Println(err)
}
}
for _, auth := range auths {
domain, err := getAuthDomain(entry, auth)
if err != nil {
log.Println(err)
}
staleStr := ""
if stale {
staleStr = `, stale="true"`
}
if nonce, opaque, nonce_err := Auth.generateNonce(auth, domain.Realm, now, nonceValidity); nonce_err != nil {
log.Println(nonce_err)
} else {
res.Header().Add("WWW-Authenticate",
fmt.Sprintf(`Digest realm="%s", domain="%s", nonce="%s", opaque="%s"%s, algorithm="MD5", qop="auth"`,
escape(domain.Realm), escape(getPath(domain.Entry)), nonce, opaque, staleStr))
}
}
if found_auth {
return authorized
} else {
return len(auths) == 0
}
}
func authList(entry Entry) ([]string, []error) {
var errors []error
var res_auths []string
for ent := entry; ent != nil; ent = ent.Parent(true) {
auths := entry.Parameters().Child("auth")
auth_list, err := auths.Children()
if err != nil && os.IsExist(err) |
for _, auth := range auth_list {
found := false
if auth.Name() == AnonymousAuthDomain {
continue
}
for _, v := range res_auths {
if v == auth.Name() {
found = true
break
}
}
if !found {
res_auths = append(res_auths, auth.Name())
}
}
if !auths.Child("inherit").Exists() && auths.Exists() {
break
}
}
return res_auths, errors
}
func getAuthDomain(entry Entry, authname string) (AuthDomain, error) {
var domain AuthDomain
var ent Entry
var err error
var haveRealm = false
for ent = entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(authname)
if !auth.Exists() {
continue
}
if !haveRealm {
data, e := auth.Child("realm").Read()
if e == nil {
domain.Realm = string(data)
haveRealm = true
} else if e != nil && os.IsExist(err) {
domain.Realm = authname
err = e
haveRealm = true
}
}
if !auth.Child("inherit").Exists() {
break
}
}
domain.Entry = ent
return domain, nil
}
func getAuthCreds(entry Entry, auth string, method string, username string) (bool, []byte, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child(method + ".users")
user := users.Child(username)
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthCredsCert(entry Entry, auth string, cert x509.Certificate) (bool, []byte, error) {
fingerprint := strings.ToLower(hex.EncodeToString(Fingerprint(cert)))
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := entry.Parameters().Child("auth").Child(auth)
if !auth.Exists() {
continue
}
users := auth.Child("certificates")
user := users.Child(fingerprint + ".sha256")
data, e := user.Read()
if e == nil {
return true, data, nil
} else if e != nil && os.IsExist(e) {
return false, []byte{}, e
}
if !auth.Child("inherit").Exists() {
break
}
}
return false, []byte{}, nil
}
func getAuthPerms(entry Entry, auth string, method string) (bool, bool, error) {
for ent := entry; ent != nil; ent = ent.Parent(true) {
auth := ent.Parameters().Child("auth").Child(auth)
if data, err := auth.Child(method + ".perm").Read(); err == nil {
//log.Printf("%v %v %v\n", auth, string(data), strings.TrimSpace(string(data)) == "allow")
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if data, err := auth.Child("default.perm").Read(); err == nil {
return strings.TrimSpace(string(data)) == "allow", true, nil
} else if err != nil && os.IsExist(err) {
return false, true, err
}
if auth.DirExists() && !auth.Child("inherit").Exists() {
break
}
}
return false, false, nil
}
func getPath(entry Entry) string {
p := ""
e := entry
for e != nil {
p = e.Name() + "/" + p
e = e.Parent(false)
parent := e.Parent(true)
if e == nil && parent != nil {
p = ""
e = parent
}
}
return path.Clean("/" + p)
}
func Fingerprint(cert x509.Certificate) []byte {
h := sha256.New()
h.Write(cert.RawSubjectPublicKeyInfo)
return h.Sum(nil)
} | {
errors = append(errors, err)
continue
} | conditional_block |
tools.py | import matplotlib.pyplot as plt
import numpy as np
cos = np.cos
sin = np.sin
twopi = np.pi*2.
pi = np.pi
def myrotate(th):
# Add pi/2 to make it reference the x-axis
# on range [0,2pi]
# th = (th+pi/2.) if th<(3.*pi/2.) else (th-3.*pi/2.)
# if th > pi: th -= pi
th = th + pi/2.
if th > twopi: th-= twopi
return th
def dist_from(ref,rs):
# ref is 1x2 ndarray
# rs is list of vectors to compare, nx2
return np.linalg.norm((ref-rs),axis=1)
def nematicdirector(Q):
# Given Q matrix return order parameter and angle
w,v = np.linalg.eig(Q)
idx = np.argmax(w)
Lam = w[idx] # Equiv to sqrt(S*S+T*T)
nu = v[:,idx]
alpha = np.angle(np.complex(nu[0,0],nu[1,0]))
if alpha < 0: alpha = twopi+alpha
return Lam, alpha
def quad_features(pca_data,red_dims):
# pca_data should be the nbr vector after the pca transformation
# Let's have this function return feature 1 as f_1^2 +f_2^2
# and feature 2 as f_3^2 + f_4^2
#
# red_dims are the dims to squash together in form [[0,1],[4,4]] etc.
new_pca_data = pca_data.copy()
for pair in red_dims:
new_pca_data[:,pair[0]] = np.sqrt(np.square(pca_data[:,pair[0]]) + np.square(pca_data[:,pair[1]]))
todel = []
for pair in red_dims:
if pair[0] == pair[1]: continue
todel.append(pair[1])
new_pca_data = np.delete(new_pca_data,todel,axis=1)
return new_pca_data
def gen_probes(nx,edge,do_shift=True):
nprobe = nx**2
probes = np.zeros((nprobe,2))
dx = edge/nx
dy = edge/nx
halfdx = dx/2.
shift = 0
if do_shift:
shift = -edge/2.
iprobe = 0
for iy in range(nx):
for ix in range(nx):
# Start from top left of image and read right
probes[iprobe] = [dx*ix + halfdx + shift, -shift - dy*iy - halfdx]
iprobe += 1
return probes
def norm_angles(thetas,alpha):
'''
Given 1D array of angles, express the angles relative to alpha
thetas and alpha should come in range [0,2pi]
'''
th0 = alpha
for ith, th in enumerate(thetas):
dth = th - th0
if dth > pi/2.:
if dth > 3*pi/2.:
th -= twopi
else:
th -= pi
if dth < -pi/2.:
if dth < -3*pi/2.:
th += twopi
else:
th += pi
thetas[ith] = th
th0 = th
return thetas
def winding_angles(thetas_,alpha,pol=0):
'''
Given 1D array of angles, return their angles relative to alpha
such that the polarity is closest to that of the angle previous to it
in the array
I think it boils down to deciding on a direction that things are spinning
After that we can make a better adjustment of the polarity conversion
region, say [(+/-) pi/4, (-/+) 3pi/4]
There are two ideas that come to mind: a threshold method or averaging method
for determining when to decide direction
Returns corrected thetas that are with respect to alpha
'''
thetas = thetas_.copy()
nth = thetas.shape[0]
# thresh = pi/2.
th0 = alpha
dalpha = 0
for ith, th in enumerate(thetas):
dth = th - th0
# print th, dth
if dth > pi/2 + pol*pi/4:
if dth > 3*pi/2 + pol*pi/4:
th -= twopi
else:
th -= pi
if dth < -pi/2 + pol*pi/4:
if dth < -3*pi/2 + pol*pi/4:
th += twopi
else:
th += pi
# dalpha = th - alpha
# if (abs(dalpha) > thresh) and (pol==0):
dalpha += th - alpha
if (ith > nth//4) and (pol==0): | th0 = th
# Detect false rotations
if (0):
gap = 5
# Thetas are wrt alpha
for ith, th in enumerate(thetas):
if ith > gap:
iref = max(0,ith-gap)
dth = th - thetas[iref]
if dth > pi:
# angles should be considered as returning
# to earlier ref, not further away
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth > pi/2:
thetas[iref+1+isubth] = subth - pi
thetas[ith] = th - pi
elif dth < -pi:
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth < pi/2:
thetas[iref+1+isubth] = subth + pi
thetas[ith] = th + pi
return thetas
def polar_cluster_sort(rods_,alpha,nndist=0.):
'''
rod x,y coordinates must be relative to their probe center
angular coordinates are in range [0,2pi]
Given your application, they will have their 4th column as a
variable vector
Instead of strict polar sort, the next rod is actually a nearest
neighbour in the winding direction
We can achieve this by first checking to see if there are any
neighbours in the nndist along the winding direction
We can sort this in place. Let's add a new column that we
update with nn distances to the current rod, and we only check
rods that are upcoming
Notice, rods are already sorted based on distance to center
Returns rods with final dimension being th_jalpha
'''
rods = rods_.copy()
phi = np.angle(rods[:,0] + 1j*rods[:,1])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
rods[:,-1] = phi_jalpha
rods = rods[rods[:,-1].argsort()]
nrod = len(rods)
idxs = np.arange(nrod).reshape(nrod,1)
rods = np.append(idxs,rods,axis=1)
rods2 = rods.copy()
rods3 = rods.copy()
# rods is sorted by phi_jalpha
# rods2 will be used for holding dists
# rods3 is also sorted by phi_jalpha
checked = []
rod3cnt = 0
for rod in rods:
start = int(rod[0])
if start not in checked:
# Add to rods3
checked.append(start)
rods3[rod3cnt] = rod
rod3cnt += 1
cent = np.asarray([rod[1],rod[2]])
dists = dist_from(cent,rods2[:,1:3])
rods2[:,-1] = dists
rods2 = rods2[rods2[:,-1].argsort()]
rodnbrs = rods2[1:6]
for nbr in rodnbrs:
if int(nbr[0]) in checked:
continue
# Add nbr rods to rods3 if needed
# See if it's close to our subject rod
# and if it is further along phi
dphi = rods[int(nbr[0]), -1] - rods[start,-1]
# print nbr
# print rods[int(nbr[0])]
# print rod
# print dphi
if (nbr[-1] < nndist) and (dphi > 0.) and (dphi < pi/2):
checked.append(int(nbr[0]))
rods3[rod3cnt] = nbr
rod3cnt+=1
if abs(sum(rods[:,1]) - sum(rods3[:,1])) > 0.00001:
print "wuhoh"
return rods3[:,1:]
def get_lat_nbrs(block,n_nbr,edge,nx,probes,use_bulk=False,\
method="random",ret_nbrs=False,sparse_bulk_factor=1,\
use_xyth=False):
'''
Return (xprobe, n_nbr) array of neighbours
I use xprobe because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
# Create probes
nprobe = probes.shape[0]
block2 = block.copy()
nrod = len(block)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
nnbr_ft = 2
features = np.zeros(shape=(nprobe,n_nbr)) if not (use_xyth) else np.zeros(shape=(nprobe,n_nbr*nnbr_ft))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,prob in enumerate(probes):
cent = np.asarray([prob[0],prob[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[:n_nbr])
# Convert coordinates, relative to center of mass
com_x, com_y = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
# com_x, com_y = prob[0], prob[1]
nbrs[:,0] -= com_x
nbrs[:,1] -= com_y
# Convert angles to be relative to nem director
# Angles are originally [0,2pi]
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q) # alpha is range [0,2pi]
alphas[i] = alpha
th_jalpha = th_j - alpha # [-2pi,2pi]
if method == "random":
nbrs[:,-1] = cos(2.*th_jalpha)
# nbrs[:,-1] = cos(2.*th_j)
np.random.shuffle(nbrs)
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "radial":
if use_xyth:
nbrs[:,-1] = cos(2.*th_jalpha)
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
# Normalize rod angles first
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.4)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha) / pi
# nbrs[:,-1] = np.square(nbrs[:,-1])
nbrs[:,-1] = cos(2.*nbrs[:,-1])
# nbrs[:,-1] = (norm_angles(nbrs[:,2],alpha))
nbrs = nbrs[nbrs[:,-1].argsort()]
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WA":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.)
nbrs[:,-1] = cos(2.* (nbrs[:,2] - alpha) )
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WD":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.3)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha, pol=0)
# nbrs[:,-1] = nbrs[:,2] - alpha
c2 = cos(2. * nbrs[:,-1])
s2 = sin(2. * nbrs[:,-1])
# phi = np.angle(nbrs[:,0] + 1j*nbrs[:,1])
# phi_jalpha = (phi - alpha)%twopi
# dc2 = (c2 - np.roll(c2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# ds2 = (s2 - np.roll(s2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# nbrs[:,-1] = sin( 2. * winding_angles(nbrs[:,2],alpha, pol=0))
if use_xyth:
features[i] = np.array([c2,s2]).flatten()
else:
features[i,:] = nbrs[:,-1] / pi
# Return nbrs to original coords
nbrs[:,0] += com_x
nbrs[:,1] += com_y
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
def get_xyth_feature(nbrs):
c2 = cos(2.* nbrs[:,2])
s2 = sin(2.* nbrs[:,2])
return np.array([c2,s2]).flatten()
def plotLine(x1,y1,x2,y2,c='b',ax=None,lw=0.4,alpha=1.0):
if ax: # given axis handle
ax.plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
else:
plt.gca().plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
def plotrods(rods,myax,halfL=0.5,hotrods=[],col='k',lw=0.4,alpha=1.0,add_crosses=False,number=None):
for r in rods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c=col,lw=lw,ax=myax,alpha=alpha)
if add_crosses:
myax.plot(r[0],r[1],"+",markersize=20,color="grey",linewidth=5)
elif number:
font = {'family': 'sans-serif',
'size': 13,
'usetex': False
}
myax.text(r[0],r[1],"%d"%(number),fontdict=font,fontsize=12,
horizontalalignment="center",
verticalalignment="center")
if len(hotrods)>0:
for r in hotrods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c='r',lw=1.6,ax=myax,alpha=alpha)
def get_nbrs(block,n_nbr,edge,use_bulk=False,method="random",ret_nbrs=False,sparse_bulk_factor=1):
'''
Return (xrod, n_nbr) array of neighbours
I use xrod because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
bulk = block.copy()
bulk = bulk[np.where(bulk[:,0] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,0] < 0.25*edge)]
bulk = bulk[np.where(bulk[:,1] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,1] < 0.25*edge)]
bulk = bulk[::sparse_bulk_factor]
perim = block.copy()
perimidx = np.where(perim[:,0] < -0.25*edge)
perimidx = np.append(perimidx,np.where(perim[:,0] > 0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] < -0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] > 0.25*edge))
perimidx = np.unique(perimidx) # remove double counts
perim = perim[perimidx]
probes = np.append(perim,bulk,axis=0)
# This is mainly just for viewing the rods in gen_nbrfiles
if sparse_bulk_factor == 1:
probes = block.copy()
block2 = block.copy()
nrod = len(block)
nprobe = len(probes)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
features = np.zeros(shape=(nprobe,n_nbr))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,rod in enumerate(probes):
cent = np.asarray([rod[0],rod[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[1:1+n_nbr])
# Convert coordinates, relative to center of mass
cx, cy = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
nbrs[:,0] -= cx
nbrs[:,1] -= cy
# Convert angles to be relative to nem director
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q)
# Try using rod probe as alpha
# alpha = rod[2]
th_jalpha = th_j - alpha
alphas[i] = rod[0]
if method == "random":
np.random.shuffle(nbrs)
th_jalpha = th_j - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "radial":
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
nbrs[:,-1] = cos(2.*th_jalpha)
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
nbrs = nbrs[nbrs[:,-1].argsort()[::-1]]
features[i,:] = nbrs[:,-1]
nbrs[:,-1] = features[i,:]
if method == "polar":
# get polar coordinates
# arctan2 returns [-pi,pi]
phi = np.arctan2(nbrs[:,1],nbrs[:,0])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
nbrs[:,-1] = phi_jalpha
nbrs = nbrs[nbrs[:,-1].argsort()]
th_jalpha = nbrs[:,2] - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
# Return nbrs to original coords
nbrs[:,0] += cx
nbrs[:,1] += cy
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features | pol = np.sign(dalpha)
thetas[ith] = th - alpha | random_line_split |
tools.py | import matplotlib.pyplot as plt
import numpy as np
cos = np.cos
sin = np.sin
twopi = np.pi*2.
pi = np.pi
def myrotate(th):
# Add pi/2 to make it reference the x-axis
# on range [0,2pi]
# th = (th+pi/2.) if th<(3.*pi/2.) else (th-3.*pi/2.)
# if th > pi: th -= pi
th = th + pi/2.
if th > twopi: th-= twopi
return th
def | (ref,rs):
# ref is 1x2 ndarray
# rs is list of vectors to compare, nx2
return np.linalg.norm((ref-rs),axis=1)
def nematicdirector(Q):
# Given Q matrix return order parameter and angle
w,v = np.linalg.eig(Q)
idx = np.argmax(w)
Lam = w[idx] # Equiv to sqrt(S*S+T*T)
nu = v[:,idx]
alpha = np.angle(np.complex(nu[0,0],nu[1,0]))
if alpha < 0: alpha = twopi+alpha
return Lam, alpha
def quad_features(pca_data,red_dims):
# pca_data should be the nbr vector after the pca transformation
# Let's have this function return feature 1 as f_1^2 +f_2^2
# and feature 2 as f_3^2 + f_4^2
#
# red_dims are the dims to squash together in form [[0,1],[4,4]] etc.
new_pca_data = pca_data.copy()
for pair in red_dims:
new_pca_data[:,pair[0]] = np.sqrt(np.square(pca_data[:,pair[0]]) + np.square(pca_data[:,pair[1]]))
todel = []
for pair in red_dims:
if pair[0] == pair[1]: continue
todel.append(pair[1])
new_pca_data = np.delete(new_pca_data,todel,axis=1)
return new_pca_data
def gen_probes(nx,edge,do_shift=True):
nprobe = nx**2
probes = np.zeros((nprobe,2))
dx = edge/nx
dy = edge/nx
halfdx = dx/2.
shift = 0
if do_shift:
shift = -edge/2.
iprobe = 0
for iy in range(nx):
for ix in range(nx):
# Start from top left of image and read right
probes[iprobe] = [dx*ix + halfdx + shift, -shift - dy*iy - halfdx]
iprobe += 1
return probes
def norm_angles(thetas,alpha):
'''
Given 1D array of angles, express the angles relative to alpha
thetas and alpha should come in range [0,2pi]
'''
th0 = alpha
for ith, th in enumerate(thetas):
dth = th - th0
if dth > pi/2.:
if dth > 3*pi/2.:
th -= twopi
else:
th -= pi
if dth < -pi/2.:
if dth < -3*pi/2.:
th += twopi
else:
th += pi
thetas[ith] = th
th0 = th
return thetas
def winding_angles(thetas_,alpha,pol=0):
'''
Given 1D array of angles, return their angles relative to alpha
such that the polarity is closest to that of the angle previous to it
in the array
I think it boils down to deciding on a direction that things are spinning
After that we can make a better adjustment of the polarity conversion
region, say [(+/-) pi/4, (-/+) 3pi/4]
There are two ideas that come to mind: a threshold method or averaging method
for determining when to decide direction
Returns corrected thetas that are with respect to alpha
'''
thetas = thetas_.copy()
nth = thetas.shape[0]
# thresh = pi/2.
th0 = alpha
dalpha = 0
for ith, th in enumerate(thetas):
dth = th - th0
# print th, dth
if dth > pi/2 + pol*pi/4:
if dth > 3*pi/2 + pol*pi/4:
th -= twopi
else:
th -= pi
if dth < -pi/2 + pol*pi/4:
if dth < -3*pi/2 + pol*pi/4:
th += twopi
else:
th += pi
# dalpha = th - alpha
# if (abs(dalpha) > thresh) and (pol==0):
dalpha += th - alpha
if (ith > nth//4) and (pol==0):
pol = np.sign(dalpha)
thetas[ith] = th - alpha
th0 = th
# Detect false rotations
if (0):
gap = 5
# Thetas are wrt alpha
for ith, th in enumerate(thetas):
if ith > gap:
iref = max(0,ith-gap)
dth = th - thetas[iref]
if dth > pi:
# angles should be considered as returning
# to earlier ref, not further away
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth > pi/2:
thetas[iref+1+isubth] = subth - pi
thetas[ith] = th - pi
elif dth < -pi:
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth < pi/2:
thetas[iref+1+isubth] = subth + pi
thetas[ith] = th + pi
return thetas
def polar_cluster_sort(rods_,alpha,nndist=0.):
'''
rod x,y coordinates must be relative to their probe center
angular coordinates are in range [0,2pi]
Given your application, they will have their 4th column as a
variable vector
Instead of strict polar sort, the next rod is actually a nearest
neighbour in the winding direction
We can achieve this by first checking to see if there are any
neighbours in the nndist along the winding direction
We can sort this in place. Let's add a new column that we
update with nn distances to the current rod, and we only check
rods that are upcoming
Notice, rods are already sorted based on distance to center
Returns rods with final dimension being th_jalpha
'''
rods = rods_.copy()
phi = np.angle(rods[:,0] + 1j*rods[:,1])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
rods[:,-1] = phi_jalpha
rods = rods[rods[:,-1].argsort()]
nrod = len(rods)
idxs = np.arange(nrod).reshape(nrod,1)
rods = np.append(idxs,rods,axis=1)
rods2 = rods.copy()
rods3 = rods.copy()
# rods is sorted by phi_jalpha
# rods2 will be used for holding dists
# rods3 is also sorted by phi_jalpha
checked = []
rod3cnt = 0
for rod in rods:
start = int(rod[0])
if start not in checked:
# Add to rods3
checked.append(start)
rods3[rod3cnt] = rod
rod3cnt += 1
cent = np.asarray([rod[1],rod[2]])
dists = dist_from(cent,rods2[:,1:3])
rods2[:,-1] = dists
rods2 = rods2[rods2[:,-1].argsort()]
rodnbrs = rods2[1:6]
for nbr in rodnbrs:
if int(nbr[0]) in checked:
continue
# Add nbr rods to rods3 if needed
# See if it's close to our subject rod
# and if it is further along phi
dphi = rods[int(nbr[0]), -1] - rods[start,-1]
# print nbr
# print rods[int(nbr[0])]
# print rod
# print dphi
if (nbr[-1] < nndist) and (dphi > 0.) and (dphi < pi/2):
checked.append(int(nbr[0]))
rods3[rod3cnt] = nbr
rod3cnt+=1
if abs(sum(rods[:,1]) - sum(rods3[:,1])) > 0.00001:
print "wuhoh"
return rods3[:,1:]
def get_lat_nbrs(block,n_nbr,edge,nx,probes,use_bulk=False,\
method="random",ret_nbrs=False,sparse_bulk_factor=1,\
use_xyth=False):
'''
Return (xprobe, n_nbr) array of neighbours
I use xprobe because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
# Create probes
nprobe = probes.shape[0]
block2 = block.copy()
nrod = len(block)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
nnbr_ft = 2
features = np.zeros(shape=(nprobe,n_nbr)) if not (use_xyth) else np.zeros(shape=(nprobe,n_nbr*nnbr_ft))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,prob in enumerate(probes):
cent = np.asarray([prob[0],prob[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[:n_nbr])
# Convert coordinates, relative to center of mass
com_x, com_y = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
# com_x, com_y = prob[0], prob[1]
nbrs[:,0] -= com_x
nbrs[:,1] -= com_y
# Convert angles to be relative to nem director
# Angles are originally [0,2pi]
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q) # alpha is range [0,2pi]
alphas[i] = alpha
th_jalpha = th_j - alpha # [-2pi,2pi]
if method == "random":
nbrs[:,-1] = cos(2.*th_jalpha)
# nbrs[:,-1] = cos(2.*th_j)
np.random.shuffle(nbrs)
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "radial":
if use_xyth:
nbrs[:,-1] = cos(2.*th_jalpha)
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
# Normalize rod angles first
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.4)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha) / pi
# nbrs[:,-1] = np.square(nbrs[:,-1])
nbrs[:,-1] = cos(2.*nbrs[:,-1])
# nbrs[:,-1] = (norm_angles(nbrs[:,2],alpha))
nbrs = nbrs[nbrs[:,-1].argsort()]
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WA":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.)
nbrs[:,-1] = cos(2.* (nbrs[:,2] - alpha) )
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WD":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.3)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha, pol=0)
# nbrs[:,-1] = nbrs[:,2] - alpha
c2 = cos(2. * nbrs[:,-1])
s2 = sin(2. * nbrs[:,-1])
# phi = np.angle(nbrs[:,0] + 1j*nbrs[:,1])
# phi_jalpha = (phi - alpha)%twopi
# dc2 = (c2 - np.roll(c2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# ds2 = (s2 - np.roll(s2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# nbrs[:,-1] = sin( 2. * winding_angles(nbrs[:,2],alpha, pol=0))
if use_xyth:
features[i] = np.array([c2,s2]).flatten()
else:
features[i,:] = nbrs[:,-1] / pi
# Return nbrs to original coords
nbrs[:,0] += com_x
nbrs[:,1] += com_y
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
def get_xyth_feature(nbrs):
c2 = cos(2.* nbrs[:,2])
s2 = sin(2.* nbrs[:,2])
return np.array([c2,s2]).flatten()
def plotLine(x1,y1,x2,y2,c='b',ax=None,lw=0.4,alpha=1.0):
if ax: # given axis handle
ax.plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
else:
plt.gca().plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
def plotrods(rods,myax,halfL=0.5,hotrods=[],col='k',lw=0.4,alpha=1.0,add_crosses=False,number=None):
for r in rods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c=col,lw=lw,ax=myax,alpha=alpha)
if add_crosses:
myax.plot(r[0],r[1],"+",markersize=20,color="grey",linewidth=5)
elif number:
font = {'family': 'sans-serif',
'size': 13,
'usetex': False
}
myax.text(r[0],r[1],"%d"%(number),fontdict=font,fontsize=12,
horizontalalignment="center",
verticalalignment="center")
if len(hotrods)>0:
for r in hotrods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c='r',lw=1.6,ax=myax,alpha=alpha)
def get_nbrs(block,n_nbr,edge,use_bulk=False,method="random",ret_nbrs=False,sparse_bulk_factor=1):
'''
Return (xrod, n_nbr) array of neighbours
I use xrod because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
bulk = block.copy()
bulk = bulk[np.where(bulk[:,0] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,0] < 0.25*edge)]
bulk = bulk[np.where(bulk[:,1] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,1] < 0.25*edge)]
bulk = bulk[::sparse_bulk_factor]
perim = block.copy()
perimidx = np.where(perim[:,0] < -0.25*edge)
perimidx = np.append(perimidx,np.where(perim[:,0] > 0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] < -0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] > 0.25*edge))
perimidx = np.unique(perimidx) # remove double counts
perim = perim[perimidx]
probes = np.append(perim,bulk,axis=0)
# This is mainly just for viewing the rods in gen_nbrfiles
if sparse_bulk_factor == 1:
probes = block.copy()
block2 = block.copy()
nrod = len(block)
nprobe = len(probes)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
features = np.zeros(shape=(nprobe,n_nbr))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,rod in enumerate(probes):
cent = np.asarray([rod[0],rod[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[1:1+n_nbr])
# Convert coordinates, relative to center of mass
cx, cy = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
nbrs[:,0] -= cx
nbrs[:,1] -= cy
# Convert angles to be relative to nem director
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q)
# Try using rod probe as alpha
# alpha = rod[2]
th_jalpha = th_j - alpha
alphas[i] = rod[0]
if method == "random":
np.random.shuffle(nbrs)
th_jalpha = th_j - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "radial":
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
nbrs[:,-1] = cos(2.*th_jalpha)
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
nbrs = nbrs[nbrs[:,-1].argsort()[::-1]]
features[i,:] = nbrs[:,-1]
nbrs[:,-1] = features[i,:]
if method == "polar":
# get polar coordinates
# arctan2 returns [-pi,pi]
phi = np.arctan2(nbrs[:,1],nbrs[:,0])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
nbrs[:,-1] = phi_jalpha
nbrs = nbrs[nbrs[:,-1].argsort()]
th_jalpha = nbrs[:,2] - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
# Return nbrs to original coords
nbrs[:,0] += cx
nbrs[:,1] += cy
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
| dist_from | identifier_name |
tools.py | import matplotlib.pyplot as plt
import numpy as np
cos = np.cos
sin = np.sin
twopi = np.pi*2.
pi = np.pi
def myrotate(th):
# Add pi/2 to make it reference the x-axis
# on range [0,2pi]
# th = (th+pi/2.) if th<(3.*pi/2.) else (th-3.*pi/2.)
# if th > pi: th -= pi
th = th + pi/2.
if th > twopi: th-= twopi
return th
def dist_from(ref,rs):
# ref is 1x2 ndarray
# rs is list of vectors to compare, nx2
return np.linalg.norm((ref-rs),axis=1)
def nematicdirector(Q):
# Given Q matrix return order parameter and angle
w,v = np.linalg.eig(Q)
idx = np.argmax(w)
Lam = w[idx] # Equiv to sqrt(S*S+T*T)
nu = v[:,idx]
alpha = np.angle(np.complex(nu[0,0],nu[1,0]))
if alpha < 0: alpha = twopi+alpha
return Lam, alpha
def quad_features(pca_data,red_dims):
# pca_data should be the nbr vector after the pca transformation
# Let's have this function return feature 1 as f_1^2 +f_2^2
# and feature 2 as f_3^2 + f_4^2
#
# red_dims are the dims to squash together in form [[0,1],[4,4]] etc.
new_pca_data = pca_data.copy()
for pair in red_dims:
new_pca_data[:,pair[0]] = np.sqrt(np.square(pca_data[:,pair[0]]) + np.square(pca_data[:,pair[1]]))
todel = []
for pair in red_dims:
if pair[0] == pair[1]: continue
todel.append(pair[1])
new_pca_data = np.delete(new_pca_data,todel,axis=1)
return new_pca_data
def gen_probes(nx,edge,do_shift=True):
nprobe = nx**2
probes = np.zeros((nprobe,2))
dx = edge/nx
dy = edge/nx
halfdx = dx/2.
shift = 0
if do_shift:
shift = -edge/2.
iprobe = 0
for iy in range(nx):
for ix in range(nx):
# Start from top left of image and read right
probes[iprobe] = [dx*ix + halfdx + shift, -shift - dy*iy - halfdx]
iprobe += 1
return probes
def norm_angles(thetas,alpha):
'''
Given 1D array of angles, express the angles relative to alpha
thetas and alpha should come in range [0,2pi]
'''
th0 = alpha
for ith, th in enumerate(thetas):
dth = th - th0
if dth > pi/2.:
if dth > 3*pi/2.:
th -= twopi
else:
th -= pi
if dth < -pi/2.:
if dth < -3*pi/2.:
th += twopi
else:
th += pi
thetas[ith] = th
th0 = th
return thetas
def winding_angles(thetas_,alpha,pol=0):
'''
Given 1D array of angles, return their angles relative to alpha
such that the polarity is closest to that of the angle previous to it
in the array
I think it boils down to deciding on a direction that things are spinning
After that we can make a better adjustment of the polarity conversion
region, say [(+/-) pi/4, (-/+) 3pi/4]
There are two ideas that come to mind: a threshold method or averaging method
for determining when to decide direction
Returns corrected thetas that are with respect to alpha
'''
thetas = thetas_.copy()
nth = thetas.shape[0]
# thresh = pi/2.
th0 = alpha
dalpha = 0
for ith, th in enumerate(thetas):
dth = th - th0
# print th, dth
if dth > pi/2 + pol*pi/4:
if dth > 3*pi/2 + pol*pi/4:
th -= twopi
else:
th -= pi
if dth < -pi/2 + pol*pi/4:
if dth < -3*pi/2 + pol*pi/4:
th += twopi
else:
th += pi
# dalpha = th - alpha
# if (abs(dalpha) > thresh) and (pol==0):
dalpha += th - alpha
if (ith > nth//4) and (pol==0):
pol = np.sign(dalpha)
thetas[ith] = th - alpha
th0 = th
# Detect false rotations
if (0):
gap = 5
# Thetas are wrt alpha
for ith, th in enumerate(thetas):
if ith > gap:
iref = max(0,ith-gap)
dth = th - thetas[iref]
if dth > pi:
# angles should be considered as returning
# to earlier ref, not further away
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth > pi/2:
thetas[iref+1+isubth] = subth - pi
thetas[ith] = th - pi
elif dth < -pi:
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth < pi/2:
thetas[iref+1+isubth] = subth + pi
thetas[ith] = th + pi
return thetas
def polar_cluster_sort(rods_,alpha,nndist=0.):
|
def get_lat_nbrs(block,n_nbr,edge,nx,probes,use_bulk=False,\
method="random",ret_nbrs=False,sparse_bulk_factor=1,\
use_xyth=False):
'''
Return (xprobe, n_nbr) array of neighbours
I use xprobe because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
# Create probes
nprobe = probes.shape[0]
block2 = block.copy()
nrod = len(block)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
nnbr_ft = 2
features = np.zeros(shape=(nprobe,n_nbr)) if not (use_xyth) else np.zeros(shape=(nprobe,n_nbr*nnbr_ft))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,prob in enumerate(probes):
cent = np.asarray([prob[0],prob[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[:n_nbr])
# Convert coordinates, relative to center of mass
com_x, com_y = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
# com_x, com_y = prob[0], prob[1]
nbrs[:,0] -= com_x
nbrs[:,1] -= com_y
# Convert angles to be relative to nem director
# Angles are originally [0,2pi]
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q) # alpha is range [0,2pi]
alphas[i] = alpha
th_jalpha = th_j - alpha # [-2pi,2pi]
if method == "random":
nbrs[:,-1] = cos(2.*th_jalpha)
# nbrs[:,-1] = cos(2.*th_j)
np.random.shuffle(nbrs)
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "radial":
if use_xyth:
nbrs[:,-1] = cos(2.*th_jalpha)
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
# Normalize rod angles first
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.4)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha) / pi
# nbrs[:,-1] = np.square(nbrs[:,-1])
nbrs[:,-1] = cos(2.*nbrs[:,-1])
# nbrs[:,-1] = (norm_angles(nbrs[:,2],alpha))
nbrs = nbrs[nbrs[:,-1].argsort()]
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WA":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.)
nbrs[:,-1] = cos(2.* (nbrs[:,2] - alpha) )
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WD":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.3)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha, pol=0)
# nbrs[:,-1] = nbrs[:,2] - alpha
c2 = cos(2. * nbrs[:,-1])
s2 = sin(2. * nbrs[:,-1])
# phi = np.angle(nbrs[:,0] + 1j*nbrs[:,1])
# phi_jalpha = (phi - alpha)%twopi
# dc2 = (c2 - np.roll(c2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# ds2 = (s2 - np.roll(s2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# nbrs[:,-1] = sin( 2. * winding_angles(nbrs[:,2],alpha, pol=0))
if use_xyth:
features[i] = np.array([c2,s2]).flatten()
else:
features[i,:] = nbrs[:,-1] / pi
# Return nbrs to original coords
nbrs[:,0] += com_x
nbrs[:,1] += com_y
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
def get_xyth_feature(nbrs):
c2 = cos(2.* nbrs[:,2])
s2 = sin(2.* nbrs[:,2])
return np.array([c2,s2]).flatten()
def plotLine(x1,y1,x2,y2,c='b',ax=None,lw=0.4,alpha=1.0):
if ax: # given axis handle
ax.plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
else:
plt.gca().plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
def plotrods(rods,myax,halfL=0.5,hotrods=[],col='k',lw=0.4,alpha=1.0,add_crosses=False,number=None):
for r in rods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c=col,lw=lw,ax=myax,alpha=alpha)
if add_crosses:
myax.plot(r[0],r[1],"+",markersize=20,color="grey",linewidth=5)
elif number:
font = {'family': 'sans-serif',
'size': 13,
'usetex': False
}
myax.text(r[0],r[1],"%d"%(number),fontdict=font,fontsize=12,
horizontalalignment="center",
verticalalignment="center")
if len(hotrods)>0:
for r in hotrods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c='r',lw=1.6,ax=myax,alpha=alpha)
def get_nbrs(block,n_nbr,edge,use_bulk=False,method="random",ret_nbrs=False,sparse_bulk_factor=1):
'''
Return (xrod, n_nbr) array of neighbours
I use xrod because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
bulk = block.copy()
bulk = bulk[np.where(bulk[:,0] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,0] < 0.25*edge)]
bulk = bulk[np.where(bulk[:,1] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,1] < 0.25*edge)]
bulk = bulk[::sparse_bulk_factor]
perim = block.copy()
perimidx = np.where(perim[:,0] < -0.25*edge)
perimidx = np.append(perimidx,np.where(perim[:,0] > 0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] < -0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] > 0.25*edge))
perimidx = np.unique(perimidx) # remove double counts
perim = perim[perimidx]
probes = np.append(perim,bulk,axis=0)
# This is mainly just for viewing the rods in gen_nbrfiles
if sparse_bulk_factor == 1:
probes = block.copy()
block2 = block.copy()
nrod = len(block)
nprobe = len(probes)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
features = np.zeros(shape=(nprobe,n_nbr))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,rod in enumerate(probes):
cent = np.asarray([rod[0],rod[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[1:1+n_nbr])
# Convert coordinates, relative to center of mass
cx, cy = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
nbrs[:,0] -= cx
nbrs[:,1] -= cy
# Convert angles to be relative to nem director
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q)
# Try using rod probe as alpha
# alpha = rod[2]
th_jalpha = th_j - alpha
alphas[i] = rod[0]
if method == "random":
np.random.shuffle(nbrs)
th_jalpha = th_j - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "radial":
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
nbrs[:,-1] = cos(2.*th_jalpha)
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
nbrs = nbrs[nbrs[:,-1].argsort()[::-1]]
features[i,:] = nbrs[:,-1]
nbrs[:,-1] = features[i,:]
if method == "polar":
# get polar coordinates
# arctan2 returns [-pi,pi]
phi = np.arctan2(nbrs[:,1],nbrs[:,0])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
nbrs[:,-1] = phi_jalpha
nbrs = nbrs[nbrs[:,-1].argsort()]
th_jalpha = nbrs[:,2] - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
# Return nbrs to original coords
nbrs[:,0] += cx
nbrs[:,1] += cy
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
| '''
rod x,y coordinates must be relative to their probe center
angular coordinates are in range [0,2pi]
Given your application, they will have their 4th column as a
variable vector
Instead of strict polar sort, the next rod is actually a nearest
neighbour in the winding direction
We can achieve this by first checking to see if there are any
neighbours in the nndist along the winding direction
We can sort this in place. Let's add a new column that we
update with nn distances to the current rod, and we only check
rods that are upcoming
Notice, rods are already sorted based on distance to center
Returns rods with final dimension being th_jalpha
'''
rods = rods_.copy()
phi = np.angle(rods[:,0] + 1j*rods[:,1])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
rods[:,-1] = phi_jalpha
rods = rods[rods[:,-1].argsort()]
nrod = len(rods)
idxs = np.arange(nrod).reshape(nrod,1)
rods = np.append(idxs,rods,axis=1)
rods2 = rods.copy()
rods3 = rods.copy()
# rods is sorted by phi_jalpha
# rods2 will be used for holding dists
# rods3 is also sorted by phi_jalpha
checked = []
rod3cnt = 0
for rod in rods:
start = int(rod[0])
if start not in checked:
# Add to rods3
checked.append(start)
rods3[rod3cnt] = rod
rod3cnt += 1
cent = np.asarray([rod[1],rod[2]])
dists = dist_from(cent,rods2[:,1:3])
rods2[:,-1] = dists
rods2 = rods2[rods2[:,-1].argsort()]
rodnbrs = rods2[1:6]
for nbr in rodnbrs:
if int(nbr[0]) in checked:
continue
# Add nbr rods to rods3 if needed
# See if it's close to our subject rod
# and if it is further along phi
dphi = rods[int(nbr[0]), -1] - rods[start,-1]
# print nbr
# print rods[int(nbr[0])]
# print rod
# print dphi
if (nbr[-1] < nndist) and (dphi > 0.) and (dphi < pi/2):
checked.append(int(nbr[0]))
rods3[rod3cnt] = nbr
rod3cnt+=1
if abs(sum(rods[:,1]) - sum(rods3[:,1])) > 0.00001:
print "wuhoh"
return rods3[:,1:] | identifier_body |
tools.py | import matplotlib.pyplot as plt
import numpy as np
cos = np.cos
sin = np.sin
twopi = np.pi*2.
pi = np.pi
def myrotate(th):
# Add pi/2 to make it reference the x-axis
# on range [0,2pi]
# th = (th+pi/2.) if th<(3.*pi/2.) else (th-3.*pi/2.)
# if th > pi: th -= pi
th = th + pi/2.
if th > twopi: th-= twopi
return th
def dist_from(ref,rs):
# ref is 1x2 ndarray
# rs is list of vectors to compare, nx2
return np.linalg.norm((ref-rs),axis=1)
def nematicdirector(Q):
# Given Q matrix return order parameter and angle
w,v = np.linalg.eig(Q)
idx = np.argmax(w)
Lam = w[idx] # Equiv to sqrt(S*S+T*T)
nu = v[:,idx]
alpha = np.angle(np.complex(nu[0,0],nu[1,0]))
if alpha < 0: alpha = twopi+alpha
return Lam, alpha
def quad_features(pca_data,red_dims):
# pca_data should be the nbr vector after the pca transformation
# Let's have this function return feature 1 as f_1^2 +f_2^2
# and feature 2 as f_3^2 + f_4^2
#
# red_dims are the dims to squash together in form [[0,1],[4,4]] etc.
new_pca_data = pca_data.copy()
for pair in red_dims:
new_pca_data[:,pair[0]] = np.sqrt(np.square(pca_data[:,pair[0]]) + np.square(pca_data[:,pair[1]]))
todel = []
for pair in red_dims:
if pair[0] == pair[1]: continue
todel.append(pair[1])
new_pca_data = np.delete(new_pca_data,todel,axis=1)
return new_pca_data
def gen_probes(nx,edge,do_shift=True):
nprobe = nx**2
probes = np.zeros((nprobe,2))
dx = edge/nx
dy = edge/nx
halfdx = dx/2.
shift = 0
if do_shift:
shift = -edge/2.
iprobe = 0
for iy in range(nx):
for ix in range(nx):
# Start from top left of image and read right
probes[iprobe] = [dx*ix + halfdx + shift, -shift - dy*iy - halfdx]
iprobe += 1
return probes
def norm_angles(thetas,alpha):
'''
Given 1D array of angles, express the angles relative to alpha
thetas and alpha should come in range [0,2pi]
'''
th0 = alpha
for ith, th in enumerate(thetas):
dth = th - th0
if dth > pi/2.:
if dth > 3*pi/2.:
th -= twopi
else:
th -= pi
if dth < -pi/2.:
if dth < -3*pi/2.:
th += twopi
else:
th += pi
thetas[ith] = th
th0 = th
return thetas
def winding_angles(thetas_,alpha,pol=0):
'''
Given 1D array of angles, return their angles relative to alpha
such that the polarity is closest to that of the angle previous to it
in the array
I think it boils down to deciding on a direction that things are spinning
After that we can make a better adjustment of the polarity conversion
region, say [(+/-) pi/4, (-/+) 3pi/4]
There are two ideas that come to mind: a threshold method or averaging method
for determining when to decide direction
Returns corrected thetas that are with respect to alpha
'''
thetas = thetas_.copy()
nth = thetas.shape[0]
# thresh = pi/2.
th0 = alpha
dalpha = 0
for ith, th in enumerate(thetas):
dth = th - th0
# print th, dth
if dth > pi/2 + pol*pi/4:
if dth > 3*pi/2 + pol*pi/4:
th -= twopi
else:
th -= pi
if dth < -pi/2 + pol*pi/4:
if dth < -3*pi/2 + pol*pi/4:
th += twopi
else:
th += pi
# dalpha = th - alpha
# if (abs(dalpha) > thresh) and (pol==0):
dalpha += th - alpha
if (ith > nth//4) and (pol==0):
pol = np.sign(dalpha)
thetas[ith] = th - alpha
th0 = th
# Detect false rotations
if (0):
gap = 5
# Thetas are wrt alpha
for ith, th in enumerate(thetas):
if ith > gap:
iref = max(0,ith-gap)
dth = th - thetas[iref]
if dth > pi:
# angles should be considered as returning
# to earlier ref, not further away
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth > pi/2:
thetas[iref+1+isubth] = subth - pi
thetas[ith] = th - pi
elif dth < -pi:
for isubth, subth in enumerate(thetas[iref+1:ith]):
sub_dth = subth - thetas[iref]
if sub_dth < pi/2:
thetas[iref+1+isubth] = subth + pi
thetas[ith] = th + pi
return thetas
def polar_cluster_sort(rods_,alpha,nndist=0.):
'''
rod x,y coordinates must be relative to their probe center
angular coordinates are in range [0,2pi]
Given your application, they will have their 4th column as a
variable vector
Instead of strict polar sort, the next rod is actually a nearest
neighbour in the winding direction
We can achieve this by first checking to see if there are any
neighbours in the nndist along the winding direction
We can sort this in place. Let's add a new column that we
update with nn distances to the current rod, and we only check
rods that are upcoming
Notice, rods are already sorted based on distance to center
Returns rods with final dimension being th_jalpha
'''
rods = rods_.copy()
phi = np.angle(rods[:,0] + 1j*rods[:,1])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
rods[:,-1] = phi_jalpha
rods = rods[rods[:,-1].argsort()]
nrod = len(rods)
idxs = np.arange(nrod).reshape(nrod,1)
rods = np.append(idxs,rods,axis=1)
rods2 = rods.copy()
rods3 = rods.copy()
# rods is sorted by phi_jalpha
# rods2 will be used for holding dists
# rods3 is also sorted by phi_jalpha
checked = []
rod3cnt = 0
for rod in rods:
start = int(rod[0])
if start not in checked:
# Add to rods3
checked.append(start)
rods3[rod3cnt] = rod
rod3cnt += 1
cent = np.asarray([rod[1],rod[2]])
dists = dist_from(cent,rods2[:,1:3])
rods2[:,-1] = dists
rods2 = rods2[rods2[:,-1].argsort()]
rodnbrs = rods2[1:6]
for nbr in rodnbrs:
if int(nbr[0]) in checked:
continue
# Add nbr rods to rods3 if needed
# See if it's close to our subject rod
# and if it is further along phi
dphi = rods[int(nbr[0]), -1] - rods[start,-1]
# print nbr
# print rods[int(nbr[0])]
# print rod
# print dphi
if (nbr[-1] < nndist) and (dphi > 0.) and (dphi < pi/2):
checked.append(int(nbr[0]))
rods3[rod3cnt] = nbr
rod3cnt+=1
if abs(sum(rods[:,1]) - sum(rods3[:,1])) > 0.00001:
print "wuhoh"
return rods3[:,1:]
def get_lat_nbrs(block,n_nbr,edge,nx,probes,use_bulk=False,\
method="random",ret_nbrs=False,sparse_bulk_factor=1,\
use_xyth=False):
'''
Return (xprobe, n_nbr) array of neighbours
I use xprobe because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
# Create probes
nprobe = probes.shape[0]
block2 = block.copy()
nrod = len(block)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
nnbr_ft = 2
features = np.zeros(shape=(nprobe,n_nbr)) if not (use_xyth) else np.zeros(shape=(nprobe,n_nbr*nnbr_ft))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,prob in enumerate(probes):
cent = np.asarray([prob[0],prob[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[:n_nbr])
# Convert coordinates, relative to center of mass
com_x, com_y = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
# com_x, com_y = prob[0], prob[1]
nbrs[:,0] -= com_x
nbrs[:,1] -= com_y
# Convert angles to be relative to nem director
# Angles are originally [0,2pi]
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q) # alpha is range [0,2pi]
alphas[i] = alpha
th_jalpha = th_j - alpha # [-2pi,2pi]
if method == "random":
nbrs[:,-1] = cos(2.*th_jalpha)
# nbrs[:,-1] = cos(2.*th_j)
np.random.shuffle(nbrs)
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "radial":
if use_xyth:
nbrs[:,-1] = cos(2.*th_jalpha)
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
# Normalize rod angles first
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.4)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha) / pi
# nbrs[:,-1] = np.square(nbrs[:,-1])
nbrs[:,-1] = cos(2.*nbrs[:,-1])
# nbrs[:,-1] = (norm_angles(nbrs[:,2],alpha))
nbrs = nbrs[nbrs[:,-1].argsort()]
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WA":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.)
nbrs[:,-1] = cos(2.* (nbrs[:,2] - alpha) )
if use_xyth:
features[i] = nbrs[:,:nnbr_ft].flatten()
else:
features[i,:] = nbrs[:,-1]
if method == "WD":
nbrs = polar_cluster_sort(nbrs,alpha,nndist=0.3)
nbrs[:,-1] = winding_angles(nbrs[:,2],alpha, pol=0)
# nbrs[:,-1] = nbrs[:,2] - alpha
c2 = cos(2. * nbrs[:,-1])
s2 = sin(2. * nbrs[:,-1])
# phi = np.angle(nbrs[:,0] + 1j*nbrs[:,1])
# phi_jalpha = (phi - alpha)%twopi
# dc2 = (c2 - np.roll(c2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# ds2 = (s2 - np.roll(s2,1)) / (phi_jalpha - np.roll(phi_jalpha,1))
# nbrs[:,-1] = sin( 2. * winding_angles(nbrs[:,2],alpha, pol=0))
if use_xyth:
features[i] = np.array([c2,s2]).flatten()
else:
|
# Return nbrs to original coords
nbrs[:,0] += com_x
nbrs[:,1] += com_y
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
def get_xyth_feature(nbrs):
c2 = cos(2.* nbrs[:,2])
s2 = sin(2.* nbrs[:,2])
return np.array([c2,s2]).flatten()
def plotLine(x1,y1,x2,y2,c='b',ax=None,lw=0.4,alpha=1.0):
if ax: # given axis handle
ax.plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
else:
plt.gca().plot([x1, x2], [y1, y2], color=c, linestyle='-', linewidth=lw, alpha=alpha);
def plotrods(rods,myax,halfL=0.5,hotrods=[],col='k',lw=0.4,alpha=1.0,add_crosses=False,number=None):
for r in rods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c=col,lw=lw,ax=myax,alpha=alpha)
if add_crosses:
myax.plot(r[0],r[1],"+",markersize=20,color="grey",linewidth=5)
elif number:
font = {'family': 'sans-serif',
'size': 13,
'usetex': False
}
myax.text(r[0],r[1],"%d"%(number),fontdict=font,fontsize=12,
horizontalalignment="center",
verticalalignment="center")
if len(hotrods)>0:
for r in hotrods:
th = r[2]
x1 = r[0] - halfL*cos(th)
x2 = r[0] + halfL*cos(th)
y1 = r[1] - halfL*sin(th)
y2 = r[1] + halfL*sin(th)
plotLine(x1,y1,x2,y2,c='r',lw=1.6,ax=myax,alpha=alpha)
def get_nbrs(block,n_nbr,edge,use_bulk=False,method="random",ret_nbrs=False,sparse_bulk_factor=1):
'''
Return (xrod, n_nbr) array of neighbours
I use xrod because if use_bulk is true then
It's some indeterminant amount
use_bulk should maybe not be true
method can be one of: random, radial, polar, angular
random and radial are self-explanatory
polar is sorted by rod center location order while circling
around starting with the nem director
angular is ordered by those closest to the nem director
return 2D array "features"
'''
bulk = block.copy()
bulk = bulk[np.where(bulk[:,0] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,0] < 0.25*edge)]
bulk = bulk[np.where(bulk[:,1] > -0.25*edge)]
bulk = bulk[np.where(bulk[:,1] < 0.25*edge)]
bulk = bulk[::sparse_bulk_factor]
perim = block.copy()
perimidx = np.where(perim[:,0] < -0.25*edge)
perimidx = np.append(perimidx,np.where(perim[:,0] > 0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] < -0.25*edge))
perimidx = np.append(perimidx,np.where(perim[:,1] > 0.25*edge))
perimidx = np.unique(perimidx) # remove double counts
perim = perim[perimidx]
probes = np.append(perim,bulk,axis=0)
# This is mainly just for viewing the rods in gen_nbrfiles
if sparse_bulk_factor == 1:
probes = block.copy()
block2 = block.copy()
nrod = len(block)
nprobe = len(probes)
z = np.zeros((nrod,1))
block2 = np.append(block2,z,axis=1)
features = np.zeros(shape=(nprobe,n_nbr))
nbrs_full = np.empty(shape=(nprobe,n_nbr,4))
nbrs = np.empty(shape=(n_nbr,4))
alphas = np.zeros(shape=(nprobe))
for i,rod in enumerate(probes):
cent = np.asarray([rod[0],rod[1]])
dists = dist_from(cent,block2[:,:2])
block2[:,-1] = dists
block2 = block2[block2[:,-1].argsort()]
nbrs = np.copy(block2[1:1+n_nbr])
# Convert coordinates, relative to center of mass
cx, cy = np.mean(nbrs[:,0]), np.mean(nbrs[:,1])
nbrs[:,0] -= cx
nbrs[:,1] -= cy
# Convert angles to be relative to nem director
th_j = nbrs[:,2]
S,T = np.mean(cos(2.*th_j)), np.mean(sin(2.*th_j))
Q = np.matrix([[S,T],[T,-S]])
_, alpha = nematicdirector(Q)
# Try using rod probe as alpha
# alpha = rod[2]
th_jalpha = th_j - alpha
alphas[i] = rod[0]
if method == "random":
np.random.shuffle(nbrs)
th_jalpha = th_j - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "radial":
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
if method == "angular":
nbrs[:,-1] = cos(2.*th_jalpha)
# sort by cos(2*th_jalpha)
# we use argsort()[::-1] so that most aligned appear first
nbrs = nbrs[nbrs[:,-1].argsort()[::-1]]
features[i,:] = nbrs[:,-1]
nbrs[:,-1] = features[i,:]
if method == "polar":
# get polar coordinates
# arctan2 returns [-pi,pi]
phi = np.arctan2(nbrs[:,1],nbrs[:,0])
phi = np.where(phi < 0., phi+twopi, phi)
phi_jalpha = phi - alpha
phi_jalpha = np.where(phi_jalpha < 0., phi_jalpha+twopi, phi_jalpha)
nbrs[:,-1] = phi_jalpha
nbrs = nbrs[nbrs[:,-1].argsort()]
th_jalpha = nbrs[:,2] - alpha
features[i,:] = cos(2.*th_jalpha)
nbrs[:,-1] = features[i,:]
# Return nbrs to original coords
nbrs[:,0] += cx
nbrs[:,1] += cy
nbrs_full[i] = nbrs
if ret_nbrs:
return features, nbrs_full, alphas
else:
return features
| features[i,:] = nbrs[:,-1] / pi | conditional_block |
resnext_cifar.py |
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)
self.bn_att = nn.BatchNorm2d(self.stages[3])
self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,
bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,
bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,
bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
self.att_gap = nn.AvgPool2d(16)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
ax = self.stage_att(x)
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
ax = ax.view(ax.size(0), -1)
rx = x * self.att
rx = rx + x
rx = self.stage_3.forward(rx)
rx = F.avg_pool2d(rx, 8, 1)
rx = rx.view(-1, 1024)
rx = self.classifier(rx)
return ax, rx, self.att
def | (**kwargs):
"""Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model
# """
# resneXt for cifar with pytorch
# Reference:
# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. In CVPR, 2017
# """
#
# import torch
# import torch.nn as nn
# import math
#
#
# class Bottleneck(nn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
# D = int(planes * (baseWidth / 64.))
# C = cardinality
# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)
# self.bn1 = nn.BatchNorm2d(D * C)
# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
# self.bn2 = nn.BatchNorm2d(D * C)
# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes * 4)
# self.relu = nn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# if residual.size() != out.size():
# print(out.size(), residual.size())
# out += residual
# out = self.relu(out)
#
# return out
#
#
# class ResNeXt_Cifar(nn.Module):
#
# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):
# super(ResNeXt_Cifar, self).__init__()
# self.inplanes = 64
# self.cardinality = cardinality
# self.baseWidth = baseWidth
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True)
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
# self.fc = nn.Linear(256 * block.expansion, num_classes)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion)
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# return x
#
#
# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):
# assert (depth - 2) % 9 == 0
# n = int((depth - 2) / 9)
# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)
# return model
# if __name__ == '__main__':
# net = resneXt_cifar(29, 16, 64)
# y = net(torch.randn(1, 3, 32, 32))
# print(net)
# print(y.size()) | resnext | identifier_name |
resnext_cifar.py | """
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality | stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)
self.bn_att = nn.BatchNorm2d(self.stages[3])
self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,
bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,
bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,
bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
self.att_gap = nn.AvgPool2d(16)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
ax = self.stage_att(x)
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
ax = ax.view(ax.size(0), -1)
rx = x * self.att
rx = rx + x
rx = self.stage_3.forward(rx)
rx = F.avg_pool2d(rx, 8, 1)
rx = rx.view(-1, 1024)
rx = self.classifier(rx)
return ax, rx, self.att
def resnext(**kwargs):
"""Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model
# """
# resneXt for cifar with pytorch
# Reference:
# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. In CVPR, 2017
# """
#
# import torch
# import torch.nn as nn
# import math
#
#
# class Bottleneck(nn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
# D = int(planes * (baseWidth / 64.))
# C = cardinality
# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)
# self.bn1 = nn.BatchNorm2d(D * C)
# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
# self.bn2 = nn.BatchNorm2d(D * C)
# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes * 4)
# self.relu = nn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# if residual.size() != out.size():
# print(out.size(), residual.size())
# out += residual
# out = self.relu(out)
#
# return out
#
#
# class ResNeXt_Cifar(nn.Module):
#
# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):
# super(ResNeXt_Cifar, self).__init__()
# self.inplanes = 64
# self.cardinality = cardinality
# self.baseWidth = baseWidth
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True)
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
# self.fc = nn.Linear(256 * block.expansion, num_classes)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion)
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# return x
#
#
# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):
# assert (depth - 2) % 9 == 0
# n = int((depth - 2) / 9)
# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)
# return model
# if __name__ == '__main__':
# net = resneXt_cifar(29, 16, 64)
# y = net(torch.randn(1, 3, 32, 32))
# print(net)
# print(y.size()) | out_channels: output channel dimensionality | random_line_split |
resnext_cifar.py |
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)
self.bn_att = nn.BatchNorm2d(self.stages[3])
self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,
bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,
bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,
bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
self.att_gap = nn.AvgPool2d(16)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
|
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
ax = self.stage_att(x)
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
ax = ax.view(ax.size(0), -1)
rx = x * self.att
rx = rx + x
rx = self.stage_3.forward(rx)
rx = F.avg_pool2d(rx, 8, 1)
rx = rx.view(-1, 1024)
rx = self.classifier(rx)
return ax, rx, self.att
def resnext(**kwargs):
"""Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model
# """
# resneXt for cifar with pytorch
# Reference:
# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. In CVPR, 2017
# """
#
# import torch
# import torch.nn as nn
# import math
#
#
# class Bottleneck(nn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
# D = int(planes * (baseWidth / 64.))
# C = cardinality
# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)
# self.bn1 = nn.BatchNorm2d(D * C)
# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
# self.bn2 = nn.BatchNorm2d(D * C)
# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes * 4)
# self.relu = nn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# if residual.size() != out.size():
# print(out.size(), residual.size())
# out += residual
# out = self.relu(out)
#
# return out
#
#
# class ResNeXt_Cifar(nn.Module):
#
# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):
# super(ResNeXt_Cifar, self).__init__()
# self.inplanes = 64
# self.cardinality = cardinality
# self.baseWidth = baseWidth
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True)
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
# self.fc = nn.Linear(256 * block.expansion, num_classes)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion)
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# return x
#
#
# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):
# assert (depth - 2) % 9 == 0
# n = int((depth - 2) / 9)
# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)
# return model
# if __name__ == '__main__':
# net = resneXt_cifar(29, 16, 64)
# y = net(torch.randn(1, 3, 32, 32))
# print(net)
# print(y.size()) | if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1 | conditional_block |
resnext_cifar.py |
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)
self.bn_att = nn.BatchNorm2d(self.stages[3])
self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,
bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,
bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,
bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
self.att_gap = nn.AvgPool2d(16)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
ax = self.stage_att(x)
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
ax = ax.view(ax.size(0), -1)
rx = x * self.att
rx = rx + x
rx = self.stage_3.forward(rx)
rx = F.avg_pool2d(rx, 8, 1)
rx = rx.view(-1, 1024)
rx = self.classifier(rx)
return ax, rx, self.att
def resnext(**kwargs):
|
# """
# resneXt for cifar with pytorch
# Reference:
# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. In CVPR, 2017
# """
#
# import torch
# import torch.nn as nn
# import math
#
#
# class Bottleneck(nn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
# D = int(planes * (baseWidth / 64.))
# C = cardinality
# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)
# self.bn1 = nn.BatchNorm2d(D * C)
# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
# self.bn2 = nn.BatchNorm2d(D * C)
# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes * 4)
# self.relu = nn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# if residual.size() != out.size():
# print(out.size(), residual.size())
# out += residual
# out = self.relu(out)
#
# return out
#
#
# class ResNeXt_Cifar(nn.Module):
#
# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):
# super(ResNeXt_Cifar, self).__init__()
# self.inplanes = 64
# self.cardinality = cardinality
# self.baseWidth = baseWidth
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True)
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
# self.fc = nn.Linear(256 * block.expansion, num_classes)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion)
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# return x
#
#
# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):
# assert (depth - 2) % 9 == 0
# n = int((depth - 2) / 9)
# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)
# return model
# if __name__ == '__main__':
# net = resneXt_cifar(29, 16, 64)
# y = net(torch.randn(1, 3, 32, 32))
# print(net)
# print(y.size()) | """Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model | identifier_body |
store.ts | // store.ts
import { InjectionKey, State } from 'vue';
import {
createLogger,
createStore,
useStore as baseUseStore,
Store,
} from 'vuex';
import { nextLine, playerAnswered, runLine } from './vm/renpy-vm';
import { DialogKey, MachineStack } from './types/vuex';
import {
addDataHelper,
getModifiableData,
setDataHelper,
} from './utils/data-helpers';
import { getFile } from './utils/ajax';
import { ParserContext, parseRenpyScript } from './renpy/renpy-parser';
import { ButtonConfig, Config, HudStatConfig, SkillData } from './types/config';
import { GameSave } from './types/game-save';
import { SAVE_FILE } from './constants';
import { AppOptions } from '.';
import { getConfig } from './config';
import { parserError } from './utils/error-handling';
import { getPlayTime } from './utils/time-helpers';
import { timeout } from './utils/promises';
// define your typings for the store state
export interface SetupStoreResult {
store: Store<State>;
key: InjectionKey<Store<State>>;
}
export type AddStackOptions = Partial<MachineStack>;
let key: InjectionKey<Store<State>> = Symbol('Store Injection Key');
let store: Store<State>;
export function setupStore(options: AppOptions): SetupStoreResult {
const plugins = [];
// checking process.env actually exists just for safety
if (options.logging) {
plugins.push(createLogger());
}
// define injection key
key = Symbol('Store Injection Key');
console.log('setup store');
store = createStore<State>({
state: {
machine: {
stack: [],
script: {},
data: {
playerName: 'Player',
},
},
dialog: [],
ready: false,
count: 0,
skills: {},
lastLabel: 'main',
skillChecks: {},
playing: false,
currentScreen: 'default',
buttons: {},
rendering: {
screenHeight: window.innerHeight,
screenWidth: window.innerWidth,
canvasWidth: window.innerWidth,
canvasHeight: window.innerHeight,
renderRatio: 1,
topOffset: 0,
leftOffset: 0,
layoutMode: 'horizontal',
},
audio: {},
errors: [],
playTime: {
start: Date.now(),
previousPlaytime: 0,
},
notifications: {},
hudStats: {},
},
getters: {
machineHead(state): MachineStack {
return state.machine.stack[state.machine.stack.length - 1];
},
currentLine(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
command(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
},
actions: {
async startMachine(
{ commit, dispatch },
payload: { scriptPaths: string[]; config: Config },
) {
const { scriptPaths } = payload;
const filePromises: Array<Promise<string>> = [];
for (const path of scriptPaths) {
filePromises.push(getFile(path));
}
const files = await Promise.all(filePromises);
const start = Date.now();
let scripts: Parser.ParsedScript = {};
for (const index in files) {
const file = files[index];
scripts = {
...scripts,
...parseRenpyScript(
(ctx: ParserContext, line: number, error: string) =>
parserError(commit, ctx, line, error),
file,
scriptPaths[index],
),
};
}
const end = Date.now();
console.log(`script parsed in ${end - start} ms`);
commit('setButtons', payload.config.buttons);
commit('setScript', scripts);
commit('setupSkills', payload.config.skills);
commit('setupHudStats', payload.config.hudStats);
},
runLabel({ state, commit }, label) {
const branch = state.machine.script[label];
if (!branch) {
console.error(`Label ${branch} doesn't exist`);
}
commit('setLastLabel', label);
state.machine.stack = [
{
currentIndex: 0,
branch,
label,
},
];
this.dispatch('runLine');
},
async runLine(context) {
await this.dispatch('saveGame');
await runLine(context);
},
nextLine(context) {
return nextLine(context);
},
playerAnswered(context, index) {
return playerAnswered(context, index);
},
saveGame({ state }) {
const save: GameSave = {
data: state.machine.data,
skills: state.skills,
dialog: state.dialog,
buttons: state.buttons,
lastLabel: state.lastLabel,
skillChecks: state.skillChecks,
playTime: getPlayTime(
state.playTime.start,
state.playTime.previousPlaytime,
),
};
localStorage.setItem(SAVE_FILE, JSON.stringify(save));
},
loadGame({ commit, dispatch }, saveFile: string) {
if (saveFile) {
const save: GameSave = JSON.parse(saveFile);
commit('setLoadedData', save);
dispatch('runLabel', save.lastLabel);
}
},
async addNotification({ commit }, text: string) {
const id = `${Date.now()}-${Math.random() * 10000}`;
const notification = {
text,
};
commit('addNotification', { id, notification });
await timeout(getConfig().notifications.timeOnScreen * 1000);
commit('deleteNotification', id);
},
},
mutations: {
setLoadedData(state, save: GameSave) {
state.machine.data = save.data;
state.skills = save.skills;
state.dialog = save.dialog;
state.buttons = save.buttons;
state.lastLabel = save.lastLabel;
state.skillChecks = save.skillChecks;
state.playTime.previousPlaytime = save.playTime;
},
reset(state) {
state.ready = false;
state.machine.stack = [];
state.machine.script = {};
state.machine.data = {};
state.dialog = [];
},
setLastLabel(state, label) {
state.lastLabel = label;
},
setupSkillCheck(state, { skillCheck, skillCheckId }) {
state.skillChecks[skillCheckId] = skillCheck;
},
passSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = true;
},
failSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = false;
state.skillChecks[skillCheckId].available = false;
},
setScript(state, script: Parser.ParsedScript) {
state.machine.script = script;
state.machine.stack.push({
currentIndex: 0,
branch: script.main,
label: 'main',
});
state.ready = true;
},
setupSkills(state, skills: { [key: string]: SkillData }) {
for (const skill in skills) {
state.skills[skill] = {
level: skills[skill].startingLevel || 0,
};
}
},
setupHudStats(state, stats: { [key: string]: HudStatConfig }) {
for (const stat in stats) {
state.hudStats[stat] = stats[stat].startingValue;
}
},
incrementSkill(
state,
{ skill, amount }: { skill: string; amount: number },
) {
state.skills[skill].level += amount;
},
setStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] = amount;
},
addStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] += amount;
},
addDialog(state, payload: { dialog: DialogKey }) {
state.dialog.push(payload.dialog);
},
nextLine(state) {
state.machine.stack[state.machine.stack.length - 1].currentIndex += 1;
},
previousStack(state) {
state.machine.stack.splice(state.machine.stack.length - 1);
},
addStack(state, newStack: AddStackOptions) {
if (!newStack.label) {
newStack.label =
state.machine.stack[state.machine.stack.length - 1].label;
}
state.machine.stack.push(newStack as MachineStack);
},
setStack(state, newStack: MachineStack) {
state.machine.stack = [];
state.machine.stack.push(newStack);
},
setData(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
setDataHelper(dataToModify, path, value);
},
addInstruction(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
addDataHelper(dataToModify, path, value);
},
startPlaying(state) {
state.playing = true;
state.playTime.start = Date.now();
},
setScreen(state, screen) | ,
setButtons(state, buttons: { [key: string]: ButtonConfig }) {
for (const i in buttons) {
state.buttons[i] = {
enabled: buttons[i].enabled,
};
}
},
clearDialog(state) {
state.dialog.splice(0);
},
changeButton(state, payload: { button: string; enabled: boolean }) {
state.buttons[payload.button].enabled = payload.enabled;
},
updateScreenSize(
state,
{
width,
height,
textWidth,
}: { width: number; height: number; textWidth: number },
) {
state.rendering.screenHeight = height;
state.rendering.screenWidth = width;
state.rendering.renderRatio = 1;
state.rendering.topOffset = 0;
state.rendering.leftOffset = 0;
if (width < getConfig().layout.verticalLayoutThreshold) {
state.rendering.layoutMode = 'vertical';
} else {
state.rendering.layoutMode = 'horizontal';
}
},
setMusic(state, music) {
state.audio.currentMusic = music;
},
createError(state, errorText: string) {
state.errors.push({
text: errorText,
});
},
clearErrors(state) {
state.errors = [];
},
addNotification(state, { id, notification }) {
state.notifications[id] = notification;
return id;
},
deleteNotification(state, id) {
console.log('delete notif', id);
delete state.notifications[id];
},
},
plugins,
});
return {
store,
key,
};
}
// define your own `useStore` composition function
export function useStore() {
console.log('use store');
console.log(`key `, key);
console.log(`store `, store);
const result = baseUseStore(key);
console.log(result);
return result;
}
| {
state.currentScreen = screen;
} | identifier_body |
store.ts | // store.ts
import { InjectionKey, State } from 'vue';
import {
createLogger,
createStore,
useStore as baseUseStore,
Store,
} from 'vuex';
import { nextLine, playerAnswered, runLine } from './vm/renpy-vm';
import { DialogKey, MachineStack } from './types/vuex';
import {
addDataHelper,
getModifiableData,
setDataHelper,
} from './utils/data-helpers';
import { getFile } from './utils/ajax';
import { ParserContext, parseRenpyScript } from './renpy/renpy-parser';
import { ButtonConfig, Config, HudStatConfig, SkillData } from './types/config';
import { GameSave } from './types/game-save';
import { SAVE_FILE } from './constants';
import { AppOptions } from '.';
import { getConfig } from './config';
import { parserError } from './utils/error-handling';
import { getPlayTime } from './utils/time-helpers';
import { timeout } from './utils/promises';
// define your typings for the store state
export interface SetupStoreResult {
store: Store<State>;
key: InjectionKey<Store<State>>;
}
export type AddStackOptions = Partial<MachineStack>;
let key: InjectionKey<Store<State>> = Symbol('Store Injection Key');
let store: Store<State>;
export function setupStore(options: AppOptions): SetupStoreResult {
const plugins = [];
// checking process.env actually exists just for safety
if (options.logging) |
// define injection key
key = Symbol('Store Injection Key');
console.log('setup store');
store = createStore<State>({
state: {
machine: {
stack: [],
script: {},
data: {
playerName: 'Player',
},
},
dialog: [],
ready: false,
count: 0,
skills: {},
lastLabel: 'main',
skillChecks: {},
playing: false,
currentScreen: 'default',
buttons: {},
rendering: {
screenHeight: window.innerHeight,
screenWidth: window.innerWidth,
canvasWidth: window.innerWidth,
canvasHeight: window.innerHeight,
renderRatio: 1,
topOffset: 0,
leftOffset: 0,
layoutMode: 'horizontal',
},
audio: {},
errors: [],
playTime: {
start: Date.now(),
previousPlaytime: 0,
},
notifications: {},
hudStats: {},
},
getters: {
machineHead(state): MachineStack {
return state.machine.stack[state.machine.stack.length - 1];
},
currentLine(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
command(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
},
actions: {
async startMachine(
{ commit, dispatch },
payload: { scriptPaths: string[]; config: Config },
) {
const { scriptPaths } = payload;
const filePromises: Array<Promise<string>> = [];
for (const path of scriptPaths) {
filePromises.push(getFile(path));
}
const files = await Promise.all(filePromises);
const start = Date.now();
let scripts: Parser.ParsedScript = {};
for (const index in files) {
const file = files[index];
scripts = {
...scripts,
...parseRenpyScript(
(ctx: ParserContext, line: number, error: string) =>
parserError(commit, ctx, line, error),
file,
scriptPaths[index],
),
};
}
const end = Date.now();
console.log(`script parsed in ${end - start} ms`);
commit('setButtons', payload.config.buttons);
commit('setScript', scripts);
commit('setupSkills', payload.config.skills);
commit('setupHudStats', payload.config.hudStats);
},
runLabel({ state, commit }, label) {
const branch = state.machine.script[label];
if (!branch) {
console.error(`Label ${branch} doesn't exist`);
}
commit('setLastLabel', label);
state.machine.stack = [
{
currentIndex: 0,
branch,
label,
},
];
this.dispatch('runLine');
},
async runLine(context) {
await this.dispatch('saveGame');
await runLine(context);
},
nextLine(context) {
return nextLine(context);
},
playerAnswered(context, index) {
return playerAnswered(context, index);
},
saveGame({ state }) {
const save: GameSave = {
data: state.machine.data,
skills: state.skills,
dialog: state.dialog,
buttons: state.buttons,
lastLabel: state.lastLabel,
skillChecks: state.skillChecks,
playTime: getPlayTime(
state.playTime.start,
state.playTime.previousPlaytime,
),
};
localStorage.setItem(SAVE_FILE, JSON.stringify(save));
},
loadGame({ commit, dispatch }, saveFile: string) {
if (saveFile) {
const save: GameSave = JSON.parse(saveFile);
commit('setLoadedData', save);
dispatch('runLabel', save.lastLabel);
}
},
async addNotification({ commit }, text: string) {
const id = `${Date.now()}-${Math.random() * 10000}`;
const notification = {
text,
};
commit('addNotification', { id, notification });
await timeout(getConfig().notifications.timeOnScreen * 1000);
commit('deleteNotification', id);
},
},
mutations: {
setLoadedData(state, save: GameSave) {
state.machine.data = save.data;
state.skills = save.skills;
state.dialog = save.dialog;
state.buttons = save.buttons;
state.lastLabel = save.lastLabel;
state.skillChecks = save.skillChecks;
state.playTime.previousPlaytime = save.playTime;
},
reset(state) {
state.ready = false;
state.machine.stack = [];
state.machine.script = {};
state.machine.data = {};
state.dialog = [];
},
setLastLabel(state, label) {
state.lastLabel = label;
},
setupSkillCheck(state, { skillCheck, skillCheckId }) {
state.skillChecks[skillCheckId] = skillCheck;
},
passSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = true;
},
failSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = false;
state.skillChecks[skillCheckId].available = false;
},
setScript(state, script: Parser.ParsedScript) {
state.machine.script = script;
state.machine.stack.push({
currentIndex: 0,
branch: script.main,
label: 'main',
});
state.ready = true;
},
setupSkills(state, skills: { [key: string]: SkillData }) {
for (const skill in skills) {
state.skills[skill] = {
level: skills[skill].startingLevel || 0,
};
}
},
setupHudStats(state, stats: { [key: string]: HudStatConfig }) {
for (const stat in stats) {
state.hudStats[stat] = stats[stat].startingValue;
}
},
incrementSkill(
state,
{ skill, amount }: { skill: string; amount: number },
) {
state.skills[skill].level += amount;
},
setStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] = amount;
},
addStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] += amount;
},
addDialog(state, payload: { dialog: DialogKey }) {
state.dialog.push(payload.dialog);
},
nextLine(state) {
state.machine.stack[state.machine.stack.length - 1].currentIndex += 1;
},
previousStack(state) {
state.machine.stack.splice(state.machine.stack.length - 1);
},
addStack(state, newStack: AddStackOptions) {
if (!newStack.label) {
newStack.label =
state.machine.stack[state.machine.stack.length - 1].label;
}
state.machine.stack.push(newStack as MachineStack);
},
setStack(state, newStack: MachineStack) {
state.machine.stack = [];
state.machine.stack.push(newStack);
},
setData(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
setDataHelper(dataToModify, path, value);
},
addInstruction(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
addDataHelper(dataToModify, path, value);
},
startPlaying(state) {
state.playing = true;
state.playTime.start = Date.now();
},
setScreen(state, screen) {
state.currentScreen = screen;
},
setButtons(state, buttons: { [key: string]: ButtonConfig }) {
for (const i in buttons) {
state.buttons[i] = {
enabled: buttons[i].enabled,
};
}
},
clearDialog(state) {
state.dialog.splice(0);
},
changeButton(state, payload: { button: string; enabled: boolean }) {
state.buttons[payload.button].enabled = payload.enabled;
},
updateScreenSize(
state,
{
width,
height,
textWidth,
}: { width: number; height: number; textWidth: number },
) {
state.rendering.screenHeight = height;
state.rendering.screenWidth = width;
state.rendering.renderRatio = 1;
state.rendering.topOffset = 0;
state.rendering.leftOffset = 0;
if (width < getConfig().layout.verticalLayoutThreshold) {
state.rendering.layoutMode = 'vertical';
} else {
state.rendering.layoutMode = 'horizontal';
}
},
setMusic(state, music) {
state.audio.currentMusic = music;
},
createError(state, errorText: string) {
state.errors.push({
text: errorText,
});
},
clearErrors(state) {
state.errors = [];
},
addNotification(state, { id, notification }) {
state.notifications[id] = notification;
return id;
},
deleteNotification(state, id) {
console.log('delete notif', id);
delete state.notifications[id];
},
},
plugins,
});
return {
store,
key,
};
}
// define your own `useStore` composition function
export function useStore() {
console.log('use store');
console.log(`key `, key);
console.log(`store `, store);
const result = baseUseStore(key);
console.log(result);
return result;
}
| {
plugins.push(createLogger());
} | conditional_block |
store.ts | // store.ts
import { InjectionKey, State } from 'vue';
import {
createLogger,
createStore,
useStore as baseUseStore,
Store,
} from 'vuex';
import { nextLine, playerAnswered, runLine } from './vm/renpy-vm';
import { DialogKey, MachineStack } from './types/vuex';
import {
addDataHelper,
getModifiableData,
setDataHelper,
} from './utils/data-helpers';
import { getFile } from './utils/ajax';
import { ParserContext, parseRenpyScript } from './renpy/renpy-parser';
import { ButtonConfig, Config, HudStatConfig, SkillData } from './types/config';
import { GameSave } from './types/game-save';
import { SAVE_FILE } from './constants';
import { AppOptions } from '.';
import { getConfig } from './config';
import { parserError } from './utils/error-handling';
import { getPlayTime } from './utils/time-helpers';
import { timeout } from './utils/promises';
// define your typings for the store state
export interface SetupStoreResult {
store: Store<State>;
key: InjectionKey<Store<State>>;
}
export type AddStackOptions = Partial<MachineStack>;
let key: InjectionKey<Store<State>> = Symbol('Store Injection Key');
let store: Store<State>;
export function setupStore(options: AppOptions): SetupStoreResult {
const plugins = [];
// checking process.env actually exists just for safety
if (options.logging) {
plugins.push(createLogger());
}
// define injection key
key = Symbol('Store Injection Key');
console.log('setup store');
store = createStore<State>({
state: {
machine: {
stack: [],
script: {},
data: {
playerName: 'Player',
},
},
dialog: [],
ready: false,
count: 0,
skills: {},
lastLabel: 'main',
skillChecks: {},
playing: false,
currentScreen: 'default',
buttons: {},
rendering: {
screenHeight: window.innerHeight,
screenWidth: window.innerWidth,
canvasWidth: window.innerWidth,
canvasHeight: window.innerHeight,
renderRatio: 1,
topOffset: 0,
leftOffset: 0,
layoutMode: 'horizontal',
},
audio: {},
errors: [],
playTime: {
start: Date.now(),
previousPlaytime: 0,
},
notifications: {},
hudStats: {},
},
getters: {
machineHead(state): MachineStack {
return state.machine.stack[state.machine.stack.length - 1];
},
currentLine(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
command(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
},
actions: {
async startMachine(
{ commit, dispatch },
payload: { scriptPaths: string[]; config: Config },
) {
const { scriptPaths } = payload;
const filePromises: Array<Promise<string>> = [];
for (const path of scriptPaths) {
filePromises.push(getFile(path));
}
const files = await Promise.all(filePromises);
const start = Date.now();
let scripts: Parser.ParsedScript = {};
for (const index in files) {
const file = files[index];
scripts = {
...scripts,
...parseRenpyScript(
(ctx: ParserContext, line: number, error: string) =>
parserError(commit, ctx, line, error),
file,
scriptPaths[index],
),
};
}
const end = Date.now();
console.log(`script parsed in ${end - start} ms`);
commit('setButtons', payload.config.buttons);
commit('setScript', scripts);
commit('setupSkills', payload.config.skills);
commit('setupHudStats', payload.config.hudStats);
},
runLabel({ state, commit }, label) {
const branch = state.machine.script[label];
if (!branch) {
console.error(`Label ${branch} doesn't exist`);
}
commit('setLastLabel', label);
state.machine.stack = [
{
currentIndex: 0,
branch,
label,
},
];
this.dispatch('runLine');
},
async runLine(context) {
await this.dispatch('saveGame');
await runLine(context);
},
nextLine(context) {
return nextLine(context);
},
playerAnswered(context, index) {
return playerAnswered(context, index);
},
saveGame({ state }) {
const save: GameSave = {
data: state.machine.data,
skills: state.skills,
dialog: state.dialog,
buttons: state.buttons,
lastLabel: state.lastLabel,
skillChecks: state.skillChecks,
playTime: getPlayTime(
state.playTime.start,
state.playTime.previousPlaytime,
),
};
localStorage.setItem(SAVE_FILE, JSON.stringify(save));
},
loadGame({ commit, dispatch }, saveFile: string) {
if (saveFile) {
const save: GameSave = JSON.parse(saveFile);
commit('setLoadedData', save);
dispatch('runLabel', save.lastLabel);
}
},
async addNotification({ commit }, text: string) {
const id = `${Date.now()}-${Math.random() * 10000}`;
const notification = {
text,
};
commit('addNotification', { id, notification });
await timeout(getConfig().notifications.timeOnScreen * 1000);
commit('deleteNotification', id);
},
},
mutations: {
setLoadedData(state, save: GameSave) {
state.machine.data = save.data;
state.skills = save.skills;
state.dialog = save.dialog;
state.buttons = save.buttons;
state.lastLabel = save.lastLabel;
state.skillChecks = save.skillChecks;
state.playTime.previousPlaytime = save.playTime;
},
reset(state) {
state.ready = false;
state.machine.stack = [];
state.machine.script = {};
state.machine.data = {};
state.dialog = [];
},
setLastLabel(state, label) {
state.lastLabel = label;
},
setupSkillCheck(state, { skillCheck, skillCheckId }) {
state.skillChecks[skillCheckId] = skillCheck;
},
passSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = true;
},
failSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = false;
state.skillChecks[skillCheckId].available = false;
},
setScript(state, script: Parser.ParsedScript) {
state.machine.script = script;
state.machine.stack.push({
currentIndex: 0,
branch: script.main,
label: 'main',
});
state.ready = true;
},
setupSkills(state, skills: { [key: string]: SkillData }) {
for (const skill in skills) {
state.skills[skill] = {
level: skills[skill].startingLevel || 0,
};
}
},
setupHudStats(state, stats: { [key: string]: HudStatConfig }) {
for (const stat in stats) {
state.hudStats[stat] = stats[stat].startingValue;
}
},
incrementSkill(
state,
{ skill, amount }: { skill: string; amount: number },
) {
state.skills[skill].level += amount;
},
setStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] = amount;
},
addStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] += amount;
},
addDialog(state, payload: { dialog: DialogKey }) {
state.dialog.push(payload.dialog);
},
nextLine(state) {
state.machine.stack[state.machine.stack.length - 1].currentIndex += 1;
},
previousStack(state) {
state.machine.stack.splice(state.machine.stack.length - 1);
},
addStack(state, newStack: AddStackOptions) {
if (!newStack.label) {
newStack.label =
state.machine.stack[state.machine.stack.length - 1].label;
}
state.machine.stack.push(newStack as MachineStack);
},
setStack(state, newStack: MachineStack) {
state.machine.stack = [];
state.machine.stack.push(newStack);
},
setData(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
setDataHelper(dataToModify, path, value);
},
addInstruction(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
addDataHelper(dataToModify, path, value);
},
startPlaying(state) {
state.playing = true;
state.playTime.start = Date.now();
},
setScreen(state, screen) {
state.currentScreen = screen;
},
setButtons(state, buttons: { [key: string]: ButtonConfig }) {
for (const i in buttons) {
state.buttons[i] = {
enabled: buttons[i].enabled,
};
}
},
clearDialog(state) {
state.dialog.splice(0);
},
changeButton(state, payload: { button: string; enabled: boolean }) {
state.buttons[payload.button].enabled = payload.enabled;
},
updateScreenSize(
state,
{
width,
height,
textWidth,
}: { width: number; height: number; textWidth: number },
) {
state.rendering.screenHeight = height;
state.rendering.screenWidth = width;
state.rendering.renderRatio = 1;
state.rendering.topOffset = 0;
state.rendering.leftOffset = 0;
if (width < getConfig().layout.verticalLayoutThreshold) {
state.rendering.layoutMode = 'vertical';
} else {
state.rendering.layoutMode = 'horizontal';
}
},
setMusic(state, music) {
state.audio.currentMusic = music;
},
createError(state, errorText: string) {
state.errors.push({
text: errorText,
});
},
| (state) {
state.errors = [];
},
addNotification(state, { id, notification }) {
state.notifications[id] = notification;
return id;
},
deleteNotification(state, id) {
console.log('delete notif', id);
delete state.notifications[id];
},
},
plugins,
});
return {
store,
key,
};
}
// define your own `useStore` composition function
export function useStore() {
console.log('use store');
console.log(`key `, key);
console.log(`store `, store);
const result = baseUseStore(key);
console.log(result);
return result;
}
| clearErrors | identifier_name |
store.ts | // store.ts
import { InjectionKey, State } from 'vue';
import {
createLogger,
createStore,
useStore as baseUseStore,
Store,
} from 'vuex';
import { nextLine, playerAnswered, runLine } from './vm/renpy-vm';
import { DialogKey, MachineStack } from './types/vuex';
import {
addDataHelper,
getModifiableData,
setDataHelper,
} from './utils/data-helpers';
import { getFile } from './utils/ajax';
import { ParserContext, parseRenpyScript } from './renpy/renpy-parser';
import { ButtonConfig, Config, HudStatConfig, SkillData } from './types/config';
import { GameSave } from './types/game-save';
import { SAVE_FILE } from './constants';
import { AppOptions } from '.';
import { getConfig } from './config';
import { parserError } from './utils/error-handling';
import { getPlayTime } from './utils/time-helpers';
import { timeout } from './utils/promises';
// define your typings for the store state
export interface SetupStoreResult {
store: Store<State>;
key: InjectionKey<Store<State>>;
}
export type AddStackOptions = Partial<MachineStack>;
let key: InjectionKey<Store<State>> = Symbol('Store Injection Key');
let store: Store<State>;
export function setupStore(options: AppOptions): SetupStoreResult {
const plugins = [];
// checking process.env actually exists just for safety
if (options.logging) {
plugins.push(createLogger());
}
// define injection key
key = Symbol('Store Injection Key');
console.log('setup store');
store = createStore<State>({
state: {
machine: {
stack: [],
script: {},
data: {
playerName: 'Player',
},
},
dialog: [],
ready: false,
count: 0,
skills: {},
lastLabel: 'main',
skillChecks: {},
playing: false,
currentScreen: 'default',
buttons: {},
rendering: {
screenHeight: window.innerHeight,
screenWidth: window.innerWidth,
canvasWidth: window.innerWidth,
canvasHeight: window.innerHeight,
renderRatio: 1,
topOffset: 0,
leftOffset: 0,
layoutMode: 'horizontal',
},
audio: {},
errors: [],
playTime: {
start: Date.now(),
previousPlaytime: 0,
},
notifications: {},
hudStats: {},
},
getters: {
machineHead(state): MachineStack {
return state.machine.stack[state.machine.stack.length - 1];
},
currentLine(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
command(state, getters): Parser.Command {
const machineHead = getters.machineHead;
return machineHead.branch[machineHead.currentIndex];
},
},
actions: {
async startMachine(
{ commit, dispatch },
payload: { scriptPaths: string[]; config: Config },
) {
const { scriptPaths } = payload;
const filePromises: Array<Promise<string>> = [];
for (const path of scriptPaths) {
filePromises.push(getFile(path));
}
const files = await Promise.all(filePromises);
const start = Date.now();
let scripts: Parser.ParsedScript = {};
for (const index in files) {
const file = files[index];
scripts = {
...scripts,
...parseRenpyScript(
(ctx: ParserContext, line: number, error: string) =>
parserError(commit, ctx, line, error),
file,
scriptPaths[index],
),
};
}
const end = Date.now();
console.log(`script parsed in ${end - start} ms`);
commit('setButtons', payload.config.buttons);
commit('setScript', scripts);
commit('setupSkills', payload.config.skills);
commit('setupHudStats', payload.config.hudStats);
},
runLabel({ state, commit }, label) {
const branch = state.machine.script[label];
if (!branch) {
console.error(`Label ${branch} doesn't exist`);
}
commit('setLastLabel', label);
state.machine.stack = [
{
currentIndex: 0,
branch,
label,
},
];
this.dispatch('runLine');
},
async runLine(context) {
await this.dispatch('saveGame');
await runLine(context);
},
nextLine(context) {
return nextLine(context);
},
playerAnswered(context, index) {
return playerAnswered(context, index);
},
saveGame({ state }) {
const save: GameSave = {
data: state.machine.data,
skills: state.skills,
dialog: state.dialog,
buttons: state.buttons,
lastLabel: state.lastLabel,
skillChecks: state.skillChecks,
playTime: getPlayTime(
state.playTime.start,
state.playTime.previousPlaytime,
),
};
localStorage.setItem(SAVE_FILE, JSON.stringify(save));
},
loadGame({ commit, dispatch }, saveFile: string) {
if (saveFile) {
const save: GameSave = JSON.parse(saveFile);
commit('setLoadedData', save);
dispatch('runLabel', save.lastLabel);
}
},
async addNotification({ commit }, text: string) {
const id = `${Date.now()}-${Math.random() * 10000}`;
const notification = {
text,
};
commit('addNotification', { id, notification });
await timeout(getConfig().notifications.timeOnScreen * 1000);
commit('deleteNotification', id);
},
},
mutations: {
setLoadedData(state, save: GameSave) {
state.machine.data = save.data;
state.skills = save.skills;
state.dialog = save.dialog;
state.buttons = save.buttons;
state.lastLabel = save.lastLabel;
state.skillChecks = save.skillChecks;
state.playTime.previousPlaytime = save.playTime;
},
reset(state) {
state.ready = false;
state.machine.stack = [];
state.machine.script = {};
state.machine.data = {};
state.dialog = [];
},
setLastLabel(state, label) {
state.lastLabel = label;
},
setupSkillCheck(state, { skillCheck, skillCheckId }) {
state.skillChecks[skillCheckId] = skillCheck;
},
passSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = true;
},
failSkillCheck(state, skillCheckId) {
state.skillChecks[skillCheckId].passed = false;
state.skillChecks[skillCheckId].available = false;
},
setScript(state, script: Parser.ParsedScript) {
state.machine.script = script;
state.machine.stack.push({
currentIndex: 0,
branch: script.main,
label: 'main',
});
state.ready = true;
},
setupSkills(state, skills: { [key: string]: SkillData }) {
for (const skill in skills) {
state.skills[skill] = {
level: skills[skill].startingLevel || 0,
};
}
},
setupHudStats(state, stats: { [key: string]: HudStatConfig }) {
for (const stat in stats) {
state.hudStats[stat] = stats[stat].startingValue;
}
},
incrementSkill(
state,
{ skill, amount }: { skill: string; amount: number },
) {
state.skills[skill].level += amount;
},
setStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] = amount;
},
addStat(state, { stat, amount }: { stat: string; amount: number }) {
state.hudStats[stat] += amount;
},
addDialog(state, payload: { dialog: DialogKey }) {
state.dialog.push(payload.dialog);
},
nextLine(state) {
state.machine.stack[state.machine.stack.length - 1].currentIndex += 1;
},
previousStack(state) {
state.machine.stack.splice(state.machine.stack.length - 1);
},
addStack(state, newStack: AddStackOptions) {
if (!newStack.label) {
newStack.label =
state.machine.stack[state.machine.stack.length - 1].label;
}
state.machine.stack.push(newStack as MachineStack);
},
setStack(state, newStack: MachineStack) {
state.machine.stack = [];
state.machine.stack.push(newStack);
},
setData(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
setDataHelper(dataToModify, path, value);
},
addInstruction(state, { path, value }: { path: string; value: any }) {
const dataToModify = getModifiableData(state);
addDataHelper(dataToModify, path, value);
},
startPlaying(state) {
state.playing = true;
state.playTime.start = Date.now();
},
setScreen(state, screen) {
state.currentScreen = screen;
},
setButtons(state, buttons: { [key: string]: ButtonConfig }) {
for (const i in buttons) {
state.buttons[i] = {
enabled: buttons[i].enabled,
};
}
},
clearDialog(state) {
state.dialog.splice(0);
},
changeButton(state, payload: { button: string; enabled: boolean }) {
state.buttons[payload.button].enabled = payload.enabled;
},
updateScreenSize(
state,
{
width,
height,
textWidth,
}: { width: number; height: number; textWidth: number },
) {
state.rendering.screenHeight = height;
state.rendering.screenWidth = width;
state.rendering.renderRatio = 1;
state.rendering.topOffset = 0;
state.rendering.leftOffset = 0;
if (width < getConfig().layout.verticalLayoutThreshold) {
state.rendering.layoutMode = 'vertical';
} else {
state.rendering.layoutMode = 'horizontal';
}
},
setMusic(state, music) {
state.audio.currentMusic = music;
},
createError(state, errorText: string) {
state.errors.push({
text: errorText,
});
},
clearErrors(state) {
state.errors = [];
},
addNotification(state, { id, notification }) {
state.notifications[id] = notification;
return id;
},
deleteNotification(state, id) {
console.log('delete notif', id);
delete state.notifications[id];
}, | plugins,
});
return {
store,
key,
};
}
// define your own `useStore` composition function
export function useStore() {
console.log('use store');
console.log(`key `, key);
console.log(`store `, store);
const result = baseUseStore(key);
console.log(result);
return result;
} | }, | random_line_split |
tls_pstm_montgomery_reduce.rs |
use libc;
use libc::free;
extern "C" {
#[no_mangle]
fn memset(_: *mut libc::c_void, _: libc::c_int, _: libc::c_ulong) -> *mut libc::c_void;
#[no_mangle]
fn xzalloc(size: size_t) -> *mut libc::c_void;
#[no_mangle]
fn pstm_clamp(a: *mut pstm_int);
#[no_mangle]
fn pstm_cmp_mag(a: *mut pstm_int, b: *mut pstm_int) -> int32;
#[no_mangle]
fn s_pstm_sub(a: *mut pstm_int, b: *mut pstm_int, c: *mut pstm_int) -> int32;
}
use crate::librb::size_t;
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* Interface glue between bbox code and minimally tweaked matrixssl
* code. All C files (matrixssl and bbox (ones which need TLS))
* include this file, and guaranteed to see a consistent API,
* defines, types, etc.
*/
/* Config tweaks */
/* pstm: multiprecision numbers */
//#if defined(__GNUC__) && defined(__x86_64__)
// /* PSTM_X86_64 works correctly, but +782 bytes. */
// /* Looks like most of the growth is because of PSTM_64BIT. */
//# define PSTM_64BIT
//# define PSTM_X86_64
//#endif
//#if SOME_COND #define PSTM_MIPS, #define PSTM_32BIT
//#if SOME_COND #define PSTM_ARM, #define PSTM_32BIT
/* Failure due to bad function param */
/* Failure as a result of system call error */
/* Failure to allocate requested memory */
/* Failure on sanity/limit tests */
pub type uint64 = u64;
pub type uint32 = u32;
pub type int32 = i32;
pub type pstm_digit = uint32;
pub type pstm_word = uint64;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct pstm_int {
pub used: libc::c_int,
pub alloc: libc::c_int,
pub sign: libc::c_int,
pub dp: *mut pstm_digit,
}
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
* Changes are flagged with //bbox
*/
/* *
* @file pstm.h
* @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
*
* multiple-precision integer library.
*/
/*
* Copyright (c) 2013-2015 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved
*
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/eng/Company/Locations
*
* This program is distributed in WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/* *****************************************************************************/
/* Define this here to avoid including circular limits.h on some platforms */
/* *****************************************************************************/
/*
If native 64 bit integers are not supported, we do not support 32x32->64
in hardware, so we must set the 16 bit flag to produce 16x16->32 products.
*/
/* ! HAVE_NATIVE_INT64 */
/* *****************************************************************************/
/*
Some default configurations.
pstm_word should be the largest value the processor can hold as the product
of a multiplication. Most platforms support a 32x32->64 MAC instruction,
so 64bits is the default pstm_word size.
pstm_digit should be half the size of pstm_word
*/
/* This is the default case, 32-bit digits, 64-bit word products */
/* digit and word size */
/* *****************************************************************************/
/*
equalities
*/
/* less than */
/* equal to */
/* greater than */
/* positive integer */
/* negative */
/* *****************************************************************************/
/*
Various build options
*/
/* default (64) digits of allocation */
//bbox: was int16
//bbox psPool_t *pool;
/* *****************************************************************************/
/*
Operations on large integers
*/
//made static:extern void pstm_set(pstm_int *a, pstm_digit b);
//made static:extern void pstm_zero(pstm_int * a);
//bbox: pool unused
//made static:extern int32 pstm_init(psPool_t *pool, pstm_int * a);
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_init_copy(psPool_t *pool, pstm_int * a, pstm_int * b,
//made static: int toSqr); //bbox: was int16 toSqr
//made static:extern int pstm_count_bits (pstm_int * a) FAST_FUNC; //bbox: was returning int16
//bbox: pool unused
//made static:extern void pstm_exch(pstm_int * a, pstm_int * b);
//bbox: was int16 size
//made static:extern void pstm_rshd(pstm_int *a, int x); //bbox: was int16 x
//made static:extern int32 pstm_lshd(pstm_int * a, int b); //bbox: was int16 b
//bbox: pool unused
//made static:extern int32 pstm_div(psPool_t *pool, pstm_int *a, pstm_int *b, pstm_int *c,
//made static: pstm_int *d);
//bbox: pool unused
//made static:extern int32 pstm_div_2d(psPool_t *pool, pstm_int *a, int b, pstm_int *c,
//made static: pstm_int *d); //bbox: was int16 b
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_mod(psPool_t *pool, pstm_int *a, pstm_int *b, pstm_int *c);
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_2expt(pstm_int *a, int b); //bbox: was int16 b
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_montgomery_setup(pstm_int *a, pstm_digit *rho);
//bbox: pool unused
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
* Changes are flagged with //bbox
*/
/* *
* @file pstm_montgomery_reduce.c
* @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
*
* Multiprecision Montgomery Reduction.
*/
/*
* Copyright (c) 2013-2015 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved
*
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/eng/Company/Locations
*
* This program is distributed in WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/* *****************************************************************************/
//bbox
//#include "../cryptoApi.h"
/* *****************************************************************************/
/* ISO C code */
/* *****************************************************************************/
/* computes x/R == x (mod N) via Montgomery Reduction */
#[no_mangle]
pub unsafe extern "C" fn | (
mut a: *mut pstm_int,
mut m: *mut pstm_int,
mut mp: pstm_digit,
mut paD: *mut pstm_digit,
mut paDlen: uint32,
) -> int32 {
let mut c: *mut pstm_digit = 0 as *mut pstm_digit; //bbox: was int16
let mut _c: *mut pstm_digit = 0 as *mut pstm_digit;
let mut tmpm: *mut pstm_digit = 0 as *mut pstm_digit;
let mut mu: pstm_digit = 0;
let mut oldused: int32 = 0;
let mut x: int32 = 0;
let mut y: int32 = 0;
let mut pa: libc::c_int = 0;
pa = (*m).used;
if pa > (*a).alloc {
/* Sanity test for bad numbers. This will confirm no buffer overruns */
return -9i32;
}
if !paD.is_null()
&& paDlen
>= (2i32 as uint32)
.wrapping_mul(pa as libc::c_uint)
.wrapping_add(1i32 as libc::c_uint)
{
c = paD;
memset(c as *mut libc::c_void, 0i32, paDlen as libc::c_ulong);
} else {
c = xzalloc((2i32 * pa + 1i32) as size_t) as *mut pstm_digit
//bbox
}
/* copy the input */
oldused = (*a).used;
x = 0i32;
while x < oldused {
*c.offset(x as isize) = *(*a).dp.offset(x as isize);
x += 1
}
x = 0i32;
while x < pa {
let mut cy: pstm_digit = 0i32 as pstm_digit;
/* get Mu for this round */
mu = (*c.offset(x as isize)).wrapping_mul(mp);
_c = c.offset(x as isize);
tmpm = (*m).dp;
y = 0i32;
/* PSTM_X86_64 */
while y < pa {
let mut t: pstm_word = 0;
let fresh0 = tmpm;
tmpm = tmpm.offset(1);
t = (*_c.offset(0) as pstm_word)
.wrapping_add(cy as pstm_word)
.wrapping_add((mu as pstm_word).wrapping_mul(*fresh0 as pstm_word));
*_c.offset(0) = t as pstm_digit;
cy = (t >> 32i32) as pstm_digit;
_c = _c.offset(1);
y += 1
}
while cy != 0 {
let ref mut fresh1 = *_c.offset(0);
*fresh1 = (*fresh1 as libc::c_uint).wrapping_add(cy) as pstm_digit as pstm_digit;
let mut t_0: pstm_digit = *fresh1;
cy = (t_0 < cy) as libc::c_int as pstm_digit;
_c = _c.offset(1)
}
x += 1
}
/* now copy out */
_c = c.offset(pa as isize);
tmpm = (*a).dp;
x = 0i32;
while x < pa + 1i32 {
let fresh2 = _c;
_c = _c.offset(1);
let fresh3 = tmpm;
tmpm = tmpm.offset(1);
*fresh3 = *fresh2;
x += 1
}
while x < oldused {
let fresh4 = tmpm;
tmpm = tmpm.offset(1);
*fresh4 = 0i32 as pstm_digit;
x += 1
}
(*a).used = pa + 1i32;
pstm_clamp(a);
/* reuse x as return code */
x = 0i32;
/* if A >= m then A = A - m */
if pstm_cmp_mag(a, m) != -1i32 {
if s_pstm_sub(a, m, a) != 0i32 {
x = -8i32
}
}
if paDlen
< (2i32 as uint32)
.wrapping_mul(pa as libc::c_uint)
.wrapping_add(1i32 as libc::c_uint)
{
free(c as *mut libc::c_void);
}
return x;
}
/* *****************************************************************************/
/* !DISABLE_PSTM */
| pstm_montgomery_reduce | identifier_name |
tls_pstm_montgomery_reduce.rs |
use libc;
use libc::free;
extern "C" {
#[no_mangle]
fn memset(_: *mut libc::c_void, _: libc::c_int, _: libc::c_ulong) -> *mut libc::c_void;
#[no_mangle]
fn xzalloc(size: size_t) -> *mut libc::c_void;
#[no_mangle]
fn pstm_clamp(a: *mut pstm_int);
#[no_mangle]
fn pstm_cmp_mag(a: *mut pstm_int, b: *mut pstm_int) -> int32;
#[no_mangle]
fn s_pstm_sub(a: *mut pstm_int, b: *mut pstm_int, c: *mut pstm_int) -> int32;
}
use crate::librb::size_t;
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* Interface glue between bbox code and minimally tweaked matrixssl
* code. All C files (matrixssl and bbox (ones which need TLS))
* include this file, and guaranteed to see a consistent API,
* defines, types, etc.
*/
/* Config tweaks */
/* pstm: multiprecision numbers */
//#if defined(__GNUC__) && defined(__x86_64__)
// /* PSTM_X86_64 works correctly, but +782 bytes. */
// /* Looks like most of the growth is because of PSTM_64BIT. */
//# define PSTM_64BIT
//# define PSTM_X86_64
//#endif
//#if SOME_COND #define PSTM_MIPS, #define PSTM_32BIT
//#if SOME_COND #define PSTM_ARM, #define PSTM_32BIT
/* Failure due to bad function param */
/* Failure as a result of system call error */
/* Failure to allocate requested memory */
/* Failure on sanity/limit tests */
pub type uint64 = u64;
pub type uint32 = u32;
pub type int32 = i32;
pub type pstm_digit = uint32;
pub type pstm_word = uint64;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct pstm_int {
pub used: libc::c_int,
pub alloc: libc::c_int,
pub sign: libc::c_int,
pub dp: *mut pstm_digit,
}
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
* Changes are flagged with //bbox
*/
/* *
* @file pstm.h
* @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
*
* multiple-precision integer library.
*/
/*
* Copyright (c) 2013-2015 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved
*
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/eng/Company/Locations
*
* This program is distributed in WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/* *****************************************************************************/
/* Define this here to avoid including circular limits.h on some platforms */
/* *****************************************************************************/
/*
If native 64 bit integers are not supported, we do not support 32x32->64
in hardware, so we must set the 16 bit flag to produce 16x16->32 products.
*/
/* ! HAVE_NATIVE_INT64 */
/* *****************************************************************************/
/*
Some default configurations.
pstm_word should be the largest value the processor can hold as the product
of a multiplication. Most platforms support a 32x32->64 MAC instruction,
so 64bits is the default pstm_word size.
pstm_digit should be half the size of pstm_word
*/
/* This is the default case, 32-bit digits, 64-bit word products */
/* digit and word size */
/* *****************************************************************************/
/*
equalities
*/
/* less than */
/* equal to */
/* greater than */
/* positive integer */
/* negative */
/* *****************************************************************************/
/*
Various build options
*/
/* default (64) digits of allocation */
//bbox: was int16
//bbox psPool_t *pool;
/* *****************************************************************************/
/*
Operations on large integers
*/
//made static:extern void pstm_set(pstm_int *a, pstm_digit b);
//made static:extern void pstm_zero(pstm_int * a);
//bbox: pool unused
//made static:extern int32 pstm_init(psPool_t *pool, pstm_int * a);
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_init_copy(psPool_t *pool, pstm_int * a, pstm_int * b,
//made static: int toSqr); //bbox: was int16 toSqr
//made static:extern int pstm_count_bits (pstm_int * a) FAST_FUNC; //bbox: was returning int16
//bbox: pool unused
//made static:extern void pstm_exch(pstm_int * a, pstm_int * b);
//bbox: was int16 size
//made static:extern void pstm_rshd(pstm_int *a, int x); //bbox: was int16 x
//made static:extern int32 pstm_lshd(pstm_int * a, int b); //bbox: was int16 b
//bbox: pool unused
//made static:extern int32 pstm_div(psPool_t *pool, pstm_int *a, pstm_int *b, pstm_int *c,
//made static: pstm_int *d);
//bbox: pool unused
//made static:extern int32 pstm_div_2d(psPool_t *pool, pstm_int *a, int b, pstm_int *c,
//made static: pstm_int *d); //bbox: was int16 b
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_mod(psPool_t *pool, pstm_int *a, pstm_int *b, pstm_int *c);
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_2expt(pstm_int *a, int b); //bbox: was int16 b
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_montgomery_setup(pstm_int *a, pstm_digit *rho);
//bbox: pool unused
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
* Changes are flagged with //bbox
*/
/* *
* @file pstm_montgomery_reduce.c
* @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
*
* Multiprecision Montgomery Reduction.
*/
/*
* Copyright (c) 2013-2015 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved
*
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/eng/Company/Locations
*
* This program is distributed in WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/* *****************************************************************************/
//bbox
//#include "../cryptoApi.h"
/* *****************************************************************************/
/* ISO C code */
/* *****************************************************************************/
/* computes x/R == x (mod N) via Montgomery Reduction */
#[no_mangle]
pub unsafe extern "C" fn pstm_montgomery_reduce(
mut a: *mut pstm_int,
mut m: *mut pstm_int,
mut mp: pstm_digit,
mut paD: *mut pstm_digit,
mut paDlen: uint32,
) -> int32 {
let mut c: *mut pstm_digit = 0 as *mut pstm_digit; //bbox: was int16
let mut _c: *mut pstm_digit = 0 as *mut pstm_digit;
let mut tmpm: *mut pstm_digit = 0 as *mut pstm_digit;
let mut mu: pstm_digit = 0;
let mut oldused: int32 = 0;
let mut x: int32 = 0;
let mut y: int32 = 0;
let mut pa: libc::c_int = 0;
pa = (*m).used;
if pa > (*a).alloc {
/* Sanity test for bad numbers. This will confirm no buffer overruns */
return -9i32;
}
if !paD.is_null()
&& paDlen
>= (2i32 as uint32)
.wrapping_mul(pa as libc::c_uint)
.wrapping_add(1i32 as libc::c_uint)
{
c = paD;
memset(c as *mut libc::c_void, 0i32, paDlen as libc::c_ulong);
} else |
/* copy the input */
oldused = (*a).used;
x = 0i32;
while x < oldused {
*c.offset(x as isize) = *(*a).dp.offset(x as isize);
x += 1
}
x = 0i32;
while x < pa {
let mut cy: pstm_digit = 0i32 as pstm_digit;
/* get Mu for this round */
mu = (*c.offset(x as isize)).wrapping_mul(mp);
_c = c.offset(x as isize);
tmpm = (*m).dp;
y = 0i32;
/* PSTM_X86_64 */
while y < pa {
let mut t: pstm_word = 0;
let fresh0 = tmpm;
tmpm = tmpm.offset(1);
t = (*_c.offset(0) as pstm_word)
.wrapping_add(cy as pstm_word)
.wrapping_add((mu as pstm_word).wrapping_mul(*fresh0 as pstm_word));
*_c.offset(0) = t as pstm_digit;
cy = (t >> 32i32) as pstm_digit;
_c = _c.offset(1);
y += 1
}
while cy != 0 {
let ref mut fresh1 = *_c.offset(0);
*fresh1 = (*fresh1 as libc::c_uint).wrapping_add(cy) as pstm_digit as pstm_digit;
let mut t_0: pstm_digit = *fresh1;
cy = (t_0 < cy) as libc::c_int as pstm_digit;
_c = _c.offset(1)
}
x += 1
}
/* now copy out */
_c = c.offset(pa as isize);
tmpm = (*a).dp;
x = 0i32;
while x < pa + 1i32 {
let fresh2 = _c;
_c = _c.offset(1);
let fresh3 = tmpm;
tmpm = tmpm.offset(1);
*fresh3 = *fresh2;
x += 1
}
while x < oldused {
let fresh4 = tmpm;
tmpm = tmpm.offset(1);
*fresh4 = 0i32 as pstm_digit;
x += 1
}
(*a).used = pa + 1i32;
pstm_clamp(a);
/* reuse x as return code */
x = 0i32;
/* if A >= m then A = A - m */
if pstm_cmp_mag(a, m) != -1i32 {
if s_pstm_sub(a, m, a) != 0i32 {
x = -8i32
}
}
if paDlen
< (2i32 as uint32)
.wrapping_mul(pa as libc::c_uint)
.wrapping_add(1i32 as libc::c_uint)
{
free(c as *mut libc::c_void);
}
return x;
}
/* *****************************************************************************/
/* !DISABLE_PSTM */
| {
c = xzalloc((2i32 * pa + 1i32) as size_t) as *mut pstm_digit
//bbox
} | conditional_block |
tls_pstm_montgomery_reduce.rs | use libc;
use libc::free;
// External symbols this translation unit links against.  All of them live in
// sibling c2rust-translated files of the busybox TLS code (or libc), so they
// are declared here rather than defined.
extern "C" {
    // libc memset; used to zero the caller-supplied scratch buffer.
    #[no_mangle]
    fn memset(_: *mut libc::c_void, _: libc::c_int, _: libc::c_ulong) -> *mut libc::c_void;
    // busybox allocator: malloc + zero-fill, dies on failure (never returns NULL).
    #[no_mangle]
    fn xzalloc(size: size_t) -> *mut libc::c_void;
    // Trims leading zero digits of `a` and normalizes the sign of zero.
    #[no_mangle]
    fn pstm_clamp(a: *mut pstm_int);
    // Compares |a| and |b|; returns <0 / 0 / >0 ordering (sign ignored).
    #[no_mangle]
    fn pstm_cmp_mag(a: *mut pstm_int, b: *mut pstm_int) -> int32;
    // Unsigned subtraction c = a - b (low-level helper, no sign handling).
    #[no_mangle]
    fn s_pstm_sub(a: *mut pstm_int, b: *mut pstm_int, c: *mut pstm_int) -> int32;
}
use crate::librb::size_t;
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* Interface glue between bbox code and minimally tweaked matrixssl
* code. All C files (matrixssl and bbox (ones which need TLS))
* include this file, and guaranteed to see a consistent API,
* defines, types, etc.
*/
/* Config tweaks */
/* pstm: multiprecision numbers */
//#if defined(__GNUC__) && defined(__x86_64__)
// /* PSTM_X86_64 works correctly, but +782 bytes. */
// /* Looks like most of the growth is because of PSTM_64BIT. */
//# define PSTM_64BIT
//# define PSTM_X86_64
//#endif
//#if SOME_COND #define PSTM_MIPS, #define PSTM_32BIT
//#if SOME_COND #define PSTM_ARM, #define PSTM_32BIT
/* Failure due to bad function param */
/* Failure as a result of system call error */
/* Failure to allocate requested memory */
/* Failure on sanity/limit tests */
// Fixed-width integer aliases mirroring the original matrixssl C typedefs.
pub type uint64 = u64;
pub type uint32 = u32;
pub type int32 = i32;
// One "limb" (digit) of a multiprecision integer: 32 bits in this build.
pub type pstm_digit = uint32;
// Double-width word able to hold a full 32x32 -> 64-bit digit product.
pub type pstm_word = uint64;
// Multiprecision integer, laid out exactly like the C struct (repr(C)) so it
// can be passed across the extern "C" boundary.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct pstm_int {
    pub used: libc::c_int,  // number of digits currently in use
    pub alloc: libc::c_int, // number of digits allocated in `dp`
    pub sign: libc::c_int,  // 0 = positive, nonzero = negative
    pub dp: *mut pstm_digit, // least-significant digit first
}
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
* Changes are flagged with //bbox
*/
/* *
* @file pstm.h
* @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
*
* multiple-precision integer library.
*/
/*
* Copyright (c) 2013-2015 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved | * the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/eng/Company/Locations
*
* This program is distributed in WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/* *****************************************************************************/
/* Define this here to avoid including circular limits.h on some platforms */
/* *****************************************************************************/
/*
If native 64 bit integers are not supported, we do not support 32x32->64
in hardware, so we must set the 16 bit flag to produce 16x16->32 products.
*/
/* ! HAVE_NATIVE_INT64 */
/* *****************************************************************************/
/*
Some default configurations.
pstm_word should be the largest value the processor can hold as the product
of a multiplication. Most platforms support a 32x32->64 MAC instruction,
so 64bits is the default pstm_word size.
pstm_digit should be half the size of pstm_word
*/
/* This is the default case, 32-bit digits, 64-bit word products */
/* digit and word size */
/* *****************************************************************************/
/*
equalities
*/
/* less than */
/* equal to */
/* greater than */
/* positive integer */
/* negative */
/* *****************************************************************************/
/*
Various build options
*/
/* default (64) digits of allocation */
//bbox: was int16
//bbox psPool_t *pool;
/* *****************************************************************************/
/*
Operations on large integers
*/
//made static:extern void pstm_set(pstm_int *a, pstm_digit b);
//made static:extern void pstm_zero(pstm_int * a);
//bbox: pool unused
//made static:extern int32 pstm_init(psPool_t *pool, pstm_int * a);
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_init_copy(psPool_t *pool, pstm_int * a, pstm_int * b,
//made static: int toSqr); //bbox: was int16 toSqr
//made static:extern int pstm_count_bits (pstm_int * a) FAST_FUNC; //bbox: was returning int16
//bbox: pool unused
//made static:extern void pstm_exch(pstm_int * a, pstm_int * b);
//bbox: was int16 size
//made static:extern void pstm_rshd(pstm_int *a, int x); //bbox: was int16 x
//made static:extern int32 pstm_lshd(pstm_int * a, int b); //bbox: was int16 b
//bbox: pool unused
//made static:extern int32 pstm_div(psPool_t *pool, pstm_int *a, pstm_int *b, pstm_int *c,
//made static: pstm_int *d);
//bbox: pool unused
//made static:extern int32 pstm_div_2d(psPool_t *pool, pstm_int *a, int b, pstm_int *c,
//made static: pstm_int *d); //bbox: was int16 b
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_mod(psPool_t *pool, pstm_int *a, pstm_int *b, pstm_int *c);
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_2expt(pstm_int *a, int b); //bbox: was int16 b
//bbox: pool unused
//bbox: pool unused
//made static:extern int32 pstm_montgomery_setup(pstm_int *a, pstm_digit *rho);
//bbox: pool unused
/*
* Copyright (C) 2017 Denys Vlasenko
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
* Changes are flagged with //bbox
*/
/* *
* @file pstm_montgomery_reduce.c
* @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
*
* Multiprecision Montgomery Reduction.
*/
/*
* Copyright (c) 2013-2015 INSIDE Secure Corporation
* Copyright (c) PeerSec Networks, 2002-2011
* All Rights Reserved
*
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This General Public License does NOT permit incorporating this software
* into proprietary programs. If you are unable to comply with the GPL, a
* commercial license for this software may be purchased from INSIDE at
* http://www.insidesecure.com/eng/Company/Locations
*
* This program is distributed in WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* http://www.gnu.org/copyleft/gpl.html
*/
/* *****************************************************************************/
//bbox
//#include "../cryptoApi.h"
/* *****************************************************************************/
/* ISO C code */
/* *****************************************************************************/
/* computes x/R == x (mod N) via Montgomery Reduction */
/// Montgomery reduction: computes `a = a * R^-1 (mod m)` in place, where
/// `R = 2^(32 * m.used)`.
///
/// * `mp`  - precomputed Montgomery constant (`rho` from `pstm_montgomery_setup`).
/// * `paD` / `paDlen` - optional caller-supplied scratch buffer; used only when
///   it is non-NULL and holds at least `2*pa + 1` digits, otherwise a
///   temporary buffer is heap-allocated (and freed before returning).
///
/// Returns 0 on success, -8 if the final conditional subtraction fails, and
/// -9 if `a` has fewer digits allocated than `m` uses.
#[no_mangle]
pub unsafe extern "C" fn pstm_montgomery_reduce(
    mut a: *mut pstm_int,
    mut m: *mut pstm_int,
    mut mp: pstm_digit,
    mut paD: *mut pstm_digit,
    mut paDlen: uint32,
) -> int32 {
    let mut c: *mut pstm_digit = 0 as *mut pstm_digit; //bbox: was int16
    let mut _c: *mut pstm_digit = 0 as *mut pstm_digit;
    let mut tmpm: *mut pstm_digit = 0 as *mut pstm_digit;
    let mut mu: pstm_digit = 0;
    let mut oldused: int32 = 0;
    let mut x: int32 = 0;
    let mut y: int32 = 0;
    let mut pa: libc::c_int = 0;
    pa = (*m).used;
    if pa > (*a).alloc {
        /* Sanity test for bad numbers. This will confirm no buffer overruns */
        return -9i32;
    }
    /* Pick the working buffer: reuse the caller's scratch space if it is big
     * enough (>= 2*pa+1 digits), otherwise allocate a zeroed temporary. */
    if !paD.is_null()
        && paDlen
            >= (2i32 as uint32)
                .wrapping_mul(pa as libc::c_uint)
                .wrapping_add(1i32 as libc::c_uint)
    {
        c = paD;
        memset(c as *mut libc::c_void, 0i32, paDlen as libc::c_ulong);
    } else {
        c = xzalloc((2i32 * pa + 1i32) as size_t) as *mut pstm_digit
        //bbox
    }
    /* copy the input */
    oldused = (*a).used;
    x = 0i32;
    while x < oldused {
        *c.offset(x as isize) = *(*a).dp.offset(x as isize);
        x += 1
    }
    /* Main reduction loop: for each of the low `pa` digits, choose mu so that
     * adding mu * m zeroes that digit, then propagate the carry upward. */
    x = 0i32;
    while x < pa {
        let mut cy: pstm_digit = 0i32 as pstm_digit;
        /* get Mu for this round */
        mu = (*c.offset(x as isize)).wrapping_mul(mp);
        _c = c.offset(x as isize);
        tmpm = (*m).dp;
        y = 0i32;
        /* PSTM_X86_64 */
        while y < pa {
            /* t = *_c + cy + mu * m[y], accumulated in a 64-bit word; the low
             * 32 bits go back into the buffer, the high 32 bits become the
             * next carry (digit width is 32, hence the >> 32). */
            let mut t: pstm_word = 0;
            let fresh0 = tmpm;
            tmpm = tmpm.offset(1);
            t = (*_c.offset(0) as pstm_word)
                .wrapping_add(cy as pstm_word)
                .wrapping_add((mu as pstm_word).wrapping_mul(*fresh0 as pstm_word));
            *_c.offset(0) = t as pstm_digit;
            cy = (t >> 32i32) as pstm_digit;
            _c = _c.offset(1);
            y += 1
        }
        /* Ripple any remaining carry into the upper digits of the buffer. */
        while cy != 0 {
            let ref mut fresh1 = *_c.offset(0);
            *fresh1 = (*fresh1 as libc::c_uint).wrapping_add(cy) as pstm_digit as pstm_digit;
            let mut t_0: pstm_digit = *fresh1;
            /* carry out iff the addition wrapped (sum < addend) */
            cy = (t_0 < cy) as libc::c_int as pstm_digit;
            _c = _c.offset(1)
        }
        x += 1
    }
    /* now copy out: the result lives in digits [pa, 2*pa] of the buffer */
    _c = c.offset(pa as isize);
    tmpm = (*a).dp;
    x = 0i32;
    while x < pa + 1i32 {
        let fresh2 = _c;
        _c = _c.offset(1);
        let fresh3 = tmpm;
        tmpm = tmpm.offset(1);
        *fresh3 = *fresh2;
        x += 1
    }
    /* Zero any higher digits `a` used before the reduction. */
    while x < oldused {
        let fresh4 = tmpm;
        tmpm = tmpm.offset(1);
        *fresh4 = 0i32 as pstm_digit;
        x += 1
    }
    (*a).used = pa + 1i32;
    pstm_clamp(a);
    /* reuse x as return code */
    x = 0i32;
    /* if A >= m then A = A - m */
    if pstm_cmp_mag(a, m) != -1i32 {
        if s_pstm_sub(a, m, a) != 0i32 {
            x = -8i32
        }
    }
    /* Free the scratch buffer only if we allocated it ourselves, i.e. mirror
     * the allocation condition above.  The previous code freed solely on
     * `paDlen < 2*pa+1`, which leaked the xzalloc'd buffer whenever `paD`
     * was NULL but `paDlen` happened to be >= 2*pa+1. */
    if paD.is_null()
        || paDlen
            < (2i32 as uint32)
                .wrapping_mul(pa as libc::c_uint)
                .wrapping_add(1i32 as libc::c_uint)
    {
        free(c as *mut libc::c_void);
    }
    return x;
}
/* *****************************************************************************/
/* !DISABLE_PSTM */ | *
* The latest version of this code is available at http://www.matrixssl.org
*
* This software is open source; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.