text
stringlengths
1
22.8M
```c++ /*============================================================================= file LICENSE_1_0.txt or copy at path_to_url =============================================================================*/ /////////////////////////////////////////////////////////////////////////////// // // A parser for summing a comma-separated list of numbers using phoenix. // // [ JDG June 28, 2002 ] spirit1 // [ JDG March 24, 2007 ] spirit2 // /////////////////////////////////////////////////////////////////////////////// #include <boost/config/warning_disable.hpp> //[tutorial_adder_includes #include <boost/spirit/include/qi.hpp> #include <boost/spirit/include/phoenix_core.hpp> #include <boost/spirit/include/phoenix_operator.hpp> #include <iostream> #include <string> //] namespace client { //[tutorial_adder_using namespace qi = boost::spirit::qi; namespace ascii = boost::spirit::ascii; namespace phoenix = boost::phoenix; using qi::double_; using qi::_1; using ascii::space; using phoenix::ref; //] /////////////////////////////////////////////////////////////////////////// // Our adder /////////////////////////////////////////////////////////////////////////// //[tutorial_adder template <typename Iterator> bool adder(Iterator first, Iterator last, double& n) { bool r = qi::phrase_parse(first, last, // Begin grammar ( double_[ref(n) = _1] >> *(',' >> double_[ref(n) += _1]) ) , // End grammar space); if (first != last) // fail if we did not get a full match return false; return r; } //] } //////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////// int main() { std::cout << "/////////////////////////////////////////////////////////\n\n"; std::cout << "\t\tA parser for summing a list of numbers...\n\n"; std::cout << "/////////////////////////////////////////////////////////\n\n"; std::cout << "Give me a comma separated list of numbers.\n"; std::cout << "The numbers are 
added using Phoenix.\n"; std::cout << "Type [q or Q] to quit\n\n"; std::string str; while (getline(std::cin, str)) { if (str.empty() || str[0] == 'q' || str[0] == 'Q') break; double n; if (client::adder(str.begin(), str.end(), n)) { std::cout << "-------------------------\n"; std::cout << "Parsing succeeded\n"; std::cout << str << " Parses OK: " << std::endl; std::cout << "sum = " << n; std::cout << "\n-------------------------\n"; } else { std::cout << "-------------------------\n"; std::cout << "Parsing failed\n"; std::cout << "-------------------------\n"; } } std::cout << "Bye... :-) \n\n"; return 0; } ```
Elections to Moyle District Council were held on 15 May 1985 on the same day as the other Northern Irish local government elections. The election used three district electoral areas to elect a total of 15 councillors. Election results Note: "Votes" are the first preference votes. Districts summary |- class="unsortable" align="centre" !rowspan=2 align="left"|Ward ! % !Cllrs ! % !Cllrs ! % !Cllrs ! % !Cllrs ! % !Cllrs !rowspan=2|TotalCllrs |- class="unsortable" align="center" !colspan=2 bgcolor="" | SDLP !colspan=2 bgcolor="" | DUP !colspan=2 bgcolor="" | UUP !colspan=2 bgcolor="" | Sinn Féin !colspan=2 bgcolor="white"| Others |- |align="left"|Ballycastle |bgcolor="#99FF66"|28.5 |bgcolor="#99FF66"|1 |26.2 |1 |12.9 |1 |13.8 |1 |18.6 |1 |5 |- |align="left"|Giant's Causeway |0.0 |0 |bgcolor="#D46A4C"|41.8 |bgcolor="#D46A4C"|2 |29.1 |1 |0.0 |0 |29.1 |2 |5 |- |align="left"|The Glens |bgcolor="#99FF66"|56.4 |bgcolor="#99FF66"|3 |0.0 |0 |8.8 |0 |20.7 |1 |14.1 |1 |5 |- |- class="unsortable" class="sortbottom" style="background:#C9C9C9" |align="left"| Total |30.4 |4 |21.0 |3 |16.2 |2 |12.2 |2 |20.2 |4 |15 |- |} District results Ballycastle 1985: 1 x SDLP, 1 x DUP, 1 x UUP, 1 x Sinn Féin, 1 x Independent Giant's Causeway 1985: 2 x Independent Unionist, 2 x DUP, 1 x UUP The Glens 1985: 3 x SDLP, 1 x Sinn Féin, 1 x Independent Nationalist References Moyle District Council elections Moyle
```xml
<?xml version="1.0" encoding="utf-8"?>
<!-- Clickable card container for one list/grid item.
     Uses the support-library CardView so elevation, corner radius and
     background colour come from app: attributes on pre-Lollipop devices. -->
<android.support.v7.widget.CardView
    android:id="@+id/card_view"
    xmlns:android="path_to_url"
    xmlns:app="path_to_url"
    android:layout_width="match_parent"
    android:layout_height="wrap_content"
    android:layout_margin="@dimen/card_margin"
    android:clickable="true"
    android:foreground="?attr/selectableItemBackground"
    app:cardBackgroundColor="@color/cardview_light_background"
    app:cardCornerRadius="@dimen/card_corner_radius"
    app:cardElevation="@dimen/app_bar_default_elevation">

    <!-- Cover image; fixed height, centered without scaling.
         Populated at runtime (no android:src here). -->
    <ImageView
        android:id="@+id/home_bangumi_boby_image"
        android:layout_width="match_parent"
        android:layout_height="@dimen/card_image_default_height"
        android:scaleType="center" />

</android.support.v7.widget.CardView>
```
Dazzle camouflage of warships was adopted by the U.S. Navy during World War II, following research at the Naval Research Laboratory. Dazzle consists in painting obtrusive patterns on vertical surfaces. Unlike some other forms of camouflage, dazzle works not by offering concealment but by making it difficult to estimate a target's range, speed and heading. Each ship's dazzle pattern was unique to make it more difficult for the enemy to recognize different classes of ships. The result was that a profusion of dazzle schemes were tried, and the evidence for their success was at best mixed. Dazzle camouflage patterns used on aircraft carriers are presented here. Colors Patterns Notes References See also World War II ship camouflage measures of the United States Navy World War II US Navy dazzle camouflage measures 31, 32 and 33: battleships World War II US Navy dazzle camouflage measures 31, 32 and 33: cruisers World War II US Navy dazzle camouflage measures 31, 32 and 33: destroyers World War II ships of the United States Vehicle markings Camouflage Military camouflage Camouflage patterns
Atractus ecuadorensis, the Ecuadorean ground snake, is a species of snake in the family Colubridae. The species can be found in Ecuador. References Atractus Reptiles of Ecuador Endemic fauna of Ecuador Reptiles described in 1955 Taxa named by Jay M. Savage
The Face Removed (German: Das ausgeschnittene Gesicht) is a 1920 German silent crime film directed by Franz Seitz and starring Carla Ferra and Ernst Rückert. The film's sets were designed by the art director August Rinaldi. Cast Carla Ferra as Estella Heinrich Peer as Detective Fogg Ernst Rückert Max Weydner as Harry Davis References Bibliography Grange, William. Cultural Chronicle of the Weimar Republic. Scarecrow Press, 2008. External links 1920 films Films of the Weimar Republic Films directed by Franz Seitz German silent feature films 1920 crime films German crime films German black-and-white films 1920s German films 1920s German-language films
```shell
#!/bin/sh
# Report the 1-minute load average plus the full `uptime` line, in the
# key=value format expected by the monitoring agent.  Always exits 0.
#
# FIX: the original duplicated the identical uptime/sed/awk pipeline in
# five OS branches and wrote a "loadtemp" scratch file into the current
# directory that was never removed (and was read even when no branch
# matched).  All branches are collapsed into one `case` and the temp
# file is replaced by command substitution.

os=$(uname)
load=0
uptime_out=$(uptime)

case "$os" in
    linux|Linux|SunOS|HP-UX|AIX|SCO_SV|UnixWare)
        # On these platforms the 1-minute average is the third field from
        # the end of the (first) uptime line; keep at most 4 characters,
        # e.g. "0.52," -> "0.52".
        load=$(printf '%s\n' "$uptime_out" | awk 'NR==1{print substr($(NF-2),1,4)}')
        ;;
esac

echo "Load Average=$load"
echo "status_text=Uptime:$uptime_out"
exit 0
```
```qml
/*
 * A minimal push-button built from a Rectangle.
 *
 * Authors:
 *  Michael Vogt <mvo@ubuntu.com>
 *  Olivier Tilloy <olivier@tilloy.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * the Free Software Foundation; version 3.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *
 * along with this program. If not, see <path_to_url
 */

import QtQuick 1.0

Rectangle {
    id: button

    // Caption displayed in the centre of the button.
    property string text: "ButtonText"

    // Emitted whenever the button is clicked.
    signal clicked

    // Size the button to its caption plus a small margin on each side.
    width: label.width + 10
    height: label.height + 10
    radius: 4

    border.width: 1
    border.color: palette.shadow

    // Hover highlight: lighten while the pointer is over the button but
    // not pressed; otherwise use the normal button colour.
    color: (clickArea.containsMouse && !clickArea.pressed)
               ? palette.light
               : palette.button

    // System colour scheme used for all colours above.
    SystemPalette { id: palette }

    Text {
        id: label
        anchors.centerIn: parent
        text: button.text
        color: palette.buttonText
    }

    MouseArea {
        id: clickArea
        anchors.fill: parent
        hoverEnabled: true
        onClicked: button.clicked()
    }
}
```
The following lists events that happened during 1917 in New Zealand. Incumbents Regal and viceregal Head of State – George V Governor – Arthur Foljambe, 2nd Earl of Liverpool, until 28 June Governor-General – Arthur Foljambe, 2nd Earl of Liverpool, from 28 June Government The 19th New Zealand Parliament continues as a grand coalition led by the Reform Party. The general election due this year is deferred because of World War I. Speaker of the House – Frederic Lang (Reform Party) Prime Minister – William Massey (Reform Party) Minister of Finance – Joseph Ward Parliamentary opposition Leader of the Opposition – Joseph Ward (Liberal Party). Ward retains the title even though he is part of the coalition government. Judiciary Chief Justice – Sir Robert Stout Main centre leaders Mayor of Auckland – James Gunson Mayor of Wellington – John Luke Mayor of Christchurch – Henry Holland Mayor of Dunedin – James Clark Events April – The first Caudron biplane purchased by Henry Wigram for the Canterbury Aviation Company arrives. 1 May – The New Zealand Rifle Brigade (Earl of Liverpool's Own) is formed as the 3rd Brigade of the New Zealand Division, part of the New Zealand Expeditionary Force. 7 May – Cecil McKenzie Hill makes the first flight for the Canterbury Aviation Company. June – Pilot training by the Canterbury Aviation Company commences at Sockburn. 24 September – Ten New Zealand are soldiers killed in England in the Bere Ferrers rail accident. 20 October – 850 New Zealand are soldiers killed in the Second Battle of Passchendaele, the greatest loss of life in a single day in the military history of New Zealand. 1 December – Six o’clock closing of hotel bars is introduced as a wartime measure. Undated "Extraordinary and continuous rainfall" throughout the year causes "enormous damage to roads and bridges", and "the country became waterlogged", according to the Public Works Statement. The West Coast Times, established in 1865, is merged into the Hokitika Guardian and Star. 
Arts and literature See 1917 in art, 1917 in literature, :Category:1917 books Music See: 1917 in music Film See: :Category:1917 film awards, 1917 in film, List of New Zealand feature films, Cinema of New Zealand, :Category:1917 films Sport Golf The New Zealand Open championship and National Amateur Championships are not held due to the war. Horse racing Harness racing New Zealand Trotting Cup – Adelaide Direct Auckland Trotting Cup – Steel Bell (2nd win) Thoroughbred racing New Zealand Cup – Meelaus Auckland Cup – Fiery Cross Wellington Cup – Bunting New Zealand Derby – Estland Lawn bowls The national outdoor lawn bowls championships are held in Wellington. Men's singles champion – C.R. Ingram (Wellington Bowling Club) Men's pair champions – A. Sawyer, J.J. Martin (skip) (Turanganui Bowling Club) Men's fours champions – J.S. Ryrie, A.R. Coltman, W. Coltman, G.S. Osmond (skip) (Auckland Bowling Club) Rugby union The Ranfurly Shield (held by ) is not contested as interprovincial matches are cancelled due to the war. 
Soccer Provincial league champions: Auckland – Brotherhood Canterbury – Linwood Hawke's Bay – Waipukurau Otago – Northern Southland – No competition Wanganui – No competition Wellington – No competition Births January 13 January – Doris Strachan, athlete 19 January – Agnes Ell, cricketer 20 January – Emily Carpenter, home science academic, adult educationalist, consumer advocate 25 January – Rosalie Gascoigne, sculptor 27 January – John Pattison, World War II pilot 28 January – Jack Hatchard, association footballer 31 January Erich Geiringer, writer, doctor, anti-nuclear weapons activist Frank Gill, air force officer, politician February 19 February Morrie McHugh, boxer, rugby union player Peg Taylor, cricketer 26 February – Clyde Jeffery, politician, mayor of Napier (1974–83) March 1 March – Bill Sutton, artist 9 March – Clarrie Gordon, boxer 10 March – Tom Pritchard, cricketer 20 March – Haddon Donald, soldier, politician, sports shooter 22 March – Phil Holloway, politician 26 March – Ruth Gilbert, poet April 13 April – Bruce Ferguson, soldier 18 April – Brian Mason, geochemistry, mineralogist, meteoriticist May 6 May – Roy Scott, cricketer 21 May – Margaret Milne, potter 22 May – Charlie Munro, jazz musician June 10 June – Jack Henry, industrialist 11 June – Tom Davis, Cook Islands politician 25 June – Nora Crawford, police officer July 1 July – Maurice Carter, property developer, politician, philanthropist 6 July – Arthur Lydiard, runner, athletics coach 7 July – John Crichton, furniture and interior designer 13 July – Frank Carpay, ceramics, textile and graphic designer 14 July – Doug Zohrab, public servant, diplomat 19 July – Lewis Johnston, cricket umpire 21 July – Jock Newall, association footballer 23 July – Douglas Goodfellow, businessman, philanthropist 27 July – Ron Meek. 
economist and social scientist 31 July – Derek Ward, World War II pilot August 1 August – Esme Tombleson, politician 3 August – Eddie Isbey, politician 7 August – Arthur Cresswell, cricketer 24 August – Ruth Park, writer September 2 September – Jack Scholes, sailor 6 September – Cecil Hight, World War II pilot 7 September – Ewen Solon, actor 16 September – David Lewis, sailor, Polynesian scholar 23 September – Wiremu Te Tau Huata, Anglican priest, military chaplain 26 September – James Coe, artist, art teacher, industrial designer, ergonomist 30 September – Denis Rogers, politician, mayor of Hamilton (1959–68) October 2 October – Rosaleen Norton, artist, occultist 17 October Martin Donnelly, cricketer, rugby union player John Oswald Sanders, missionary 18 October – Roy White, rugby union player 31 October – Evan Mackie, World War II pilot November 6 November – Henry Walters, cricketer 17 November – Tom Larkin, public servant, diplomat 25 November – Paul Beadle, sculptor, medallist December 2 December – Betty Batham, marine biologist 7 December – Bert Roth, librarian, historian 8 December – Alan Stewart, rugby union player, university administrator 11 December – Owen Snedden, Roman Catholic bishop 12 December – Alan Deere, military pilot, author 13 December – Keith Hay, construction company founder, politician, conservative activist 24 December – Ronald Triner, road cyclist Deaths January–February 3 February – Robert McNab, politician (born 1864) 17 February Graham Gow, government trade representative (born 1850) Sir George McLean, politician (born 1834) 22 February – Hugh Murray-Aynsley, politician (born 1828) March–April 6 March Tame Parata, politician (born 1837) William Salmond, Presbyterian minister, theologian (born 1835) 11 March – William Hosking, doctor (born 1841) 13 March – Percy Dix, vaudeville company manager (born 1866) 27 March – Joseph Braithwaite, bookseller, politician, mayor of Dunedin (1905–06) (born 1848) 30 March – Ferdinand Holm, mariner, ship 
owner (born 1844) 23 April – Robert Bruce, politician, conservationist (born 1843) May–June 2 May – Alfred Lee Smith, politician (born 1838) 7 June Bill Bussell, rugby league player (born 1887) George Sellars, rugby union player (born 1886) 8 June George Bollinger, soldier, diarist (born 1890) Charles Henry Brown, military leader (born 1872) Thomas Culling, World War I flying ace (born 1896) 22 June – John Lecky, rugby union player (born 1863) July–August 8 July – Alexander McKay, geologist (born 1841) 14 July Robert Batley, storekeeper, sheep farmer (born 1849) Alexander Bruce, politician (born 1839) 15 July – Bill Mackrell, rugby union and rugby league player (born 1881) 23 July – James Gore, politician, mayor of Dunedin (1881–82) (born 1834) 27 July – Arthur Brown, Mayor of Wellington 30 July – William Baldwin, politician (born 1836) 31 July – William Henry Dillon Bell, politician (born 1884) 4 August Purakau Maika, newspaper editor and publisher (born 1852) Cecil Perry, cricketer (born 1846) 5 August – Don Buck, gum digger (born 1869) 6 August – Charles James, rugby league player (born 1891) 7 August – Francis Earl Johnston, army officer (born 1871) 14 August – William Sanders, naval officer, Victoria Cross recipient (born 1883) 24 August – Alfred Kidd, politician, mayor of Auckland (1901–03) (born 1851) 26 August – William Lane, journalist, utopian (born 1861) September–October 4 October – Dave Gallaher. 
rugby union player (born 1873) 6 October – John Davies Ormond, politician (born 1831) 12 October Henry Du Vall, rugby league player (born 1886) George Augustus King, military officer (born 1885) 20 October – Elise Kemp, nurse (born 1881) 22 October – Bob Fitzsimmons, boxer (born 1863) 27 October – William Beehan, politician (born 1853) November–December 10 November – Charles King, cricketer (born 1847) 13 November – Cecil Fitzroy, politician, mayor of Hastings (1894–99) (born 1844) 15 November – Frank Twisleton, soldier, writer (born 1873) 29 November – Ellen Greenwood, schoolteacher, social worker (born 1837) 12 December – Sir Charles Bowen, politician (born 1830) 14 December – George Wilson, cricketer (born 1887) 23 December – Clive Franklyn Collett, World War I flying ace (born 1886) See also History of New Zealand List of years in New Zealand Military history of New Zealand Timeline of New Zealand history Timeline of New Zealand's links with Antarctica Timeline of the New Zealand environment References
Scout is an American dystopian comic book series created and written by Timothy Truman, and first published by Eclipse Comics in 1985. The story stars a Native American Apache named Emanuel Santana. The setting of the series is a future United States which has become a Third World country. Creation Timothy Truman first met Eclipse publisher Dean Mullaney and editor-in-chief Cat Yronwode through mutual acquaintance Tom Yeates. At the time Truman's association with First Comics was winding down as he was beginning to tire of late payments, and after a positive experience publishing the one-shot Killer Tales by Tim Truman (compiled from material created for Pacific Comics) he was impressed by their standards of creator ownership and struck a deal with them to publish new creation Scout. A student of Native American history, Truman based Santana's origin on 1890s reeducation camps, particularly Camp Falwell in Carlisle, Pennsylvania, and also drew inspiration from Forrest Carter's Cry, Geronimo!, a fictionalized biography of Geronimo, and Apache mythical beliefs. He cited the influence of European artists including Paolo Serpieri, Hugo Pratt, Juan Zanotto and Jordi Bernet on his style for Scout'''s visuals. Truman's love of music also shaped the story; the characters Rosana 'Rosa' Winter and Raymond Vaughn were named for two of Truman's favourite blues guitarists - Johnny Winter and Stevie Ray Vaughan, while issue titles referenced some of his favourite songs, including "Evil" by Howlin' Wolf, "Little Red Rooster" and "I Ain't Superstitious" by Willie Dixon and "Machine Gun" by Jimi Hendrix. Truman intentionally made it ambiguous as to whether Santana was truly communicating with Apache spirits or just under the influence of drugs administered by his former employers. Publication history Scout Twenty-four issues of the first series were published, initially with John K. Snyder III's Fashion in Action as a back-up strip. 
The series started off as a bimonthly title before becoming a monthly from the fourth issue onwards. The series was initially planned as a six-issue limited series but positive response saw it instead become an ongoing. Truman would then loosely plot the series in lengthy arcs of around 12 issues a time. He largely wrote and drew the series himself; however Scout #7 saw Yeates fill in on art duties. In place of a backup feature, Scout #10 featured a gallery of pinups of characters from the series by the 'XQBs' (ex-Kubies, a nickname for Truman's fellow Joe Kubert School of Illustration alumni). From #11 the backup became Truman's own Monday, the Eliminator with art by Flint Henry; the story was set in the Scout universe. When Eclipse attempted to create a shared universe for some of its titles, Truman and Eclipse decided Scout's alternate future setting made it unsuitable; the characters were also absent from cross-property series Total Eclipse for the same reason. However, Scout #17 did feature a brief unlikely crossover with Larry Marder's Tales of the Beanworld. Issue 15 saw Rick Veitch and Steve Bissette provide guest art, and also saw the debut of the backup story Swords of Texas, which tied into the main Scout storyline and was drawn by Ben Dunn. Issue #16 was converted to 3D by Ray Zone, using stereoscopy technology. Truman also recorded a theme tune for the series with his side project band The Dixie Pistols, and a flexi disc containing the track "Blues Crusade" was bundled with Scout #19. Swords of Texas and New America In 1987 Truman and colleague Chuck Dixon set up 4Winds Productions, a joint studio and packaging operation. Scout continued to be distributed through Eclipse, and to fresh the title and advance the narrative Truman decided to end the title after 24 issues and take a four-month sabbatical. 
During this period he oversaw a pair of linked 4-issue limited series set in the Scout universe; Swords of Texas was written by Dixon with art by Dunn, and concerned the adventures of the titular smugglers, previously introduced in a backup strip. New America was written by John Ostrander and Kim Yale with art by Gary Kwapisz; the series covered a 10-year period through the eyes of supporting character Rosa Winter. A short limited edition comic depicting Santana's wedding was included with the Dixie Pistols' album Marauder. Scout: War Shaman These titles were followed by Truman's return in Scout: War Shaman, a six-weekly ongoing picking up the main character's story over a decade after the events of Scout #24, with the lead now a widowed father of two. To promote the new series, retailers were encouraged to create a display for the series in order to win original Truman artwork. The first issue reached #98 on Diamond Comic Distributors' chart in January 1988, a solid performance for an Eclipse title. Truman also produced the Scout Handbook, a collection of profiles, maps and other material, while Eclipse collected Scout #1-7 in the trade paperback Scout - The Four Monsters. While War Shaman #2 was delayed due to colourist Sam Parsons falling ill, War Shaman became monthly from #3. Scout: War Shaman #8-9 featured the character Beau La Duke (a fictionalised version of Eclipse sales manager Beau Smith), who then appeared in a backup strip in #14-16. As planned by Truman, the series ended after 16 issues, ending with Rosa killing Scout. Truman stated that Santana would not be resurrected, with the series instead slated to continue with the wider supporting cast he had built up. Marauder and Blue Leader A third book called Scout: Marauder was planned to succeed War Shaman, while Eclipse also produced a second trade called Scout: Mount Fire, compiling #8-14 of the first series. 
Truman planned a two-year break from Scout while he worked on other projects; however, Eclipse folded in 1994 before any new material could be published. As Truman retained ownership of Scout he was eventually able to seek out other publishers for Marauder and the planned fourth and final storyline, Blue Leader. Between 2006 and 2008 Dynamite Entertainment produced trade paperbacks, printing the first two-thirds of the first Scout series. Truman subsequently raised funds to continue the story himself via Kickstarter. work on Scout: Marauder as a full-length graphic novel is ongoing. Plot At the end of the 20th century, a history of ecological excesses has led other nations to levy vast sanctions against the USA for stealing world resources. Emanuel Santana is born in an Apache reservation in New Mexico's White Mountains before being taken away as a 'Schoolboy' - a term for teenage recruits effectively conscripted for the National Guard. Finding the values imparted to be contrary to those of his people, in 1999 Santana escapes after two years and uses the military knowledge gained from the programme to combat the Great Monsters of the Apache after receiving a vision. The four Great Monsters have taken on new forms in pastoral America - Owl Giant Man is a misogynistic pornographer; the Buffalo Monster is now Chippy Waltz, comedian turned Secretary of Agriculture; Antelope Monster runs a vast, controlling media empire; and Eagle Monster is an oil baron based on an airship. Meanwhile Santana is being hunted by two former Schoolboy colleagues, Rossana Winter and Raymond Vaughn. He is aided by Missy, a 17-year old previously employed by the Owl Giant Man, and Gahn, his Apache spirit guide. Reception Author Michael A. Sheyahshe noted in Native Americans in Comic Books – A Critical Study, that "Scout is presented in a respectful and genuine manner with tribally specific cultural ties". 
Another aspect of Scout to draw attention was the inclusion of a non-sensationalist lesbian relationship between Rosa Winter and US President Laura Carver, still a rare phenomenon in mainstream comics at the time. In 1988, Truman would also state that another supporting character - Avner Glansman - was gay but it had yet to become relevant to the plot. Reviewing the first issue for Amazing Heroes, R.A. Jones praised most of the book - aside from the characterisation of Gahn, comparing the character to a "Jewish uncle" and feeling he undercut the tone of the rest of the book. Scout was shortlisted for 'Best New Title' at the 1986 Kirby Awards, losing out to fellow Eclipse series Miracleman''. Truman's art for the title also gained a nomination for the same year's Comics Buyer's Guide Awards. Collected editions Eclipse Comics Dynamite Entertainment Film adaptation In October 2016, Christopher MacBride was set to adapt and direct for the big screen for Studio 8. Jon Silk and Hell or High Water’s Braden Aftergood were to produce the film and Truman serving as a consultant. References External links American comics 1985 comics debuts 1987 comics endings Comics characters introduced in 1985 Comics set in Texas Dystopian comics Eclipse Comics titles Fictional Apache people Fictional Native American people in comics LGBT-related comics
Pratibha Paul is an Indian television actress, who appears in television series like Dil Dosti Dance, Bhanwar, Bharat Ka Veer Putra – Maharana Pratap and Aahat (season 6). Television Channel V's Dil Dosti Dance Sony TV's Bhanwar, Bharat Ka Veer Putra – Maharana Pratap and Aahat (season 6) References Indian television actresses Actresses in Hindi television Living people Year of birth missing (living people)
[[Image:Maison de Verre.JPG|thumb|right|266px|Maison de Verres umbrella stand (at the entry) typical of the hand-crafted machine looking interior]] The Maison de Verre (French for House of Glass''') was built from 1928 to 1932 in Paris, France. Constructed in the early modern style of architecture, the house's design emphasized three primary traits: honesty of materials, variable transparency of forms, and juxtaposition of "industrial" materials and fixtures with a more traditional style of home décor. The primary materials used were steel, glass, and glass block. Some of the notable "industrial" elements included rubberized floor tiles, bare steel beams, perforated metal sheet, heavy industrial light fixtures, and mechanical fixtures. The design was a collaboration among Pierre Chareau (a furniture and interiors designer), Bernard Bijvoet (a Dutch architect working in Paris since 1927) and Louis Dalbet (craftsman metalworker). Much of the intricate moving scenery of the house was designed on site as the project developed. The historian Henry-Russel Hitchcock as well as the designer Eileen Gray have declared that the architect was in fact 'that clever Dutch engineer (Bijvoet)'(Gray). The external form is defined by translucent glass block walls, with select areas of clear glazing for transparency. Internally, spatial division is variable by the use of sliding, folding or rotating screens in glass, sheet or perforated metal, or in combination. Other mechanical components included an overhead trolley from the kitchen to dining room, a retracting stair from the private sitting room to Mme Dalsace's bedroom and complex bathroom cupboards and fittings. The program of the home was somewhat unusual in that it included a ground-floor medical suite for Dr. Jean Dalsace. This variable circulation pattern was provided for by a rotating screen that hid the private stairs from patients during the day but framed the stairs at night. 
The house is notable for its splendid architecture, but it may be more well known for another reason. It was built on the site of a much older building that the patron had purchased and intended to demolish. Much to his or her chagrin, however, the elderly tenant on the top floor of the building absolutely refused to sell, and so the patron was obliged to completely demolish the bottom three floors of the building and construct the Maison de Verre underneath, all without disturbing the original top floor. Dr. Dalsace was a member of the French Communist Party who played a significant role in both anti-fascist and cultural affairs. In the mid-1930s, the Maison de Verre's double-height "salle de séjour" was transformed into a salon regularly frequented by Marxist intellectuals like Walter Benjamin as well as by Surrealist poets and artists such as Louis Aragon, Paul Éluard, Pablo Picasso, Max Ernst, Jacques Lipchitz, Jean Cocteau, Yves Tanguy, Joan Miró and Max Jacob. According to the American art historian Maria Gough, the Maison de Verre had a powerful influence on Walter Benjamin, especially on his constructivist - rather than expressionist - reading of Paul Scheerbart's utopian project for a future "culture of glass", for a "new glass environment [which] will completely transform mankind," as the latter expressed it in his 1914 treatise Glass Architecture. See in particular Benjamin's 1933 essay Erfahrung und Armut'' ("Experience and Poverty"). American architectural historian Robert Rubin bought the house from Dalsace family in 2006 to restore it and use it for his family residence. He allows a limited number of tours to the house. Notes References External links Michael Carapetian, 100 x Maison de verre, Brinkmann und Bose, Berlin 2016, Nicolai Ouroussoff's article in The New York Times, "The Best House in Paris", 26 August 2007 Article: "A Serious Point of Departure" - La Maison de la Rue St. 
Guillaume "Paris’s Luminous Secret" WSJ Article, 18 July 2009 Video tour of Maison de Verre Maison de Verre Buildings and structures in the 7th arrondissement of Paris Houses in Paris Modernist architecture in France Buildings and structures completed in 1932
The United Nations Office at Geneva (UNOG, ) in Geneva, Switzerland, is one of the four major offices of the United Nations where numerous different UN agencies have a joint presence. The main UNOG administrative offices are located inside the Palais des Nations complex, which was originally constructed for the League of Nations between 1929 and 1938. Besides United Nations administration, the Palais des Nations also hosts the offices for a number of programmes and funds such as the United Nations Conference on Trade and Development (UNCTAD), the United Nations Office for the Coordination of Humanitarian Affairs (OCHA) and the United Nations Economic Commission for Europe (ECE). The United Nations and its specialized agencies, programmes and funds may have other offices or functions hosted outside the Palais des Nations, normally in office spaces provided by the Swiss Government. UN specialised agencies and other UN entities with offices in Geneva hold bi-weekly briefings at the Palais des Nations, organized by the United Nations Information Service at Geneva. UNOG produces an annual report where it lists all major events and activities that happened through a year. 
Constituent agencies Headquartered in Geneva Conference on Disarmament International Bureau of Education International Computing Centre International Labour Organization International Organization for Migration International Trade Centre International Telecommunication Union Joint Inspection Unit Joint United Nations Programme on HIV/AIDS Office of the United Nations High Commissioner for Human Rights United Nations Chief Executives Board for Coordination United Nations Compensation Commission United Nations Conference on Trade and Development United Nations Economic Commission for Europe United Nations High Commissioner for Refugees United Nations Human Rights Council (see also United Nations Commission on Human Rights) United Nations Institute for Disarmament Research United Nations Institute for Training and Research United Nations Non-Governmental Liaison Service United Nations Office for the Coordination of Humanitarian Affairs United Nations Office on Sport for Development and Peace United Nations Research Institute For Social Development World Health Organization World Intellectual Property Organization World Meteorological Organization World Trade Organization Presence at Geneva Food and Agriculture Organization of the United Nations - FAO (headquarters in Rome) International Atomic Energy Agency (headquarters are in Vienna) United Nations Environment Programme (headquarters are in Nairobi) United Nations Educational, Scientific and Cultural Organization (headquarters are in Paris) United Nations Industrial Development Organization (headquarters are in Vienna) World Food Programme (headquarters are in Rome) United Nations World Tourism Organization (headquarters in Madrid) Directors-general Wladimir Moderow, Poland, 1946–1951 Adriaan Pelt, Netherlands, 1952–1957 Pier Pasquale Spinelli, Italy, 1957–1968 Vittorio Winspeare-Guicciardi, Italy, 1968–1978 Luigi Cottafavi, Italy, 1978–1983 Eric Suy, Belgium, 1983–1987 Jan Mårtenson, Sweden, 1987–1992 Antoine 
Blanca, France, 1992–1993 Vladimir Petrovsky, Russia, 1993–2002 Sergei Ordzhonikidze, Russia, 2002–2011 Kassym-Jomart Tokayev, Kazakhstan, 2011–2013 Michael Møller, Denmark, 2013–2019 Tatiana Valovaya, Russia, 2019–present Administrative history United Nations Geneva Office, from beginning, Aug 1946 – Apr 1947, (IC/Geneva/1) European Office of the UN, 11 Apr 1947 – 10 Aug 1948, (IC/Geneva/49) United Nations Office at Geneva, 10 Aug 1948 – 9 Aug 1949, (IC/Geneva/152) European Office of the UN, 9 Aug 1949 – 8 Dec 1957, (SGB/82/Rev.1) United Nations Office at Geneva, 8 December 1957 – present, (SGB/82/Rev.2) See also Headquarters of the United Nations (New York City) United Nations Information Service at Geneva United Nations Office at Vienna United Nations Office at Nairobi Outline of the United Nations List of United Nations organizations by location List of international organizations based in Geneva Notes References Bibliography Joëlle Kuntz, Geneva and the Call of Internationalism: A History, Éditions Zoé, 2011, 96 pages (). External links Official website UN Geneva information United Nations organisations in Geneva UN Geneva Annual Report Organisations based in Geneva Diplomatic buildings United Nations properties
```c #include "oily_png_ext.h" void oily_png_check_size_constraints(long self_width, long self_height, long other_width, long other_height, long offset_x, long offset_y){ // For now, these raise a standard runtime error. They should however raise custom exception classes (OutOfBounds) if(self_width < other_width + offset_x){ rb_raise(rb_eRuntimeError, "Background image width is too small!"); } if(self_height < other_height + offset_y){ rb_raise(rb_eRuntimeError, "Background image height is too small!"); } } VALUE oily_png_compose_bang(int argc, VALUE *argv, VALUE self) { // Corresponds to the other image(foreground) that we want to compose onto this one(background). VALUE other; // The offsets are optional arguments, so these may or may not be null pointers. // We'll prefix them with 'opt' to identify this. VALUE opt_offset_x; VALUE opt_offset_y; // Scan the passed in arguments, and populate the above-declared variables. Notice that '12' // specifies that oily_png_compose_bang takes in 1 required parameter, and 2 optional ones (the offsets) rb_scan_args(argc, argv, "12", &other,&opt_offset_x,&opt_offset_y); // Regardless of whether offsets were provided, we must specify a default value for them since they will // be used in calculating the position of the composed element. long offset_x = 0; long offset_y = 0; // If offsets were provided, then the opt_offset_* variables will not be null pointers. FIXNUM_P checks // whether they point to a fixnum object. If they do, then we can safely assign our offset_* variables to the values. if(FIXNUM_P(opt_offset_x)){ offset_x = FIX2LONG(opt_offset_x); } if(FIXNUM_P(opt_offset_y)){ offset_y = FIX2LONG(opt_offset_y); } // Get the dimension data for both foreground and background images. 
long self_width = FIX2LONG(rb_funcall(self, rb_intern("width"), 0)); long self_height = FIX2LONG(rb_funcall(self, rb_intern("height"), 0)); long other_width = FIX2LONG(rb_funcall(other, rb_intern("width"), 0)); long other_height = FIX2LONG(rb_funcall(other, rb_intern("height"), 0)); // Make sure that the 'other' image fits within the current image. If it doesn't, an exception is raised // and the operation should be aborted. oily_png_check_size_constraints( self_width, self_height, other_width, other_height, offset_x, offset_y ); // Get the pixel data for both the foreground(other) and background(self) pixels. VALUE* bg_pixels = RARRAY_PTR(rb_funcall(self, rb_intern("pixels"), 0)); VALUE* fg_pixels = RARRAY_PTR(rb_funcall(other, rb_intern("pixels"), 0)); long x = 0; long y = 0; long bg_index = 0; // corresponds to the current index in the bg_pixels array. for( y = 0; y < other_height; y++ ){ for( x = 0; x < other_width; x++ ){ // We need to find the value of bg_index twice, so we only calculate and store it once. bg_index = ( x + offset_x ) + ( y + offset_y ) * self_width; // Replace the background pixel with the composition of background + foreground bg_pixels[bg_index] = UINT2NUM( oily_png_compose_color( NUM2UINT( fg_pixels[x+ y * other_width] ), NUM2UINT( bg_pixels[bg_index] ) ) ); } } return self; } VALUE oily_png_replace_bang(int argc, VALUE *argv, VALUE self) { // Corresponds to the other image(foreground) that we want to compose onto this one(background). VALUE other; // The offsets are optional arguments, so these may or may not be null pointers. // We'll prefix them with 'opt' to identify this. VALUE opt_offset_x; VALUE opt_offset_y; // Scan the passed in arguments, and populate the above-declared variables. 
Notice that '12' // specifies that oily_png_compose_bang takes in 1 required parameter, and 2 optional ones (the offsets) rb_scan_args(argc, argv, "12", &other,&opt_offset_x,&opt_offset_y); // Regardless of whether offsets were provided, we must specify a default value for them since they will // be used in calculating the position of the composed element. long offset_x = 0; long offset_y = 0; // If offsets were provided, then the opt_offset_* variables will not be null pointers. FIXNUM_P checks // whether they point to a fixnum object. If they do, then we can safely assign our offset_* variables to the values. if(FIXNUM_P(opt_offset_x)){ offset_x = FIX2LONG(opt_offset_x); } if(FIXNUM_P(opt_offset_y)){ offset_y = FIX2LONG(opt_offset_y); } // Get the dimension data for both foreground and background images. long self_width = FIX2LONG(rb_funcall(self, rb_intern("width"), 0)); long self_height = FIX2LONG(rb_funcall(self, rb_intern("height"), 0)); long other_width = FIX2LONG(rb_funcall(other, rb_intern("width"), 0)); long other_height = FIX2LONG(rb_funcall(other, rb_intern("height"), 0)); // Make sure that the 'other' image fits within the current image. If it doesn't, an exception is raised // and the operation should be aborted. oily_png_check_size_constraints( self_width, self_height, other_width, other_height, offset_x, offset_y ); // Get the pixel data for both the foreground(other) and background(self) pixels. VALUE* bg_pixels = RARRAY_PTR(rb_funcall(self, rb_intern("pixels"), 0)); VALUE* fg_pixels = RARRAY_PTR(rb_funcall(other, rb_intern("pixels"), 0)); long x = 0; long y = 0; long bg_index = 0; // corresponds to the current index in the bg_pixels array. for( y = 0; y < other_height; y++ ){ for( x = 0; x < other_width; x++ ){ // We need to find the value of bg_index twice, so we only calculate and store it once. 
bg_index = ( x + offset_x ) + ( y + offset_y ) * self_width; // Replace the background pixel with the composition of background + foreground bg_pixels[bg_index] = fg_pixels[x+ y * other_width]; } } return self; } ```
Members Only was an American hip hop collective from Broward County, Florida, formed in 2014. It was originally only a duo consisting of Jahseh Onfroy (XXXTentacion) and Stokeley Goulbourne (Ski Mask the Slump God) after the two met in a juvenile detention center. It closely associated with another collective named Very Rare, and members of both collectives also refer to themselves as VR All-Stars. Following the release of their fourth project, Members Only, Vol. 4, the collective has gone on an indefinite hiatus, with some of its most prominent members such as Craig Xen and Wifisfuneral exiting the group. Founding member XXXTentacion died in 2018 due to a robbery which resulted in X being murdered in broad daylight. All four perpetrators involved have been found guilty and sentenced, with three of them receiving life sentences without parole. He was 20 years old. On May 16, 2019, Khaed, a producer for the group, died due to ongoing heart complications. He was 26 years old. On April 24, 2022, former member Tablez died to an accidental drug overdose. 
Members Current members absentwill (2015–present) Bass Santana (2015–present) Flyboy Tarantino (2015–present) Kid Trunks (2015–present) Kin$oul (2015–present) Kid Pronto (2015-present) Danny Towers (2015-present) ikabodVEINS (2015-present) Kilo Junior (2015-present) SB (2015–present) Bhris (2015–present) PRXZ (2015–present) Robb Banks (2016–present) DJ Scheme (2016-present) Tankhead666 (2016–present) Icecat Laflare (2016–present) Kidway (2017-present) Trippie Redd (2017-present) Cujo (2018-present) Reddz (2018-present) Ratchet Roach (2018-present) Former members XXXTentacion (2014–2018; his death) Ski Mask the Slump God (2014-2019) Tablez (2015–2016; died 2022) Wifisfuneral (2015–2017) Fukkit (2015–2017) ElGato (2015-2018) Khaed (2015–2019; his death) Killstation (2015-2019) Craig Xen (2015–2019) Cooliecut (2016–2019) Rawhool Mane (2018-2019) Discography Studio albums Mixtapes Extended plays Singles Tours The Revenge Tour (2017) Members Only vs. the World (2019) See also East Coast hip hop XXXTentacion discography Ski Mask the Slump God discography List of East Coast hip hop artists References Hip hop collectives Trap musicians Emo rap musicians
McDonald Elementary School may refer to: McDonald Elementary School, Central Valley School District, Washington, United States McDonald Elementary School, Georgetown County School District, South Carolina, United States McDonald Elementary School, Mohawk, Tennessee, United States F. A. McDonald Elementary School, Seattle, Washington, United States (1914-1981), now reopened as McDonald International School See also: Macdonald Elementary School (disambiguation)
```c
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <signal.h>

#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"

/* Red-black tree of every started uv_signal_t across all loops, keyed by
 * (signum, loop, address); see uv__signal_compare below. */
RB_HEAD(uv_signal_tree_s, uv_signal_s);

static struct uv_signal_tree_s uv__signal_tree =
    RB_INITIALIZER(uv__signal_tree);

/* Guards uv__signal_tree; taken both from loop threads and from the console
 * control handler thread that Windows spawns. */
static CRITICAL_SECTION uv__signal_lock;

static BOOL WINAPI uv__signal_control_handler(DWORD type);

int uv__signal_start(uv_signal_t* handle,
                     uv_signal_cb signal_cb,
                     int signum,
                     int oneshot);

/* One-time process-wide initialization: set up the tree lock and install the
 * console control handler that translates console events into signals. */
void uv_signals_init(void) {
  InitializeCriticalSection(&uv__signal_lock);
  if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE))
    abort();
}

/* Total order over signal watchers used by the red-black tree. */
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
  /* Compare signums first so all watchers with the same signum end up
   * adjacent. */
  if (w1->signum < w2->signum) return -1;
  if (w1->signum > w2->signum) return 1;

  /* Sort by loop pointer, so we can easily look up the first item after
   * { .signum = x, .loop = NULL }. */
  if ((uintptr_t) w1->loop < (uintptr_t) w2->loop) return -1;
  if ((uintptr_t) w1->loop > (uintptr_t) w2->loop) return 1;

  /* Fall back to the watcher's own address so distinct watchers never
   * compare equal. */
  if ((uintptr_t) w1 < (uintptr_t) w2) return -1;
  if ((uintptr_t) w1 > (uintptr_t) w2) return 1;

  return 0;
}

RB_GENERATE_STATIC(uv_signal_tree_s,
                   uv_signal_s,
                   tree_entry,
                   uv__signal_compare)


/*
 * Dispatches signal {signum} to all active uv_signal_t watchers in all loops.
 * Returns 1 if the signal was dispatched to any watcher, or 0 if there were
 * no active signal watchers observing this signal.
 */
int uv__signal_dispatch(int signum) {
  uv_signal_t lookup;
  uv_signal_t* handle;
  int dispatched;

  dispatched = 0;

  EnterCriticalSection(&uv__signal_lock);

  /* loop == NULL sorts before every real watcher with this signum, so
   * RB_NFIND lands on the first matching watcher. */
  lookup.signum = signum;
  lookup.loop = NULL;

  for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup);
       handle != NULL && handle->signum == signum;
       handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) {
    /* Record the pending signal; a non-zero previous value means a completion
     * request is already queued for this handle. */
    unsigned long previous = InterlockedExchange(
            (volatile LONG*) &handle->pending_signum, signum);

    /* A one-shot watcher that has already fired must not fire again. */
    if (handle->flags & UV_SIGNAL_ONE_SHOT_DISPATCHED)
      continue;

    if (!previous) {
      POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req);
    }

    dispatched = 1;
    if (handle->flags & UV_SIGNAL_ONE_SHOT)
      handle->flags |= UV_SIGNAL_ONE_SHOT_DISPATCHED;
  }

  LeaveCriticalSection(&uv__signal_lock);

  return dispatched;
}

/* Console control handler (runs on a Windows-created thread): maps console
 * events onto the synthetic signals that watchers can observe. */
static BOOL WINAPI uv__signal_control_handler(DWORD type) {
  switch (type) {
    case CTRL_C_EVENT:
      return uv__signal_dispatch(SIGINT);

    case CTRL_BREAK_EVENT:
      return uv__signal_dispatch(SIGBREAK);

    case CTRL_CLOSE_EVENT:
      if (uv__signal_dispatch(SIGHUP)) {
        /* Windows will terminate the process after the control handler
         * returns. After that it will just terminate our process. Therefore
         * block the signal handler so the main loop has some time to pick up
         * the signal and do something for a few seconds. */
        Sleep(INFINITE);
        return TRUE;
      }
      return FALSE;

    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      /* These signals are only sent to services. Services have their own
       * notification mechanism, so there's no point in handling these. */

    default:
      /* We don't handle these. */
      return FALSE;
  }
}

/* Initializes a signal handle; the watcher is inactive until
 * uv_signal_start() is called. Always returns 0. */
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
  handle->pending_signum = 0;
  handle->signum = 0;
  handle->signal_cb = NULL;

  UV_REQ_INIT(&handle->signal_req, UV_SIGNAL_REQ);
  handle->signal_req.data = handle;

  return 0;
}

/* Deactivates a watcher by removing it from the global tree. Infallible;
 * always returns 0 (uv__signal_start relies on this). */
int uv_signal_stop(uv_signal_t* handle) {
  uv_signal_t* removed_handle;

  /* If the watcher wasn't started, this is a no-op. */
  if (handle->signum == 0)
    return 0;

  EnterCriticalSection(&uv__signal_lock);

  removed_handle = RB_REMOVE(uv_signal_tree_s, &uv__signal_tree, handle);
  assert(removed_handle == handle);

  LeaveCriticalSection(&uv__signal_lock);

  handle->signum = 0;
  uv__handle_stop(handle);

  return 0;
}

/* Starts watching {signum}; signal_cb fires every time it is dispatched. */
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
  return uv__signal_start(handle, signal_cb, signum, 0);
}

/* Like uv_signal_start(), but the watcher stops itself after the first
 * dispatch. */
int uv_signal_start_oneshot(uv_signal_t* handle,
                            uv_signal_cb signal_cb,
                            int signum) {
  return uv__signal_start(handle, signal_cb, signum, 1);
}

/* Common implementation of uv_signal_start()/uv_signal_start_oneshot().
 * Returns 0 on success or UV_EINVAL for an unsupported signal number. */
int uv__signal_start(uv_signal_t* handle,
                     uv_signal_cb signal_cb,
                     int signum,
                     int oneshot) {
  /* Test for invalid signal values. */
  if (signum != SIGWINCH && (signum <= 0 || signum >= NSIG))
    return UV_EINVAL;

  /* Short circuit: if the signal watcher is already watching {signum} don't
   * go through the process of deregistering and registering the handler.
   * Additionally, this avoids pending signals getting lost in the (small)
   * time frame that handle->signum == 0. */
  if (signum == handle->signum) {
    handle->signal_cb = signal_cb;
    return 0;
  }

  /* If the signal handler was already active, stop it first. */
  if (handle->signum != 0) {
    int r = uv_signal_stop(handle);
    /* uv_signal_stop is infallible. */
    assert(r == 0);
  }

  EnterCriticalSection(&uv__signal_lock);

  handle->signum = signum;
  if (oneshot)
    handle->flags |= UV_SIGNAL_ONE_SHOT;

  RB_INSERT(uv_signal_tree_s, &uv__signal_tree, handle);

  LeaveCriticalSection(&uv__signal_lock);

  handle->signal_cb = signal_cb;
  uv__handle_start(handle);

  return 0;
}

/* Completion callback run on the loop thread after uv__signal_dispatch()
 * posted the handle's signal_req. Invokes the user callback and performs any
 * deferred one-shot stop / close finalization. */
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
    uv_req_t* req) {
  long dispatched_signum;

  assert(handle->type == UV_SIGNAL);
  assert(req->type == UV_SIGNAL_REQ);

  dispatched_signum = InterlockedExchange(
          (volatile LONG*) &handle->pending_signum, 0);
  assert(dispatched_signum != 0);

  /* Check if the pending signal equals the signum that we are watching for.
   * These can get out of sync when the handler is stopped and restarted while
   * the signal_req is pending. */
  if (dispatched_signum == handle->signum)
    handle->signal_cb(handle, dispatched_signum);

  if (handle->flags & UV_SIGNAL_ONE_SHOT)
    uv_signal_stop(handle);

  if (handle->flags & UV_HANDLE_CLOSING) {
    /* When it is closing, it must be stopped at this point. */
    assert(handle->signum == 0);
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}

/* Begins closing the handle. The endgame is deferred while a dispatched
 * signal is still pending, so uv_process_signal_req() can run first. */
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
  uv_signal_stop(handle);
  uv__handle_closing(handle);

  if (handle->pending_signum == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}

/* Final teardown once the handle is fully quiescent. */
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));

  assert(handle->signum == 0);
  assert(handle->pending_signum == 0);

  handle->flags |= UV_HANDLE_CLOSED;

  uv__handle_close(handle);
}
```
```javascript (globalThis.TURBOPACK = globalThis.TURBOPACK || []).push(["output/your_sha256_hashnput_index_4187ef.js", { "[project]/turbopack/crates/turbopack-tests/tests/snapshot/imports/resolve_error_esm/input/index.js [test] (ecmascript)": (({ r: __turbopack_require__, f: __turbopack_module_context__, i: __turbopack_import__, s: __turbopack_esm__, v: __turbopack_export_value__, n: __turbopack_export_namespace__, c: __turbopack_cache__, M: __turbopack_modules__, l: __turbopack_load__, j: __turbopack_dynamic__, P: __turbopack_resolve_absolute_path__, U: __turbopack_relative_url__, R: __turbopack_resolve_module_id_path__, g: global, __dirname }) => (() => { "use strict"; __turbopack_esm__({}); (()=>{ const e = new Error("Cannot find module 'does-not-exist/path'"); e.code = 'MODULE_NOT_FOUND'; throw e; })(); "__TURBOPACK__ecmascript__hoisting__location__"; ; console.log(dne); console.log({}[dne]); })()), }]); //# sourceMappingURL=your_sha256_hashnput_index_4187ef.js.map ```
```javascript
; SPIR-V disassembly (not JavaScript, despite the fence label): a minimal
; ESSL 310 fragment shader whose main() stores a constant vec3 to FragColor.
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 14
; Schema: 0
               OpCapability Shader
          %1 = OpExtInstImport "GLSL.std.450"
               OpMemoryModel Logical GLSL450
               OpEntryPoint Fragment %main "main" %FragColor
               OpExecutionMode %main OriginUpperLeft
               OpSource ESSL 310
               OpName %main "main"
               OpName %FragColor "FragColor"
               OpDecorate %FragColor Location 0
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
      %float = OpTypeFloat 32
    %v3float = OpTypeVector %float 3
%_ptr_Output_v3float = OpTypePointer Output %v3float
  %FragColor = OpVariable %_ptr_Output_v3float Output
; The hex-float literals below are all outside the finite range of a 32-bit
; float (magnitude >= 2^128); presumably they encode +Inf, -Inf, and a NaN
; respectively — confirm against the producing test's expectations.
%float_0x1p_128 = OpConstant %float 0x1p+128
%float_n0x1p_128 = OpConstant %float -0x1p+128
%float_0x1_8p_128 = OpConstant %float 0x1.8p+128
         %13 = OpConstantComposite %v3float %float_0x1p_128 %float_n0x1p_128 %float_0x1_8p_128
       %main = OpFunction %void None %3
          %5 = OpLabel
               OpStore %FragColor %13
               OpReturn
               OpFunctionEnd
```
```html <!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Strict//EN' 'path_to_url <html xmlns='path_to_url xml:lang='en' lang='en'> <head> <meta http-equiv='Content-Type' content='text/html; charset=utf-8'/> <title>boost/exception/errinfo_file_name.hpp</title> <link href='reno.css' type='text/css' rel='stylesheet'/> </head> <body> <div class="body-0"> <div class="body-1"> <div class="body-2"> <div> <div id="boost_logo"> <a href="path_to_url"><img style="border:0" src="../../../boost.png" alt="Boost" width="277" height="86"/></a> </div> <h1>Boost Exception</h1> </div> <!-- file LICENSE_1_0.txt or copy at path_to_url --> <div class="RenoIncludeDIV"><div class="RenoAutoDIV"><h2>boost/exception/errinfo_file_name.hpp</h2> </div> <h3>Synopsis</h3> <div class="RenoIncludeDIV"><pre>#include &lt;<span class="RenoLink"><a href="boost_exception_error_info_hpp.html">boost/exception/error_info.hpp</a></span>&gt; #include &lt;string&gt; namespace boost { <span class="RenoIncludeSPAN"> <span class="RenoIncludeSPAN">typedef <span class="RenoLink"><a href="error_info.html">error_info</a></span>&lt;struct errinfo_file_name_,std::string&gt; <span class="RenoLink"><a href="errinfo_file_name.html">errinfo_file_name</a></span>;</span></span> }</pre> </div></div><div class="RenoAutoDIV"><div class="RenoHR"><hr/></div> See also: <span class="RenoPageList"><a href="boost_exception_all_hpp.html">boost/exception/all.hpp</a>&nbsp;| <a href="synopsis.html">Synopsis</a></span> </div> <!-- file LICENSE_1_0.txt or copy at path_to_url --> <div id="footer"> <p> <a class="logo" href="path_to_url"><img class="logo_pic" src="valid-css.png" alt="Valid CSS" height="31" width="88"/></a> <a class="logo" href="path_to_url"><img class="logo_pic" src="valid-xhtml.png" alt="Valid XHTML 1.0" height="31" width="88"/></a> </p> </div> </div> </div> </div> </body> </html> ```
```c++
/*
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing, software
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

// Unit tests for wabt's string_view: a pre-C++17 stand-in for
// std::string_view, so each test mirrors the std::string_view contract.

#include "gtest/gtest.h"

#include "src/string-view.h"

#include <cstring>
#include <functional>

using namespace wabt;

namespace {

// Asserts that sv has exactly the same length and characters as the
// NUL-terminated string s.
void assert_string_view_eq(const char* s, string_view sv) {
  size_t len = std::strlen(s);
  ASSERT_EQ(len, sv.size());
  for (size_t i = 0; i < len; ++i) {
    ASSERT_EQ(s[i], sv[i]);
  }
}

// Shorthand for the "not found" sentinel used by the find/rfind tests below.
constexpr string_view::size_type npos = string_view::npos;

}  // end anonymous namespace

// --- Construction and assignment -----------------------------------------

TEST(string_view, default_constructor) {
  assert_string_view_eq("", string_view());
}

TEST(string_view, copy_constructor) {
  string_view sv1("copy");
  assert_string_view_eq("copy", string_view(sv1));

  string_view sv2;
  assert_string_view_eq("", string_view(sv2));
}

TEST(string_view, assignment_operator) {
  string_view sv1;
  sv1 = string_view("assign");
  assert_string_view_eq("assign", sv1);

  string_view sv2;
  sv2 = string_view();
  assert_string_view_eq("", sv2);
}

TEST(string_view, string_constructor) {
  assert_string_view_eq("", string_view(std::string()));
  assert_string_view_eq("string", string_view(std::string("string")));
}

TEST(string_view, cstr_constructor) {
  assert_string_view_eq("", string_view(""));
  assert_string_view_eq("cstr", string_view("cstr"));
}

TEST(string_view, cstr_len_constructor) {
  // (pointer, length) constructor truncates at the given length.
  assert_string_view_eq("", string_view("foo-bar-baz", 0));
  assert_string_view_eq("foo", string_view("foo-bar-baz", 3));
  assert_string_view_eq("foo-bar", string_view("foo-bar-baz", 7));
}

// --- Iterators ------------------------------------------------------------

TEST(string_view, begin_end) {
  string_view sv("012345");
  char count = 0;
  for (auto iter = sv.begin(), end = sv.end(); iter != end; ++iter) {
    ASSERT_EQ('0' + count, *iter);
    ++count;
  }
  ASSERT_EQ(6, count);
}

TEST(string_view, cbegin_cend) {
  const string_view sv("012345");
  char count = 0;
  for (auto iter = sv.cbegin(), end = sv.cend(); iter != end; ++iter) {
    ASSERT_EQ('0' + count, *iter);
    ++count;
  }
  ASSERT_EQ(6, count);
}

TEST(string_view, rbegin_rend) {
  string_view sv("012345");
  char count = 0;
  for (auto iter = sv.rbegin(), end = sv.rend(); iter != end; ++iter) {
    // Reverse iteration yields the characters back-to-front.
    ASSERT_EQ('5' - count, *iter);
    ++count;
  }
  ASSERT_EQ(6, count);
}

TEST(string_view, crbegin_crend) {
  const string_view sv("012345");
  char count = 0;
  for (auto iter = sv.crbegin(), end = sv.crend(); iter != end; ++iter) {
    ASSERT_EQ('5' - count, *iter);
    ++count;
  }
  ASSERT_EQ(6, count);
}

// --- Capacity and element access ------------------------------------------

TEST(string_view, size) {
  string_view sv1;
  ASSERT_EQ(0U, sv1.size());

  string_view sv2("");
  ASSERT_EQ(0U, sv2.size());

  string_view sv3("hello");
  ASSERT_EQ(5U, sv3.size());
}

TEST(string_view, length) {
  string_view sv1;
  ASSERT_EQ(0U, sv1.length());

  string_view sv2("hello");
  ASSERT_EQ(5U, sv2.length());
}

TEST(string_view, empty) {
  string_view sv1;
  ASSERT_TRUE(sv1.empty());

  string_view sv2("bye");
  ASSERT_FALSE(sv2.empty());
}

TEST(string_view, operator_bracket) {
  string_view sv("words");
  ASSERT_EQ('w', sv[0]);
  ASSERT_EQ('o', sv[1]);
  ASSERT_EQ('r', sv[2]);
  ASSERT_EQ('d', sv[3]);
  ASSERT_EQ('s', sv[4]);
}

TEST(string_view, at) {
  string_view sv("words");
  ASSERT_EQ('w', sv.at(0));
  ASSERT_EQ('o', sv.at(1));
  ASSERT_EQ('r', sv.at(2));
  ASSERT_EQ('d', sv.at(3));
  ASSERT_EQ('s', sv.at(4));
}

TEST(string_view, front) {
  string_view sv("words");
  ASSERT_EQ('w', sv.front());
}

TEST(string_view, back) {
  string_view sv("words");
  ASSERT_EQ('s', sv.back());
}

TEST(string_view, data) {
  // data() returns the original pointer — a view, not a copy.
  const char* cstr = "words";
  string_view sv(cstr);
  ASSERT_EQ(cstr, sv.data());
}

// --- Modifiers ------------------------------------------------------------

TEST(string_view, remove_prefix) {
  string_view sv("words");
  sv.remove_prefix(2);
  assert_string_view_eq("rds", sv);
}

TEST(string_view, remove_suffix) {
  string_view sv("words");
  sv.remove_suffix(2);
  assert_string_view_eq("wor", sv);
}

TEST(string_view, swap) {
  string_view sv1("hello");
  string_view sv2("bye");
  sv1.swap(sv2);
  assert_string_view_eq("bye", sv1);
  assert_string_view_eq("hello", sv2);
}

// --- Conversion and copying -----------------------------------------------

TEST(string_view, operator_std_string) {
  string_view sv1("hi");
  std::string s(sv1);
  ASSERT_EQ(2U, s.size());
  ASSERT_EQ('h', s[0]);
  ASSERT_EQ('i', s[1]);
}

TEST(string_view, copy) {
  string_view sv("words");
  char buffer[10] = {0};
  // copy(dest, count, pos): copies at most count chars starting at pos, so
  // only "rds" (3 chars) lands in the buffer; the rest stays zeroed.
  sv.copy(buffer, 10, 2);
  ASSERT_EQ('r', buffer[0]);
  ASSERT_EQ('d', buffer[1]);
  ASSERT_EQ('s', buffer[2]);
  for (int i = 3; i < 10; ++i) {
    ASSERT_EQ(0, buffer[i]);
  }
}

TEST(string_view, substr) {
  string_view sv1("abcdefghij");
  string_view sv2 = sv1.substr(2, 3);
  assert_string_view_eq("cde", sv2);
}

// --- compare() overloads ---------------------------------------------------

TEST(string_view, compare0) {
  ASSERT_TRUE(string_view("meat").compare(string_view("meet")) < 0);
  ASSERT_TRUE(string_view("rest").compare(string_view("rate")) > 0);
  ASSERT_TRUE(string_view("equal").compare(string_view("equal")) == 0);
  // A proper prefix compares less than the longer string, and vice versa.
  ASSERT_TRUE(string_view("star").compare(string_view("start")) < 0);
  ASSERT_TRUE(string_view("finished").compare(string_view("fin")) > 0);
}

TEST(string_view, compare1) {
  // compare(pos, count, sv): compares the substring [pos, pos+count).
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, string_view("ca")) > 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, string_view("cd")) == 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, string_view("cz")) < 0);
}

TEST(string_view, compare2) {
  // compare(pos1, count1, sv, pos2, count2): substring vs. substring.
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, string_view("_ca__"), 1, 2) > 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, string_view("_cd__"), 1, 2) == 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, string_view("_cz__"), 1, 2) < 0);
}

TEST(string_view, compare3) {
  ASSERT_TRUE(string_view("abcdef").compare("aaaa") > 0);
  ASSERT_TRUE(string_view("abcdef").compare("abcdef") == 0);
  ASSERT_TRUE(string_view("abcdef").compare("zzzz") < 0);
}

TEST(string_view, compare4) {
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, "ca") > 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, "cd") == 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, "cz") < 0);
}

TEST(string_view, compare5) {
  // compare(pos, count1, cstr, count2): only count2 chars of cstr are used.
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, "ca____", 2) > 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, "cd___", 2) == 0);
  ASSERT_TRUE(string_view("abcdef").compare(2, 2, "cz__", 2) < 0);
}

// --- find / rfind ----------------------------------------------------------

TEST(string_view, find0) {
  ASSERT_EQ(0U, string_view("find fins").find(string_view("fin")));
  ASSERT_EQ(5U, string_view("find fins").find(string_view("fin"), 1));
  ASSERT_EQ(npos, string_view("find fins").find(string_view("fin"), 6));
}

TEST(string_view, find1) {
  ASSERT_EQ(0U, string_view("012340123").find('0'));
  ASSERT_EQ(5U, string_view("012340123").find('0', 2));
  ASSERT_EQ(npos, string_view("012340123").find('0', 6));
}

TEST(string_view, find2) {
  // find(cstr, pos, count): only the first count chars of cstr are matched.
  ASSERT_EQ(1U, string_view("012340123").find("12345", 0, 2));
  ASSERT_EQ(6U, string_view("012340123").find("12345", 3, 2));
  ASSERT_EQ(npos, string_view("012340123").find("12345", 10, 2));
}

TEST(string_view, find3) {
  ASSERT_EQ(1U, string_view("012340123").find("12"));
  ASSERT_EQ(6U, string_view("012340123").find("12", 2));
  ASSERT_EQ(npos, string_view("012340123").find("12", 10));
}

TEST(string_view, rfind0) {
  ASSERT_EQ(5U, string_view("find fins").rfind(string_view("fin")));
  ASSERT_EQ(0U, string_view("find fins").rfind(string_view("fin"), 4));
  ASSERT_EQ(npos, string_view("find fins").rfind(string_view("no")));
  // Needle longer than haystack can never match.
  ASSERT_EQ(npos, string_view("foo").rfind(string_view("foobar")));
}

TEST(string_view, rfind1) {
  ASSERT_EQ(5U, string_view("012340123").rfind('0'));
  ASSERT_EQ(0U, string_view("012340123").rfind('0', 2));
  ASSERT_EQ(npos, string_view("012340123").rfind('9'));
}

TEST(string_view, rfind2) {
  ASSERT_EQ(6U, string_view("012340123").rfind("12345", npos, 2));
  ASSERT_EQ(1U, string_view("012340123").rfind("12345", 4, 2));
  ASSERT_EQ(npos, string_view("012340123").rfind("12345", npos, 5));
  ASSERT_EQ(npos, string_view("012").rfind("12345", npos, 5));
}

TEST(string_view, rfind3) {
  ASSERT_EQ(6U, string_view("012340123").rfind("12"));
  ASSERT_EQ(1U, string_view("012340123").rfind("12", 2));
  ASSERT_EQ(npos, string_view("012340123").rfind("12", 0));
  ASSERT_EQ(npos, string_view("012").rfind("12345"));
}

// --- find_first_of / find_last_of ------------------------------------------

TEST(string_view, find_first_of0) {
  ASSERT_EQ(0U, string_view("0123abc").find_first_of(string_view("0a")));
  ASSERT_EQ(4U,
            string_view("0123abc").find_first_of(string_view("0a"), 1));
  ASSERT_EQ(npos, string_view("0123abc").find_first_of(string_view("xyz")));
}

TEST(string_view, find_first_of1) {
  ASSERT_EQ(1U, string_view("ahellohi").find_first_of('h'));
  ASSERT_EQ(6U, string_view("ahellohi").find_first_of('h', 2));
  ASSERT_EQ(npos, string_view("ahellohi").find_first_of('z', 2));
}

TEST(string_view, find_first_of2) {
  // find_first_of(cstr, pos, count): only count chars form the search set.
  ASSERT_EQ(0U, string_view("0123abc").find_first_of("0a1b", 0, 2));
  ASSERT_EQ(4U, string_view("0123abc").find_first_of("0a1b", 1, 2));
  ASSERT_EQ(npos, string_view("0123abc").find_first_of("0a1b", 5, 2));
}

TEST(string_view, find_first_of3) {
  ASSERT_EQ(0U, string_view("0123abc").find_first_of("0a"));
  ASSERT_EQ(0U, string_view("0123abc").find_first_of("0a", 0));
  ASSERT_EQ(4U, string_view("0123abc").find_first_of("0a", 1));
  ASSERT_EQ(npos, string_view("0123abc").find_first_of("0a", 5));
}

TEST(string_view, find_last_of0) {
  ASSERT_EQ(4U, string_view("0123abc").find_last_of(string_view("0a")));
  ASSERT_EQ(0U, string_view("0123abc").find_last_of(string_view("0a"), 1));
  ASSERT_EQ(npos, string_view("0123abc").find_last_of(string_view("xyz")));
}

TEST(string_view, find_last_of1) {
  ASSERT_EQ(6U, string_view("ahellohi").find_last_of('h'));
  ASSERT_EQ(1U, string_view("ahellohi").find_last_of('h', 2));
  ASSERT_EQ(npos, string_view("ahellohi").find_last_of('z', 2));
}

TEST(string_view, find_last_of2) {
  ASSERT_EQ(4U, string_view("0123abc").find_last_of("0a1b", npos, 2));
  ASSERT_EQ(0U, string_view("0123abc").find_last_of("0a1b", 1, 2));
  ASSERT_EQ(npos, string_view("0123abc").find_last_of("a1b", 0, 2));
  // An empty search set (count == 0) never matches.
  ASSERT_EQ(npos, string_view("0123abc").find_last_of("xyz", npos, 0));
}

TEST(string_view, find_last_of3) {
  ASSERT_EQ(4U, string_view("0123abc").find_last_of("0a"));
  ASSERT_EQ(4U, string_view("0123abc").find_last_of("0a", npos));
  ASSERT_EQ(0U, string_view("0123abc").find_last_of("0a", 1));
  ASSERT_EQ(npos, string_view("0123abc").find_last_of("a1", 0));
}

// --- Comparison operators ---------------------------------------------------

TEST(string_view, operator_equal) {
  ASSERT_TRUE(string_view("this") == string_view("this"));
  ASSERT_FALSE(string_view("this") == string_view("that"));
}

TEST(string_view, operator_not_equal) {
  ASSERT_FALSE(string_view("here") != string_view("here"));
  ASSERT_TRUE(string_view("here") != string_view("there"));
}

TEST(string_view, operator_less_than) {
  ASSERT_TRUE(string_view("abc") < string_view("xyz"));
  ASSERT_FALSE(string_view("later") < string_view("earlier"));
  ASSERT_FALSE(string_view("one") < string_view("one"));
}

TEST(string_view, operator_greater_than) {
  ASSERT_TRUE(string_view("much") > string_view("little"));
  ASSERT_FALSE(string_view("future") > string_view("past"));
  ASSERT_FALSE(string_view("now") > string_view("now"));
}

TEST(string_view, operator_less_than_or_equal) {
  ASSERT_TRUE(string_view("abc") <= string_view("xyz"));
  ASSERT_FALSE(string_view("later") <= string_view("earlier"));
  ASSERT_TRUE(string_view("one") <= string_view("one"));
}

TEST(string_view, operator_greater_than_or_equal) {
  ASSERT_TRUE(string_view("much") >= string_view("little"));
  ASSERT_FALSE(string_view("future") >= string_view("past"));
  ASSERT_TRUE(string_view("now") >= string_view("now"));
}

// --- Hashing ----------------------------------------------------------------

TEST(string_view, hash) {
  std::hash<string_view> hasher;
  // Equal views must hash equal; distinct views are expected (not strictly
  // guaranteed) to hash differently for these inputs.
  ASSERT_NE(hasher(string_view("hello")), hasher(string_view("goodbye")));
  ASSERT_EQ(hasher(string_view("same")), hasher(string_view("same")));
}
```
The Institute for Social and Economic Research and Policy (ISERP) is the research arm of the social sciences at Columbia University, formerly known as the Paul F. Lazarsfeld Center for the Social Sciences. ISERP works to produce pioneering social science research and to shape public policy by integrating knowledge and methods across the social scientific disciplines. ISERP organizes an active intellectual community at Columbia University through its Faculty Fellows program, research centers, projects, and training initiatives. History ISERP is the direct descendant of the Bureau of Applied Social Research (BASR), established at Columbia University in 1944 by sociologist Paul F. Lazarsfeld. One of the first social science institutes in the nation, the Bureau made landmark contributions to communications research, public opinion polling, organizational studies, and social science methodology. BASR’s tradition was carried on by the Paul F. Lazarsfeld Center for the Social Sciences, established in 1976 after Lazarsfeld’s death and later renamed to honor him. Under directors Harold W. Watts, Jonathan Cole, and Harrison White, the Lazarsfeld Center expanded its interdisciplinary reach and established particular strengths in the sociology of science and network analysis. The Lazarsfeld Center for the Social Sciences was one of the centers incorporated into the Institute at its founding in 1999 as the Institute for Social and Economic Theory and Research (ISETR). Also joining ISETR were the Center for Urban Research and Policy, founded in 1992, and several new research centers. In January 2001, ISETR merged with the Office of Sponsored Research to become the Institute for Social and Economic Research and Policy. The institute is currently led by professors Thomas DiPrete and Matthew Connelly. 
Fellowship The fellowship of ISERP is drawn from faculty of the departments of Anthropology, Economics, History, Political Science, Psychology, Sociology, and Statistics, as well as of Barnard College, the Earth Institute, Teachers College, the Mailman School of Public Health and the Schools of Architecture, Planning and Preservation, Business, International and Public Affairs, Law, and Social Work. Centers and Major Projects The Institute contains fifteen research centers and major projects that conduct basic research, develop policy initiatives, and train graduate students and postdoctoral fellows. American Institutions Project The American Institutions Project focuses on Congress, regional issues, the treasury, and the military. Applied Statistics Center The Applied Statistics Center is a community of scholars at Columbia organized around research projects in the human, social, and engineering sciences, as well as basic statistical research. It is directed by Andrew Gelman. Center for Intersectionality and Social Policy Studies The Center for Intersectionality and Social Policy Studies functions as a research entity and a site for intellectual dialogue and collaboration for academics who are committed to analyzing the dynamics of complex inequity and stratification, as well as to policy and legal advocacy. It is directed by Kimberlé Crenshaw. Center for Research on Environmental Decisions (CRED) CRED studies decision making under climate uncertainty and risk. Its objectives address the human responses to climate change and climate variability. Center for the Study of Democracy, Toleration, and Religion (CDTR) CDTR, opened in 2006, conducts research and training on the tensions between religion, toleration, and democracy. Center for the Study of Wealth and Inequality (CWI) CWI investigates the economic well-being of families and societal inequality. Its interests encompass family welfare and standard of living as well.
Center on Organizational Innovation (COI) COI promotes research on organizational innovation as well as new forms of collaboration, communication, and coordination made possible with the advent of interactive technologies. Columbia Center for the Study of Development Strategies The Center for the Study of Development Strategies provides a forum at Columbia to support rigorous field-based research on major questions in the political economy of development. Columbia Program for Indian Economic Policies (PIEP) PIEP is led by Jagdish Bhagwati and Arvind Panagariya. The Program brings together scholars from Columbia and other universities and think tanks around the world. The program houses a data center on India’s economy and organizes lectures, seminars, and conferences in the U.S. and India. Initiative for Policy Dialogue (IPD) IPD, led by Nobel laureate Joseph E. Stiglitz, helps developing countries respond to globalization. Paul F. Lazarsfeld Center for the Social Sciences The Lazarsfeld Center, the oldest of the ISERP centers, is the catalyst for new research through its sponsorship of workshops, seminars, and conferences. The center is well known for playing a central role in the development of social network analysis and relational sociology. Public Opinion Project (POP) The Public Opinion Project (POP) examines trends in public opinion, public policy, and political leadership in the United States. Roundtable on the Sexual Politics of Black Churches This project convenes a team of sixteen African American scholars and religious leaders for a series of three convenings over a period of seventeen months.
The Global Health Research Center of Central Asia Columbia University's Global Health Research Center of Central Asia brings together multidisciplinary expertise from Columbia, Central Asia and the surrounding region to address a range of global health challenges: HIV/AIDS, sexually transmitted infections (STIs), hepatitis C, substance abuse, malnutrition, mental health and other threats to health. Understanding Autism Project Autism is a condition characterized by impairments in communication, social interaction, and stereotyped or repetitive behaviors. No one knows with certainty what has caused autism prevalence—which has increased roughly ten-fold in the past forty years—to increase so precipitously. This group explores this increase. References External links Columbia University Think tanks based in the United States Social science research institutes Research institutes in New York (state)
```go
/*
path_to_url

Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/

package multiplex

// Well-known mux connection IDs. Numbering starts at 1 (iota + 1), so the
// zero value of ConnID is never a valid connection ID.
const (
	// PluginServiceConn is the mux connection ID for NRI plugin services.
	PluginServiceConn ConnID = iota + 1
	// RuntimeServiceConn is the mux connection ID for NRI runtime services.
	RuntimeServiceConn
)
```
```xml
import React from 'react';
import ReactDOM from 'react-dom';
import { PropTypes, Utils, ComponentRegistry } from 'mailspring-exports';
import InjectedComponentErrorBoundary from './injected-component-error-boundary';
import InjectedComponentLabel from './injected-component-label';
import { ComponentRegistryDescriptor } from '../registries/component-registry';

type InjectedComponentProps = {
  matching: ComponentRegistryDescriptor;
  className?: string;
  exposedProps?: any;
  fallback?: (...args: any[]) => any;
  style?: React.CSSProperties;
  requiredMethods?: string[];
  onComponentDidChange?: (...args: any[]) => any;
};

type InjectedComponentState = {
  // The registered component (or `fallback`) currently being rendered.
  component: any;
  // Whether component-region debug overlays are enabled in the registry.
  visible: any;
};

/**
Public: InjectedComponent makes it easy to include dynamically registered
components inside of your React render method. Rather than explicitly render
a component, such as a `<Composer>`, you can use InjectedComponent:

```javascript
<InjectedComponent matching={role:"Composer"} exposedProps={headerMessageId:123} />
```

InjectedComponent will look up the component registered with that role in the
{ComponentRegistry} and render it, passing the exposedProps
(`headerMessageId={123}`) along.

InjectedComponent monitors the ComponentRegistry for changes. If a new
component is registered that matches the descriptor you provide,
InjectedComponent will refresh.

If no matching component is found, the InjectedComponent renders an empty div.

Section: Component Kit
*/
export default class InjectedComponent extends React.Component<
  InjectedComponentProps,
  InjectedComponentState
> {
  static displayName = 'InjectedComponent';

  /*
  Public: React `props` supported by InjectedComponent:

   - `matching` Pass an {Object} with ComponentRegistry descriptors. This set
     of descriptors is provided to {ComponentRegistry::findComponentsForDescriptor}
     to retrieve the component that will be displayed.

   - `className` (optional) A {String} class name for the containing element.

   - `exposedProps` (optional) An {Object} with props that will be passed to
     each item rendered into the set.

   - `fallback` (optional) A {Component} to default to in case there are no
     matching components in the ComponentRegistry

   - `requiredMethods` (optional) An {Array} with a list of methods that should
     be implemented by the registered component instance. If these are not
     implemented, an error will be thrown.
  */
  static propTypes = {
    matching: PropTypes.object.isRequired,
    className: PropTypes.string,
    exposedProps: PropTypes.object,
    fallback: PropTypes.func,
    style: PropTypes.object,
    requiredMethods: PropTypes.arrayOf(PropTypes.string),
    onComponentDidChange: PropTypes.func,
  };

  static defaultProps = {
    style: {},
    className: '',
    exposedProps: {},
    requiredMethods: [],
    onComponentDidChange: () => {},
  };

  // Unsubscribe callback returned by ComponentRegistry.listen; cleared on unmount.
  private _componentUnlistener: () => void;

  constructor(props) {
    super(props);
    // State must be resolved before _verifyRequiredMethods, which reads
    // this.state.component.
    this.state = this._getStateFromStores();
    this._verifyRequiredMethods();
    this._setRequiredMethods(this.props.requiredMethods);
  }

  componentDidMount() {
    // Re-resolve the component whenever the registry changes.
    this._componentUnlistener = ComponentRegistry.listen(() => {
      this.setState(this._getStateFromStores());
    });
    if (this.state.component && this.state.component.containerRequired === false) {
      this.props.onComponentDidChange();
    }
  }

  // NOTE(review): componentWillReceiveProps is a legacy React lifecycle method
  // (deprecated since React 16.3) — consider migrating to
  // getDerivedStateFromProps/componentDidUpdate when the React version allows.
  componentWillReceiveProps(newProps) {
    if (!Utils.isEqual(newProps.matching, this.props && this.props.matching)) {
      this.setState(this._getStateFromStores(newProps));
    }
  }

  componentDidUpdate(prevProps, prevState) {
    // Re-bind forwarding methods in case the rendered component changed.
    this._setRequiredMethods(this.props.requiredMethods);
    if (this.state.component !== prevState.component) {
      this.props.onComponentDidChange();
    }
  }

  componentWillUnmount() {
    if (this._componentUnlistener) {
      this._componentUnlistener();
    }
  }

  // Public: forward focus to the rendered inner component (or its DOM node).
  focus = () => {
    this._runInnerDOMMethod('focus');
  };

  // Public: forward blur to the rendered inner component (or its DOM node).
  blur = () => {
    this._runInnerDOMMethod('blur');
  };

  // Private: Attempts to run the DOM method, ie 'focus', on
  // 1. Any implementation provided by the inner component
  // 2. Any native implementation provided by the DOM
  // 3. Ourselves, so that the method always has /some/ effect.
  //
  // NOTE(review): relies on legacy string refs (this.refs.inner) — works, but
  // slated for removal in future React versions.
  _runInnerDOMMethod = (method, ...rest) => {
    let target = null;
    if (this.refs.inner && this.refs.inner[method]) {
      target = this.refs.inner;
    } else if (this.refs.inner) {
      target = ReactDOM.findDOMNode(this.refs.inner);
    } else {
      target = ReactDOM.findDOMNode(this);
    }

    if (target[method]) {
      target[method].bind(target)(...rest);
    }
  };

  // Private: expose each required method on this wrapper so callers can invoke
  // it directly; each call is forwarded to the inner component/DOM node.
  _setRequiredMethods = methods => {
    methods.forEach(method => {
      Object.defineProperty(this, method, {
        configurable: true,
        enumerable: true,
        value: (...rest) => this._runInnerDOMMethod(method, ...rest),
      });
    });
  };

  // Private: throw early (at construction) if the resolved component class is
  // missing any of the requiredMethods on its prototype.
  _verifyRequiredMethods = () => {
    if (this.state.component) {
      const component = this.state.component;
      this.props.requiredMethods.forEach(method => {
        if (component.prototype[method] === undefined) {
          throw new Error(
            `${
              component.name
            } must implement method ${method} when registering for ${JSON.stringify(
              this.props.matching
            )}`
          );
        }
      });
    }
  };

  // Private: resolve the first registry match for `matching` (falling back to
  // props.fallback when there is none) plus the debug-overlay visibility flag.
  _getStateFromStores = (props = this.props) => {
    const components = ComponentRegistry.findComponentsMatching(props.matching);
    if (components.length > 1) {
      console.warn(
        `There are multiple components available for ${JSON.stringify(
          props.matching
        )}. <InjectedComponent> is only rendering the first one.`
      );
    }
    return {
      component: components.length === 0 ? this.props.fallback : components[0],
      visible: ComponentRegistry.showComponentRegions(),
    };
  };

  render() {
    if (!this.state.component) {
      // No matching component and no fallback: render an empty placeholder.
      return <div />;
    }
    const exposedProps = Object.assign({}, this.props.exposedProps, {
      fallback: this.props.fallback,
    });
    let className = this.props.className;
    if (this.state.visible) {
      className += ' injected-region-visible';
    }
    const Component = this.state.component;
    let element = null;

    // Only class components (subclasses of React.Component) can take a ref;
    // function components cannot.
    const privateProps: any = {};
    if (Object.prototype.isPrototypeOf.call(React.Component, Component)) {
      privateProps.ref = 'inner';
    }

    if (Component.containerRequired === false) {
      // Component manages its own container — render it bare.
      element = <Component {...privateProps} key={Component.displayName} {...exposedProps} />;
    } else {
      element = (
        <InjectedComponentErrorBoundary key={Component.displayName}>
          <Component
            {...privateProps}
            {...exposedProps}
            className={className}
            style={this.props.style}
          />
        </InjectedComponentErrorBoundary>
      );
    }

    if (this.state.visible) {
      // Debug mode: wrap the component with a visible region label.
      return (
        <div className={className} style={this.props.style}>
          {element}
          <InjectedComponentLabel matching={this.props.matching} {...exposedProps} />
          <span style={{ clear: 'both' }} />
        </div>
      );
    }
    return element;
  }
}
```
The Citadel Rail Bridge (Most przy Cytadeli) was a bridge in Warsaw, crossing the Vistula River. It opened in November, 1875, and was expanded with a second part in 1908. It was blown up for the final time on September 13, 1944 by retreating Germans and was later replaced with the Gdański Bridge. History The Kierbedzia Bridge, built in 1864, was originally planned as a railway bridge, connecting the Petersburg train station (now Warszawa Wileńska station) with the Vienna train station (Dworzec Wiedeński, which was demolished in 1944). These plans were abandoned and the bridge was built solely for road transport (with tracks for horse-drawn trams). It was decided soon after to build a railway bridge in a different place in Warsaw. The choice fell much further north, at the Warsaw Citadel (south of the fortress). This bridge was built from April, 1873 and was opened in November, 1875. It was simply called the "Second Bridge" (as it was the second Warsaw bridge crossing the Vistula). Officially it was called the "Railway Bridge", even though it was both a rail and road bridge. The bridge had a rail track running on one level and the lower level was designed for pedestrian and vehicular traffic which was originally intended only for military traffic but civilian traffic was allowed as well. The bridge turned out to be insufficient, mainly because it only had one rail track. For this reason, by 1908 the "Second Railway Bridge" was built. It was also known as the "Fourth Bridge" (after the third Poniatowski Bridge, which was built from 1904 to 1914). It was built right next to the first (basically as one bridge) and had two railway tracks. The existing Railway Bridge was handed to the city and adapted for road and pedestrian traffic only. Both bridges shared the fate of other bridges in Warsaw - on August 5, 1915, they were blown up by the retreating Russian forces.
The bridge from 1875 lost three spans while the other bridge from 1908 lost four central spans (the third, fourth, fifth and sixth, counting from the Citadel). The Second Bridge was rebuilt by the Germans during World War I. The Fourth Bridge was rebuilt during World War I or just after its completion, and in the summer of 1920 it was rebuilt as a military road bridge under the direction of Bronisław Plebiński. Both bridges were again blown up on 13 September, 1944 by Germans withdrawing from Praga. Well after World War II, in 1959, the new Gdański Bridge was built upon the pillars of the old bridge. The bridge today stands in exactly the same place as its pre-war counterparts. In 2009, parts of the old bridge were excavated to be exhibited in public with a fragment of the Kierbedzia Bridge rediscovered in 2011. References Bridges in Warsaw
```php
<?php
/*
 *
 *
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing, software
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 */

namespace Google\Service\RemoteBuildExecution;

/**
 * Generated API model: one entry of a BatchUpdateBlobs response, pairing the
 * digest of an uploaded blob with the per-blob upload status.
 */
class BuildBazelRemoteExecutionV2BatchUpdateBlobsResponseResponse extends \Google\Model
{
  // Type metadata consumed by \Google\Model for (de)serialization.
  protected $digestType = BuildBazelRemoteExecutionV2Digest::class;
  protected $digestDataType = '';
  protected $statusType = GoogleRpcStatus::class;
  protected $statusDataType = '';

  /**
   * Sets the digest identifying the blob this entry refers to.
   *
   * @param BuildBazelRemoteExecutionV2Digest
   */
  public function setDigest(BuildBazelRemoteExecutionV2Digest $digest)
  {
    $this->digest = $digest;
  }
  /**
   * @return BuildBazelRemoteExecutionV2Digest
   */
  public function getDigest()
  {
    return $this->digest;
  }
  /**
   * Sets the RPC status describing the outcome of this blob's upload.
   *
   * @param GoogleRpcStatus
   */
  public function setStatus(GoogleRpcStatus $status)
  {
    $this->status = $status;
  }
  /**
   * @return GoogleRpcStatus
   */
  public function getStatus()
  {
    return $this->status;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
// NOTE(review): the alias string below appears corrupted by source
// sanitization (placeholder "your_sha256_hash..."); confirm the original
// alias name against the upstream google-api-php-client-services repo.
class_alias(BuildBazelRemoteExecutionV2BatchUpdateBlobsResponseResponse::class, your_sha256_hashatchUpdateBlobsResponseResponse');
```
Utica City FC is a professional indoor soccer team based in Utica, New York that plays in the Eastern Conference of the Major Arena Soccer League. History The team formed in 2011 as the Syracuse Silver Knights with the intention of playing in the new I-League (a United Soccer Leagues league set to begin play in 2011), but the team moved to the Major Indoor Soccer League when the two leagues merged. The team was rumored to be named the Syracuse Scorpions and revive the name of an old American Soccer League team. However, the name Silver Knights was announced at the team's inaugural press conference. The club is coached by former Silver Knights player, Ryan Hall, who replaced goalkeeper Bryan O'Quinn, who replaced club President, Team Owner and Syracuse native Tommy Tanner for the 2016–17 season. The club has signed former Syracuse Salty Dogs player Ryan Hall, among others. The team announced the signing of former Major League Soccer star Diego Serna on October 11, 2011. The Syracuse Silver Knights' first season of play began in 2011–12 when they joined the MISL as an expansion franchise. For their inaugural season, the Silver Knights played their home games at the Oncenter War Memorial Arena in downtown Syracuse. The team finished third in the Eastern Division in 2012, having been eliminated from playoff contention when the Rochester Lancers beat the Wichita Wings on February 23, 2012. In 2013, the Silver Knight Foundation was launched to help benefit kids in tough economic and social situations. The foundation hosts events throughout the year which helps raise money for these children. Many Silver Knights players, including reserve and U19 players, volunteer their time to help partake in the events. After the 2013–2014 season, the team announced that it was leaving the MISL along with five other teams joining the PASL, which was subsequently renamed the MASL. 
On June 13, 2018, the team announced that it had partnered with the Utica Comets of the AHL and would be moving to Utica as Utica City FC for the 2018-19 season. The team did not play during the 2021 season due to the COVID-19 pandemic. Players Active roster Updated July 28, 2023 Inactive roster Year-by-year Club staff President: Robert Esche Vice-president: Adam Pawlick CFO: Michael Potrzeba Executive Administrator: Luann Horton-Murad General Manager: Tommy Tanner VP Corporate Partnerships: Alicia Leone-Desarro VP Marketing: Cecelie Pikus VP Creative Services: Eric Kowiatek Head coach: Hewerton Moriera Kits Rivalries Utica City FC's main rival is the Baltimore Blast. It existed in their days as the Syracuse Silver Knights and has extended to Utica City FC when the team relocated. Both teams have won five games in the series since their first meeting on December 2, 2018, a 4–2 win for Utica City. The Rochester Lancers are also considered a rival of Utica City FC. Rochester is two hours away from Utica and are Utica's closest opponent by proximity. The first matchup of this rivalry took place on November 29, 2019, with Utica City defeating Rochester by a 9–1 score. References External links Official Site Major Arena Soccer League official site Indoor soccer clubs in the United States Major Arena Soccer League teams 2018 establishments in New York (state) Sports in Utica, New York Men's soccer clubs in New York (state) Association football clubs established in 2018
```html
<!DOCTYPE html>
<!-- Demo of the CSS :user-valid and :user-invalid pseudo-classes: fields are
     only styled valid/invalid after the user has interacted with them. -->
<html lang="en-US">
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width">
    <title>:user-valid and :user-invalid example</title>
    <link href="path_to_url" rel="stylesheet">
    <style>
      body {
        font-family: 'Josefin Sans', sans-serif;
        margin: 20px auto;
        max-width: 460px;
      }

      fieldset {
        padding: 10px 30px 0;
      }

      legend {
        color: white;
        background: black;
        padding: 5px 10px;
      }

      fieldset > div {
        margin-bottom: 20px;
        display: flex;
        flex-flow: row wrap;
      }

      button,
      label,
      input {
        display: block;
        font-family: inherit;
        font-size: 100%;
        padding: 0;
        margin: 0;
        box-sizing: border-box;
        width: 100%;
        padding: 5px; /* intentionally overrides the earlier padding: 0 */
        height: 30px;
      }

      input {
        box-shadow: inset 1px 1px 3px #ccc;
        border-radius: 5px;
      }

      input:hover,
      input:focus {
        background-color: #eee;
      }

      input + span {
        position: relative;
      }

      /* "required" badge floated above any input carrying the required attribute */
      input:required + span::after {
        font-size: 0.7rem;
        position: absolute;
        content: "required";
        color: white;
        background-color: black;
        padding: 5px 10px;
        top: -26px;
        left: -70px;
      }

      /* anchor for the validity indicator to the right of each input */
      input + span::before {
        position: absolute;
        right: -20px;
        top: 5px;
      }

      /* Applied only after user interaction, unlike plain :invalid/:valid. */
      input:user-invalid {
        border: 2px solid red;
      }

      /* NOTE(review): the content values below are empty strings — the
         original demo presumably used visible glyphs (e.g. a cross and a
         check mark) that may have been stripped in transit; confirm against
         the upstream example. */
      input:user-invalid + span::before {
        content: '';
        color: red;
      }

      input:user-valid + span::before {
        content: '';
        color: green;
      }

      button {
        width: 60%;
        margin: 0 auto;
      }
    </style>
  </head>
  <body>
    <form>
      <fieldset>
        <legend>Feedback form</legend>
        <p>Required fields are labelled with "required".</p>
        <div>
          <label for="fname">First name: </label>
          <input id="fname" name="fname" type="text" required>
          <span></span>
        </div>
        <div>
          <label for="lname">Last name: </label>
          <input id="lname" name="lname" type="text" required>
          <span></span>
        </div>
        <div>
          <label for="email">Email address (include if you want a response): </label>
          <input id="email" name="email" type="email">
          <span></span>
        </div>
        <div><button>Submit</button></div>
      </fieldset>
    </form>
  </body>
</html>
```
```java /* * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * * path_to_url * * Unless required by applicable law or agreed to in writing, * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * specific language governing permissions and limitations */ package org.apache.weex.util; import android.app.Activity; import android.support.v7.widget.RecyclerView; import android.test.InstrumentationTestCase; import android.test.TouchUtils; import android.text.TextUtils; import android.util.Log; import android.view.View; import android.view.ViewGroup; import android.widget.EditText; import android.widget.ScrollView; import com.taobao.weex.ui.view.WXTextView; import java.util.ArrayList; /** * Created by admin on 16/3/29. */ public class ViewUtil{ public static ArrayList<View> findViewWithText(ViewGroup vg, String text){ ArrayList<View> out = new ArrayList<View>(); if(null != vg && !TextUtils.isEmpty(text)){ vg.findViewsWithText(out, text, View.FIND_VIEWS_WITH_TEXT); } if(out.size() == 0){ ArrayList<View> wxTextViewList = new ArrayList<View>(); wxTextViewList = getAllChildViews((View)vg); for (View wxText:wxTextViewList) { if(wxText instanceof WXTextView){ String value = ((WXTextView) wxText).getText().toString(); if(value.contains(text)){ Log.e("TestFlow", "find target:" + text + "|" + "actualVale=" + value); out.add(wxText); } } } } return out; } public static ArrayList<View> getAllChildViews(Activity activity) { View view = activity.getWindow().getDecorView(); return getAllChildViews(view); } public static ArrayList<View> getAllChildViews(View view) { ArrayList<View> allchildren = new ArrayList<View>(); if (view instanceof ViewGroup) { ViewGroup vp = (ViewGroup) view; for (int i = 0; i < vp.getChildCount(); i++) { View viewchild = vp.getChildAt(i); allchildren.add(viewchild); allchildren.addAll(getAllChildViews(viewchild)); } } return 
allchildren; } public static ArrayList<View> getAllChildViews(ViewGroup view) { ArrayList<View> allchildren = new ArrayList<View>(); ViewGroup vp = view; for (int i = 0; i < vp.getChildCount(); i++) { View viewchild = vp.getChildAt(i); allchildren.add(viewchild); allchildren.addAll(getAllChildViews(viewchild)); } return allchildren; } /** * scroller or RecyclerView * @param view * @return */ public static View getFirstChildScrollableView(ViewGroup view){ View viewResult = null; ArrayList<View> allViews = new ArrayList<View>(); if(null != view){ allViews= getAllChildViews(view); for(View eachView : allViews){ if(eachView instanceof ScrollView || eachView instanceof RecyclerView){ return eachView; } } } return viewResult; } public static ArrayList<View> getAllEditTextViews(View view) { ArrayList<View> allchildren = new ArrayList<View>(); if (view instanceof ViewGroup) { ViewGroup vp = (ViewGroup) view; for (int i = 0; i < vp.getChildCount(); i++) { View viewchild = vp.getChildAt(i); if(viewchild instanceof EditText){ allchildren.add(viewchild); allchildren.addAll(getAllChildViews(viewchild)); } } } return allchildren; } /* * */ public static int getWidth(View view) { int w = View.MeasureSpec.makeMeasureSpec(0,View.MeasureSpec.UNSPECIFIED); int h = View.MeasureSpec.makeMeasureSpec(0,View.MeasureSpec.UNSPECIFIED); view.measure(w, h); return (view.getMeasuredWidth()); } /* * */ public static int getHeight(View view) { int w = View.MeasureSpec.makeMeasureSpec(0,View.MeasureSpec.UNSPECIFIED); int h = View.MeasureSpec.makeMeasureSpec(0,View.MeasureSpec.UNSPECIFIED); view.measure(w, h); return (view.getMeasuredHeight()); } public static void dragQuarterScreenUp(InstrumentationTestCase test, Activity activity) { TouchUtils.dragQuarterScreenUp(test, activity); } } ```
Cheust is a commune in the Hautes-Pyrénées department in south-western France. See also Communes of the Hautes-Pyrénées department References Communes of Hautes-Pyrénées
Fred Lange-Nielsen (28 September 1919 – 28 December 1989) was a Norwegian doctor and jazz musician (bass, vocals), known in the early Oslo environments, and from several recordings. Lange-Nielsen and Anton Jervell were the first to describe Jervell and Lange-Nielsen syndrome (JLNS) in 1953. He played in String Swing (1937–1941), the quartet Hot Dogs, in Rowland Greenberg's orchestra (1941), the Oslo Swing Club's orchestra, the studio group Seven Cheerful and with Cecil Aagaard's "Swingsters" and quintet Sew-We-La (1950–1953). References 1919 births 1989 deaths Norwegian jazz musicians Norwegian cardiologists 20th-century Norwegian physicians 20th-century Norwegian musicians
Club Rivadavia is an Argentine football club from Lincoln, Buenos Aires. The squad currently plays in Torneo Argentino A, the regionalised third division of the Argentine football league system. At the end of the 2005–06 season Rivadavia was promoted to Argentino A via the promotion/relegation playoff. The team beat Racing de Olavarría 2–1 on aggregate to secure promotion. See also List of football clubs in Argentina Argentine football league system External links Official website Association football clubs established in 1915 Football clubs in Buenos Aires Province Lincoln, Buenos Aires 1915 establishments in Argentina
United Hebrew Congregation may refer to: England Blackpool United Hebrew Congregation, Blackpool, England Newcastle United Hebrew Congregation, in the former Leazes Park Synagogue, Newcastle upon Tyne, England United Hebrew Congregation (closed in 2000), Torquay, England Other places United Hebrew Congregation (UHC Singapore), part of the History of the Jews in Singapore United Hebrew Congregation (Johannesburg, South Africa); see Oxford Shul United Hebrew Congregation (Chesterfield, Missouri), United States See also United Synagogue, a union of British Orthodox Jewish synagogues
```xml
/* eslint-disable @typescript-eslint/explicit-function-return-type */
/* eslint-disable @typescript-eslint/no-explicit-any */
import * as React from 'react';
import { useAtomValue } from 'jotai';
import {
  IconButton,
  ScrollablePane,
  ScrollbarVisibility,
  Text,
} from 'office-ui-fabric-react';
import { Stack } from 'office-ui-fabric-react/lib/Stack';
import { globalState } from '../../jotai/atoms';
import { IFlightTrackerListItem } from '../../models';
import {
  FlightTrackerListItem,
} from '../FlightTrackerList/FlightTrackerListItem';
import { FlightTrackerNoData } from '../FlightTrackerList/FlightTrackerNoData';
import {
  useFlightTrackerStyles,
} from '../FlightTrackerList/useFlightTrackerStyles';

export interface IShowListProps {
  // Flights to render, one row per item.
  listItems: IFlightTrackerListItem[];
  // When false the component renders nothing at all.
  showList: boolean;
  // Invoked when the list is scrolled near its end (infinite-scroll load).
  onScroll: () => Promise<void>;
  // Invoked by the refresh button.
  onRefresh: () => Promise<void>;
}

/**
 * Scrollable flight list with a header (airport + information type), a
 * refresh button, infinite-scroll loading past 90% scroll depth, and a
 * "no data" placeholder when the list is empty.
 */
export const ShowList: React.FunctionComponent<IShowListProps> = (props: React.PropsWithChildren<IShowListProps>) => {
  const { listItems, showList, onScroll, onRefresh } = props;
  const {
    noDataContainerStyles,
    listHeaderStyles,
    scollableContainerStyles,
    stackContainerStyles,
  } = useFlightTrackerStyles();

  const appState = useAtomValue(globalState);
  const {
    selectedAirPort,
    selectedInformationType,
  } = appState;
  // NOTE(review): React.createRef during render creates a fresh ref object on
  // every render pass — useRef would be the conventional choice here; confirm
  // before changing, since onScrollList lists it as a dependency.
  const scrollablePaneRef: any = React.createRef<HTMLDivElement>();
  // Guard so overlapping scroll events don't trigger concurrent loads.
  const [isScrolling, setIsScrolling] = React.useState<boolean>(false);

  // Header text: "<city>, <airport> - <type>" when a municipality is known.
  const listHeader = React.useMemo(() => {
    if (selectedAirPort?.municipality) {
      return `${selectedAirPort?.municipality}, ${selectedAirPort?.name} - ${selectedInformationType}`;
    } else {
      return `${selectedAirPort?.name} - ${selectedInformationType}`;
    }
  }, [selectedAirPort, selectedInformationType]);

  // Current scroll depth of the container as a percentage (0-100).
  const getScrollPosition = React.useCallback((divContainerRef: any) => {
    const { scrollTop, scrollHeight, clientHeight } = divContainerRef;
    const percentNow = (scrollTop / (scrollHeight - clientHeight)) * 100;
    return percentNow;
  }, []);

  // Fires onScroll (load more) once the user passes 90% of the list.
  const onScrollList = React.useCallback(async () => {
    if (isScrolling) {
      return;
    }
    setIsScrolling(true);
    const scrollPosition = getScrollPosition(scrollablePaneRef.current.contentContainer);
    if (scrollPosition > 90) {
      await onScroll();
    }
    setIsScrolling(false);
  }, [onScroll, isScrolling, getScrollPosition, scrollablePaneRef]);

  if (!showList) {
    return null;
  }

  return (
    <>
      <Stack tokens={{ childrenGap: 25 }} styles={stackContainerStyles}>
        <Stack verticalAlign="center" horizontal horizontalAlign="space-between">
          <Text styles={listHeaderStyles} variant="large">
            {listHeader}
          </Text>
          <Stack style={{ paddingRight: 20 }}>
            <IconButton
              iconProps={{ iconName: "Refresh" }}
              title="Refresh"
              ariaLabel="Refresh"
              onClick={async (ev) => {
                ev.preventDefault();
                await onRefresh();
              }}
            />
          </Stack>
        </Stack>
        <ScrollablePane
          scrollbarVisibility={ScrollbarVisibility.auto}
          styles={scollableContainerStyles}
          onScroll={onScrollList}
          componentRef={scrollablePaneRef}
        >
          {listItems && listItems.length ? (
            listItems.map((item, index) => {
              return (
                <FlightTrackerListItem
                  key={index}
                  flights={item}
                  flightInformationType={selectedInformationType}
                />
              );
            })
          ) : (
            <Stack horizontalAlign="center" verticalAlign="center" styles={noDataContainerStyles}>
              <FlightTrackerNoData />
            </Stack>
          )}
        </ScrollablePane>
      </Stack>
    </>
  );
};
```
Julio Escoto (born February 28, 1944 in San Pedro Sula) is a Honduran short-story writer, novelist and essayist. Notable novels include El Arbol de los Panuelos, Días de Ventisca, Noches de Huracán, El General Morazán marcha a batallar desde la Muerte, Rey del Albor. Madrugada, and his most recently published (2007), El Génesis en Santa Cariba. External links Literature Honduran male writers 1944 births Living people International Writing Program alumni
```go
// Package accounts implements the web routes that let a cozy instance act as
// an OAuth client towards external services (konnector accounts).
package accounts

import (
	"encoding/json"
	"errors"
	"net/http"
	"net/url"

	"github.com/cozy/cozy-stack/model/account"
	"github.com/cozy/cozy-stack/model/instance"
	"github.com/cozy/cozy-stack/model/instance/lifecycle"
	"github.com/cozy/cozy-stack/model/permission"
	"github.com/cozy/cozy-stack/model/session"
	"github.com/cozy/cozy-stack/pkg/config/config"
	"github.com/cozy/cozy-stack/pkg/consts"
	"github.com/cozy/cozy-stack/pkg/couchdb"
	"github.com/cozy/cozy-stack/pkg/jsonapi"
	"github.com/cozy/cozy-stack/pkg/logger"
	"github.com/cozy/cozy-stack/web/auth"
	"github.com/cozy/cozy-stack/web/middlewares"
	"github.com/cozy/cozy-stack/web/oidc"
	jwt "github.com/golang-jwt/jwt/v5"
	"github.com/labstack/echo/v4"
)

// apiAccount wraps an account.Account so it can be rendered as a JSON-API
// resource object.
type apiAccount struct {
	*account.Account
}

// MarshalJSON serializes the embedded account directly (no wrapper fields).
func (a *apiAccount) MarshalJSON() ([]byte, error) { return json.Marshal(a.Account) }

// Relationships implements jsonapi.Object; accounts expose none.
func (a *apiAccount) Relationships() jsonapi.RelationshipMap { return nil }

// Included implements jsonapi.Object; accounts include no sub-resources.
func (a *apiAccount) Included() []jsonapi.Object { return nil }

// Links returns the self link of the account document.
func (a *apiAccount) Links() *jsonapi.LinksList {
	return &jsonapi.LinksList{Self: "/data/" + consts.Accounts + "/" + a.ID()}
}

// start begins the OAuth dance: it persists a state holder (so redirect can
// later validate the callback) and redirects the user to the provider's
// authorization URL.
func start(c echo.Context) error {
	instance := middlewares.GetInstance(c)

	accountTypeID := c.Param("accountType")
	accountType, err := account.TypeInfo(accountTypeID, instance.ContextName)
	if err != nil {
		return err
	}

	state, err := getStorage().Add(&stateHolder{
		InstanceDomain: instance.Domain,
		AccountType:    accountType.ServiceID(),
		ClientState:    c.QueryParam("state"),
		Nonce:          c.QueryParam("nonce"),
		Slug:           c.QueryParam("slug"),
	})
	if err != nil {
		return err
	}

	url, err := accountType.MakeOauthStartURL(instance, state, c.QueryParams())
	if err != nil {
		return err
	}
	return c.Redirect(http.StatusSeeOther, url)
}

// redirectToApp sends the user back to the app identified by slug (the home
// app when slug is empty), forwarding the OAuth outcome as query parameters.
// Only non-empty values are added to the query string; acc may be nil when
// the flow failed or was handled elsewhere.
func redirectToApp(
	c echo.Context,
	inst *instance.Instance,
	acc *account.Account,
	clientState, slug, connID, connDeleted, errorMessage string,
) error {
	if slug == "" {
		slug = consts.HomeSlug
	}
	u := inst.SubDomain(slug)
	vv := &url.Values{}
	if acc != nil {
		vv.Add("account", acc.ID())
	}
	if clientState != "" {
		vv.Add("state", clientState)
	}
	if connID != "" {
		vv.Add("connection_id", connID)
	}
	if connDeleted != "" {
		vv.Add("connection_deleted", connDeleted)
	}
	if errorMessage != "" {
		vv.Add("error", errorMessage)
	}
	u.RawQuery = vv.Encode()
	return c.Redirect(http.StatusSeeOther, u.String())
}

// redirect is the redirect_uri endpoint passed to oauth services
// it should create the account.
// middlewares.NeedInstance is not applied before this handler
// it needs to handle both
// - with instance redirect
// - without instance redirect
func redirect(c echo.Context) error {
	accessCode := c.QueryParam("code")
	accessToken := c.QueryParam("access_token")
	accountTypeID := c.Param("accountType")
	// The instance may be resolvable from the Host header; when it is not,
	// it is recovered below from the stored state holder.
	i, _ := lifecycle.GetInstance(c.Request().Host)

	var clientState, connID, connDeleted, slug string
	var acc *account.Account
	connID = c.QueryParam("connection_id")
	connDeleted = c.QueryParam("connection_deleted")

	if accessToken != "" {
		// Implicit-grant style callback: the provider handed us the token
		// directly; this only works when the instance is known from the Host.
		if i == nil {
			return echo.NewHTTPError(http.StatusBadRequest,
				"using ?access_token with instance-less redirect")
		}
		acc = &account.Account{
			AccountType: accountTypeID,
			Oauth: &account.OauthInfo{
				AccessToken: accessToken,
			},
		}
	} else {
		// Authorization-code flow: validate the opaque state created by start.
		stateCode := c.QueryParam("state")
		state := getStorage().Find(stateCode)
		if state == nil ||
			state.AccountType != accountTypeID ||
			(i != nil && state.InstanceDomain != i.Domain) {
			return errors.New("bad state")
		}
		if i == nil {
			var err error
			i, err = lifecycle.GetInstance(state.InstanceDomain)
			if err != nil {
				return errors.New("bad state")
			}
		}

		clientState = state.ClientState
		slug = state.Slug

		// path_to_url#handlingresponse
		if c.QueryParam("error") == "access_denied" {
			return redirectToApp(c, i, nil, clientState, slug, connID, connDeleted, "access_denied")
		}

		accountType, err := account.TypeInfo(accountTypeID, i.ContextName)
		if err != nil {
			return err
		}

		// Webview (manage/reconnect) flows do not create an account here.
		if state.WebviewFlow {
			return redirectToApp(c, i, nil, clientState, slug, connID, connDeleted, "")
		}

		if accountType.TokenEndpoint == "" {
			// No token endpoint: store the raw callback query (minus state)
			// for the konnector to use directly.
			params := c.QueryParams()
			params.Del("state")
			acc = &account.Account{
				AccountType: accountTypeID,
				Oauth: &account.OauthInfo{
					ClientID:     accountType.ClientID,
					ClientSecret: accountType.ClientSecret,
					Query:        &params,
				},
			}
		} else {
			// Standard flow: exchange the authorization code for tokens.
			acc, err = accountType.RequestAccessToken(i, accessCode, stateCode, state.Nonce)
			if err != nil {
				return err
			}
		}
	}

	// Reuse an existing account document tied to the same connection, if any.
	if connID != "" {
		if existingAccount, err := findAccountWithSameConnectionID(i, connID); err == nil {
			acc = existingAccount
		}
	}

	// Persist the account unless we reused an already-saved document.
	if acc.ID() == "" {
		if err := couchdb.CreateDoc(i, acc); err != nil {
			return err
		}
	}
	c.Set("instance", i.WithContextualDomain(c.Request().Host))
	return redirectToApp(c, i, acc, clientState, slug, connID, connDeleted, "")
}

// findAccountWithSameConnectionID scans the instance's account documents for
// one whose stored OAuth query carries the given connection_id.
// NOTE(review): the scan is capped at 1000 docs — accounts beyond that limit
// would not be found; confirm this bound is acceptable.
func findAccountWithSameConnectionID(inst *instance.Instance, connectionID string) (*account.Account, error) {
	var accounts []*account.Account
	req := &couchdb.AllDocsRequest{Limit: 1000}
	err := couchdb.GetAllDocs(inst, consts.Accounts, req, &accounts)
	if err != nil {
		return nil, err
	}
	for _, a := range accounts {
		if a.Oauth == nil || a.Oauth.Query == nil {
			continue
		}
		connID := a.Oauth.Query.Get("connection_id")
		if connID == connectionID {
			return a, nil
		}
	}
	return nil, errors.New("not found")
}

// refresh is an internal route used by konnectors to refresh accounts
// it requires permissions GET:io.cozy.accounts:accountid
func refresh(c echo.Context) error {
	instance := middlewares.GetInstance(c)
	accountid := c.Param("accountid")
	var acc account.Account
	if err := couchdb.GetDoc(instance, consts.Accounts, accountid, &acc); err != nil {
		return err
	}
	if err := middlewares.Allow(c, permission.GET, &acc); err != nil {
		return err
	}
	accountType, err := account.TypeInfo(acc.AccountType, instance.ContextName)
	if err != nil {
		return err
	}
	// Ask the provider for fresh tokens, then persist the updated document.
	err = accountType.RefreshAccount(acc)
	if err != nil {
		return err
	}
	err = couchdb.UpdateDoc(instance, &acc)
	if err != nil {
		return err
	}
	return jsonapi.Data(c, http.StatusOK, &apiAccount{&acc}, nil)
}

// manage redirects the user to the BI webview allowing them to manage their
// bank connections
func manage(c echo.Context) error {
	instance := middlewares.GetInstance(c)
	accountid := c.Param("accountid")
	var acc account.Account
	if err := couchdb.GetDoc(instance, consts.Accounts, accountid, &acc); err != nil {
		return err
	}
	accountType, err := account.TypeInfo(acc.AccountType, instance.ContextName)
	if err != nil {
		return err
	}
	// WebviewFlow marks the callback so redirect() skips account creation.
	state, err := getStorage().Add(&stateHolder{
		InstanceDomain: instance.Domain,
		AccountType:    accountType.ServiceID(),
		ClientState:    c.QueryParam("state"),
		Slug:           c.QueryParam("slug"),
		WebviewFlow:    true,
	})
	if err != nil {
		return err
	}
	url, err := accountType.MakeManageURL(instance, state, c.QueryParams())
	if err != nil {
		return err
	}
	return c.Redirect(http.StatusSeeOther, url)
}

// reconnect can be used to reconnect a user from BI
func reconnect(c echo.Context) error {
	instance := middlewares.GetInstance(c)
	accountid := c.Param("accountid")
	var acc account.Account
	if err := couchdb.GetDoc(instance, consts.Accounts, accountid, &acc); err != nil {
		return err
	}
	accountType, err := account.TypeInfo(acc.AccountType, instance.ContextName)
	if err != nil {
		return err
	}
	state, err := getStorage().Add(&stateHolder{
		InstanceDomain: instance.Domain,
		AccountType:    accountType.ServiceID(),
		ClientState:    c.QueryParam("state"),
		Slug:           c.QueryParam("slug"),
		WebviewFlow:    true,
	})
	if err != nil {
		return err
	}
	url, err := accountType.MakeReconnectURL(instance, state, c.QueryParams())
	if err != nil {
		return err
	}
	return c.Redirect(http.StatusSeeOther, url)
}

// checkLogin is a middleware that requires an authenticated user. It accepts
// an existing cozy session, a one-time ?session_code=, or an OIDC ?id_token=,
// and opens a fresh short-run session when the user was not already logged in.
func checkLogin(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		inst := middlewares.GetInstance(c)
		sess, isLoggedIn := middlewares.GetSession(c)
		wasLoggedIn := isLoggedIn
		if sess != nil && sess.ShortRun {
			// XXX it's better to create a new session in that case, as the
			// existing short session can easily timeout between now and when
			// the user will come back.
			wasLoggedIn = false
		}
		if code := c.QueryParam("session_code"); code != "" {
			// XXX we should always clear the session code to avoid it being
			// reused, even if the user is already logged in and we don't want to
			// create a new session
			if checked := inst.CheckAndClearSessionCode(code); checked {
				isLoggedIn = true
			}
		}
		if !isLoggedIn && checkIDToken(c) {
			isLoggedIn = true
		}
		if !isLoggedIn {
			return echo.NewHTTPError(http.StatusForbidden)
		}
		if !wasLoggedIn {
			// Open a new short-run session and record it in the login history;
			// a history failure is logged but does not block the request.
			sessionID, err := auth.SetCookieForNewSession(c, session.ShortRun)
			req := c.Request()
			if err == nil {
				if err = session.StoreNewLoginEntry(inst, sessionID, "", req, "session_code", false); err != nil {
					inst.Logger().Errorf("Could not store session history %q: %s", sessionID, err)
				}
			}
		}
		return next(c)
	}
}

// checkIDToken returns true when the request carries a valid OIDC id_token
// whose subject matches the instance. It requires the context's OIDC config
// to allow OAuth tokens and to declare a JWK URL for signature verification.
func checkIDToken(c echo.Context) bool {
	inst := middlewares.GetInstance(c)
	cfg, ok := config.GetOIDC(inst.ContextName)
	if !ok {
		return false
	}
	allowOAuthToken, _ := cfg["allow_oauth_token"].(bool)
	if !allowOAuthToken {
		return false
	}
	idTokenKeyURL, _ := cfg["id_token_jwk_url"].(string)
	if idTokenKeyURL == "" {
		return false
	}
	keys, err := oidc.GetIDTokenKeys(idTokenKeyURL)
	if err != nil {
		return false
	}
	idToken := c.QueryParam("id_token")
	token, err := jwt.Parse(idToken, func(token *jwt.Token) (interface{}, error) {
		return oidc.ChooseKeyForIDToken(keys, token)
	})
	if err != nil {
		logger.WithNamespace("oidc").Errorf("Error on jwt.Parse: %s", err)
		return false
	}
	if !token.Valid {
		logger.WithNamespace("oidc").Errorf("Invalid token: %#v", token)
		return false
	}
	claims := token.Claims.(jwt.MapClaims)
	if claims["sub"] == "" || claims["sub"] != inst.OIDCID {
		inst.Logger().WithNamespace("oidc").Errorf("Invalid sub: %s != %s", claims["sub"], inst.OIDCID)
		return false
	}
	return true
}

// Routes setups routing for cozy-as-oauth-client routes
// Careful, the normal middlewares NeedInstance and LoadSession are not applied
// to this group in web/routing
func Routes(router *echo.Group) {
	router.GET("/:accountType/start", start, middlewares.NeedInstance, middlewares.LoadSession, checkLogin)
	router.GET("/:accountType/redirect", redirect)
	router.GET("/:accountType/:accountid/manage", manage, middlewares.NeedInstance, middlewares.LoadSession, checkLogin)
	router.POST("/:accountType/:accountid/refresh", refresh, middlewares.NeedInstance)
	router.GET("/:accountType/:accountid/reconnect", reconnect, middlewares.NeedInstance, middlewares.LoadSession, checkLogin)
}
```
Bernard Bonnin (September 8, 1938 – November 21, 2009) was a Spanish Filipino actor. Early life He was born on September 8, 1938, to parents Juan S. Bonnin, a pure Spaniard from Palma de Mallorca, and Lina Zayco, a native of Himamaylan, Negros Occidental. He moved to Manila at the age of 14 to complete his secondary education in San Beda. Career He rose to fame with the 1965 movie, Palos. In 2008, ABS-CBN remade Palos into a teleserye starring Jake Cuenca, with Bonnin returning in a supporting role. Bonnin appeared in over 100 films, including Gagamba, Ako ang Lalagot sa Hininga Mo, Code Name: Bomba, Target: Captain Karate. His first film was Ay Pepita. He played action-movie villain roles in Ako ang Tatapos sa Araw Mo (1989), Ibabaon Kita sa Lupa (1990), Dudurugin Kita ng Bala Ko (1992), Nandito Ako (1994), and Duwelo (1996). Filmography Film Walang Takot (1958) Anak ni Waray (1958) Kung Ako'y Mahal Mo - Virgilio (1960) Sandata at Pangako (1961) Alyas Palos - Palos (1961) Kambal Na Baril (1962) Jam Session - Dondoy (1962) Bakas ng Gagamba - Gagamba (1962) Dapit-Hapon: Oras ng Pagtutuos (1963) Kilabot Maghiganti (1963) Palos Kontra Gagamba (1963) Carioca (1963) Ang Mga Lawin (1963) Dakpin si Pedro Navarro! (1963) Ikaw Ako Ngayon Bukas (1963) Ang Lihim ni Gagamba (1964) 3 Musketeras (1964) Walang Duwag Na Bisaya (1965) Sandalyas ni Zafira (1965) Oro Blanco (1965) Doble 45 (1965) Tagisan ng Mga Agimat (1965) Batas ng .45 (1965) Lambat - Agent 707 (1965) La Sombra (1965) Pedrong Hunyango (1965) Karate sa Karate (1965) Doble Talim (1965) Wanted: Johnny L (1966) Palos: Counterspy (1966) - Palos Philcag in Vietnam (1967) Target Captain Karate (1968) Palos Strikes Again (1968) Bart Salamanca (1968) Gagamba at si Scorpio (1969) - Gagamba The Arizona Kid (1970) Women in Cages (1971) - Acosta Tatlong Patak ng Dugo ni Adan (1980) Alyas Palos II (1982) - Palos Buy One, Take One (1988) Ako ang Tatapos sa Araw Mo! 
(1989) - Martin Ibabaon Kita sa Lupa (1990) - Mayor Roman Romualdez Apoy sa Lupang Hinirang (1990) Hanggang Kailan Kita Papatay (1990) - Garrido Iglesia Alyas Pogi: Birador ng Nueva Ecija (1990) - Don Pepe Alyas Pogi 2 (1992) - Don Pepe Alyas Hunyango (1992) - Acosta Dudurugin Kita ng Bala Ko (1992) - Victor Riduque Big Boy Bato: Kilabot ng Kankaloo (1992) - Big Daddy Nandito Ako (1994) - Don Rodrigo Braganza Ikaw Pa ... Eh Love Kita (1995) - Major Morales Seth Corteza (1996) Duwelo (1996) - Eduardo Roldan Alamid: Ang Alamat (1998) Code Name: Bomba (1998) - Congressman Bambino Moises Archangel 2: Tapusin Natin ang Labanan (1998) Ako ang Lalagot sa Hininga Mo (1999) Masikip sa Dibdib (2004) Television Palos - Vittorio Canavaro aka Ninong (2008; ABS-CBN) Death Bonnin died at the age of 71 on November 21, 2009 at the Philippine Heart Center in Quezon City following multiple organ failure due to diabetes. He is buried at the Heritage Memorial Park in Taguig. References 1938 births 2009 deaths Deaths from diabetes Male actors from Negros Occidental Filipino people of Spanish descent Filipino television personalities Hiligaynon people Burials at The Heritage Park Filipino male film actors
The Big Shave is a 1967 six-minute body horror short film directed by Martin Scorsese. It is also known as Viet '67. Home media In May 2020, it was made available on DVD/Blu-ray by The Criterion Collection as part of a collection of his short films from the 60s and 70s. Synopsis Peter Bernuth stars as the recipient of the title shave, repeatedly shaving away hair, then skin, in an increasingly bloody and graphic bathroom scene. Prompted by the film's alternative title, many film critics have interpreted the young man's process of self-mutilation as a metaphor for the self-destructive involvement of the United States in the Vietnam War. Production The music accompanying the film is Bunny Berigan's "I Can't Get Started". The film was produced at New York University's Tisch School of the Arts, shot on Agfa color film donated by Palais des Beaux Arts. The short's use of violence, music and montage would become trademarks of Scorsese's future work. Cultural references Cutting Moments (1997) has a scene which greatly resembles "The Big Shave". American Dad!: In "The Best Christmas Story Never", Stan travels back in time to 1970 and meets a young Martin Scorsese. When Stan tells the director that he's a big fan, Scorsese says, incredulously, "You saw my six minute film about a guy shaving?!" Dave Hause, singer of The Loved Ones, released a video for the song "Time Will Tell" from his debut solo album Resolutions that pays homage to "The Big Shave". PEARLIES A two-minute short horror film directed by Jonathan Gularte Ramirez, who stars as himself brushing his teeth in an increasingly bloody and graphic fashion. The short is directly inspired by "The Big Shave". 
See also List of American films of 1967 Counterculture of the 1960s Experimental film References External links The short on Internet Archive Criterion article about the early shorts including The Big Shave 1960s avant-garde and experimental films 1967 short films Films without speech Short films directed by Martin Scorsese American short films Self-harm in fiction American student films 1960s English-language films
```python # # # path_to_url # # Unless required by applicable law or agreed to in writing, software # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import entities_pb2 from google.protobuf import timestamp_pb2 from google.protobuf import json_format class ActionUtils: @staticmethod def parse_from_csv_line(line): parts = line.split(",") if len(parts) < 3 or len(parts) > 4: return False user_id = -1 item_id = -1 timestamp_seconds = -1 try: user_id = int(parts[1]) except ValueError: print("Could not parse: " + parts[1]) return False try: timestamp_seconds = int(parts[0]) except ValueError: print("Could not parse: " + parts[0]) return False if len(parts) == 4: try: item_id = int(parts[3]) except ValueError: print("Could not parse: " + parts[3]) return False timestamp = timestamp_pb2.Timestamp() timestamp.seconds = timestamp_seconds action = entities_pb2.Action() action.action = entities_pb2.Action.ActionType.Value(parts[2]) action.user_id = user_id if item_id != -1: action.item_id = item_id action.time.seconds = timestamp_seconds return action @staticmethod def parse_from_csv(file): with open(file) as fp: for _, line in enumerate(fp): yield ActionUtils.parse_from_csv_line(line) @staticmethod def encode_action(action): return action.SerializeToString() @staticmethod def decode_action(str): action = entities_pb2.Action() return action.ParseFromString(str) @staticmethod def encode_action_as_json(action): return json_format.MessageToJson(action) @staticmethod def decode_action_from_json(str): action = entities_pb2.Action() return json_format.Parse(str, action) ```
USS Valeda (SP-592) was a United States Navy patrol vessel in commission from 1917 to 1919. Valeda was built as a wooden-hulled private cabin motor launch of the same name by the Stamford Motor Company at Stamford, Connecticut, in 1908. On 9 July 1917, the U.S. Navy acquired her from her owner, F.B. Richards of Cleveland, Ohio, for use as a section patrol boat during World War I. She was commissioned as USS Valeda (SP-592) on 12 July 1917 at Rockland, Maine. Assigned to the Rockland Section in the 1st Naval District in northern New England, Valeda carried out harbor and harbor entrance patrol duties at Rockland for the rest of World War I and into early 1919. Valeda was decommissioned at Baker's Yacht Basin at Quincy, Massachusetts, on 4 February 1919, stricken from the Navy List on 1 October 1919, and sold to J. R. C. McBeath of Atlantic, Massachusetts, on 2 January 1920. References NavSource Online: Section Patrol Craft Photo Archive Valeda (SP 592) Patrol vessels of the United States Navy World War I patrol vessels of the United States Ships built in Stamford, Connecticut 1908 ships
```php
<?php
/*
 * 
 * 
 * path_to_url
 * 
 * Unless required by applicable law or agreed to in writing, software
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 */

namespace Google\Service\DataLabeling;

/**
 * Generated model for a video classification annotation: pairs an annotation
 * spec (the label) with the time segment of the video it applies to.
 *
 * The protected $*Type / $*DataType pairs below are the metadata convention
 * used by the \Google\Model base class — presumably consumed by its magic
 * (de)serialization; do not rename them (generated code).
 */
class GoogleCloudDatalabelingV1beta1VideoClassificationAnnotation extends \Google\Model
{
  protected $annotationSpecType = GoogleCloudDatalabelingV1beta1AnnotationSpec::class;
  protected $annotationSpecDataType = '';
  protected $timeSegmentType = GoogleCloudDatalabelingV1beta1TimeSegment::class;
  protected $timeSegmentDataType = '';

  /**
   * @param GoogleCloudDatalabelingV1beta1AnnotationSpec
   */
  public function setAnnotationSpec(GoogleCloudDatalabelingV1beta1AnnotationSpec $annotationSpec)
  {
    $this->annotationSpec = $annotationSpec;
  }
  /**
   * @return GoogleCloudDatalabelingV1beta1AnnotationSpec
   */
  public function getAnnotationSpec()
  {
    return $this->annotationSpec;
  }
  /**
   * @param GoogleCloudDatalabelingV1beta1TimeSegment
   */
  public function setTimeSegment(GoogleCloudDatalabelingV1beta1TimeSegment $timeSegment)
  {
    $this->timeSegment = $timeSegment;
  }
  /**
   * @return GoogleCloudDatalabelingV1beta1TimeSegment
   */
  public function getTimeSegment()
  {
    return $this->timeSegment;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
// NOTE(review): the second argument below looks corrupted (missing opening
// quote / placeholder text) — recover the original alias string from the
// upstream generated source before shipping.
class_alias(GoogleCloudDatalabelingV1beta1VideoClassificationAnnotation::class, your_sha256_hashlassificationAnnotation');
```
Zindagi Aur Toofan is a 1975 Bollywood film directed by Umesh Mathur. The music was composed by Laxmikant-Pyarelal. Cast Sajid Khan as Tony Yogeeta Bali as Nalini Rakesh Pandey as Govind Rehana Sultan as Tara Helen Sulabha Deshpande Anwar Hussain Asit Sen Songs External links 1975 films 1970s Hindi-language films Films scored by Laxmikant–Pyarelal
```java /* * * This program and the accompanying materials are made * which is available at path_to_url * */ package org.eclipse.milo.opcua.stack.core.serialization.binary; import org.eclipse.milo.opcua.stack.core.types.builtin.LocalizedText; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; public class LocalizedTextSerializationTest extends BinarySerializationFixture { @DataProvider public Object[][] getLocalizedTexts() { return new Object[][]{ {new LocalizedText(null, null)}, {new LocalizedText("locale", null)}, {new LocalizedText(null, "text")}, {LocalizedText.english("hello, world!")}, }; } @Test(dataProvider = "getLocalizedTexts", description = "LocalizedText is round-trip serializable.") public void testLocalizedText(LocalizedText localizedText) throws Exception { writer.writeLocalizedText(localizedText); LocalizedText decoded = reader.readLocalizedText(); assertEquals(decoded, localizedText); } } ```
```html <html lang="en"> <head> <title>MIPS Options - Using the GNU Compiler Collection (GCC)</title> <meta http-equiv="Content-Type" content="text/html"> <meta name="description" content="Using the GNU Compiler Collection (GCC)"> <meta name="generator" content="makeinfo 4.11"> <link title="Top" rel="start" href="index.html#Top"> <link rel="up" href="Submodel-Options.html#Submodel-Options" title="Submodel Options"> <link rel="prev" href="MicroBlaze-Options.html#MicroBlaze-Options" title="MicroBlaze Options"> <link rel="next" href="MMIX-Options.html#MMIX-Options" title="MMIX Options"> <link href="path_to_url" rel="generator-home" title="Texinfo Homepage"> <!-- Permission is granted to copy, distribute and/or modify this document any later version published by the Free Software Foundation; with the Invariant Sections being ``Funding Free Software'', the Front-Cover Texts being (a) (see below), and with the Back-Cover Texts being (b) (see below). A copy of the license is included in the section entitled (a) The FSF's Front-Cover Text is: A GNU Manual (b) The FSF's Back-Cover Text is: You have freedom to copy and modify this GNU Manual, like GNU software. 
Copies published by the Free Software Foundation raise funds for GNU development.--> <meta http-equiv="Content-Style-Type" content="text/css"> <style type="text/css"><!-- pre.display { font-family:inherit } pre.format { font-family:inherit } pre.smalldisplay { font-family:inherit; font-size:smaller } pre.smallformat { font-family:inherit; font-size:smaller } pre.smallexample { font-size:smaller } pre.smalllisp { font-size:smaller } span.sc { font-variant:small-caps } span.roman { font-family:serif; font-weight:normal; } span.sansserif { font-family:sans-serif; font-weight:normal; } --></style> </head> <body> <div class="node"> <p> <a name="MIPS-Options"></a> Next:&nbsp;<a rel="next" accesskey="n" href="MMIX-Options.html#MMIX-Options">MMIX Options</a>, Previous:&nbsp;<a rel="previous" accesskey="p" href="MicroBlaze-Options.html#MicroBlaze-Options">MicroBlaze Options</a>, Up:&nbsp;<a rel="up" accesskey="u" href="Submodel-Options.html#Submodel-Options">Submodel Options</a> <hr> </div> <h4 class="subsection">3.17.25 MIPS Options</h4> <p><a name="index-MIPS-options-1926"></a> <dl> <dt><code>-EB</code><dd><a name="index-EB-1927"></a>Generate big-endian code. <br><dt><code>-EL</code><dd><a name="index-EL-1928"></a>Generate little-endian code. This is the default for &lsquo;<samp><span class="samp">mips*el-*-*</span></samp>&rsquo; configurations. <br><dt><code>-march=</code><var>arch</var><dd><a name="index-march-1929"></a>Generate code that runs on <var>arch</var>, which can be the name of a generic MIPS ISA, or the name of a particular processor. 
The ISA names are: &lsquo;<samp><span class="samp">mips1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips2</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips3</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips4</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips32</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips32r2</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips32r3</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips32r5</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips32r6</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips64</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips64r2</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips64r3</span></samp>&rsquo;, &lsquo;<samp><span class="samp">mips64r5</span></samp>&rsquo; and &lsquo;<samp><span class="samp">mips64r6</span></samp>&rsquo;. The processor names are: &lsquo;<samp><span class="samp">4kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4km</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4kp</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4ksc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4kec</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4kem</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4kep</span></samp>&rsquo;, &lsquo;<samp><span class="samp">4ksd</span></samp>&rsquo;, &lsquo;<samp><span class="samp">5kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">5kf</span></samp>&rsquo;, &lsquo;<samp><span class="samp">20kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">24kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">24kf2_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">24kf1_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">24kec</span></samp>&rsquo;, &lsquo;<samp><span class="samp">24kef2_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">24kef1_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">34kc</span></samp>&rsquo;, 
&lsquo;<samp><span class="samp">34kf2_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">34kf1_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">34kn</span></samp>&rsquo;, &lsquo;<samp><span class="samp">74kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">74kf2_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">74kf1_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">74kf3_2</span></samp>&rsquo;, &lsquo;<samp><span class="samp">1004kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">1004kf2_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">1004kf1_1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">loongson2e</span></samp>&rsquo;, &lsquo;<samp><span class="samp">loongson2f</span></samp>&rsquo;, &lsquo;<samp><span class="samp">loongson3a</span></samp>&rsquo;, &lsquo;<samp><span class="samp">m4k</span></samp>&rsquo;, &lsquo;<samp><span class="samp">m14k</span></samp>&rsquo;, &lsquo;<samp><span class="samp">m14kc</span></samp>&rsquo;, &lsquo;<samp><span class="samp">m14ke</span></samp>&rsquo;, &lsquo;<samp><span class="samp">m14kec</span></samp>&rsquo;, &lsquo;<samp><span class="samp">octeon</span></samp>&rsquo;, &lsquo;<samp><span class="samp">octeon+</span></samp>&rsquo;, &lsquo;<samp><span class="samp">octeon2</span></samp>&rsquo;, &lsquo;<samp><span class="samp">octeon3</span></samp>&rsquo;, &lsquo;<samp><span class="samp">orion</span></samp>&rsquo;, &lsquo;<samp><span class="samp">p5600</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r2000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r3000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r3900</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r4000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r4400</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r4600</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r4650</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r4700</span></samp>&rsquo;, &lsquo;<samp><span 
class="samp">r6000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r8000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">rm7000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">rm9000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r10000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r12000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r14000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">r16000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">sb1</span></samp>&rsquo;, &lsquo;<samp><span class="samp">sr71000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr4100</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr4111</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr4120</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr4130</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr4300</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr5000</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr5400</span></samp>&rsquo;, &lsquo;<samp><span class="samp">vr5500</span></samp>&rsquo;, &lsquo;<samp><span class="samp">xlr</span></samp>&rsquo; and &lsquo;<samp><span class="samp">xlp</span></samp>&rsquo;. The special value &lsquo;<samp><span class="samp">from-abi</span></samp>&rsquo; selects the most compatible architecture for the selected ABI (that is, &lsquo;<samp><span class="samp">mips1</span></samp>&rsquo; for 32-bit ABIs and &lsquo;<samp><span class="samp">mips3</span></samp>&rsquo; for 64-bit ABIs). <p>The native Linux/GNU toolchain also supports the value &lsquo;<samp><span class="samp">native</span></samp>&rsquo;, which selects the best architecture option for the host processor. <samp><span class="option">-march=native</span></samp> has no effect if GCC does not recognize the processor. 
<p>In processor names, a final &lsquo;<samp><span class="samp">000</span></samp>&rsquo; can be abbreviated as &lsquo;<samp><span class="samp">k</span></samp>&rsquo; (for example, <samp><span class="option">-march=r2k</span></samp>). Prefixes are optional, and &lsquo;<samp><span class="samp">vr</span></samp>&rsquo; may be written &lsquo;<samp><span class="samp">r</span></samp>&rsquo;. <p>Names of the form &lsquo;<samp><var>n</var><span class="samp">f2_1</span></samp>&rsquo; refer to processors with FPUs clocked at half the rate of the core, names of the form &lsquo;<samp><var>n</var><span class="samp">f1_1</span></samp>&rsquo; refer to processors with FPUs clocked at the same rate as the core, and names of the form &lsquo;<samp><var>n</var><span class="samp">f3_2</span></samp>&rsquo; refer to processors with FPUs clocked a ratio of 3:2 with respect to the core. For compatibility reasons, &lsquo;<samp><var>n</var><span class="samp">f</span></samp>&rsquo; is accepted as a synonym for &lsquo;<samp><var>n</var><span class="samp">f2_1</span></samp>&rsquo; while &lsquo;<samp><var>n</var><span class="samp">x</span></samp>&rsquo; and &lsquo;<samp><var>b</var><span class="samp">fx</span></samp>&rsquo; are accepted as synonyms for &lsquo;<samp><var>n</var><span class="samp">f1_1</span></samp>&rsquo;. <p>GCC defines two macros based on the value of this option. The first is <code>_MIPS_ARCH</code>, which gives the name of target architecture, as a string. The second has the form <code>_MIPS_ARCH_</code><var>foo</var>, where <var>foo</var> is the capitalized value of <code>_MIPS_ARCH</code>. For example, <samp><span class="option">-march=r2000</span></samp> sets <code>_MIPS_ARCH</code> to <code>"r2000"</code> and defines the macro <code>_MIPS_ARCH_R2000</code>. <p>Note that the <code>_MIPS_ARCH</code> macro uses the processor names given above. 
In other words, it has the full prefix and does not abbreviate &lsquo;<samp><span class="samp">000</span></samp>&rsquo; as &lsquo;<samp><span class="samp">k</span></samp>&rsquo;. In the case of &lsquo;<samp><span class="samp">from-abi</span></samp>&rsquo;, the macro names the resolved architecture (either <code>"mips1"</code> or <code>"mips3"</code>). It names the default architecture when no <samp><span class="option">-march</span></samp> option is given. <br><dt><code>-mtune=</code><var>arch</var><dd><a name="index-mtune-1930"></a>Optimize for <var>arch</var>. Among other things, this option controls the way instructions are scheduled, and the perceived cost of arithmetic operations. The list of <var>arch</var> values is the same as for <samp><span class="option">-march</span></samp>. <p>When this option is not used, GCC optimizes for the processor specified by <samp><span class="option">-march</span></samp>. By using <samp><span class="option">-march</span></samp> and <samp><span class="option">-mtune</span></samp> together, it is possible to generate code that runs on a family of processors, but optimize the code for one particular member of that family. <p><samp><span class="option">-mtune</span></samp> defines the macros <code>_MIPS_TUNE</code> and <code>_MIPS_TUNE_</code><var>foo</var>, which work in the same way as the <samp><span class="option">-march</span></samp> ones described above. <br><dt><code>-mips1</code><dd><a name="index-mips1-1931"></a>Equivalent to <samp><span class="option">-march=mips1</span></samp>. <br><dt><code>-mips2</code><dd><a name="index-mips2-1932"></a>Equivalent to <samp><span class="option">-march=mips2</span></samp>. <br><dt><code>-mips3</code><dd><a name="index-mips3-1933"></a>Equivalent to <samp><span class="option">-march=mips3</span></samp>. <br><dt><code>-mips4</code><dd><a name="index-mips4-1934"></a>Equivalent to <samp><span class="option">-march=mips4</span></samp>. 
<br><dt><code>-mips32</code><dd><a name="index-mips32-1935"></a>Equivalent to <samp><span class="option">-march=mips32</span></samp>. <br><dt><code>-mips32r3</code><dd><a name="index-mips32r3-1936"></a>Equivalent to <samp><span class="option">-march=mips32r3</span></samp>. <br><dt><code>-mips32r5</code><dd><a name="index-mips32r5-1937"></a>Equivalent to <samp><span class="option">-march=mips32r5</span></samp>. <br><dt><code>-mips32r6</code><dd><a name="index-mips32r6-1938"></a>Equivalent to <samp><span class="option">-march=mips32r6</span></samp>. <br><dt><code>-mips64</code><dd><a name="index-mips64-1939"></a>Equivalent to <samp><span class="option">-march=mips64</span></samp>. <br><dt><code>-mips64r2</code><dd><a name="index-mips64r2-1940"></a>Equivalent to <samp><span class="option">-march=mips64r2</span></samp>. <br><dt><code>-mips64r3</code><dd><a name="index-mips64r3-1941"></a>Equivalent to <samp><span class="option">-march=mips64r3</span></samp>. <br><dt><code>-mips64r5</code><dd><a name="index-mips64r5-1942"></a>Equivalent to <samp><span class="option">-march=mips64r5</span></samp>. <br><dt><code>-mips64r6</code><dd><a name="index-mips64r6-1943"></a>Equivalent to <samp><span class="option">-march=mips64r6</span></samp>. <br><dt><code>-mips16</code><dt><code>-mno-mips16</code><dd><a name="index-mips16-1944"></a><a name="index-mno_002dmips16-1945"></a>Generate (do not generate) MIPS16 code. If GCC is targeting a MIPS32 or MIPS64 architecture, it makes use of the MIPS16e ASE. <p>MIPS16 code generation can also be controlled on a per-function basis by means of <code>mips16</code> and <code>nomips16</code> attributes. See <a href="Function-Attributes.html#Function-Attributes">Function Attributes</a>, for more information. <br><dt><code>-mflip-mips16</code><dd><a name="index-mflip_002dmips16-1946"></a>Generate MIPS16 code on alternating functions. 
This option is provided for regression testing of mixed MIPS16/non-MIPS16 code generation, and is not intended for ordinary use in compiling user code. <br><dt><code>-minterlink-compressed</code><br><dt><code>-mno-interlink-compressed</code><dd><a name="index-minterlink_002dcompressed-1947"></a><a name="index-mno_002dinterlink_002dcompressed-1948"></a>Require (do not require) that code using the standard (uncompressed) MIPS ISA be link-compatible with MIPS16 and microMIPS code, and vice versa. <p>For example, code using the standard ISA encoding cannot jump directly to MIPS16 or microMIPS code; it must either use a call or an indirect jump. <samp><span class="option">-minterlink-compressed</span></samp> therefore disables direct jumps unless GCC knows that the target of the jump is not compressed. <br><dt><code>-minterlink-mips16</code><dt><code>-mno-interlink-mips16</code><dd><a name="index-minterlink_002dmips16-1949"></a><a name="index-mno_002dinterlink_002dmips16-1950"></a>Aliases of <samp><span class="option">-minterlink-compressed</span></samp> and <samp><span class="option">-mno-interlink-compressed</span></samp>. These options predate the microMIPS ASE and are retained for backwards compatibility. <br><dt><code>-mabi=32</code><dt><code>-mabi=o64</code><dt><code>-mabi=n32</code><dt><code>-mabi=64</code><dt><code>-mabi=eabi</code><dd><a name="index-mabi_003d32-1951"></a><a name="index-mabi_003do64-1952"></a><a name="index-mabi_003dn32-1953"></a><a name="index-mabi_003d64-1954"></a><a name="index-mabi_003deabi-1955"></a>Generate code for the given ABI. <p>Note that the EABI has a 32-bit and a 64-bit variant. GCC normally generates 64-bit code when you select a 64-bit architecture, but you can use <samp><span class="option">-mgp32</span></samp> to get 32-bit code instead. <p>For information about the O64 ABI, see <a href="path_to_url">path_to_url</a>. <p>GCC supports a variant of the o32 ABI in which floating-point registers are 64 rather than 32 bits wide. 
You can select this combination with <samp><span class="option">-mabi=32</span></samp> <samp><span class="option">-mfp64</span></samp>. This ABI relies on the <code>mthc1</code> and <code>mfhc1</code> instructions and is therefore only supported for MIPS32R2, MIPS32R3 and MIPS32R5 processors. <p>The register assignments for arguments and return values remain the same, but each scalar value is passed in a single 64-bit register rather than a pair of 32-bit registers. For example, scalar floating-point values are returned in &lsquo;<samp><span class="samp">$f0</span></samp>&rsquo; only, not a &lsquo;<samp><span class="samp">$f0</span></samp>&rsquo;/&lsquo;<samp><span class="samp">$f1</span></samp>&rsquo; pair. The set of call-saved registers also remains the same in that the even-numbered double-precision registers are saved. <p>Two additional variants of the o32 ABI are supported to enable a transition from 32-bit to 64-bit registers. These are FPXX (<samp><span class="option">-mfpxx</span></samp>) and FP64A (<samp><span class="option">-mfp64</span></samp> <samp><span class="option">-mno-odd-spreg</span></samp>). The FPXX extension mandates that all code must execute correctly when run using 32-bit or 64-bit registers. The code can be interlinked with either FP32 or FP64, but not both. The FP64A extension is similar to the FP64 extension but forbids the use of odd-numbered single-precision registers. This can be used in conjunction with the <code>FRE</code> mode of FPUs in MIPS32R5 processors and allows both FP32 and FP64A code to interlink and run in the same process without changing FPU modes. <br><dt><code>-mabicalls</code><dt><code>-mno-abicalls</code><dd><a name="index-mabicalls-1956"></a><a name="index-mno_002dabicalls-1957"></a>Generate (do not generate) code that is suitable for SVR4-style dynamic objects. <samp><span class="option">-mabicalls</span></samp> is the default for SVR4-based systems. 
<br><dt><code>-mshared</code><dt><code>-mno-shared</code><dd>Generate (do not generate) code that is fully position-independent, and that can therefore be linked into shared libraries. This option only affects <samp><span class="option">-mabicalls</span></samp>. <p>All <samp><span class="option">-mabicalls</span></samp> code has traditionally been position-independent, regardless of options like <samp><span class="option">-fPIC</span></samp> and <samp><span class="option">-fpic</span></samp>. However, as an extension, the GNU toolchain allows executables to use absolute accesses for locally-binding symbols. It can also use shorter GP initialization sequences and generate direct calls to locally-defined functions. This mode is selected by <samp><span class="option">-mno-shared</span></samp>. <p><samp><span class="option">-mno-shared</span></samp> depends on binutils 2.16 or higher and generates objects that can only be linked by the GNU linker. However, the option does not affect the ABI of the final executable; it only affects the ABI of relocatable objects. Using <samp><span class="option">-mno-shared</span></samp> generally makes executables both smaller and quicker. <p><samp><span class="option">-mshared</span></samp> is the default. <br><dt><code>-mplt</code><dt><code>-mno-plt</code><dd><a name="index-mplt-1958"></a><a name="index-mno_002dplt-1959"></a>Assume (do not assume) that the static and dynamic linkers support PLTs and copy relocations. This option only affects <samp><span class="option">-mno-shared -mabicalls</span></samp>. For the n64 ABI, this option has no effect without <samp><span class="option">-msym32</span></samp>. <p>You can make <samp><span class="option">-mplt</span></samp> the default by configuring GCC with <samp><span class="option">--with-mips-plt</span></samp>. The default is <samp><span class="option">-mno-plt</span></samp> otherwise. 
<br><dt><code>-mxgot</code><dt><code>-mno-xgot</code><dd><a name="index-mxgot-1960"></a><a name="index-mno_002dxgot-1961"></a>Lift (do not lift) the usual restrictions on the size of the global offset table. <p>GCC normally uses a single instruction to load values from the GOT. While this is relatively efficient, it only works if the GOT is smaller than about 64k. Anything larger causes the linker to report an error such as: <p><a name="index-relocation-truncated-to-fit-_0028MIPS_0029-1962"></a> <pre class="smallexample"> relocation truncated to fit: R_MIPS_GOT16 foobar </pre> <p>If this happens, you should recompile your code with <samp><span class="option">-mxgot</span></samp>. This works with very large GOTs, although the code is also less efficient, since it takes three instructions to fetch the value of a global symbol. <p>Note that some linkers can create multiple GOTs. If you have such a linker, you should only need to use <samp><span class="option">-mxgot</span></samp> when a single object file accesses more than 64k's worth of GOT entries. Very few do. <p>These options have no effect unless GCC is generating position independent code. <br><dt><code>-mgp32</code><dd><a name="index-mgp32-1963"></a>Assume that general-purpose registers are 32 bits wide. <br><dt><code>-mgp64</code><dd><a name="index-mgp64-1964"></a>Assume that general-purpose registers are 64 bits wide. <br><dt><code>-mfp32</code><dd><a name="index-mfp32-1965"></a>Assume that floating-point registers are 32 bits wide. <br><dt><code>-mfp64</code><dd><a name="index-mfp64-1966"></a>Assume that floating-point registers are 64 bits wide. <br><dt><code>-mfpxx</code><dd><a name="index-mfpxx-1967"></a>Do not assume the width of floating-point registers. <br><dt><code>-mhard-float</code><dd><a name="index-mhard_002dfloat-1968"></a>Use floating-point coprocessor instructions. <br><dt><code>-msoft-float</code><dd><a name="index-msoft_002dfloat-1969"></a>Do not use floating-point coprocessor instructions. 
Implement floating-point calculations using library calls instead. <br><dt><code>-mno-float</code><dd><a name="index-mno_002dfloat-1970"></a>Equivalent to <samp><span class="option">-msoft-float</span></samp>, but additionally asserts that the program being compiled does not perform any floating-point operations. This option is presently supported only by some bare-metal MIPS configurations, where it may select a special set of libraries that lack all floating-point support (including, for example, the floating-point <code>printf</code> formats). If code compiled with <samp><span class="option">-mno-float</span></samp> accidentally contains floating-point operations, it is likely to suffer a link-time or run-time failure. <br><dt><code>-msingle-float</code><dd><a name="index-msingle_002dfloat-1971"></a>Assume that the floating-point coprocessor only supports single-precision operations. <br><dt><code>-mdouble-float</code><dd><a name="index-mdouble_002dfloat-1972"></a>Assume that the floating-point coprocessor supports double-precision operations. This is the default. <br><dt><code>-modd-spreg</code><dt><code>-mno-odd-spreg</code><dd><a name="index-modd_002dspreg-1973"></a><a name="index-mno_002dodd_002dspreg-1974"></a>Enable the use of odd-numbered single-precision floating-point registers for the o32 ABI. This is the default for processors that are known to support these registers. When using the o32 FPXX ABI, <samp><span class="option">-mno-odd-spreg</span></samp> is set by default. <br><dt><code>-mabs=2008</code><dt><code>-mabs=legacy</code><dd><a name="index-mabs_003d2008-1975"></a><a name="index-mabs_003dlegacy-1976"></a>These options control the treatment of the special not-a-number (NaN) IEEE 754 floating-point data with the <code>abs.</code><i>fmt</i> and <code>neg.</code><i>fmt</i> machine instructions. <p>By default or when <samp><span class="option">-mabs=legacy</span></samp> is used the legacy treatment is selected. 
In this case these instructions are considered arithmetic and avoided where correct operation is required and the input operand might be a NaN. A longer sequence of instructions that manipulate the sign bit of floating-point datum manually is used instead unless the <samp><span class="option">-ffinite-math-only</span></samp> option has also been specified. <p>The <samp><span class="option">-mabs=2008</span></samp> option selects the IEEE 754-2008 treatment. In this case these instructions are considered non-arithmetic and therefore operating correctly in all cases, including in particular where the input operand is a NaN. These instructions are therefore always used for the respective operations. <br><dt><code>-mnan=2008</code><dt><code>-mnan=legacy</code><dd><a name="index-mnan_003d2008-1977"></a><a name="index-mnan_003dlegacy-1978"></a>These options control the encoding of the special not-a-number (NaN) IEEE 754 floating-point data. <p>The <samp><span class="option">-mnan=legacy</span></samp> option selects the legacy encoding. In this case quiet NaNs (qNaNs) are denoted by the first bit of their trailing significand field being 0, whereas signalling NaNs (sNaNs) are denoted by the first bit of their trailing significand field being 1. <p>The <samp><span class="option">-mnan=2008</span></samp> option selects the IEEE 754-2008 encoding. In this case qNaNs are denoted by the first bit of their trailing significand field being 1, whereas sNaNs are denoted by the first bit of their trailing significand field being 0. <p>The default is <samp><span class="option">-mnan=legacy</span></samp> unless GCC has been configured with <samp><span class="option">--with-nan=2008</span></samp>. 
<br><dt><code>-mllsc</code><dt><code>-mno-llsc</code><dd><a name="index-mllsc-1979"></a><a name="index-mno_002dllsc-1980"></a>Use (do not use) &lsquo;<samp><span class="samp">ll</span></samp>&rsquo;, &lsquo;<samp><span class="samp">sc</span></samp>&rsquo;, and &lsquo;<samp><span class="samp">sync</span></samp>&rsquo; instructions to implement atomic memory built-in functions. When neither option is specified, GCC uses the instructions if the target architecture supports them. <p><samp><span class="option">-mllsc</span></samp> is useful if the runtime environment can emulate the instructions and <samp><span class="option">-mno-llsc</span></samp> can be useful when compiling for nonstandard ISAs. You can make either option the default by configuring GCC with <samp><span class="option">--with-llsc</span></samp> and <samp><span class="option">--without-llsc</span></samp> respectively. <samp><span class="option">--with-llsc</span></samp> is the default for some configurations; see the installation documentation for details. <br><dt><code>-mdsp</code><dt><code>-mno-dsp</code><dd><a name="index-mdsp-1981"></a><a name="index-mno_002ddsp-1982"></a>Use (do not use) revision 1 of the MIPS DSP ASE. See <a href="MIPS-DSP-Built_002din-Functions.html#MIPS-DSP-Built_002din-Functions">MIPS DSP Built-in Functions</a>. This option defines the preprocessor macro <code>__mips_dsp</code>. It also defines <code>__mips_dsp_rev</code> to 1. <br><dt><code>-mdspr2</code><dt><code>-mno-dspr2</code><dd><a name="index-mdspr2-1983"></a><a name="index-mno_002ddspr2-1984"></a>Use (do not use) revision 2 of the MIPS DSP ASE. See <a href="MIPS-DSP-Built_002din-Functions.html#MIPS-DSP-Built_002din-Functions">MIPS DSP Built-in Functions</a>. This option defines the preprocessor macros <code>__mips_dsp</code> and <code>__mips_dspr2</code>. It also defines <code>__mips_dsp_rev</code> to 2. 
<br><dt><code>-msmartmips</code><dt><code>-mno-smartmips</code><dd><a name="index-msmartmips-1985"></a><a name="index-mno_002dsmartmips-1986"></a>Use (do not use) the MIPS SmartMIPS ASE. <br><dt><code>-mpaired-single</code><dt><code>-mno-paired-single</code><dd><a name="index-mpaired_002dsingle-1987"></a><a name="index-mno_002dpaired_002dsingle-1988"></a>Use (do not use) paired-single floating-point instructions. See <a href="MIPS-Paired_002dSingle-Support.html#MIPS-Paired_002dSingle-Support">MIPS Paired-Single Support</a>. This option requires hardware floating-point support to be enabled. <br><dt><code>-mdmx</code><dt><code>-mno-mdmx</code><dd><a name="index-mdmx-1989"></a><a name="index-mno_002dmdmx-1990"></a>Use (do not use) MIPS Digital Media Extension instructions. This option can only be used when generating 64-bit code and requires hardware floating-point support to be enabled. <br><dt><code>-mips3d</code><dt><code>-mno-mips3d</code><dd><a name="index-mips3d-1991"></a><a name="index-mno_002dmips3d-1992"></a>Use (do not use) the MIPS-3D ASE. See <a href="MIPS_002d3D-Built_002din-Functions.html#MIPS_002d3D-Built_002din-Functions">MIPS-3D Built-in Functions</a>. The option <samp><span class="option">-mips3d</span></samp> implies <samp><span class="option">-mpaired-single</span></samp>. <br><dt><code>-mmicromips</code><dt><code>-mno-micromips</code><dd><a name="index-mmicromips-1993"></a><a name="index-mno_002dmmicromips-1994"></a>Generate (do not generate) microMIPS code. <p>MicroMIPS code generation can also be controlled on a per-function basis by means of <code>micromips</code> and <code>nomicromips</code> attributes. See <a href="Function-Attributes.html#Function-Attributes">Function Attributes</a>, for more information. <br><dt><code>-mmt</code><dt><code>-mno-mt</code><dd><a name="index-mmt-1995"></a><a name="index-mno_002dmt-1996"></a>Use (do not use) MT Multithreading instructions. 
<br><dt><code>-mmcu</code><dt><code>-mno-mcu</code><dd><a name="index-mmcu-1997"></a><a name="index-mno_002dmcu-1998"></a>Use (do not use) the MIPS MCU ASE instructions. <br><dt><code>-meva</code><dt><code>-mno-eva</code><dd><a name="index-meva-1999"></a><a name="index-mno_002deva-2000"></a>Use (do not use) the MIPS Enhanced Virtual Addressing instructions. <br><dt><code>-mvirt</code><dt><code>-mno-virt</code><dd><a name="index-mvirt-2001"></a><a name="index-mno_002dvirt-2002"></a>Use (do not use) the MIPS Virtualization Application Specific instructions. <br><dt><code>-mxpa</code><dt><code>-mno-xpa</code><dd><a name="index-mxpa-2003"></a><a name="index-mno_002dxpa-2004"></a>Use (do not use) the MIPS eXtended Physical Address (XPA) instructions. <br><dt><code>-mlong64</code><dd><a name="index-mlong64-2005"></a>Force <code>long</code> types to be 64 bits wide. See <samp><span class="option">-mlong32</span></samp> for an explanation of the default and the way that the pointer size is determined. <br><dt><code>-mlong32</code><dd><a name="index-mlong32-2006"></a>Force <code>long</code>, <code>int</code>, and pointer types to be 32 bits wide. <p>The default size of <code>int</code>s, <code>long</code>s and pointers depends on the ABI. All the supported ABIs use 32-bit <code>int</code>s. The n64 ABI uses 64-bit <code>long</code>s, as does the 64-bit EABI; the others use 32-bit <code>long</code>s. Pointers are the same size as <code>long</code>s, or the same size as integer registers, whichever is smaller. <br><dt><code>-msym32</code><dt><code>-mno-sym32</code><dd><a name="index-msym32-2007"></a><a name="index-mno_002dsym32-2008"></a>Assume (do not assume) that all symbols have 32-bit values, regardless of the selected ABI. This option is useful in combination with <samp><span class="option">-mabi=64</span></samp> and <samp><span class="option">-mno-abicalls</span></samp> because it allows GCC to generate shorter and faster references to symbolic addresses. 
<br><dt><code>-G </code><var>num</var><dd><a name="index-G-2009"></a>Put definitions of externally-visible data in a small data section if that data is no bigger than <var>num</var> bytes. GCC can then generate more efficient accesses to the data; see <samp><span class="option">-mgpopt</span></samp> for details. <p>The default <samp><span class="option">-G</span></samp> option depends on the configuration. <br><dt><code>-mlocal-sdata</code><dt><code>-mno-local-sdata</code><dd><a name="index-mlocal_002dsdata-2010"></a><a name="index-mno_002dlocal_002dsdata-2011"></a>Extend (do not extend) the <samp><span class="option">-G</span></samp> behavior to local data too, such as to static variables in C. <samp><span class="option">-mlocal-sdata</span></samp> is the default for all configurations. <p>If the linker complains that an application is using too much small data, you might want to try rebuilding the less performance-critical parts with <samp><span class="option">-mno-local-sdata</span></samp>. You might also want to build large libraries with <samp><span class="option">-mno-local-sdata</span></samp>, so that the libraries leave more room for the main program. <br><dt><code>-mextern-sdata</code><dt><code>-mno-extern-sdata</code><dd><a name="index-mextern_002dsdata-2012"></a><a name="index-mno_002dextern_002dsdata-2013"></a>Assume (do not assume) that externally-defined data is in a small data section if the size of that data is within the <samp><span class="option">-G</span></samp> limit. <samp><span class="option">-mextern-sdata</span></samp> is the default for all configurations. <p>If you compile a module <var>Mod</var> with <samp><span class="option">-mextern-sdata</span></samp> <samp><span class="option">-G </span><var>num</var></samp> <samp><span class="option">-mgpopt</span></samp>, and <var>Mod</var> references a variable <var>Var</var> that is no bigger than <var>num</var> bytes, you must make sure that <var>Var</var> is placed in a small data section. 
If <var>Var</var> is defined by another module, you must either compile that module with a high-enough <samp><span class="option">-G</span></samp> setting or attach a <code>section</code> attribute to <var>Var</var>'s definition. If <var>Var</var> is common, you must link the application with a high-enough <samp><span class="option">-G</span></samp> setting. <p>The easiest way of satisfying these restrictions is to compile and link every module with the same <samp><span class="option">-G</span></samp> option. However, you may wish to build a library that supports several different small data limits. You can do this by compiling the library with the highest supported <samp><span class="option">-G</span></samp> setting and additionally using <samp><span class="option">-mno-extern-sdata</span></samp> to stop the library from making assumptions about externally-defined data. <br><dt><code>-mgpopt</code><dt><code>-mno-gpopt</code><dd><a name="index-mgpopt-2014"></a><a name="index-mno_002dgpopt-2015"></a>Use (do not use) GP-relative accesses for symbols that are known to be in a small data section; see <samp><span class="option">-G</span></samp>, <samp><span class="option">-mlocal-sdata</span></samp> and <samp><span class="option">-mextern-sdata</span></samp>. <samp><span class="option">-mgpopt</span></samp> is the default for all configurations. <p><samp><span class="option">-mno-gpopt</span></samp> is useful for cases where the <code>$gp</code> register might not hold the value of <code>_gp</code>. For example, if the code is part of a library that might be used in a boot monitor, programs that call boot monitor routines pass an unknown value in <code>$gp</code>. (In such situations, the boot monitor itself is usually compiled with <samp><span class="option">-G0</span></samp>.) <p><samp><span class="option">-mno-gpopt</span></samp> implies <samp><span class="option">-mno-local-sdata</span></samp> and <samp><span class="option">-mno-extern-sdata</span></samp>. 
<br><dt><code>-membedded-data</code><dt><code>-mno-embedded-data</code><dd><a name="index-membedded_002ddata-2016"></a><a name="index-mno_002dembedded_002ddata-2017"></a>Allocate variables to the read-only data section first if possible, then next in the small data section if possible, otherwise in data. This gives slightly slower code than the default, but reduces the amount of RAM required when executing, and thus may be preferred for some embedded systems. <br><dt><code>-muninit-const-in-rodata</code><dt><code>-mno-uninit-const-in-rodata</code><dd><a name="index-muninit_002dconst_002din_002drodata-2018"></a><a name="index-mno_002duninit_002dconst_002din_002drodata-2019"></a>Put uninitialized <code>const</code> variables in the read-only data section. This option is only meaningful in conjunction with <samp><span class="option">-membedded-data</span></samp>. <br><dt><code>-mcode-readable=</code><var>setting</var><dd><a name="index-mcode_002dreadable-2020"></a>Specify whether GCC may generate code that reads from executable sections. There are three possible settings: <dl> <dt><code>-mcode-readable=yes</code><dd>Instructions may freely access executable sections. This is the default setting. <br><dt><code>-mcode-readable=pcrel</code><dd>MIPS16 PC-relative load instructions can access executable sections, but other instructions must not do so. This option is useful on 4KSc and 4KSd processors when the code TLBs have the Read Inhibit bit set. It is also useful on processors that can be configured to have a dual instruction/data SRAM interface and that, like the M4K, automatically redirect PC-relative loads to the instruction RAM. <br><dt><code>-mcode-readable=no</code><dd>Instructions must not access executable sections. This option can be useful on targets that are configured to have a dual instruction/data SRAM interface but that (unlike the M4K) do not automatically redirect PC-relative loads to the instruction RAM. 
</dl> <br><dt><code>-msplit-addresses</code><dt><code>-mno-split-addresses</code><dd><a name="index-msplit_002daddresses-2021"></a><a name="index-mno_002dsplit_002daddresses-2022"></a>Enable (disable) use of the <code>%hi()</code> and <code>%lo()</code> assembler relocation operators. This option has been superseded by <samp><span class="option">-mexplicit-relocs</span></samp> but is retained for backwards compatibility. <br><dt><code>-mexplicit-relocs</code><dt><code>-mno-explicit-relocs</code><dd><a name="index-mexplicit_002drelocs-2023"></a><a name="index-mno_002dexplicit_002drelocs-2024"></a>Use (do not use) assembler relocation operators when dealing with symbolic addresses. The alternative, selected by <samp><span class="option">-mno-explicit-relocs</span></samp>, is to use assembler macros instead. <p><samp><span class="option">-mexplicit-relocs</span></samp> is the default if GCC was configured to use an assembler that supports relocation operators. <br><dt><code>-mcheck-zero-division</code><dt><code>-mno-check-zero-division</code><dd><a name="index-mcheck_002dzero_002ddivision-2025"></a><a name="index-mno_002dcheck_002dzero_002ddivision-2026"></a>Trap (do not trap) on integer division by zero. <p>The default is <samp><span class="option">-mcheck-zero-division</span></samp>. <br><dt><code>-mdivide-traps</code><dt><code>-mdivide-breaks</code><dd><a name="index-mdivide_002dtraps-2027"></a><a name="index-mdivide_002dbreaks-2028"></a>MIPS systems check for division by zero by generating either a conditional trap or a break instruction. Using traps results in smaller code, but is only supported on MIPS II and later. Also, some versions of the Linux kernel have a bug that prevents trap from generating the proper signal (<code>SIGFPE</code>). Use <samp><span class="option">-mdivide-traps</span></samp> to allow conditional traps on architectures that support them and <samp><span class="option">-mdivide-breaks</span></samp> to force the use of breaks. 
<p>The default is usually <samp><span class="option">-mdivide-traps</span></samp>, but this can be overridden at configure time using <samp><span class="option">--with-divide=breaks</span></samp>. Divide-by-zero checks can be completely disabled using <samp><span class="option">-mno-check-zero-division</span></samp>. <br><dt><code>-mmemcpy</code><dt><code>-mno-memcpy</code><dd><a name="index-mmemcpy-2029"></a><a name="index-mno_002dmemcpy-2030"></a>Force (do not force) the use of <code>memcpy</code> for non-trivial block moves. The default is <samp><span class="option">-mno-memcpy</span></samp>, which allows GCC to inline most constant-sized copies. <br><dt><code>-mlong-calls</code><dt><code>-mno-long-calls</code><dd><a name="index-mlong_002dcalls-2031"></a><a name="index-mno_002dlong_002dcalls-2032"></a>Disable (do not disable) use of the <code>jal</code> instruction. Calling functions using <code>jal</code> is more efficient but requires the caller and callee to be in the same 256 megabyte segment. <p>This option has no effect on abicalls code. The default is <samp><span class="option">-mno-long-calls</span></samp>. <br><dt><code>-mmad</code><dt><code>-mno-mad</code><dd><a name="index-mmad-2033"></a><a name="index-mno_002dmad-2034"></a>Enable (disable) use of the <code>mad</code>, <code>madu</code> and <code>mul</code> instructions, as provided by the R4650 ISA. <br><dt><code>-mimadd</code><dt><code>-mno-imadd</code><dd><a name="index-mimadd-2035"></a><a name="index-mno_002dimadd-2036"></a>Enable (disable) use of the <code>madd</code> and <code>msub</code> integer instructions. The default is <samp><span class="option">-mimadd</span></samp> on architectures that support <code>madd</code> and <code>msub</code> except for the 74k architecture where it was found to generate slower code. 
<br><dt><code>-mfused-madd</code><dt><code>-mno-fused-madd</code><dd><a name="index-mfused_002dmadd-2037"></a><a name="index-mno_002dfused_002dmadd-2038"></a>Enable (disable) use of the floating-point multiply-accumulate instructions, when they are available. The default is <samp><span class="option">-mfused-madd</span></samp>. <p>On the R8000 CPU when multiply-accumulate instructions are used, the intermediate product is calculated to infinite precision and is not subject to the FCSR Flush to Zero bit. This may be undesirable in some circumstances. On other processors the result is numerically identical to the equivalent computation using separate multiply, add, subtract and negate instructions. <br><dt><code>-nocpp</code><dd><a name="index-nocpp-2039"></a>Tell the MIPS assembler to not run its preprocessor over user assembler files (with a &lsquo;<samp><span class="samp">.s</span></samp>&rsquo; suffix) when assembling them. <br><dt><code>-mfix-24k</code><br><dt><code>-mno-fix-24k</code><dd><a name="index-mfix_002d24k-2040"></a><a name="index-mno_002dfix_002d24k-2041"></a>Work around the 24K E48 (lost data on stores during refill) errata. The workarounds are implemented by the assembler rather than by GCC. <br><dt><code>-mfix-r4000</code><dt><code>-mno-fix-r4000</code><dd><a name="index-mfix_002dr4000-2042"></a><a name="index-mno_002dfix_002dr4000-2043"></a>Work around certain R4000 CPU errata: <ul> <li>A double-word or a variable shift may give an incorrect result if executed immediately after starting an integer division. <li>A double-word or a variable shift may give an incorrect result if executed while an integer multiplication is in progress. <li>An integer division may give an incorrect result if started in a delay slot of a taken branch or a jump. 
</ul> <br><dt><code>-mfix-r4400</code><dt><code>-mno-fix-r4400</code><dd><a name="index-mfix_002dr4400-2044"></a><a name="index-mno_002dfix_002dr4400-2045"></a>Work around certain R4400 CPU errata: <ul> <li>A double-word or a variable shift may give an incorrect result if executed immediately after starting an integer division. </ul> <br><dt><code>-mfix-r10000</code><dt><code>-mno-fix-r10000</code><dd><a name="index-mfix_002dr10000-2046"></a><a name="index-mno_002dfix_002dr10000-2047"></a>Work around certain R10000 errata: <ul> <li><code>ll</code>/<code>sc</code> sequences may not behave atomically on revisions prior to 3.0. They may deadlock on revisions 2.6 and earlier. </ul> <p>This option can only be used if the target architecture supports branch-likely instructions. <samp><span class="option">-mfix-r10000</span></samp> is the default when <samp><span class="option">-march=r10000</span></samp> is used; <samp><span class="option">-mno-fix-r10000</span></samp> is the default otherwise. <br><dt><code>-mfix-rm7000</code><dt><code>-mno-fix-rm7000</code><dd><a name="index-mfix_002drm7000-2048"></a>Work around the RM7000 <code>dmult</code>/<code>dmultu</code> errata. The workarounds are implemented by the assembler rather than by GCC. <br><dt><code>-mfix-vr4120</code><dt><code>-mno-fix-vr4120</code><dd><a name="index-mfix_002dvr4120-2049"></a>Work around certain VR4120 errata: <ul> <li><code>dmultu</code> does not always produce the correct result. <li><code>div</code> and <code>ddiv</code> do not always produce the correct result if one of the operands is negative. </ul> The workarounds for the division errata rely on special functions in <samp><span class="file">libgcc.a</span></samp>. At present, these functions are only provided by the <code>mips64vr*-elf</code> configurations. <p>Other VR4120 errata require a NOP to be inserted between certain pairs of instructions. These errata are handled by the assembler, not by GCC itself. 
<br><dt><code>-mfix-vr4130</code><dd><a name="index-mfix_002dvr4130-2050"></a>Work around the VR4130 <code>mflo</code>/<code>mfhi</code> errata. The workarounds are implemented by the assembler rather than by GCC, although GCC avoids using <code>mflo</code> and <code>mfhi</code> if the VR4130 <code>macc</code>, <code>macchi</code>, <code>dmacc</code> and <code>dmacchi</code> instructions are available instead. <br><dt><code>-mfix-sb1</code><dt><code>-mno-fix-sb1</code><dd><a name="index-mfix_002dsb1-2051"></a>Work around certain SB-1 CPU core errata. (This flag currently works around the SB-1 revision 2 &ldquo;F1&rdquo; and &ldquo;F2&rdquo; floating-point errata.) <br><dt><code>-mr10k-cache-barrier=</code><var>setting</var><dd><a name="index-mr10k_002dcache_002dbarrier-2052"></a>Specify whether GCC should insert cache barriers to avoid the side-effects of speculation on R10K processors. <p>In common with many processors, the R10K tries to predict the outcome of a conditional branch and speculatively executes instructions from the &ldquo;taken&rdquo; branch. It later aborts these instructions if the predicted outcome is wrong. However, on the R10K, even aborted instructions can have side effects. <p>This problem only affects kernel stores and, depending on the system, kernel loads. As an example, a speculatively-executed store may load the target memory into cache and mark the cache line as dirty, even if the store itself is later aborted. If a DMA operation writes to the same area of memory before the &ldquo;dirty&rdquo; line is flushed, the cached data overwrites the DMA-ed data. See the R10K processor manual for a full description, including other potential problems. <p>One workaround is to insert cache barrier instructions before every memory access that might be speculatively executed and that might have side effects even if aborted. 
<samp><span class="option">-mr10k-cache-barrier=</span><var>setting</var></samp> controls GCC's implementation of this workaround. It assumes that aborted accesses to any byte in the following regions does not have side effects: <ol type=1 start=1> <li>the memory occupied by the current function's stack frame; <li>the memory occupied by an incoming stack argument; <li>the memory occupied by an object with a link-time-constant address. </ol> <p>It is the kernel's responsibility to ensure that speculative accesses to these regions are indeed safe. <p>If the input program contains a function declaration such as: <pre class="smallexample"> void foo (void); </pre> <p>then the implementation of <code>foo</code> must allow <code>j foo</code> and <code>jal foo</code> to be executed speculatively. GCC honors this restriction for functions it compiles itself. It expects non-GCC functions (such as hand-written assembly code) to do the same. <p>The option has three forms: <dl> <dt><code>-mr10k-cache-barrier=load-store</code><dd>Insert a cache barrier before a load or store that might be speculatively executed and that might have side effects even if aborted. <br><dt><code>-mr10k-cache-barrier=store</code><dd>Insert a cache barrier before a store that might be speculatively executed and that might have side effects even if aborted. <br><dt><code>-mr10k-cache-barrier=none</code><dd>Disable the insertion of cache barriers. This is the default setting. </dl> <br><dt><code>-mflush-func=</code><var>func</var><dt><code>-mno-flush-func</code><dd><a name="index-mflush_002dfunc-2053"></a>Specifies the function to call to flush the I and D caches, or to not call any such function. If called, the function must take the same arguments as the common <code>_flush_func</code>, that is, the address of the memory range for which the cache is being flushed, the size of the memory range, and the number 3 (to flush both caches). 
The default depends on the target GCC was configured for, but commonly is either <code>_flush_func</code> or <code>__cpu_flush</code>. <br><dt><code>-mbranch-cost=</code><var>num</var><dd><a name="index-mbranch_002dcost-2054"></a>Set the cost of branches to roughly <var>num</var> &ldquo;simple&rdquo; instructions. This cost is only a heuristic and is not guaranteed to produce consistent results across releases. A zero cost redundantly selects the default, which is based on the <samp><span class="option">-mtune</span></samp> setting. <br><dt><code>-mbranch-likely</code><dt><code>-mno-branch-likely</code><dd><a name="index-mbranch_002dlikely-2055"></a><a name="index-mno_002dbranch_002dlikely-2056"></a>Enable or disable use of Branch Likely instructions, regardless of the default for the selected architecture. By default, Branch Likely instructions may be generated if they are supported by the selected architecture. An exception is for the MIPS32 and MIPS64 architectures and processors that implement those architectures; for those, Branch Likely instructions are not generated by default because the MIPS32 and MIPS64 architectures specifically deprecate their use. <br><dt><code>-mfp-exceptions</code><dt><code>-mno-fp-exceptions</code><dd><a name="index-mfp_002dexceptions-2057"></a>Specifies whether FP exceptions are enabled. This affects how FP instructions are scheduled for some processors. The default is that FP exceptions are enabled. <p>For instance, on the SB-1, if FP exceptions are disabled, and we are emitting 64-bit code, then we can use both FP pipes. Otherwise, we can only use one FP pipe. <br><dt><code>-mvr4130-align</code><dt><code>-mno-vr4130-align</code><dd><a name="index-mvr4130_002dalign-2058"></a>The VR4130 pipeline is two-way superscalar, but can only issue two instructions together if the first one is 8-byte aligned. When this option is enabled, GCC aligns pairs of instructions that it thinks should execute in parallel.
<p>This option only has an effect when optimizing for the VR4130. It normally makes code faster, but at the expense of making it bigger. It is enabled by default at optimization level <samp><span class="option">-O3</span></samp>. <br><dt><code>-msynci</code><dt><code>-mno-synci</code><dd><a name="index-msynci-2059"></a>Enable (disable) generation of <code>synci</code> instructions on architectures that support it. The <code>synci</code> instructions (if enabled) are generated when <code>__builtin___clear_cache</code> is compiled. <p>This option defaults to <samp><span class="option">-mno-synci</span></samp>, but the default can be overridden by configuring GCC with <samp><span class="option">--with-synci</span></samp>. <p>When compiling code for single processor systems, it is generally safe to use <code>synci</code>. However, on many multi-core (SMP) systems, it does not invalidate the instruction caches on all cores and may lead to undefined behavior. <br><dt><code>-mrelax-pic-calls</code><dt><code>-mno-relax-pic-calls</code><dd><a name="index-mrelax_002dpic_002dcalls-2060"></a>Try to turn PIC calls that are normally dispatched via register <code>$25</code> into direct calls. This is only possible if the linker can resolve the destination at link-time and if the destination is within range for a direct call. <p><samp><span class="option">-mrelax-pic-calls</span></samp> is the default if GCC was configured to use an assembler and a linker that support the <code>.reloc</code> assembly directive and <samp><span class="option">-mexplicit-relocs</span></samp> is in effect. With <samp><span class="option">-mno-explicit-relocs</span></samp>, this optimization can be performed by the assembler and the linker alone without help from the compiler. 
<br><dt><code>-mmcount-ra-address</code><dt><code>-mno-mcount-ra-address</code><dd><a name="index-mmcount_002dra_002daddress-2061"></a><a name="index-mno_002dmcount_002dra_002daddress-2062"></a>Emit (do not emit) code that allows <code>_mcount</code> to modify the calling function's return address. When enabled, this option extends the usual <code>_mcount</code> interface with a new <var>ra-address</var> parameter, which has type <code>intptr_t *</code> and is passed in register <code>$12</code>. <code>_mcount</code> can then modify the return address by doing both of the following: <ul> <li>Returning the new address in register <code>$31</code>. <li>Storing the new address in <code>*</code><var>ra-address</var>, if <var>ra-address</var> is nonnull. </ul> <p>The default is <samp><span class="option">-mno-mcount-ra-address</span></samp>. </dl> </body></html> ```
```java /* * */ package io.debezium.testing.system.tools.kafka; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.debezium.testing.system.tools.AbstractOcpDeployer; import io.debezium.testing.system.tools.operatorutil.OpenshiftOperatorEnum; import io.debezium.testing.system.tools.operatorutil.OperatorUtil; import io.fabric8.openshift.client.OpenShiftClient; import okhttp3.OkHttpClient; public class StrimziOperatorDeployer extends AbstractOcpDeployer<StrimziOperatorController> { private static final Logger LOGGER = LoggerFactory.getLogger(StrimziOperatorDeployer.class); public StrimziOperatorDeployer(String project, OpenShiftClient ocp, OkHttpClient http) { super(project, ocp, http); } @Override public StrimziOperatorController deploy() throws Exception { LOGGER.info("Deploying " + OpenshiftOperatorEnum.STRIMZI.getName() + " operator to project " + project); OperatorUtil.deployOperator(ocp, OpenshiftOperatorEnum.STRIMZI, project); return StrimziOperatorController.forProject(project, ocp); } } ```
```python ############################################################################# ## ## All rights reserved. ## ## This file is part of the examples of PyQt. ## ## $QT_BEGIN_LICENSE:LGPL$ ## Commercial Usage ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and Nokia. ## ## Alternatively, this file may be used under the terms of the GNU Lesser ## Foundation and appearing in the file LICENSE.LGPL included in the ## packaging of this file. Please review the following information to ## will be met: path_to_url ## ## In addition, as a special exception, Nokia gives you certain additional ## rights. These rights are described in the Nokia Qt LGPL Exception ## version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ## ## Alternatively, this file may be used under the terms of the GNU ## Foundation and appearing in the file LICENSE.GPL included in the ## packaging of this file. Please review the following information to ## met: path_to_url ## ## If you have questions regarding the use of this file, please contact ## Nokia at qt-info@nokia.com. 
## $QT_END_LICENSE$ ## ############################################################################# import sys from xml.dom.minidom import parseString from PyQt5.QtCore import (QByteArray, QDir, QEasingCurve, QFile, QFileInfo, QLibraryInfo, QObject, QPointF, QProcess, QProcessEnvironment, QStandardPaths, Qt, QT_VERSION, QT_VERSION_STR, QTextStream, QUrl) from PyQt5.QtWidgets import QApplication, QMessageBox from colors import Colors from demoitemanimation import DemoItemAnimation from examplecontent import ExampleContent from itemcircleanimation import ItemCircleAnimation from menucontent import MenuContentItem from score import Score from textbutton import TextButton class MenuManager(QObject): ROOT, MENU1, MENU2, LAUNCH, DOCUMENTATION, QUIT, FULLSCREEN, UP, DOWN, \ BACK, LAUNCH_QML = range(11) pInstance = None def __init__(self): super(MenuManager, self).__init__() self.contentsDoc = None self.assistantProcess = QProcess() self.helpRootUrl = '' self.docDir = QDir() self.imgDir = QDir() self.info = {} self.window = None self.ticker = None self.tickerInAnim = None self.upButton = None self.downButton = None self.score = Score() self.currentMenu = "[no menu visible]" self.currentCategory = "[no category visible]" self.currentMenuButtons = "[no menu buttons visible]" self.currentInfo = "[no info visible]" self.currentMenuCode = -1 self.readXmlDocument() @classmethod def instance(cls): if cls.pInstance is None: cls.pInstance = cls() return cls.pInstance def getResource(self, name): return QByteArray() def readXmlDocument(self): root = QFileInfo(__file__).absolutePath() xml_file = QFile(root + '/examples.xml') xml_file.open(QFile.ReadOnly | QFile.Text) contents = xml_file.readAll().data() xml_file.close() self.contentsDoc = parseString(contents) def itemSelected(self, userCode, menuName): if userCode == MenuManager.LAUNCH: self.launchExample(self.currentInfo) elif userCode == MenuManager.LAUNCH_QML: self.launchQml(self.currentInfo) elif userCode == 
MenuManager.DOCUMENTATION: self.showDocInAssistant(self.currentInfo) elif userCode == MenuManager.QUIT: QApplication.quit() elif userCode == MenuManager.FULLSCREEN: self.window.toggleFullscreen() elif userCode == MenuManager.ROOT: # Out. self.score.queueMovie(self.currentMenu + ' -out', Score.FROM_START, Score.LOCK_ITEMS) self.score.queueMovie(self.currentMenuButtons + ' -out', Score.FROM_START, Score.LOCK_ITEMS) self.score.queueMovie(self.currentInfo + ' -out') self.score.queueMovie(self.currentInfo + ' -buttons -out', Score.NEW_ANIMATION_ONLY) self.score.queueMovie('back -out', Score.ONLY_IF_VISIBLE) # Book-keeping. self.currentMenuCode = MenuManager.ROOT self.currentMenu = menuName + ' -menu1' self.currentMenuButtons = menuName + ' -buttons' self.currentInfo = menuName + ' -info' # In. self.score.queueMovie('upndown -shake') self.score.queueMovie(self.currentMenu, Score.FROM_START, Score.UNLOCK_ITEMS) self.score.queueMovie(self.currentMenuButtons, Score.FROM_START, Score.UNLOCK_ITEMS) self.score.queueMovie(self.currentInfo) if not Colors.noTicker: self.ticker.doIntroTransitions = True self.tickerInAnim.setStartDelay(2000) self.ticker.useGuideQt() self.score.queueMovie('ticker', Score.NEW_ANIMATION_ONLY) elif userCode == MenuManager.MENU1: # Out. self.score.queueMovie(self.currentMenu + ' -out', Score.FROM_START, Score.LOCK_ITEMS) self.score.queueMovie(self.currentMenuButtons + ' -out', Score.FROM_START, Score.LOCK_ITEMS) self.score.queueMovie(self.currentInfo + ' -out') # Book-keeping. self.currentMenuCode = MenuManager.MENU1 self.currentCategory = menuName self.currentMenu = menuName + ' -menu1' self.currentInfo = menuName + ' -info' # In. self.score.queueMovie('upndown -shake') self.score.queueMovie('back -in') self.score.queueMovie(self.currentMenu, Score.FROM_START, Score.UNLOCK_ITEMS) self.score.queueMovie(self.currentInfo) if not Colors.noTicker: self.ticker.useGuideTt() elif userCode == MenuManager.MENU2: # Out. 
self.score.queueMovie(self.currentInfo + ' -out', Score.NEW_ANIMATION_ONLY) self.score.queueMovie(self.currentInfo + ' -buttons -out', Score.NEW_ANIMATION_ONLY) # Book-keeping. self.currentMenuCode = MenuManager.MENU2 self.currentInfo = menuName # In/shake. self.score.queueMovie('upndown -shake') self.score.queueMovie('back -shake') self.score.queueMovie(self.currentMenu + ' -shake') self.score.queueMovie(self.currentInfo, Score.NEW_ANIMATION_ONLY) self.score.queueMovie(self.currentInfo + ' -buttons', Score.NEW_ANIMATION_ONLY) if not Colors.noTicker: self.score.queueMovie('ticker -out', Score.NEW_ANIMATION_ONLY) elif userCode == MenuManager.UP: backMenu = self.info[self.currentMenu]['back'] if backMenu: self.score.queueMovie(self.currentMenu + ' -top_out', Score.FROM_START, Score.LOCK_ITEMS) self.score.queueMovie(backMenu + ' -bottom_in', Score.FROM_START, Score.UNLOCK_ITEMS) self.currentMenu = backMenu elif userCode == MenuManager.DOWN: moreMenu = self.info[self.currentMenu]['more'] if moreMenu: self.score.queueMovie(self.currentMenu + ' -bottom_out', Score.FROM_START, Score.LOCK_ITEMS) self.score.queueMovie(moreMenu + ' -top_in', Score.FROM_START, Score.UNLOCK_ITEMS) self.currentMenu = moreMenu elif userCode == MenuManager.BACK: if self.currentMenuCode == MenuManager.MENU2: # Out. self.score.queueMovie(self.currentInfo + ' -out', Score.NEW_ANIMATION_ONLY) self.score.queueMovie(self.currentInfo + ' -buttons -out', Score.NEW_ANIMATION_ONLY) # Book-keeping. self.currentMenuCode = MenuManager.MENU1 self.currentMenuButtons = self.currentCategory + ' -buttons' self.currentInfo = self.currentCategory + ' -info' # In/shake. 
self.score.queueMovie('upndown -shake') self.score.queueMovie(self.currentMenu + ' -shake') self.score.queueMovie(self.currentInfo, Score.NEW_ANIMATION_ONLY) self.score.queueMovie(self.currentInfo + ' -buttons', Score.NEW_ANIMATION_ONLY) if not Colors.noTicker: self.ticker.doIntroTransitions = False self.tickerInAnim.setStartDelay(500) self.score.queueMovie('ticker', Score.NEW_ANIMATION_ONLY) elif self.currentMenuCode != MenuManager.ROOT: self.itemSelected(MenuManager.ROOT, Colors.rootMenuName) # Update back and more buttons. if self.info.setdefault(self.currentMenu, {}).get('back'): back_state = TextButton.OFF else: back_state = TextButton.DISABLED if self.info[self.currentMenu].get('more'): more_state = TextButton.OFF else: more_state = TextButton.DISABLED self.upButton.setState(back_state) self.downButton.setState(more_state) if self.score.hasQueuedMovies(): self.score.playQue() # Playing new movies might include loading etc., so ignore the FPS # at this point. self.window.fpsHistory = [] def showDocInAssistant(self, name): url = self.resolveDocUrl(name) Colors.debug("Sending URL to Assistant:", url) # Start assistant if it's not already running. if self.assistantProcess.state() != QProcess.Running: app = QLibraryInfo.location(QLibraryInfo.BinariesPath) + QDir.separator() if sys.platform == 'darwin': app += 'Assistant.app/Contents/MacOS/Assistant' else: app += 'assistant' args = ['-enableRemoteControl'] self.assistantProcess.start(app, args) if not self.assistantProcess.waitForStarted(): QMessageBox.critical(None, "PyQt Demo", "Could not start %s." % app) return # Send command through remote control even if the process was just # started to activate assistant and bring it to the front. 
cmd_str = QTextStream(self.assistantProcess) cmd_str << 'SetSource ' << url << '\n' def launchExample(self, name): executable = self.resolveExeFile(name) process = QProcess(self) process.error.connect(self.launchError) if sys.platform == 'win32': # Make sure it finds the DLLs on Windows. env = QProcessEnvironment.systemEnvironment() env.insert('PATH', QLibraryInfo.location(QLibraryInfo.BinariesPath) + ';' + env.value('PATH')) process.setProcessEnvironment(env) if self.info[name]['changedirectory'] != 'false': workingDirectory = self.resolveDataDir(name) process.setWorkingDirectory(workingDirectory) Colors.debug("Setting working directory:", workingDirectory) Colors.debug("Launching:", executable) process.start(sys.executable, [executable]) def launchQml(self, name): import_path = self.resolveDataDir(name) qml = self.resolveQmlFile(name) process = QProcess(self) process.error.connect(self.launchError) env = QProcessEnvironment.systemEnvironment() env.insert('QML2_IMPORT_PATH', import_path) process.setProcessEnvironment(env) executable = QLibraryInfo.location(QLibraryInfo.BinariesPath) + '/qmlscene' Colors.debug("Launching:", executable) process.start(executable, [qml]) def launchError(self, error): if error != QProcess.Crashed: QMessageBox.critical(None, "Failed to launch the example", "Could not launch the example. Ensure that it has been " "built.", QMessageBox.Cancel) def init(self, window): self.window = window # Create div. self.createTicker() self.createUpnDownButtons() self.createBackButton() # Create first level menu. rootElement = self.contentsDoc.documentElement self.createRootMenu(rootElement) # Create second level menus. level2Menu = self._first_element(rootElement) while level2Menu is not None: self.createSubMenu(level2Menu) # Create leaf menu and example info. 
example = self._first_element(level2Menu) while example is not None: self.readInfoAboutExample(example) self.createLeafMenu(example) example = self._next_element(example) level2Menu = self._next_element(level2Menu) @classmethod def _first_element(cls, node): return cls._skip_nonelements(node.firstChild) @classmethod def _next_element(cls, node): return cls._skip_nonelements(node.nextSibling) @staticmethod def _skip_nonelements(node): while node is not None and node.nodeType != node.ELEMENT_NODE: node = node.nextSibling return node def readInfoAboutExample(self, example): name = example.getAttribute('name') if name in self.info: Colors.debug("__WARNING: MenuManager.readInfoAboutExample: " "Demo/example with name", name, "appears twice in " "the xml-file!__") self.info.setdefault(name, {})['filename'] = example.getAttribute('filename') self.info[name]['dirname'] = example.parentNode.getAttribute('dirname') self.info[name]['changedirectory'] = example.getAttribute('changedirectory') self.info[name]['image'] = example.getAttribute('image') self.info[name]['qml'] = example.getAttribute('qml') def resolveDir(self, name): dirName = self.info[name]['dirname'] fileName = self.info[name]['filename'].split('/') dir = QFileInfo(__file__).dir() # To the 'examples' directory. dir.cdUp() dir.cd(dirName) if len(fileName) > 1: dir.cd('/'.join(fileName[:-1])) # This may legitimately fail if the example is just a simple .py file. 
dir.cd(fileName[-1]) return dir def resolveDataDir(self, name): return self.resolveDir(name).absolutePath() def resolveExeFile(self, name): dir = self.resolveDir(name) fileName = self.info[name]['filename'].split('/')[-1] pyFile = QFile(dir.path() + '/' + fileName + '.py') if pyFile.exists(): return pyFile.fileName() pywFile = QFile(dir.path() + '/' + fileName + '.pyw') if pywFile.exists(): return pywFile.fileName() Colors.debug("- WARNING: Could not resolve executable:", dir.path(), fileName) return '__executable not found__' def resolveQmlFile(self, name): dir = self.resolveDir(name) fileName = self.info[name]['filename'].split('/')[-1] qmlFile = QFile(dir.path() + '/' + fileName + '.qml') if qmlFile.exists(): return qmlFile.fileName() Colors.debug("- WARNING: Could not resolve QML file:", dir.path(), fileName) return '__QML not found__' def resolveDocUrl(self, name): dirName = self.info[name]['dirname'] fileName = self.info[name]['filename'] return self.helpRootUrl + dirName.replace('/', '-') + '-' + fileName + '.html' def resolveImageUrl(self, name): return self.helpRootUrl + 'images/' + name def getHtml(self, name): return self.getResource(self.resolveDocUrl(name)) def getImage(self, name): imageName = self.info[name]['image'] fileName = self.info[name]['filename'] if self.info[name]['qml'] == 'true': fileName = 'qml-' + fileName.split('/')[-1] if not imageName: imageName = fileName + '-example.png' if self.getResource(self.resolveImageUrl(imageName)).isEmpty(): imageName = fileName + '.png' if self.getResource(self.resolveImageUrl(imageName)).isEmpty(): imageName = fileName + 'example.png' return self.getResource(self.resolveImageUrl(imageName)) def createRootMenu(self, el): name = el.getAttribute('name') self.createMenu(el, MenuManager.MENU1) self.createInfo( MenuContentItem(el, self.window.mainSceneRoot), name + ' -info') menuButtonsIn = self.score.insertMovie(name + ' -buttons') menuButtonsOut = self.score.insertMovie(name + ' -buttons -out') 
self.createLowLeftButton("Quit", MenuManager.QUIT, menuButtonsIn, menuButtonsOut, None) self.createLowRightButton("Toggle fullscreen", MenuManager.FULLSCREEN, menuButtonsIn, menuButtonsOut, None) def createSubMenu(self, el): name = el.getAttribute('name') self.createMenu(el, MenuManager.MENU2) self.createInfo( MenuContentItem(el, self.window.mainSceneRoot), name + ' -info') def createLeafMenu(self, el): name = el.getAttribute('name') self.createInfo(ExampleContent(name, self.window.mainSceneRoot), name) infoButtonsIn = self.score.insertMovie(name + ' -buttons') infoButtonsOut = self.score.insertMovie(name + ' -buttons -out') self.createLowRightLeafButton("Documentation", 600, MenuManager.DOCUMENTATION, infoButtonsIn, infoButtonsOut, None) if el.getAttribute('executable') != 'false': self.createLowRightLeafButton("Launch", 405, MenuManager.LAUNCH, infoButtonsIn, infoButtonsOut, None) elif el.getAttribute('qml') == 'true': self.createLowRightLeafButton("Display", 405, MenuManager.LAUNCH_QML, infoButtonsIn, infoButtonsOut, None) def createMenu(self, category, type): sw = self.window.scene.sceneRect().width() xOffset = 15 yOffset = 10 maxExamples = Colors.menuCount menuIndex = 1 name = category.getAttribute('name') currentNode = self._first_element(category) currentMenu = '%s -menu%d' % (name, menuIndex) while currentNode is not None: movieIn = self.score.insertMovie(currentMenu) movieOut = self.score.insertMovie(currentMenu + ' -out') movieNextTopOut = self.score.insertMovie(currentMenu + ' -top_out') movieNextBottomOut = self.score.insertMovie(currentMenu + ' -bottom_out') movieNextTopIn = self.score.insertMovie(currentMenu + ' -top_in') movieNextBottomIn = self.score.insertMovie(currentMenu + ' -bottom_in') movieShake = self.score.insertMovie(currentMenu + ' -shake') i = 0 while currentNode is not None and i < maxExamples: # Create a normal menu button. 
label = currentNode.getAttribute('name') item = TextButton(label, TextButton.LEFT, type, self.window.mainSceneRoot) item.setRecursiveVisible(False) item.setZValue(10) ih = item.sceneBoundingRect().height() iw = item.sceneBoundingRect().width() ihp = ih + 3 # Create in-animation. anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN) anim.setDuration(1000 + (i * 20)) anim.setStartValue(QPointF(xOffset, -ih)) anim.setKeyValueAt(0.20, QPointF(xOffset, -ih)) anim.setKeyValueAt(0.50, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY + (10 * float(i / 4.0)))) anim.setKeyValueAt(0.60, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.70, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY + (5 * float(i / 4.0)))) anim.setKeyValueAt(0.80, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.90, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY + (2 * float(i / 4.0)))) anim.setEndValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) movieIn.append(anim) # Create out-animation. anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT) anim.setHideOnFinished(True) anim.setDuration(700 + (30 * i)) anim.setStartValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.60, QPointF(xOffset, 600 - ih - ih)) anim.setKeyValueAt(0.65, QPointF(xOffset + 20, 600 - ih)) anim.setEndValue(QPointF(sw + iw, 600 - ih)) movieOut.append(anim) # Create shake-animation. 
anim = DemoItemAnimation(item) anim.setDuration(700) anim.setStartValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.55, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY - i*2.0)) anim.setKeyValueAt(0.70, QPointF(xOffset - 10, (i * ihp) + yOffset + Colors.contentStartY - i*1.5)) anim.setKeyValueAt(0.80, QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY - i*1.0)) anim.setKeyValueAt(0.90, QPointF(xOffset - 2, (i * ihp) + yOffset + Colors.contentStartY - i*0.5)) anim.setEndValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) movieShake.append(anim) # Create next-menu top-out-animation. anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT) anim.setHideOnFinished(True) anim.setDuration(200 + (30 * i)) anim.setStartValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.70, QPointF(xOffset, yOffset + Colors.contentStartY)) anim.setEndValue(QPointF(-iw, yOffset + Colors.contentStartY)) movieNextTopOut.append(anim) # Create next-menu bottom-out-animation. anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT) anim.setHideOnFinished(True) anim.setDuration(200 + (30 * i)) anim.setStartValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.70, QPointF(xOffset, (maxExamples * ihp) + yOffset + Colors.contentStartY)) anim.setEndValue(QPointF(-iw, (maxExamples * ihp) + yOffset + Colors.contentStartY)) movieNextBottomOut.append(anim) # Create next-menu top-in-animation. anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN) anim.setDuration(700 - (30 * i)) anim.setStartValue(QPointF(-iw, yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.30, QPointF(xOffset, yOffset + Colors.contentStartY)) anim.setEndValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) movieNextTopIn.append(anim) # Create next-menu bottom-in-animation. 
reverse = maxExamples - i anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN) anim.setDuration(1000 - (30 * reverse)) anim.setStartValue(QPointF(-iw, (maxExamples * ihp) + yOffset + Colors.contentStartY)) anim.setKeyValueAt(0.30, QPointF(xOffset, (maxExamples * ihp) + yOffset + Colors.contentStartY)) anim.setEndValue(QPointF(xOffset, (i * ihp) + yOffset + Colors.contentStartY)) movieNextBottomIn.append(anim) i += 1 currentNode = self._next_element(currentNode) if currentNode is not None and i == maxExamples: # We need another menu, so register for 'more' and 'back' # buttons. menuIndex += 1 self.info.setdefault(currentMenu, {})['more'] = '%s -menu%d' % (name, menuIndex) currentMenu = '%s -menu%d' % (name, menuIndex) self.info.setdefault(currentMenu, {})['back'] = '%s -menu%d' % (name, menuIndex - 1) def createLowLeftButton(self, label, type, movieIn, movieOut, movieShake, menuString=""): button = TextButton(label, TextButton.RIGHT, type, self.window.mainSceneRoot, TextButton.PANEL) if menuString: button.setMenuString(menuString) button.setRecursiveVisible(False) button.setZValue(10) iw = button.sceneBoundingRect().width() xOffset = 15 # Create in-animation. buttonIn = DemoItemAnimation(button, DemoItemAnimation.ANIM_IN) buttonIn.setDuration(1800) buttonIn.setStartValue(QPointF(-iw, Colors.contentStartY + Colors.contentHeight - 35)) buttonIn.setKeyValueAt(0.5, QPointF(-iw, Colors.contentStartY + Colors.contentHeight - 35)) buttonIn.setKeyValueAt(0.7, QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 35)) buttonIn.setEndValue(QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 26)) movieIn.append(buttonIn) # Create out-animation. 
        # --- tail of createLowLeftButton (its "def" line is above this chunk) ---

        # Out-animation: slide the button off the left edge, then hide it.
        buttonOut = DemoItemAnimation(button, DemoItemAnimation.ANIM_OUT)
        buttonOut.setHideOnFinished(True)
        buttonOut.setDuration(400)
        buttonOut.setStartValue(QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 26))
        buttonOut.setEndValue(QPointF(-iw, Colors.contentStartY + Colors.contentHeight - 26))
        movieOut.append(buttonOut)

        if movieShake is not None:
            # Optional shake: wiggle the button horizontally around its
            # resting position (the in-animation's end value).
            shakeAnim = DemoItemAnimation(button, DemoItemAnimation.ANIM_UNSPECIFIED)
            shakeAnim.setDuration(650)
            shakeAnim.setStartValue(buttonIn.endValue())
            shakeAnim.setKeyValueAt(0.60, buttonIn.endValue())
            shakeAnim.setKeyValueAt(0.70, buttonIn.endValue() + QPointF(-3, 0))
            shakeAnim.setKeyValueAt(0.80, buttonIn.endValue() + QPointF(2, 0))
            shakeAnim.setKeyValueAt(0.90, buttonIn.endValue() + QPointF(-1, 0))
            shakeAnim.setEndValue(buttonIn.endValue())
            movieShake.append(shakeAnim)

    def createLowRightButton(self, label, type, movieIn, movieOut, movieShake):
        """Create a panel button anchored near the lower-right of the scene.

        Appends the slide-in animation to movieIn and the slide-out
        animation to movieOut.  movieShake is accepted for signature
        symmetry with the other button factories but is not used here.
        """
        item = TextButton(label, TextButton.RIGHT, type, self.window.mainSceneRoot, TextButton.PANEL)
        item.setRecursiveVisible(False)
        item.setZValue(10)
        sw = self.window.scene.sceneRect().width()
        xOffset = 70

        # Create in-animation: wait off-screen at the right edge until 50%
        # of the timeline, slide in, then settle from y-35 down to y-26.
        anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
        anim.setDuration(1800)
        anim.setStartValue(QPointF(sw, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.5, QPointF(sw, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.7, QPointF(xOffset + 535, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setEndValue(QPointF(xOffset + 535, Colors.contentStartY + Colors.contentHeight - 26))
        movieIn.append(anim)

        # Create out-animation: slide right, off the scene, then hide.
        anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
        anim.setHideOnFinished(True)
        anim.setDuration(400)
        anim.setStartValue(QPointF(xOffset + 535, Colors.contentStartY + Colors.contentHeight - 26))
        anim.setEndValue(QPointF(sw, Colors.contentStartY + Colors.contentHeight - 26))
        movieOut.append(anim)

    def createLowRightLeafButton(self, label, xOffset, type, movieIn, movieOut, movieShake):
        """Create a panel button for a leaf menu entry at the given x offset.

        The in-animation overshoots and bounces around xOffset before
        settling; the out-animation drops the button off the bottom of the
        scene.  movieShake is unused here but kept for a uniform factory
        signature.
        """
        item = TextButton(label, TextButton.RIGHT, type, self.window.mainSceneRoot, TextButton.PANEL)
        item.setRecursiveVisible(False)
        item.setZValue(10)
        sw = self.window.scene.sceneRect().width()
        sh = self.window.scene.sceneRect().height()

        # Create in-animation: slide in from the right edge, then bounce
        # (x+30, x, x+5, x) before settling from y-35 down to y-26.
        anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
        anim.setDuration(1050)
        anim.setStartValue(QPointF(sw, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.10, QPointF(sw, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.30, QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.35, QPointF(xOffset + 30, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.40, QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.45, QPointF(xOffset + 5, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setKeyValueAt(0.50, QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 35))
        anim.setEndValue(QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 26))
        movieIn.append(anim)

        # Create out-animation: drop straight down past the scene bottom.
        anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
        anim.setHideOnFinished(True)
        anim.setDuration(300)
        anim.setStartValue(QPointF(xOffset, Colors.contentStartY + Colors.contentHeight - 26))
        anim.setEndValue(QPointF(xOffset, sh))
        movieOut.append(anim)

    def createInfo(self, item, name):
        """Register in/out movies for an info item under the given name.

        The in-animation slides the item in from the right with a small
        horizontal wobble; the out-animation accelerates it off the left
        edge (InQuad easing) and hides it.
        """
        movie_in = self.score.insertMovie(name)
        movie_out = self.score.insertMovie(name + ' -out')
        item.setZValue(8)
        item.setRecursiveVisible(False)

        xOffset = 230.0
        infoIn = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
        infoIn.setDuration(650)
        infoIn.setStartValue(QPointF(self.window.scene.sceneRect().width(), Colors.contentStartY))
        infoIn.setKeyValueAt(0.60, QPointF(xOffset, Colors.contentStartY))
        infoIn.setKeyValueAt(0.70, QPointF(xOffset + 20, Colors.contentStartY))
        infoIn.setKeyValueAt(0.80, QPointF(xOffset, Colors.contentStartY))
        infoIn.setKeyValueAt(0.90, QPointF(xOffset + 7, Colors.contentStartY))
        infoIn.setEndValue(QPointF(xOffset, Colors.contentStartY))
        movie_in.append(infoIn)

        infoOut = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
        infoOut.setCurveShape(QEasingCurve.InQuad)
        infoOut.setDuration(300)
        infoOut.setHideOnFinished(True)
        infoOut.setStartValue(QPointF(xOffset, Colors.contentStartY))
        infoOut.setEndValue(QPointF(-600, Colors.contentStartY))
        movie_out.append(infoOut)

    def createTicker(self):
        """Create the scrolling ticker item and its four movies.

        No-op when the ticker is disabled via Colors.noTicker.  Registers
        in/out movies plus activate/deactivate variants; deactivate drops
        the ticker off the bottom instead of sliding it out to the right.
        """
        if Colors.noTicker:
            return

        movie_in = self.score.insertMovie('ticker')
        movie_out = self.score.insertMovie('ticker -out')
        movie_activate = self.score.insertMovie('ticker -activate')
        movie_deactivate = self.score.insertMovie('ticker -deactivate')

        self.ticker = ItemCircleAnimation()
        self.ticker.setZValue(50)
        self.ticker.hide()

        # Move ticker in: slide from the right edge with a small wobble
        # around its resting x position (qtendpos).
        qtendpos = 485
        qtPosY = 120
        self.tickerInAnim = DemoItemAnimation(self.ticker, DemoItemAnimation.ANIM_IN)
        self.tickerInAnim.setDuration(500)
        self.tickerInAnim.setStartValue(QPointF(self.window.scene.sceneRect().width(), Colors.contentStartY + qtPosY))
        self.tickerInAnim.setKeyValueAt(0.60, QPointF(qtendpos, Colors.contentStartY + qtPosY))
        self.tickerInAnim.setKeyValueAt(0.70, QPointF(qtendpos + 30, Colors.contentStartY + qtPosY))
        self.tickerInAnim.setKeyValueAt(0.80, QPointF(qtendpos, Colors.contentStartY + qtPosY))
        self.tickerInAnim.setKeyValueAt(0.90, QPointF(qtendpos + 5, Colors.contentStartY + qtPosY))
        self.tickerInAnim.setEndValue(QPointF(qtendpos, Colors.contentStartY + qtPosY))
        movie_in.append(self.tickerInAnim)

        # Move ticker out: slide well past the right edge, then hide.
        qtOut = DemoItemAnimation(self.ticker, DemoItemAnimation.ANIM_OUT)
        qtOut.setHideOnFinished(True)
        qtOut.setDuration(500)
        qtOut.setStartValue(QPointF(qtendpos, Colors.contentStartY + qtPosY))
        qtOut.setEndValue(QPointF(self.window.scene.sceneRect().width() + 700, Colors.contentStartY + qtPosY))
        movie_out.append(qtOut)

        # Move ticker in on activate: same path as the in-animation, faster.
        qtActivate = DemoItemAnimation(self.ticker)
        qtActivate.setDuration(400)
        qtActivate.setStartValue(QPointF(self.window.scene.sceneRect().width(), Colors.contentStartY + qtPosY))
        qtActivate.setKeyValueAt(0.60, QPointF(qtendpos, Colors.contentStartY + qtPosY))
        qtActivate.setKeyValueAt(0.70, QPointF(qtendpos + 30, Colors.contentStartY + qtPosY))
        qtActivate.setKeyValueAt(0.80, QPointF(qtendpos, Colors.contentStartY + qtPosY))
        qtActivate.setKeyValueAt(0.90, QPointF(qtendpos + 5, Colors.contentStartY + qtPosY))
        qtActivate.setEndValue(QPointF(qtendpos, Colors.contentStartY + qtPosY))
        movie_activate.append(qtActivate)

        # Move ticker out on deactivate: drop it straight down, then hide.
        qtDeactivate = DemoItemAnimation(self.ticker)
        qtDeactivate.setHideOnFinished(True)
        qtDeactivate.setDuration(400)
        qtDeactivate.setStartValue(QPointF(qtendpos, Colors.contentStartY + qtPosY))
        qtDeactivate.setEndValue(QPointF(qtendpos, 800))
        movie_deactivate.append(qtDeactivate)

    def createUpnDownButtons(self):
        """Create the up/down scroll buttons and their shared shake movie.

        The up button starts disabled; both buttons get a small horizontal
        shake animation appended to the 'upndown -shake' movie.
        """
        xOffset = 15.0
        yOffset = 450.0

        self.upButton = TextButton("", TextButton.LEFT, MenuManager.UP, self.window.mainSceneRoot, TextButton.UP)
        self.upButton.prepare()
        self.upButton.setPos(xOffset, yOffset)
        self.upButton.setState(TextButton.DISABLED)

        self.downButton = TextButton("", TextButton.LEFT, MenuManager.DOWN, self.window.mainSceneRoot, TextButton.DOWN)
        self.downButton.prepare()
        self.downButton.setPos(xOffset + 10 + self.downButton.sceneBoundingRect().width(), yOffset)

        movieShake = self.score.insertMovie('upndown -shake')

        # Shake the up button around its current position.
        shakeAnim = DemoItemAnimation(self.upButton, DemoItemAnimation.ANIM_UNSPECIFIED)
        shakeAnim.setDuration(650)
        shakeAnim.setStartValue(self.upButton.pos())
        shakeAnim.setKeyValueAt(0.60, self.upButton.pos())
        shakeAnim.setKeyValueAt(0.70, self.upButton.pos() + QPointF(-2, 0))
        shakeAnim.setKeyValueAt(0.80, self.upButton.pos() + QPointF(1, 0))
        shakeAnim.setKeyValueAt(0.90, self.upButton.pos() + QPointF(-1, 0))
        shakeAnim.setEndValue(self.upButton.pos())
        movieShake.append(shakeAnim)

        # Shake the down button (slightly larger displacement).
        shakeAnim = DemoItemAnimation(self.downButton, DemoItemAnimation.ANIM_UNSPECIFIED)
        shakeAnim.setDuration(650)
        shakeAnim.setStartValue(self.downButton.pos())
        shakeAnim.setKeyValueAt(0.60, self.downButton.pos())
        shakeAnim.setKeyValueAt(0.70, self.downButton.pos() + QPointF(-5, 0))
        shakeAnim.setKeyValueAt(0.80, self.downButton.pos() + QPointF(-3, 0))
        shakeAnim.setKeyValueAt(0.90, self.downButton.pos() + QPointF(-1, 0))
        shakeAnim.setEndValue(self.downButton.pos())
        movieShake.append(shakeAnim)

    def createBackButton(self):
        """Create the "Back" navigation button with in/out/shake movies."""
        backIn = self.score.insertMovie('back -in')
        backOut = self.score.insertMovie('back -out')
        backShake = self.score.insertMovie('back -shake')
        self.createLowLeftButton("Back", MenuManager.ROOT, backIn, backOut, backShake, Colors.rootMenuName)
```
```c++ // sol2 // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include <sol/compatibility.hpp> ```
Kalali may refer to: Places Kalali, Pakpattan, Kalali village, Pakpattan city, Punjab Pakistan Kalali, Vadodara, Kalali village, Vadodara city, Gujarat India Ghukasavan, Armenia Noraber, Armenia Kalali, Iran Kalaleh-ye Sofla, Iran Other Kalali people, an indigenous Australian people Amirteymour Kalali (1895–1988), Iranian politician and noble
The AIIMS metro station is located on the Yellow Line of the Delhi Metro. The entrances are on Aurobindo Marg, the east side just outside the All India Institute of Medical Sciences. Safdarjung Hospital is just west of the station. The station Station layout Facilities List of available ATMs at AIIMS metro station: HDFC Bank, YES Bank, State Bank of India, IndusInd Bank Entry/exit Connections Bus Delhi Transport Corporation bus routes number 335, 502, 503, 505, 507CL, 512, 516, 517, 519, 520, 536, 542, 548, 548CL, 548EXT, 605, 725, serve the station from the nearby AIIMS bus stop. See also References External links Delhi Metro Rail Corporation Ltd. (Official site) Delhi Metro Annual Reports Railway stations in India opened in 2010 Delhi Metro stations Railway stations in South Delhi district 2010 establishments in Delhi
```prolog
#!/usr/bin/perl -w
#
# Check the tree against missing VERSIONs.
#
# Originally by Larry Shatzer
#

use strict;
use File::Find;

# Walk the tree rooted at the first command-line argument (or the current
# directory) and print the name of every .pm file -- skipping anything
# under a /t/ test directory -- for which parse_file() could not extract
# a VERSION.
find(
    sub {
        return unless -f;

        if (/\.pm$/ && $File::Find::name !~ m:/t/:) { # pm but not in a test
            unless (parse_file($_)) {
                print "$File::Find::name\n";
            }
        }
    },
    @ARGV ? shift : ".");

# Extract the VERSION from a module file: scan for the first $VERSION
# (or *VERSION) assignment outside POD and comments, then eval that line
# in a scratch package so arbitrary version expressions are handled.
# Returns the version value, the string "undef" if the assignment
# evaluated to undef, or undef if no VERSION line was found (or the file
# could not be read).
sub parse_file {
    my $parsefile = shift;
    my $result;

    open(FH,'<',$parsefile) or warn "Could not open '$parsefile': $!";

    # Track POD state: any "=word" starts a block, "=cut" ends it.
    my $inpod = 0;

    while (<FH>) {
        $inpod = /^=(?!cut)/ ? 1 : /^=cut/ ? 0 : $inpod;
        next if $inpod || /^\s*\#/;    # skip POD and comment lines
        chomp;

        # $1 = sigil ($ or *), $2 = possibly package-qualified VERSION name.
        next unless /([\$*])(([\w\:\']*)\bVERSION)\b.*\=/;

        # Evaluate the assignment in a throwaway package, localizing the
        # variable so the outer program is not polluted.
        my $eval = qq{ package ExtUtils::MakeMaker::_version; no strict; local $1$2; \$$2=undef; do { $_ }; \$$2 };
        no warnings;
        $result = eval($eval);
        warn "Could not eval '$eval' in $parsefile: $@" if $@;
        $result = "undef" unless defined $result;
        last;    # only the first VERSION line matters
    }

    close FH;
    return $result;
}
```
```java
/*
 *
 *
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing,
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * specific language governing permissions and limitations
 */
package org.ballerinalang.test.klass;

import org.ballerinalang.nativeimpl.jvm.servicetests.ServiceValue;
import org.ballerinalang.test.BAssertUtil;
import org.ballerinalang.test.BCompileUtil;
import org.ballerinalang.test.BRunUtil;
import org.ballerinalang.test.CompileResult;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;

import static org.ballerinalang.test.BAssertUtil.validateError;

/**
 * Test cases for service classes.
 *
 * @since 2.0
 */
public class ServiceClassTest {

    /** Compiles a simple service class and invokes its object-value test function. */
    @Test
    public void testBasicStructAsObject() {
        CompileResult compileResult = BCompileUtil.compile("test-src/klass/simple_service_class.bal");
        BRunUtil.invoke(compileResult, "testServiceObjectValue");
    }

    /**
     * Verifies that resource method signatures (accessor, path segments, path
     * params, rest params) participate in assignability checks: each expected
     * diagnostic pins a mismatching assignment in the negative test source.
     */
    @Test
    public void testResourceMethodsDoesNotAffectAssignability() {
        CompileResult result = BCompileUtil.compile("test-src/klass/resource_method_assignability_negative_test.bal");
        int index = 0;
        validateError(result, index++, "incompatible types: expected 'Foo', found 'Bar'", 38, 13);
        validateError(result, index++, "incompatible types: expected 'Bar', found 'isolated object { " +
                "public function hello () returns (); function foo () returns (int); }'", 40, 15);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".() returns (); }', found 'isolated object { resource function post .() returns (); }'", 56, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
                63, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() returns " +
                "(); }'", 70, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
                78, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() " +
                "returns (); }'", 85, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [byte]() returns (); }'",
                93, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); " +
                "resource function post [int]() returns (); }'", 100, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); " +
                "resource function post [int]() returns (); }'", 109, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "bar/[int...]() returns (); }', found 'isolated object { resource function get bar/[string...]() " +
                "returns (); }'", 118, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "bar/[int...]() returns (); }', found 'isolated object { resource function get bar/[byte...]() " +
                "returns (); }'", 125, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "bar/[int...]() returns (); }', found 'isolated object { resource function get bar/[int]() " +
                "returns (); }'", 132, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo/[int]() returns (); }', found 'isolated object { resource function get foo2/[int]() returns " +
                "(); }'", 139, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() returns " +
                "(); }'", 146, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".(int) returns (); }', found 'isolated object { resource function get .() returns (); }'", 153, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".() returns (); }', found 'isolated object { resource function get .(int) returns (); }'", 160, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".(int...) returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
                167, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo() returns (); }', found 'isolated object { resource function get foo(int) returns (); }'",
                174, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
                181, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int](int) returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
                188, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".(int) returns (); }', found 'isolated object { resource function get [int]() returns (); }'",
                195, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".(int) returns (); }', found 'isolated object { resource function get [int]() returns (); }'",
                202, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                ".() returns (); }', found 'isolated object { resource function post .() returns (); }'", 209, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
                216, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() returns " +
                "(); }'", 223, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
                230, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "[int]() returns (); }', found 'isolated object { resource function get [int]() returns (); }'",
                237, 11);
        validateError(result, index++, "incompatible types: expected 'object { resource function get " +
                "foo/[string...]() returns (); }', found 'isolated object { " +
                "resource function get foo/[int...]() returns (); }'", 244, 11);
        Assert.assertEquals(index, result.getErrorCount());
    }

    /** Verifies the diagnostics for unsupported path-parameter types (json/anydata). */
    @Test
    public void testResourcePathParamNegative() {
        CompileResult result = BCompileUtil.compile("test-src/klass/simple_service_class_neg_path_param.bal");
        int index = 0;
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path params, " +
                "found 'json'", 37, 32);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path params, " +
                "found 'anydata'", 37, 41);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as rest path param, " +
                "found 'anydata'", 37, 65);
        Assert.assertEquals(index, 3);
    }

    /**
     * Verifies the "no implementation found" diagnostics when a class includes a
     * service object type but fails to implement its (resource) methods, plus
     * the resulting assignability errors.
     */
    @Test
    public void testServiceObjectAndUsingServiceObjectAsATypeInclusionNegative() {
        CompileResult result = BCompileUtil.compile("test-src/klass/service_type_resource_method_decl_neg.bal");
        int index = 0;
        validateError(result, index++, "no implementation found for the method 'onMesage' of class 'SClass'", 23, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo/[string]() returns ()' of class 'SClass'", 23, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo/bar() returns ()' of class 'SClass'", 23, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo() returns (string)' of class 'RTwo'", 34, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo() returns (string)' of class 'RTypeImpl'", 43, 1);
        validateError(result, index++, "no implementation found for the method 'resource function do " +
                "f() returns (int)' of class 'Do'", 56, 1);
        validateError(result, index++, "no implementation found for the method 'resource function done " +
                "f() returns (int)' of class 'Do'", 56, 1);
        validateError(result, index++, "incompatible types: expected 'Foo', found 'Bar'", 80, 13);
        validateError(result, index++, "incompatible types: expected 'Foo', found 'Baz'", 88, 13);
        Assert.assertEquals(result.getErrorCount(), index);
    }

    /**
     * Verifies the diagnostics for structurally invalid resource definitions:
     * unsupported path-param types, redeclared symbols, segments after a rest
     * parameter, and missing resource paths.
     */
    @Test
    public void testResourceFunctionWithInvalidPathParam() {
        CompileResult result =
                BCompileUtil.compile("test-src/klass/resource_function_with_invalid_path_param_type_negative.bal");
        int index = 0;
        validateError(result, index++, "only 'int', 'string', 'float', 'boolean', 'decimal' types " +
                "are supported as path params, found 'other'", 24, 29);
        validateError(result, index++, "undefined module 'module1'", 24, 29);
        validateError(result, index++, "unknown type 'RequestMessage'", 24, 29);
        BAssertUtil.validateError(result, index++, "redeclared symbol 'a'", 35, 56);
        BAssertUtil.validateError(result, index++, "redeclared symbol 'name'", 39, 69);
        BAssertUtil.validateError(result, index++, "redeclared symbol '$anonType$_2.$get$path$^$foo2'", 43, 27);
        BAssertUtil.validateError(result, index++,
                "resource path segment is not allowed after resource path rest parameter", 47, 47);
        BAssertUtil.validateError(result, index++, "only 'int', 'string', 'float', 'boolean', 'decimal' types " +
                "are supported as path params, found 'string?'", 51, 38);
        BAssertUtil.validateError(result, index++, "missing resource path in resource accessor definition", 55, 27);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as" +
                " path params, found 'record {| int a; anydata...; |}'", 59, 43);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as" +
                " path params, found 'record {| int a; anydata...; |}'", 63, 44);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as rest path " +
                "param, found 'xml'", 67, 40);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found 'xml'", 71, 41);
        BAssertUtil.validateError(result, index++, "redeclared symbol '$anonType$_2.$get$xmlPath2$^'", 75, 27);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path" +
                " params, found 'xml'", 75, 41);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found 'map<string>'", 79, 40);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found 'map<string>'", 83, 41);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found '(int|error)'", 87, 47);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found '(int|error)'", 91, 48);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found 'error'", 95, 42);
        BAssertUtil.validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                "params, found 'error'", 99, 43);
        Assert.assertEquals(result.getErrorCount(), index);
    }

    /** Positive counterpart: compiles and runs the assignability test function. */
    @Test
    public void testResourceMethodAssignability() {
        CompileResult compileResult = BCompileUtil.compile(
                "test-src/klass/resource_method_assignability_test.bal");
        BRunUtil.invoke(compileResult, "testResourceMethodAssignability");
    }

    /** Clears static state shared through ServiceValue after the class finishes. */
    @AfterClass
    public void reset() {
        ServiceValue.reset();
    }
}
```
HWT may refer to: Harvey World Travel, a former Australian travel agency franchise Health and welfare trust, a Canadian healthcare plan The Herald and Weekly Times, an Australian newspaper publishing company Heritage Walk Trivandrum, an Indian non-profit organisation Hypersonic wind tunnel Herschel Walker trade, the largest player trade in the history of the U.S. National Football League Humanoid Walking Tank, a type of robot in the Japanese video game Gunparade March
The Hyderabad women's cricket team is a women's cricket team that represents the Indian city of Hyderabad. The team competes in the Women's Senior One Day Trophy and the Women's Senior T20 Trophy. Current squad Doli Ramya Keerthi Reddy Gongadi Trisha Mamatha Kanojia Anuradha Nayak Mahesh Kavya Himani Yadav Rachna Kumar Madiwala Mamatha (wk) Soppadhandi Yashasri Bhogi Shravani Honours Women's Senior One Day Trophy: Runners-up (1): 2011–12 Women's Senior T20 Trophy: Runners-up (3): 2012–13, 2013–14, 2016–17 See also Hyderabad cricket team References Women's cricket teams in India Cricket in Hyderabad, India Cricket in Telangana
The Walney to Wear and Whitby Cycle Route (or W2W) is the name of a cross-country cycle route in Northern England. It runs from Walney Island in Cumbria to Sunderland on the River Wear or Whitby. History The route was launched on 1 June 2005 to complement the popular Sea to Sea Cycle Route (C2C) that runs from Whitehaven to Sunderland. It is designed to be slightly harder and longer than this other route, totalling either . When launched the route used sections of NCN Routes 72, 68 and 71 west of the Pennines. From Tan Hill to Sunderland a new Regional Route was created and given the number 20, with a blue background. In 2012, after improvements to meet National Cycle Network standards, it was upgraded to National Route 70. Route signs were changed to the number 70 with a red background. Around the same time the sections on the route that had previously been Route 71 and 72 were re-signed as Route 70. In 2007 a southern branch from Barnard Castle to Whitby was added. Originally classified as regional route 52, it was upgraded to National Cycle Route 165 in 2012. The Walney to Whitby route is . Route The W2W is a Y-shaped route with one western leg from Walney to Barnard Castle, and two eastern legs Barnard Castle to Sunderland (northern) and Barnard Castle to Whitby (Southern). Western leg Walney to Barnard Castle From Walney Island in the Irish Sea it goes on to the industrial port of Barrow-in-Furness It then runs through the Furness peninsula, passing towns such as Ulverston and the picturesque Grange-Over-Sands where the route follows the Promenade. The route continues to skirt the Lake District national park towards the historical market town of Kendal After this, the path continues northwards, before moving in an easterly direction towards Kirkby Stephen and through the Pennines to Barnard Castle in County Durham North Eastern leg Barnard Castle to Sunderland The northerly spur passes through Hamsterley, to reach the beautiful cathedral city of Durham.
National Route 715 between Barnard Castle and Willington is an alternative W2W route. It is a shorter and easier ride via Bishop Auckland. The final of the W2W follows the River Wear to the North Sea at Sunderland South Eastern leg Barnard Castle to Whitby From Barnard Castle the route descends off the Durham Dales along the lower Teesdale for to Croft-on-Tees near Darlington. Continuing on flat roads for until it reaches the North York Moors at Great Ayton. Following the Eskdale for the final the route crosses the River Esk five times before reaching Whitby. This section includes several steep climbs and descents, some of them on off-road tracks. Footnotes External links The official website of the W2W Wearside Sustrans website with links to details on the route in Wearside Cycleways in England National Cycle Routes Furness
```objective-c
% contributor license agreements. See the NOTICE file distributed with
% this work for additional information regarding copyright ownership.
%
% path_to_url
%
% Unless required by applicable law or agreed to in writing, software
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or

% StringTraits maps the Arrow String type onto its MATLAB-side
% implementation classes (array, proxy, and type classes) and onto the
% corresponding native MATLAB type ("string").
classdef StringTraits < arrow.type.traits.TypeTraits

    properties (Constant)
        % Arrow array classes for String-typed data.
        ArrayConstructor = @arrow.array.StringArray
        ArrayClassName = "arrow.array.StringArray"
        ArrayProxyClassName = "arrow.array.proxy.StringArray"
        % Builds a StringArray from native MATLAB data.
        ArrayStaticConstructor = @arrow.array.StringArray.fromMATLAB

        % Arrow type classes for String.
        TypeConstructor = @arrow.type.StringType
        TypeClassName = "arrow.type.StringType"
        TypeProxyClassName = "arrow.type.proxy.StringType"

        % Native MATLAB equivalent.
        MatlabConstructor = @string
        MatlabClassName = "string"
    end

end
```
```c++ // // file LICENSE_1_0.txt or copy at path_to_url // <boost/thread/synchronized_value.hpp> // class synchronized_value<T,M> // void swap(synchronized_value&,synchronized_value&); #define BOOST_THREAD_VERSION 4 #include <boost/thread/synchronized_value.hpp> #include <boost/detail/lightweight_test.hpp> int main() { { boost::synchronized_value<int> v1(1); int v2(2); boost::swap(v1,v2); BOOST_TEST(v1.value() == 2); BOOST_TEST(v2 == 1); } { boost::synchronized_value<int> v1(1); int v2(2); boost::swap(v2,v1); BOOST_TEST(v1.value() == 2); BOOST_TEST(v2 == 1); } return boost::report_errors(); } ```
Battery E, 1st U.S. Artillery was a United States Army field artillery battery that was in service between 1821 and 1901, most notably in extensive service with the Union Army during the American Civil War. During the Civil War, the battery was present at the Siege of Fort Sumter in April 1861 under the command of Captain Abner Doubleday. Returned to the artillery defenses of Washington, D.C., and rearmed as a field artillery battery, the unit was merged with Battery G, 1st U.S. Artillery in February 1862. Consolidated and renamed Battery E & G, 1st U.S. Artillery, the unit continued with this designation until the end of the war. The unit is also known for its participation in the Wounded Knee Massacre in 1890. Service Battery E, 1st U.S. Artillery was formed by a reorganization of the U.S. Army artillery service in 1821. The battery saw service in the Second Seminole War and the early engagements of the Mexican–American War. From 1856 through 1858, the battery was stationed in Florida during the Third Seminole War. At the outbreak of the Civil War, the battery was stationed at Fort Sumter, Charleston Harbor, South Carolina in January 1861, where it was present during the Battle of Fort Sumter in April which sparked the war. Following the surrender of that post, it moved to Washington, D.C., where it was attached to Patterson's army to October 1861, in the field but not present at the First Battle of Bull Run. Its new commander, Captain Jefferson C. Davis, was absent on detached service throughout the duration of the war; under the command of subaltern battery officer Lieutenant Alanson Merwin Randol, the battery was merged with Battery G, 1st U.S. 
Artillery in February 1862, serving as Battery E & G with the Artillery Reserve, Army of the Potomac, to May 1862; 2nd Brigade, Artillery Reserve, V Corps, Army of the Potomac, to September 1862; Artillery, 2nd Division, V Corps, to October 1862; Artillery, 3rd Division, V Corps, to May 1863; 2nd Regular Brigade, Artillery Reserve, Army of the Potomac, to June 1863; 2nd Brigade, Horse Artillery, Artillery Reserve, Army of the Potomac, to June 1864; 3rd Brigade, DeRussy's Division, XXII Corps, to July 1864; and 1st Brigade, DeRussy's Division, XXII Corps, to October 1865. On garrison duty along the East Coast post-war, the battery also participated in the Indian Wars at Wounded Knee. Detailed service Defense of Fort Sumter, Charleston Harbor, April 12–13, 1861. Evacuation of Fort Sumter April 13. Reached Fort Hamilton, New York Harbor, April 19. Moved to Chambersburg, Pennsylvania, June 3. Ordered to Washington, D.C., August 26, 1861. Duty at the federal arsenal and at Camp Duncan, defenses of Washington, until March 1862. Moved to the Virginia Peninsula. Siege of Yorktown April 5-May 4. Battle of Seven Pines, May 31-June 1. Seven Days Battles before Richmond June 26-July 1. Savage's Station and Peach Orchard June 29. White Oak Swamp and Glendale June 30. Malvern Hill July 1. At Harrison's Landing until August 16. Moved to Fortress Monroe, then to Centerville August 16–28. Pope's Virginia Campaign August 28-September 2. Battle of Groveton August 29. Second Battle of Bull Run August 30. Maryland Campaign September 6–22. Battle of Antietam September 16–17. Shepherdstown Ford September 19–20. At Sharpsburg until October 30. Movement to Falmouth, Virginia, October 29-November 19. Battle of Fredericksburg, December 11–15. At Falmouth, Virginia, until April 1863. Chancellorsville Campaign April 27-May 6. Battle of Chancellorsville May 1–5. Gettysburg Campaign June 11-July 24. Aldie June 17. Middleburg June 19. Upperville June 21. Ashby's Gap June 21. 
Battle of Gettysburg, July 1–3. Near Harpers Ferry July 14. Shepherdstown July 16. Bristoe Campaign October 9–22. Advanced to line of the Rappahannock November 7–8. Mine Run Campaign November 26-December 2. Custer's Raid into Albemarle County February 28-March 1, 1864. Near Charlottesville February 29. Stannardsville March 1. Rapidan Campaign May 4-June 8. Battle of the Wilderness May 5–7. Spotsylvania May 8–21. Milford Station May 21. Chesterfield May 23. North Anna River May 23–26. On line of the Pamunkey May 26–28. Totopotomoy May 28–31. Mechump's Creek May 31. Cold Harbor June 1–5. Sharp's Farm June 3. Moved to Washington, D.C., June 18. Garrison duty at Fort Willard and Fort Strong, defenses of Washington, D.C., until October 1865. Later service One of the most notable actions of Battery E, 1st U.S. Artillery was in support of the Colonel James W. Forsyth's 7th U.S. Cavalry at the now-controversial engagement at Wounded Knee on December 29, 1890. The battery was commanded by Captain Allyn Capron and equipped with four Hotchkiss M1875 mountain guns which were used against the Wounded Knee encampment with devastating effect after the fight broke out. Some estimates report that as many as 200 Lakota Indians were killed or wounded in the engagement, the majority of whom were women and children. The battery participated in the Spanish–American War in Cuba in 1898, under the command of Captain Capron. In 1899, the battery was deployed to the Philippine Islands during the Philippine–American War; it remained there until 1901, when the unit was dissolved and reorganized as the First Battery, Field Artillery in the newly organized Artillery Corps of the United States Army. Commanders Captain Sylvester Churchill Captain Henry Saunders Captain Ebenezer Sibley Captain James Henry Prentiss Captain William Henry French Captain Abner Doubleday Captain Jefferson C. 
Davis Lieutenant Samuel Sherer Elder Captain Alanson Merwin Randol 1st Lieutenant Egbert Worth Olcott 1st Lieutenant Frank Sands French Lieutenant Edward Alexander Duer Captain Franck Eveleigh Taylor Captain Tully McCrea Captain Allyn Capron Captain Henry Merritt Andrews See also List of United States Regular Army Civil War units 1st Air Defense Artillery Regiment Notes References Buel. Clarence C. and Robert U. Johnson. Battles and Leaders of the Civil War. Vol. 1. New York: The Century Company, 1887. Dyer, Frederick H. A Compendium of the War of the Rebellion. Des Moines, IA: Dyer Pub. Co., 1908. Haskin, William L., ed. The History of the First Regiment of Artillery. Portland, ME: B. Thurston and Company, 1879. Haskin, William L. "The First Regiment of Artillery." In Haskin, William L. and Theophilus F. Rodenbough. The Army of the United States. New York: Maynard, Merrill, & Co., 1896. Heitman, Francis B. Historical Register and Dictionary of the United States Army. Washington, D.C.: Government Printing Office, 1903. Randol, Alanson M. “From January, 1862, to August, 1864” in Haskin, William L., ed. The History of the First Regiment of Artillery. Portland, ME: B. Thurston and Company, 1879. External links Battery E, 1st U.S. Light Artillery monuments at Gettysburg Battlefield United States Regular Army Civil War units and formations U
The Canadian Parliamentary Guide, formerly known as the Canadian Parliamentary Companion and the Canadian Parliamentary Companion and Annual Register, is a reference publication which lists the members of the House of Commons of Canada and the Senate of Canada as well as of the provincial and territorial legislatures. It also includes short biographies of each member and results from the most recent election and by-elections. It was first published in 1862 and has been published annually (occasionally biennially) since 1867. It is currently published by Grey House Publishing Canada. Editors included: Henry James Morgan Charles Herbert Mackintosh John Alexander Gemmill Arnott James Magurn P.G. Normandin A.L. Normandin External links Canadian Parliamentary Guide, Grey House Publishing Canada Parliament of Canada, Government of Canada publications, University of British Columbia Online editions from 1862 to 1897 Canadian biographies Canadian political websites 1862 establishments in Canada
Nord-Ubangi (French for "North Ubangi") is one of the 21 new provinces of the Democratic Republic of the Congo created in the 2015 repartitioning. Nord-Ubangi, Équateur, Mongala, Sud-Ubangi, and Tshuapa provinces are the result of the dismemberment of the former Équateur province. The province is located in the northwestern part of the country on the Ubangi River and was formed from the Nord-Ubangi district and the independently administered city of Gbadolite which became the capital of the new province. Administration The capital is the city of Gbadolite. It is divided into four territories: Bosobolo Businga Mobayi-Mbongo Yakoma References Provinces of the Democratic Republic of the Congo
Paradidymocentrus parterufipennis is a species of beetle in the family Cerambycidae. It was described by Breuning in 1956. References Acanthocinini Beetles described in 1956
Huaya District is one of twelve districts of the province Víctor Fajardo in Peru. Ethnic groups The people in the district are mainly indigenous citizens of Quechua descent. Quechua is the language which the majority of the population (90.38%) learnt to speak in childhood; 9.35% of the residents first learnt to speak Spanish (2007 Peru Census). See also Kinwamayu References
Baixa da Banheira e Vale da Amoreira is a civil parish in the municipality of Moita, Portugal. It was formed in 2013 by the merger of the former parishes Baixa da Banheira and Vale da Amoreira. The population in 2011 was 30,949, in an area of 6.42 km2. References Freguesias of Moita
```objective-c
//
//  MPEmployee.h
//  MobileProject
//
//  Usage:
//MPEmployee *employee=[MPEmployee employeeWithType:MPEmployeeTypeDesigner];
//[employee doADaysWork];
//
//
//  Created by wujunyang on 2017/2/23.
//

#import <Foundation/Foundation.h>

// Role selector passed to +employeeWithType:. Presumably each role maps to
// different doADaysWork behaviour — TODO confirm in MPEmployee.m.
typedef NS_ENUM(NSInteger,MPEmployeeType) {
    MPEmployeeTypeDeveloper,
    MPEmployeeTypeDesigner,
    MPEmployeeTypeFinance
};

// An employee with a name, a salary, and a day's work to do.
@interface MPEmployee : NSObject

// Employee display name. `copy` guards against a mutable string being
// mutated by the caller after assignment. Note: no `nonatomic`, so the
// accessors default to atomic.
@property(copy) NSString *name;

// Salary amount; units/currency are not specified in this header.
@property NSUInteger salary;

// Factory returning an employee configured for the given role (see the
// usage example above). Likely returns a role-specific subclass — verify
// against the implementation.
+(MPEmployee *)employeeWithType:(MPEmployeeType)type;

// Performs the employee's daily work; what that entails per role is
// defined in the implementation, not visible here.
-(void)doADaysWork;

@end
```
```javascript var wrappy = require('wrappy') var reqs = Object.create(null) var once = require('once') module.exports = wrappy(inflight) function inflight (key, cb) { if (reqs[key]) { reqs[key].push(cb) return null } else { reqs[key] = [cb] return makeres(key) } } function makeres (key) { return once(function RES () { var cbs = reqs[key] var len = cbs.length var args = slice(arguments) // XXX It's somewhat ambiguous whether a new callback added in this // pass should be queued for later execution if something in the // list of callbacks throws, or if it should just be discarded. // However, it's such an edge case that it hardly matters, and either // choice is likely as surprising as the other. // As it happens, we do go ahead and schedule it for later execution. try { for (var i = 0; i < len; i++) { cbs[i].apply(null, args) } } finally { if (cbs.length > len) { // added more in the interim. // de-zalgo, just in case, but don't call again. cbs.splice(0, len) process.nextTick(function () { RES.apply(null, args) }) } else { delete reqs[key] } } }) } function slice (args) { var length = args.length var array = [] for (var i = 0; i < length; i++) array[i] = args[i] return array } ```
Brent Charleton, CFA, (born 11 April 1982) is a Canadian businessman and former basketball player who currently serves as the President and chief executive officer for an industrial technology company called EnWave Corporation. After graduating from Simon Fraser University in 2005, Charleton was a professional basketball player for three consecutive years, competing in both the New Zealand National Basketball League and Australian National Basketball League. Early life and education Brent Charleton was born in Burnaby, British Columbia, Canada. He is the oldest of three, having two younger sisters. His father has the New Zealand nationality, while his mother is Canadian. After graduating from Carson Graham High School in 2000, Charleton attended Simon Fraser University, from which he graduated with a Bachelor of Arts degree and a Major in Criminology. While attending Simon Fraser University, he was President of the Student Athlete Council and Captain of the university's Varsity Men's Basketball Team. He passed the school's all-time scoring record set by Jay Triano - Triano set the record over four seasons while Charleton's mark was set over five. Professional Basketball Career At Simon Fraser University Charleton contacted the only New Zealand basketball team that also played in the Australian National Basketball League, the New Zealand Breakers. This led to a tryout in China, where the New Zealand Breakers were touring. Following that tour, Charleton was asked to play for the North Harbour Heat in the National Basketball League (NBL) – the New Zealand basketball league. He played for the North Harbour Heat in the 2005 and 2006 seasons. Also in 2005, Charleton made his New Zealand national team debut for the Tall Blacks. As his father is a New Zealander, he qualified to play for the country. After 2006, he played a season for the Otago Nuggets, a New Zealand basketball team based in Dunedin in the south of New Zealand. 
Finally, he played three seasons for the New Zealand Breakers back in Auckland. Despite the successes in the NBL, Charleton decided to return to Canada in 2008 to focus on his education and business career. British Columbia Institute of Technology Between September 2008 and May 2010, Charleton was a student of the Marketing Management Diploma program at the British Columbia Institute of Technology (BCIT), where he focused on Entrepreneurship. In addition, as President of Students in Free Enterprise at BCIT he managed over 100 members and completed 13 socially responsible projects. These projects generated over $800,000 in economic activity. He graduated with distinction, as one of the top students within the business program. During the two year time frame he received several scholarships, and was awarded the BCIT Alumni Outstanding Student Leadership Award. This led to Charleton being hired at EnWave Corporation as a Marketing Coordinator in March 2010, and he became CEO of the company in 2018. Social & Personal Life Charleton is a volunteer mentor for the well-known business education program Junior Achievement, where he mentors high school students on how to start and structure a business. In addition, he's volunteered as an assistant basketball coach at Carson Graham Secondary School. He's married and has one daughter. References 1982 births Living people Basketball people from British Columbia Canadian expatriates in New Zealand Canadian men's basketball players Canadian people of English descent Guards (basketball) New Zealand men's basketball players New Zealand people of Canadian descent Simon Fraser Red Leafs men's basketball players Basketball players from Vancouver
"Teacher's Pet" is the fourth episode of the first season of the television series Buffy the Vampire Slayer. The episode originally aired on The WB on March 24, 1997, attracting 2.0 million viewers. The episode was written by co-executive producer David Greenwalt and directed by Bruce Seth Green. Xander and the other high school boys fall for a substitute teacher who has the aspects of a killer preying mantis. Plot After a biology class, Dr. Gregory is killed by an unseen monster, which only shows a pair of large eyes and an insectile limb. The next day, Buffy is alarmed by news of Gregory's disappearance, but the boys in her class are more interested in the beautiful substitute teacher, Natalie French (Musetta Vander), who seems to have a fixation on insects, especially the praying mantis. French suggests making model egg sacs for the upcoming science fair and asks the class for help. She selects Blayne as her lab partner for that day, to be followed by Xander the next day. Cordelia finds Gregory's headless body inside a cafeteria refrigerator. That night Buffy goes into the park and confronts a vampire who has a large claw in place of his right hand. They fight but are interrupted by the police and the vampire escapes. He encounters French while she is walking home with groceries and flees in terror, indicating that French is not human. The next day Buffy is late for her biology class and is horrified to watch as French seems to sense somebody at the door and then turns her head 180 degrees to see who it is. After the class, French claims to have left supplies at home so she asks Xander to come over to her house that evening and work on the egg-sac project there instead. Back in the library, Buffy realizes that Blayne never returned home from helping French. Giles recalls a creature known as the "She-Mantis", or the "Virgin Thief", which preys on virgin males to fertilize its young. 
That night, Xander arrives at French's house to find her wearing a tight dress and acting in a sexually suggestive manner. She offers him a drink, which he takes and then collapses. She turns into her mantis form and takes his body to a cage in the basement, where he wakes up next to Blayne. Meanwhile, Willow calls Xander's mother and finds out that he is not home. The Scoobies then go to the house where French supposedly lives, but find a retired teacher there whose name the mantis has stolen. Desperate to find the real house before it is too late, Buffy tracks the one-handed vampire and forces him to locate the correct house. Buffy breaks through the window just as French in mantis form is about to mate with Xander. She burns the monster with insect repellent while the others free Xander and Blayne. Giles and Buffy, using recorded bat sonar, send French into convulsions so Buffy can hack her to death with a machete. The next day, Buffy sadly puts Gregory's glasses back in his closet, not noticing that a sac of she-mantis eggs is attached to the bottom of a shelf and one begins to hatch. Broadcast and reception "Teacher's Pet" was first broadcast on The WB on March 24, 1997. It pulled in an audience of 2 million households. Noel Murray of The A.V. Club gave the episode a grade of B. He wrote that the "fundamental goofiness" of the premise was a "strike against" the episode, but that it benefited from "depth of characterization". Murray commented that the episode's subtext was the teenage fear of the reproductive practicalities of sex. A BBC review said that the episode "struggles to tread new ground" and was "uncomfortably paced". However, the review praised the effects of the praying mantis and some "delightful moments". DVD Talk's Phillip Duncan was somewhat disappointed with the episode, calling it a "by-the-book monster thriller set in the high school". Despite the standardness, he felt that it was still "worth watching". 
References External links Buffy the Vampire Slayer (season 1) episodes 1997 American television episodes Television episodes written by David Greenwalt Television episodes about insects it:Episodi di Buffy l'ammazzavampiri (prima stagione)#La mantide
CEPCI may refer to: Cashew Export Promotion Council of India Chemical Engineering Plant Cost Index
```text
Check if an argument is a number
Setting default values with `short circuiting`
Types of numbers
Precision
How to merge two arrays
```
Burmese Days is the first novel by English writer George Orwell, published in 1934. Set in British Burma during the waning days of empire, when Burma was ruled from Delhi as part of British India, the novel serves as "a portrait of the dark side of the British Raj." At the centre of the novel is John Flory, "the lone and lacking individual trapped within a bigger system that is undermining the better side of human nature." The novel describes "both indigenous corruption and imperial bigotry" in a society where, "after all, natives were natives—interesting, no doubt, but finally...an inferior people". Burmese Days was first published "further afield," in the United States, because of concerns that it might be potentially libelous; that the real provincial town of Katha had been described too realistically; and that some of its fictional characters were based too closely on identifiable people. A British edition, with altered names, appeared a year later. Nonetheless, Orwell's harsh portrayal of colonial society was felt by "some old Burma hands" to have "rather let the side down". In a letter from 1946, Orwell wrote, "I dare say it's unfair in some ways and inaccurate in some details, but much of it is simply reporting what I have seen". Background Orwell spent five years from 1922 to 1927 as a police officer in the Indian Imperial Police force in Burma (now Myanmar). The British had gradually annexed Burma in stages, and it was not until 1885, when they captured the royal capital of Mandalay, that Burma as a whole could be declared part of the British Empire. Migrant workers from India and China supplemented the native Burmese population. Although Burma was the wealthiest country in Southeast Asia under British rule, as a colony it was seen very much as a backwater. 
The image which Britons were meant to uphold in these communities was a huge burden and the majority of them carried expectations all the way from Britain with the intention of maintaining their customs and rule. Among its exports, the country produced 75 per cent of the world's teak from up-country forests. When Orwell came to the Irrawaddy Delta in January 1924 to begin his career as an imperial policeman, the delta was Burma's leading exporting region, providing three million tons of rice annually, half the world's supply. Orwell served in a number of locations in Burma. After a year of training in Mandalay and Maymyo, his postings included Myaungmya, Twante, Syriam, Moulmein, and Kathar. It also included Insein, situated north of Rangoon, the site of the colony's most secure prison, and now Burma's most notorious jail. Burmese Days was several years in the writing. Orwell drafted it in Paris from 1928 to 1929. He revised it in 1932 at Southwold while doing up the family home during the summer holidays. By December 1933 he had typed the final version, and in 1934 delivered it to his agent, Leonard Moore, who submitted it to Victor Gollancz, the publisher of Orwell's previous book. Gollancz, already fearing prosecution from having published another author's work, turned it down because he was worried about charges of libel. Heinemann and Cape turned it down for the same reason. After demanding alterations, Harpers was prepared to publish it in the United States, where it appeared in 1934. In the spring of 1935, Gollancz declared that he was prepared to publish a British edition provided Orwell could demonstrate he had not named real people. To that end, extensive checks were made in colonial lists before Gollancz brought out the English version on 24 June 1935. 
Nonetheless, many of the main European names appearing in the novel have since been identified in the Rangoon Gazette as belonging to real people, with the name "U Po Kyin" in particular belonging to a Burmese officer who was at the Police Training School in Mandalay with Orwell. Plot summary Burmese Days is set in 1920s British Burma, in the fictional district of Kyauktada, based on Kathar (formerly spelled Katha), a town where Orwell served. Like the fictional town, it is the head of a branch railway line above Mandalay on the Ayeyarwady (Irrawaddy) River. As the story opens, U Po Kyin, a corrupt Burmese magistrate, is planning to destroy the reputation of the Indian, Dr Veraswami. The doctor hopes for help from his friend John Flory who, as a pukka sahib (European white man), has higher prestige. Dr Veraswami also desires election to the town's European Club, of which Flory is a member, expecting that good standing among the Europeans will protect him from U Po Kyin's intrigues. U Po Kyin begins a campaign to persuade the Europeans that the doctor holds anti-British opinions in the belief that anonymous letters with false stories about the doctor "will work wonders". He even sends a threatening letter to Flory. John Flory, a jaded 35-year-old teak merchant with a birthmark on his face in the shape of a ragged crescent, spends three weeks of every month acquiring jungle timber. Friendless among his fellow Europeans and unmarried, but with a Burmese mistress, he has become disillusioned with life in an expatriate community centred round the local European Club in a remote provincial town. At the same time, he has become so embedded in Burma that it is impossible for him to leave and return to England. Flory has one good friend, the Indian, Dr Veraswami, whom he often visits for what the Doctor delightedly calls "cultured conversation". 
But when Flory dismisses the British as mere moneymakers, living a lie, "the lie that we're here to uplift our poor black brothers instead of to rob them," he provokes consternation in the doctor, who defends the British as the efficient administrators of an unrivalled empire. Toward his mistress, Flory is emotionally ambivalent: "On the one hand, Flory loves Burma and craves a partner who will share his passion, which the other local Europeans find incomprehensible; on the other hand, for essentially racist reasons, Flory feels that only a European woman is acceptable as a partner". Flory's wish seems to be answered with the arrival of Elizabeth Lackersteen, the orphaned niece of Mr Lackersteen, manager of the local timber firm. Flory rescues her when she believes she is about to be attacked by a small water buffalo. He is immediately taken with her and they spend some time together, culminating in a highly successful shooting expedition. Flory shoots a leopard, promising the skin to Elizabeth as a trophy. Lost in romantic fantasy, Flory imagines Elizabeth to be the sensitive object of his desire, the European woman who will "understand him and give him the companionship he needed". He turns Ma Hla May, his pretty, scheming Burmese mistress, out of his house. However, whereas Flory extols the virtues of the rich culture of the Burmese, the latter frighten and repel Elizabeth, who regards them as "beastly." Worse still is Flory's interest in high art and literature, which reminds Elizabeth of her pretentious mother who died in disgrace in Paris of ptomaine poisoning as a result of living in squalid conditions while masquerading as a Bohemian artist. Despite these reservations, of which Flory is entirely unaware, she is willing to marry him to escape poverty, spinsterhood, and the unwelcome advances of her perpetually inebriated uncle. Flory is about to ask her to marry him, but they are interrupted first by her aunt and secondly by an earthquake. 
Mrs Lackersteen's interruption is deliberate because she has discovered that a military police lieutenant named Verrall is arriving in Kyauktada. As he comes from an extremely good family, she sees him as a better prospect as a husband for Elizabeth. Mrs Lackersteen tells Elizabeth that Flory is keeping a Burmese mistress as a deliberate ploy to send her to Verrall. Indeed, Flory had been keeping a mistress, but had dismissed her almost the moment Elizabeth had arrived. Elizabeth is appalled and falls at the first opportunity for Verrall, who is arrogant and ill-mannered to all but her. Flory is devastated and after a period of exile attempts to make amends by delivering to her the leopard skin. A bungled curing process has left the skin mangy and stinking and the gesture merely compounds his status as a poor suitor. When Flory delivers it to Elizabeth she accepts it regardless of the fact that it stinks and he talks of their relationship, telling her he still loves her. She responds by telling him that unfortunately the feelings aren't mutual and leaves the house to go horse riding with Verrall. When Flory and Elizabeth part ways, Mrs Lackersteen orders the servants to burn the reeking leopard skin, representing the deterioration of Flory and Elizabeth's relationship. U Po Kyin's campaign against Dr Veraswami turns out to be intended simply to further his aim of becoming a member of the European Club in Kyauktada. The club has been put under pressure to elect a native member and Dr Veraswami is the most likely candidate. U Po Kyin arranges the escape of a prisoner and plans a rebellion for which he intends that Dr Veraswami should get the blame. The rebellion begins and is quickly put down, but a native rebel is killed by acting Divisional Forest Officer, Maxwell. Uncharacteristically courageous, Flory speaks up for Dr Veraswami and proposes him as a member of the club. 
At this moment the body of Maxwell, cut almost to pieces with dahs by two relatives of the man he had shot, is brought back to the town. This creates tension between the Burmese and the Europeans which is exacerbated by a vicious attack on native children by the spiteful and racist timber merchant, Ellis. A large but ineffectual anti-British riot begins and Flory becomes the hero for bringing it under control with some support by Dr Veraswami. U Po Kyin tries to claim credit but is disbelieved and Dr Veraswami's prestige is restored. Verrall leaves Kyauktada without saying goodbye to Elizabeth and she falls for Flory again. Flory is happy and plans to marry Elizabeth. However, U Po Kyin has not given up. He hires Flory's former Burmese mistress to create a scene in front of Elizabeth during the sermon at church. Flory is disgraced and Elizabeth refuses to have anything more to do with him. Overcome by the loss and seeing no future for himself, Flory kills first his dog, and then himself. Dr Veraswami is demoted and sent to a different district and U Po Kyin is elected to the club. U Po Kyin's plans have succeeded and he plans to redeem his life and cleanse his sins by financing the construction of pagodas. He dies of apoplexy before he can start building the first pagoda and his wife envisages him returning to life as a frog or rat. Elizabeth eventually marries Macgregor, the deputy commissioner, and lives happily in contempt of the natives, who in turn live in fear of her, fulfilling her destiny of becoming a "burra memsahib", a respectful term given to white European women. Characters John (in some editions, James) Flory: referred to as just "Flory" throughout the novel. He is the central character, a timber merchant in his mid-thirties. He has a long, dark blue birthmark that stretches from his eye to the side of his mouth on his left cheek, and he tries to avoid showing people the left side of his face as he thinks the birthmark is hideous. 
Whenever he is ashamed or looks down upon himself he remembers his birthmark, a symbol of his weakness. He is very friendly with the Indian Dr Veraswami, and appreciates Burmese culture. This brings him into conflict with members of the club, who dislike his slightly radical views. Because of his rather shy personality and the fact that he dislikes quarrels, he is an easy target in arguments, especially with Ellis. This discourages him from fully advocating for the Burmese. He suffers a great deal emotionally because he is infatuated with Elizabeth. All he can think about is Elizabeth but they have conflicting interests and she does not reciprocate the love. Flory supports the Burmese whereas Elizabeth regards them as beasts. Flory wants Elizabeth to appreciate him, especially with his hindering birthmark, yet he wants to support the Burmese. Due to his indecisive personality he is caught between supporting the Burmese and the English. After Elizabeth leaves Flory the second time, he commits suicide. Elizabeth Lackersteen: An unmarried English girl who has lost both her parents and comes to stay with her remaining relatives, the Lackersteens, in Burma. Before her flighty mother died, they had lived together in Paris. Her mother fancied herself an artist, and Elizabeth grew to hate the Bohemian lifestyle and cultural connections. Elizabeth is 22, 'tallish for a girl, slender", with fashionably short hair and wears tortoise shell glasses. Throughout the novel, she seeks to marry a man because her aunt keeps pressuring her and she idolises wealth and social class, neither of which she could achieve without a husband during this time period. When she first meets Flory, he falls in love because he values white women over Burmese women. After leaving Flory for the first time, she courts Verrall, who leaves her abruptly without saying goodbye. The second time she leaves Flory (and following his suicide), she marries Deputy Commissioner MacGregor. 
Mr Lackersteen: Elizabeth's uncle and Mrs Lackersteen's husband. Lackersteen is the manager of a timber firm. He is a heavy drinker whose main object in life is to have a "good time". However his activities are curtailed by his wife who is ever watching "like a cat over a bloody mousehole" because ever since she returned after leaving him alone one day to find him surrounded by three naked Burmese girls, she does not trust him alone. Lackersteen's lechery extends to making sexual advances towards his niece, Elizabeth. Mrs Lackersteen: Elizabeth's aunt and Mr Lackersteen's wife. Mrs Lackersteen is "a woman of about thirty-five, handsome in a contourless, elongated way, like a fashion plate". She is a classic memsahib, the title used for wives of officials in the Raj. Neither she nor her niece have taken to the alien country or its culture. (In Burmese Days Orwell defines the memsahib as "yellow and thin, scandal mongering over cocktails—living twenty years in the country without learning a word of the language."). And because of this, she strongly believes that Elizabeth should marry an upper-class man who can provide her with a home and accompanying riches. She pesters Elizabeth into finding a husband: first she wants her to wed Verrall, then after he leaves, Flory. Dr Veraswami: An Indian doctor and a good friend of Flory. He has nothing but respect for the Britons living in Burma and often makes a point of vocally expressing his support for British colonial rule when he can, even though many in the European community, including Ellis, don't respect him. Veraswami and Flory often discuss various topics, with Veraswami presenting the British point of view and Flory taking the side of the Burmese. Dr Veraswami is targeted by U Po Kyin in pursuit of membership of the European club. Dr Veraswami wants to become a member of the club so that it will give him prestige which will protect him from U Po Kyin's attempts to exile him from the district. 
Because he respects Flory, he does not pester him to get him admitted into the club. Eventually U Po Kyin's plan to exile Dr Veraswami comes through. He is sent away to work in another run-down hospital elsewhere. U Po Kyin: A corrupt and cunning magistrate who is hideously overweight, but perfectly groomed and wealthy. He is 56 and the "U" in his name is his title, which is an honorific in Burmese society. He feels he can commit whatever wicked acts he wants—cheat people of their money, jail the innocent, abuse young girls—because although, "According to Buddhist belief those who have done evil in their lives will spend the next incarnation in the shape of a rat, frog, or some other low animal", he intends to provide against these sins by devoting the rest of his life to good works such as financing the building of pagodas, "and balance the scales of karmic justice". He continues his plans to attack Dr Veraswami, instigating a rebellion as part of the exercise, to make Dr Veraswami look bad and eliminate him as a potential candidate of the club, so he can secure the membership for himself. He believes his status as a member of the club will cease the intrigues that are directed against him. He loses pre-eminence when Flory and Vereswami suppress the riot. After Flory dies, Kyin becomes a member of the European Club. Shortly after his admission into the club he dies, unredeemed, before the building of the pagodas. "U Po has advanced himself by thievery, bribery, blackmail and betrayal, and his corrupt career is a serious criticism of both the English rule that permits his success and his English superiors who so disastrously misjudge his character". Ma Hla May: Flory's Burmese mistress who has been with him for two years before he meets Elizabeth. Ma Hla May believes herself to be Flory's unofficial wife and takes advantage of the privileges that come along with being associated with the European community in Burma. 
Flory has been paying her expenses throughout their time together. However, after he becomes enchanted with Elizabeth, he informs her that he no longer wants anything to do with her. Ma Hla May is distraught and repeatedly blackmails him. Once thrown out of Flory's house, the other villagers dissociate themselves from her and she cannot find herself a husband to support her. Encouraged by U Po Kyin, who has an alternate agenda to ruin Flory's reputation within the club, she approaches Flory in front of the Europeans and creates a dramatic scene so everyone knows of his intimacy with her. This outburst taints Elizabeth's perception of Flory for good. Eventually she goes to work in a brothel elsewhere. Ko S'la: Flory's devoted servant since the day he arrived in Burma. They are close to the same age and Ko S’la has since taken care of Flory. Though he serves Flory well, he does not approve of many of his activities, especially his relationship with Ma Hla May and his drinking habits. He believes that Flory should get married. Flory has remained in the same reckless state that he was in upon arriving in Burma. In Ko S’la's eyes, Flory is still a boy. Ko S’la, on the other hand, has moved on with his life as he has taken wives and fathered five children. He pities Flory due to his childish behaviour and his birthmark. Lieutenant Verrall: A military policeman who has a temporary posting in the town. He is everything that Flory is not—young, handsome, privileged. He is the youngest son of a peer and looks down on everyone, making no concessions to civility and good manners. His only concern while in town is playing polo. He takes no notice of a person's race, everyone is beneath him. Verrall is smug and self-centered. Encouraged by her aunt, Elizabeth pursues Verrall as a suitor, but he uses her only for temporary entertainment. In the end, he vanishes from town without a word to Elizabeth. Mr Macgregor: Deputy Commissioner and secretary of the club. 
He is upright and well-meaning, although also pompous and self-important. U Po Kyin contacts Mr Macgregor through anonymous letters as he continues his attacks on Dr Veraswami to gain a position in the club. As one of the only single men left in the town, he marries Elizabeth. Ellis: A spiteful and violent racist who manages a timber company in upper Burma. He is a vulgar and rude member of the club who likes to stir up scandals. Ellis firmly maintains that the Burmese people are completely incapable of ruling the country themselves. His hatred of the Burmese culture causes some clashes with Flory due to Flory's friendliness with the Burmese, especially Dr Veraswami. Ellis is in support of U Po Kyin's plan to ruin the reputation of Dr Veraswami and needs no evidence whatsoever of Dr Veraswami's guilt. Francis and Samuel: Francis is a Eurasian clerk to an Indian money lender, whilst Samuel is a clerk to some of the pleaders. Both are sons of Christian missionaries, the book explores attitudes towards their mixed heritage. Style Orwell biographer D. J. Taylor notes that "the most striking thing about the novel is the extravagance of its language: a riot of rococo imagery that gets dangerously out of hand."<ref>D. J. Taylor, Orwell: The Life, Chatto & Windus, 2003.</ref> Another of Orwell's biographers, Michael Shelden, notes that Joseph Conrad, Somerset Maugham and E. M. Forster have been suggested as possible influences, but believes also that "the ghost of Housman hangs heavily over the book." The writers Stansky and Abrahams, while noting that the character Flory probably had his roots in Captain Robinson, a cashiered ex-officer whom Orwell had met in Mandalay, "with his opium-smoking and native women", affirmed that Flory's "deepest roots are traceable to fiction, from Joseph Conrad's Lord Jim through all those Englishmen gone to seed in the East which are one of Maugham's better-known specialities." 
Jeffrey Meyers, in a 1975 guide to Orwell's work, wrote of the E. M. Forster connection that, "Burmese Days was strongly influenced by A Passage to India, which was published in 1924 when Orwell was serving in Burma. Both novels concern an Englishman's friendship with an Indian doctor, and a girl who goes out to the colonies, gets engaged and then breaks it off. Both use the Club scenes to reveal a cross-section of colonial society, and both measure the personality and value of the characters by their racial attitudes...But Burmese Days is a far more pessimistic book than A Passage to India, because official failures are not redeemed by successful personal relations." Orwell himself was to note in Why I Write (1946) that "I wanted to write enormous naturalistic novels with unhappy endings, full of detailed descriptions and arresting similes, and also full of purple passages in which my words were used partly for the sake of their sound. And in fact my first complete novel, Burmese Days...is rather that kind of book." Themes Colonialism Each of the characters in the novel holds differing views towards colonialism, influenced by their background and status in society. According to University of Singapore scholar Steven L. Keck, the novel's depiction of colonialism led it to become "a part of the mythology of imperial experience not only for Burma, but for the British Empire as a whole". Burmese Days takes place during a period of Burmese history when it was under British colonial rule, and Orwell intended the novel to serve as a critique of colonialism, both in the effects it had on the Burmese and the British. Colonial society in Burma is depicted as being divided on racial lines, "with [the Europeans] exploiting both the land and peoples of Burma, while finding that the cost of exile and isolation was to fight a continuous battle against despair"; the Burmese and Indians, on the other hand, are depicted as both supportive and opposed to colonial rule. 
Keck speculated that the fact that the Saya San peasant rebellion was ongoing during the period influenced Orwell's pessimistic attitude towards colonialism. Burmese Days frequently uses characters in the novel to illustrate larger arguments about colonial rule. When Flory, someone who had grown disillusioned with colonialism, enters into a debate with Dr Veraswami about British colonial rule, each makes several points about the effects of colonialism in Burma. Flory charges that the British are only interested in Burma due to the economic opportunities the colony provides, and are living a "lie that we're here to uplift our poor black brothers rather than to rob them". Dr Veraswami counters that British rule has improved Burma, pointing to the levels of infrastructure, healthcare and education in the colony. Veraswami also notes how if it were not a British colony, he would not have been able to become a doctor in Burma. Their argument continues, but they are unable to come to an agreement, and it ends inconclusively. The novel also explored the status of Burma as being part of the British Raj instead of being a separate colony. Burmese scholar Maung Htin Aung, in an article written about Burmese Days, claimed that the novel served as a "valuable historical document" due to the fact that it “recorded vividly the tensions that prevailed in Burma, and the mutual suspicion, despair and disgust that crept into Anglo-Burmese relations as the direct result of the Government of India Act leaving out Burma from the course of its reforms”. Isolation The character of Flory, a Pukka sahib, serves as an emblematic depiction of the isolation faced by the European community in colonial-era Burma, a topic Orwell also explored in his short story Shooting an Elephant. 
He is torn between his fascination with Burmese culture, which sees him attempt to befriend several Burmese, and his role as a colonial teak merchant (ensuring that, as a member of the ruling class, he can never become intimately familiar with such a culture). He also defends the Burmese and sympathizes with various issues they face in the argument with Dr Veraswami, confirming his disillusion with colonialism which isolates him from the European community. Flory aims to satisfy both the Burmese and the Europeans while ultimately pleasing neither, further confirming his isolation. Racism The theme of racism frequently appears in Burmese Days, being depicted in the interactions between the three primary ethnic groups in Burma — the Europeans, Indians and Burmese. The European gentleman's club in which Flory spends a significant portion of the novel holds a debate on whether or not to admit a "native" (referring to Burmese people) into the club, with the violent and spiteful racist Ellis immediately objecting and declaring that he will never share a club with "natives". Racism remains a strong theme throughout the novel, with the European community in Burma frequently expressing racist attitudes towards the Indians and Burmese they interact with. In the view of Keck, the depiction of racism in Burmese Days has been cited by numerous historians "as a means to explore some of the more important features of modern history". Reactions Harpers brought out Burmese Days in the US on 25 October 1934, in an edition of 2,000 copies. In February 1935, just four months after publication, 976 copies were remaindered. The only American review that Orwell himself saw, in the New York Herald Tribune, by Margaret Carson Hubbard, was unfavourable: "The ghastly vulgarity of the third-rate characters who endure the heat and talk ad nauseam of the glorious days of the British Raj, when fifteen lashes settled any native insolence, is such that they kill all interest in their doings." 
A positive review however came from an anonymous writer in the Boston Evening Transcript, for whom the central figure was, "analyzed with rare insight and unprejudiced if inexorable justice", and the book itself praised as full of "realities faithfully and unflinchingly realised." On its publication in Britain, Burmese Days earned a review in the New Statesman from Cyril Connolly as follows: Burmese Days is an admirable novel. It is a crisp, fierce, and almost boisterous attack on the Anglo-Indian. The author loves Burma, he goes to great length to describe the vices of the Burmese and the horror of the climate, but he loves it, and nothing can palliate for him, the presence of a handful of inefficient complacent public school types who make their living there... I liked it and recommend it to anyone who enjoys a spate of efficient indignation, graphic description, excellent narrative, excitement, and irony tempered with vitriol. Orwell received a letter from the anthropologist Geoffrey Gorer as follows Will you allow me to tell you how very much indeed I admire your novel Burmese Days: it seems to me an absolutely admirable statement of fact told as vividly and with as little bitterness as possible. It was as a result of these responses that Orwell renewed his friendship with Connolly, which was to give him useful literary connections, a positive evaluation in Enemies of Promise and an outlet on Horizon. He also became a close friend of Gorer. In 2013, the Burmese Ministry of Information named the new translation (by Maung Myint Kywe) of Burmese Days'' the winner of the 2012 Burma National Literature Award's "informative literature" (translation) category. The National Literary Awards are the highest literary awards in Burma. 
See also References External links "Orwell's Burma", an essay that originally appeared in Time Online version, The Literary Encyclopedia Another look at Burmese Days Burmese Days as a "valuable historical document" which "recorded vividly the tensions that prevailed in Burma, and the mutual suspicion, despair, and disgust that crept into Anglo-Burmese relations." Discusses the role that English clubs, like the one in Burmese Days, played in British India Discusses how Burmese Days is not a novel but a political statement based on the events that take place in the novel 1934 British novels Anti-imperialism Fictional suicides Novels set in the British Empire British rule in Burma Harper & Brothers books Novels by George Orwell Novels set in Myanmar 1934 debut novels
```php <?php

use Illuminate\Database\Schema\Blueprint;
use Illuminate\Database\Migrations\Migration;

/**
 * Adds a `taxamt` (tax amount) column to the `orders` table.
 */
class AddTaxamtToOrdersTable extends Migration
{
    /**
     * Run the migrations.
     *
     * Adds a nullable-free float column `taxamt` defaulting to 0.00 so that
     * existing order rows get a zero tax amount.
     *
     * @return void
     */
    public function up()
    {
        Schema::table('orders', function (Blueprint $table) {
            $table->float('taxamt')->default(0.00);
        });
    }

    /**
     * Reverse the migrations.
     *
     * Drops the `taxamt` column added by {@see up()}.
     *
     * @return void
     */
    public function down()
    {
        Schema::table('orders', function (Blueprint $table) {
            $table->dropColumn('taxamt');
        });
    }
}
```
```javascript 'use strict'; const JsonML = require('jsonml.js/lib/utils'); module.exports = (markdownData) => { const content = markdownData.content; const contentChildren = JsonML.getChildren(content); const dividerIndex = contentChildren.findIndex((node) => JsonML.getTagName(node) === 'hr'); if (dividerIndex >= 0) { markdownData.description = ['section'] .concat(contentChildren.slice(0, dividerIndex)); markdownData.content = [ JsonML.getTagName(content), JsonML.getAttributes(content) || {}, ].concat(contentChildren.slice(dividerIndex + 1)); } return markdownData; }; ```
Jan Kiedrowicz (born 11 March 1960) is a Polish chess International Master (1987). Chess career In 1982, Jan Kiedrowicz took 2nd place at the tournament held in Gdańsk. In 1983, he appeared in the final of Polish Chess Championship, played in Piotrków Trybunalski (taking 15th place). In the same year, he took 2nd place in Lesko and twice shared 3rd places at international chess tournaments in Białystok and Sopot. In 1990, in Gdynia Jan Kiedrowicz won a silver medal in Polish Blitz Chess Championship. In 1992, he won a bronze medal in Legnica in Polish Team Blitz Championship, and in 1994 in Lubniewice - a bronze medal in Polish Team Chess Championship (both in the colors of the chess club Górnik Zabrze). In 1996, Jan Kiedrowicz shared the 1st place (together with Jurij Zezulkin) in the Emanuel Lasker memorial in Barlinek. In 1999, he won the title of chess champion of Gdańsk, and in 2004 he was very successful, winning in Rowy the title of the Polish Chess Champion for the Disabled Persons (he also twice won silver medals in Polish Chess Championships for the Disabled Persons: 1998 and 2005). In 2010, he won the international chess tournament in Chojnice, and in 2014 - in Jastrzębia Góra. In 2015, in Jastrzębia Góra, he took 2nd place (behind Marcin Krzyżanowski). Jan Kiedrowicz reached his career highest rating on July 1, 1984, with a score of 2425 points, and was then in 6th - 8th place among Polish chess players. References External links Jan Kiedrowicz - Szachy w Polsce 1960 births Living people People from Chojnice Chess International Masters Polish chess players
```javascript 'use strict'; var equal = require('ajv/lib/compile/equal'); var validate = (function() { var pattern0 = new RegExp('^[0-9]+$'); var refVal = []; var refVal1 = (function() { var pattern0 = new RegExp('^[0-9]+$'); return function validate(data, dataPath, parentData, parentDataProperty, rootData) { 'use strict'; var vErrors = null; var errors = 0; if (rootData === undefined) rootData = data; if ((data && typeof data === "object" && !Array.isArray(data))) { var errs__0 = errors; var valid1 = true; for (var key0 in data) { var isAdditional0 = !(false || validate.schema.properties.hasOwnProperty(key0)); if (isAdditional0) { valid1 = false; var err = { keyword: 'additionalProperties', dataPath: (dataPath || '') + "", schemaPath: '#/additionalProperties', params: { additionalProperty: '' + key0 + '' }, message: 'should NOT have additional properties' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } } if (data.topBody !== undefined) { var errs_1 = errors; if (!refVal2(data.topBody, (dataPath || '') + '.topBody', data, 'topBody', rootData)) { if (vErrors === null) vErrors = refVal2.errors; else vErrors = vErrors.concat(refVal2.errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.topJoin !== undefined) { var errs_1 = errors; if (!refVal[2](data.topJoin, (dataPath || '') + '.topJoin', data, 'topJoin', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.topLeft !== undefined) { var errs_1 = errors; if (!refVal[2](data.topLeft, (dataPath || '') + '.topLeft', data, 'topLeft', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.topRight !== undefined) { var errs_1 = errors; if (!refVal[2](data.topRight, (dataPath || '') + '.topRight', data, 'topRight', 
rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bottomBody !== undefined) { var errs_1 = errors; if (!refVal[2](data.bottomBody, (dataPath || '') + '.bottomBody', data, 'bottomBody', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bottomJoin !== undefined) { var errs_1 = errors; if (!refVal[2](data.bottomJoin, (dataPath || '') + '.bottomJoin', data, 'bottomJoin', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bottomLeft !== undefined) { var errs_1 = errors; if (!refVal[2](data.bottomLeft, (dataPath || '') + '.bottomLeft', data, 'bottomLeft', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bottomRight !== undefined) { var errs_1 = errors; if (!refVal[2](data.bottomRight, (dataPath || '') + '.bottomRight', data, 'bottomRight', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bodyLeft !== undefined) { var errs_1 = errors; if (!refVal[2](data.bodyLeft, (dataPath || '') + '.bodyLeft', data, 'bodyLeft', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bodyRight !== undefined) { var errs_1 = errors; if (!refVal[2](data.bodyRight, (dataPath || '') + '.bodyRight', data, 'bodyRight', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = 
vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.bodyJoin !== undefined) { var errs_1 = errors; if (!refVal[2](data.bodyJoin, (dataPath || '') + '.bodyJoin', data, 'bodyJoin', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.joinBody !== undefined) { var errs_1 = errors; if (!refVal[2](data.joinBody, (dataPath || '') + '.joinBody', data, 'joinBody', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.joinLeft !== undefined) { var errs_1 = errors; if (!refVal[2](data.joinLeft, (dataPath || '') + '.joinLeft', data, 'joinLeft', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.joinRight !== undefined) { var errs_1 = errors; if (!refVal[2](data.joinRight, (dataPath || '') + '.joinRight', data, 'joinRight', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.joinJoin !== undefined) { var errs_1 = errors; if (!refVal[2](data.joinJoin, (dataPath || '') + '.joinJoin', data, 'joinJoin', rootData)) { if (vErrors === null) vErrors = refVal[2].errors; else vErrors = vErrors.concat(refVal[2].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } } else { var err = { keyword: 'type', dataPath: (dataPath || '') + "", schemaPath: '#/type', params: { type: 'object' }, message: 'should be object' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } validate.errors = vErrors; return errors === 0; }; })(); refVal1.schema = { "type": "object", "properties": { "topBody": { 
"$ref": "#/definitions/border" }, "topJoin": { "$ref": "#/definitions/border" }, "topLeft": { "$ref": "#/definitions/border" }, "topRight": { "$ref": "#/definitions/border" }, "bottomBody": { "$ref": "#/definitions/border" }, "bottomJoin": { "$ref": "#/definitions/border" }, "bottomLeft": { "$ref": "#/definitions/border" }, "bottomRight": { "$ref": "#/definitions/border" }, "bodyLeft": { "$ref": "#/definitions/border" }, "bodyRight": { "$ref": "#/definitions/border" }, "bodyJoin": { "$ref": "#/definitions/border" }, "joinBody": { "$ref": "#/definitions/border" }, "joinLeft": { "$ref": "#/definitions/border" }, "joinRight": { "$ref": "#/definitions/border" }, "joinJoin": { "$ref": "#/definitions/border" } }, "additionalProperties": false }; refVal1.errors = null; refVal[1] = refVal1; var refVal2 = (function() { var pattern0 = new RegExp('^[0-9]+$'); return function validate(data, dataPath, parentData, parentDataProperty, rootData) { 'use strict'; var vErrors = null; var errors = 0; if (typeof data !== "string") { var err = { keyword: 'type', dataPath: (dataPath || '') + "", schemaPath: '#/type', params: { type: 'string' }, message: 'should be string' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } validate.errors = vErrors; return errors === 0; }; })(); refVal2.schema = { "type": "string" }; refVal2.errors = null; refVal[2] = refVal2; var refVal3 = (function() { var pattern0 = new RegExp('^[0-9]+$'); return function validate(data, dataPath, parentData, parentDataProperty, rootData) { 'use strict'; var vErrors = null; var errors = 0; if (rootData === undefined) rootData = data; if ((data && typeof data === "object" && !Array.isArray(data))) { var errs__0 = errors; var valid1 = true; for (var key0 in data) { var isAdditional0 = !(false || pattern0.test(key0)); if (isAdditional0) { valid1 = false; var err = { keyword: 'additionalProperties', dataPath: (dataPath || '') + "", schemaPath: '#/additionalProperties', params: { 
additionalProperty: '' + key0 + '' }, message: 'should NOT have additional properties' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } } for (var key0 in data) { if (pattern0.test(key0)) { var errs_1 = errors; if (!refVal4(data[key0], (dataPath || '') + '[\'' + key0 + '\']', data, key0, rootData)) { if (vErrors === null) vErrors = refVal4.errors; else vErrors = vErrors.concat(refVal4.errors); errors = vErrors.length; } var valid1 = errors === errs_1; } } } else { var err = { keyword: 'type', dataPath: (dataPath || '') + "", schemaPath: '#/type', params: { type: 'object' }, message: 'should be object' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } validate.errors = vErrors; return errors === 0; }; })(); refVal3.schema = { "type": "object", "patternProperties": { "^[0-9]+$": { "$ref": "#/definitions/column" } }, "additionalProperties": false }; refVal3.errors = null; refVal[3] = refVal3; var refVal4 = (function() { var pattern0 = new RegExp('^[0-9]+$'); return function validate(data, dataPath, parentData, parentDataProperty, rootData) { 'use strict'; var vErrors = null; var errors = 0; if ((data && typeof data === "object" && !Array.isArray(data))) { var errs__0 = errors; var valid1 = true; for (var key0 in data) { var isAdditional0 = !(false || key0 == 'alignment' || key0 == 'width' || key0 == 'wrapWord' || key0 == 'truncate' || key0 == 'paddingLeft' || key0 == 'paddingRight'); if (isAdditional0) { valid1 = false; var err = { keyword: 'additionalProperties', dataPath: (dataPath || '') + "", schemaPath: '#/additionalProperties', params: { additionalProperty: '' + key0 + '' }, message: 'should NOT have additional properties' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } } var data1 = data.alignment; if (data1 !== undefined) { var errs_1 = errors; if (typeof data1 !== "string") { var err = { keyword: 'type', dataPath: (dataPath || '') + '.alignment', schemaPath: 
'#/properties/alignment/type', params: { type: 'string' }, message: 'should be string' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var schema1 = validate.schema.properties.alignment.enum; var valid1; valid1 = false; for (var i1 = 0; i1 < schema1.length; i1++) if (equal(data1, schema1[i1])) { valid1 = true; break; } if (!valid1) { var err = { keyword: 'enum', dataPath: (dataPath || '') + '.alignment', schemaPath: '#/properties/alignment/enum', params: { allowedValues: schema1 }, message: 'should be equal to one of the allowed values' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var valid1 = errors === errs_1; } if (data.width !== undefined) { var errs_1 = errors; if (typeof data.width !== "number") { var err = { keyword: 'type', dataPath: (dataPath || '') + '.width', schemaPath: '#/properties/width/type', params: { type: 'number' }, message: 'should be number' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var valid1 = errors === errs_1; } if (data.wrapWord !== undefined) { var errs_1 = errors; if (typeof data.wrapWord !== "boolean") { var err = { keyword: 'type', dataPath: (dataPath || '') + '.wrapWord', schemaPath: '#/properties/wrapWord/type', params: { type: 'boolean' }, message: 'should be boolean' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var valid1 = errors === errs_1; } if (data.truncate !== undefined) { var errs_1 = errors; if (typeof data.truncate !== "number") { var err = { keyword: 'type', dataPath: (dataPath || '') + '.truncate', schemaPath: '#/properties/truncate/type', params: { type: 'number' }, message: 'should be number' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var valid1 = errors === errs_1; } if (data.paddingLeft !== undefined) { var errs_1 = errors; if (typeof data.paddingLeft !== "number") { var err = { keyword: 'type', dataPath: (dataPath || '') + '.paddingLeft', schemaPath: 
'#/properties/paddingLeft/type', params: { type: 'number' }, message: 'should be number' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var valid1 = errors === errs_1; } if (data.paddingRight !== undefined) { var errs_1 = errors; if (typeof data.paddingRight !== "number") { var err = { keyword: 'type', dataPath: (dataPath || '') + '.paddingRight', schemaPath: '#/properties/paddingRight/type', params: { type: 'number' }, message: 'should be number' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } var valid1 = errors === errs_1; } } else { var err = { keyword: 'type', dataPath: (dataPath || '') + "", schemaPath: '#/type', params: { type: 'object' }, message: 'should be object' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } validate.errors = vErrors; return errors === 0; }; })(); refVal4.schema = { "type": "object", "properties": { "alignment": { "type": "string", "enum": ["left", "right", "center"] }, "width": { "type": "number" }, "wrapWord": { "type": "boolean" }, "truncate": { "type": "number" }, "paddingLeft": { "type": "number" }, "paddingRight": { "type": "number" } }, "additionalProperties": false }; refVal4.errors = null; refVal[4] = refVal4; return function validate(data, dataPath, parentData, parentDataProperty, rootData) { 'use strict'; /*# sourceURL=config.json */ var vErrors = null; var errors = 0; if (rootData === undefined) rootData = data; if ((data && typeof data === "object" && !Array.isArray(data))) { var errs__0 = errors; var valid1 = true; for (var key0 in data) { var isAdditional0 = !(false || key0 == 'border' || key0 == 'columns' || key0 == 'columnDefault' || key0 == 'drawHorizontalLine'); if (isAdditional0) { valid1 = false; var err = { keyword: 'additionalProperties', dataPath: (dataPath || '') + "", schemaPath: '#/additionalProperties', params: { additionalProperty: '' + key0 + '' }, message: 'should NOT have additional properties' }; if (vErrors === 
null) vErrors = [err]; else vErrors.push(err); errors++; } } if (data.border !== undefined) { var errs_1 = errors; if (!refVal1(data.border, (dataPath || '') + '.border', data, 'border', rootData)) { if (vErrors === null) vErrors = refVal1.errors; else vErrors = vErrors.concat(refVal1.errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.columns !== undefined) { var errs_1 = errors; if (!refVal3(data.columns, (dataPath || '') + '.columns', data, 'columns', rootData)) { if (vErrors === null) vErrors = refVal3.errors; else vErrors = vErrors.concat(refVal3.errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.columnDefault !== undefined) { var errs_1 = errors; if (!refVal[4](data.columnDefault, (dataPath || '') + '.columnDefault', data, 'columnDefault', rootData)) { if (vErrors === null) vErrors = refVal[4].errors; else vErrors = vErrors.concat(refVal[4].errors); errors = vErrors.length; } var valid1 = errors === errs_1; } if (data.drawHorizontalLine !== undefined) { var errs_1 = errors; var errs__1 = errors; var valid1; valid1 = typeof data.drawHorizontalLine == "function"; if (!valid1) { if (errs__1 == errors) { var err = { keyword: 'typeof', dataPath: (dataPath || '') + '.drawHorizontalLine', schemaPath: '#/properties/drawHorizontalLine/typeof', params: { keyword: 'typeof' }, message: 'should pass "typeof" keyword validation' }; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } else { for (var i1 = errs__1; i1 < errors; i1++) { var ruleErr1 = vErrors[i1]; if (ruleErr1.dataPath === undefined) ruleErr1.dataPath = (dataPath || '') + '.drawHorizontalLine'; if (ruleErr1.schemaPath === undefined) { ruleErr1.schemaPath = "#/properties/drawHorizontalLine/typeof"; } } } } var valid1 = errors === errs_1; } } else { var err = { keyword: 'type', dataPath: (dataPath || '') + "", schemaPath: '#/type', params: { type: 'object' }, message: 'should be object' }; if (vErrors === null) vErrors = [err]; else 
vErrors.push(err); errors++; } validate.errors = vErrors; return errors === 0; }; })(); validate.schema = { "$id": "config.json", "$schema": "path_to_url#", "type": "object", "properties": { "border": { "$ref": "#/definitions/borders" }, "columns": { "$ref": "#/definitions/columns" }, "columnDefault": { "$ref": "#/definitions/column" }, "drawHorizontalLine": { "typeof": "function" } }, "additionalProperties": false, "definitions": { "columns": { "type": "object", "patternProperties": { "^[0-9]+$": { "$ref": "#/definitions/column" } }, "additionalProperties": false }, "column": { "type": "object", "properties": { "alignment": { "type": "string", "enum": ["left", "right", "center"] }, "width": { "type": "number" }, "wrapWord": { "type": "boolean" }, "truncate": { "type": "number" }, "paddingLeft": { "type": "number" }, "paddingRight": { "type": "number" } }, "additionalProperties": false }, "borders": { "type": "object", "properties": { "topBody": { "$ref": "#/definitions/border" }, "topJoin": { "$ref": "#/definitions/border" }, "topLeft": { "$ref": "#/definitions/border" }, "topRight": { "$ref": "#/definitions/border" }, "bottomBody": { "$ref": "#/definitions/border" }, "bottomJoin": { "$ref": "#/definitions/border" }, "bottomLeft": { "$ref": "#/definitions/border" }, "bottomRight": { "$ref": "#/definitions/border" }, "bodyLeft": { "$ref": "#/definitions/border" }, "bodyRight": { "$ref": "#/definitions/border" }, "bodyJoin": { "$ref": "#/definitions/border" }, "joinBody": { "$ref": "#/definitions/border" }, "joinLeft": { "$ref": "#/definitions/border" }, "joinRight": { "$ref": "#/definitions/border" }, "joinJoin": { "$ref": "#/definitions/border" } }, "additionalProperties": false }, "border": { "type": "string" } } }; validate.errors = null; module.exports = validate; ```
Kim So-yeon (born October 6, 1995), known by the stage name Kassy (케이시), is a South Korean singer-songwriter. Since her debut in 2015, she has released five extended plays and numerous singles. In 2016 she competed on the survival reality show Unpretty Rapstar 3. Discography Extended plays Single albums Singles Soundtrack appearances Other charted songs Filmography Television Web Awards and nominations References 1995 births Living people South Korean women pop singers Unpretty Rapstar contestants
```go package main import ( "fmt" "os" "path/filepath" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" ) // Deletes the policy on a bucket. If the bucket doesn't exist, or there was // and error an error message will be printed instead. // // Usage: // // go run s3_delete_bucket_policy.go BUCKET_NAME func main() { if len(os.Args) != 2 { exitErrorf("bucket name required\nUsage: %s bucket_name", filepath.Base(os.Args[0])) } bucket := os.Args[1] // Initialize a session in us-west-2 that the SDK will use to load // credentials from the shared credentials file ~/.aws/credentials. sess, err := session.NewSession(&aws.Config{ Region: aws.String("us-west-2")}, ) // Create S3 service client svc := s3.New(sess) // Call S3 to delete the policy on the bucket. _, err = svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ Bucket: aws.String(bucket), }) if err != nil { if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket { // Special error handling for the when the bucket doesn't // exists so we can give a more direct error message from the CLI. exitErrorf("Bucket %q does not exist", bucket) } exitErrorf("Unable to delete bucket %q policy, %v", bucket, err) } fmt.Printf("Successfully deleted the policy on bucket %q.\n", bucket) } func exitErrorf(msg string, args ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", args...) os.Exit(1) } ```
National Highway 160C, commonly referred to as NH 160C is a national highway in India. It is a secondary route of National Highway 60. NH-160C runs in the state of Maharashtra in India. Route NH160C connects Rahuri, and Shani Shingnapur in the state of Maharashtra. Junctions Terminal near Rahuri. Terminal near Shingnapur. See also List of National Highways in India List of National Highways in India by state References External links NH 160C on OpenStreetMap National highways in India National Highways in Maharashtra
Hanns Theodor Wilhelm Freiherr von Gumppenberg (4 December 1866 – 29 March 1928) was a German poet, translator, cabaret artist and theatre critic. He used the pseudonyms Jodok and Professor Immanuel Tiefbohrer. Life Gumppenberg was born in 1866 in Landshut, the son of Karl Freiherr von Gumppenberg (1833–1893), a postal clerk from Bamberg and a scion of the original Bavarian noble family of Reichsfreiherren von Gumppenberg. His mother was Engelberta von Gumppenberg, née Sommer (1839–1920), daughter of a geographer. Both the father and already the grandfather (Bavarian member of parliament, landowner and major) were active in literature. The father wrote mostly dialectal drama and poetry, the grandfather belletristic works and witty Punch and Judy plays. Gumppenberg received an education at the in Munich, where he ventured his first attempts at poetry. After the page school and the Abitur at the Wilhelmsgymnasium München he took up studies in philosophy and literary history in Munich in 1885. For reasons of better livelihood, however, Gumppenberg decided three years later to take up legal studies. He eventually abandoned law studies to work as a freelance writer and journalist. In 1894, he married Charlotte Donnerstag (born 1870) in Berlin, who died in 1895. Gumppenberg was theatre critic of the Münchner Neueste Nachrichten from 1901 to 1909. From 1910 to 1913, together with Alfred Auscher, he was editor of the new artistic-literary journal Light and Shadow. Wochenschrift für Schwarz-Weiß-Kunst und Dichtung. Afterwards he worked as an author and editor for the magazine Jugend until his death. From 1902 onwards, Gumppenberg also regularly worked as a translator of foreign poetry, for example Swedish poems by Bellman, Fröding and Karlfeldt. After 1889, Gumppenberg moved in the circles of the Munich modernists, to which Michael Georg Conrad and his followers belonged first and foremost. 
Together with Georg Hoffmann, Julius Schaumberger and Otto Julius Bierbaum, he founded the in 1890.<ref>Cf. also Modernes Leben. Ein Sammelbuch der Münchner Modernen With contributions by Otto Julius Bierbaum, Julius Brand, M. G. Conrad, Anna Croissant-Rust, Hanns von Gumppenberg, Oskar Panizza, Ludwig Scharf, Georg Schaumberger, R. v. Seydlitz Fr. Wedekind. 1st series, Munich 1891. on the cultivation and dissemination of modern creative spirit in all fields: Social life, literature, art and science"</ref> In 1897, he married Helene Bondy (1868–1954), the daughter of the factory owner Ignaz Bondy and the Austrian women's rights activist Ottilie Bondy, in his second marriage. In 1901, under the pseudonym Jodok, he became a co-founder of the Munich cabaret as a writer of poetry and drama parodies. His parodistic work also eventually made him famous. Gumppenberg's collection of parodies Das Teutsche Dichterross, 1st edition 1901, went through a total of 14 editions. However, he remained unsuccessful with the main part of his work – mostly worldview and idea dramas. The First World War and inflation brought Gumppenberg into financial difficulties and from 1922 he was also in poor health. On 29 March 1928 he died in Munich of a heart condition at the age of 61. Gumppenberg's estate is housed in the Monacensia literary archive of the city of Munich. Work Thorwald. (Trauerspiel) München, 1888 Apollo. (comedy) J. Lindauer, München 1890 Das dritte Testament – Eine Offenbarung Gottes. Poesse, Munich 1891 Deutsche Lyrik von Gestern Kritik des Wirklich-Seienden – Grundlagen zu einer Philosophie des Wirklich-Seienden. Verlagsabtheilung der deutschen Schriftstellergenossenschaft, Berlin 1892 Alles und Nichts – Dichtung in 3 Abtheilungen und 12 Bildern. Baumert & Ronge, Großenhain und Leipzig: 1894 Die Minnekönigin. (comedy) Reclam, Leipzig 1894 Der fünfte Prophet. (novel) Verlag f. Deutsches Schriftthum, Berlin 1895 Der erste Hofnarr. 
(Schauspiel) Baumert & Ronge, Großenhain und Leipzig 1899 Das Teutsche Dichterross in allen Gangarten vorgeritten. (Parody) Verl. der Deutsch-Französischen Rundschau, Munich 1901. Die Verdammten. (Schauspiel) E. Bloch, Berlin 1901 (Jodok) Der Veterinärarzt – Mystodrama in einem Aufzug. in "Die elf Scharfrichter". Vo. 1, . Schuster und Loeffler, Berlin 1901 (Jodok) Der Nachbar – Monodrama in einem Satz. in "Die elf Scharfrichter". Vol. 1, . Schuster und Loeffler, Berlin 1901 (Jodok) Überdramen (Parodies, 3 volumes.) Th. Mayhofer Nachf., Berlin 1902 Die Einzige. (tragicomedy) Callwey, Munich 1903 Grundlagen der wissenschaftlichen Philosophie. Callwey, Munich 1903 König Konrad I. (geschichtliches Schauspiel) Callwey, Munich 1904 König Heinrich I. (geschichtliches Schauspiel) Callwey, Munich 1904 Herzog Philipps Brautfahrt. (Opernlustspiel) Callwey, Munich 1904 Aus meinem lyrischen Tagebuch. Callwey, Munich 1906 Bellman-Brevier – Aus Fredmans Episteln und Liedern, Deutsch von Hanns von Gumppenberg, Verlag von Albert Langen, Munich 1909 Beweis des Großen Fermat'schen Satzes für alle ungeraden Exponenten. Callwey, Munich 1913 Schauen und Sinnen. (poetry) G. Müller, Munich 1913 Schaurige Schicksale, fälschende Fama und leere Lorbeeren – Dokumentarisches über meine Bühnenwerke. Callwey, Munich 1914 Der Pinsel Yings. (comedy) Callwey, Munich 1914 Philosophie und Okkultismus. Rösl, Munich 1921 Das Teutsche Dichterross in allen Gangarten vorgeritten. (Parodies) 13. u. 14. erw. Aufl. Callwey, München 1929 Lebenserinnerungen. Aus dem Nachlass. 
Eigenbrödler Verlag, Berlin 1930 References Source External links Erlanger Liste (Texte) Nachlass in der Monacensia Die Gedichte on zgedichte.de Hanns von Gumppenberg im Literaturportal Bayern Gumppenberg als Paul Verlaine-Übersetzer: Meine Spitäler''.Insel-Verlag, Leipzig, in Project Gutenberg 19th-century German writers 20th-century German writers German theatre critics 19th-century German poets 19th-century German male writers 20th-century German poets German male poets Barons of Germany 1866 births 1928 deaths People from Landshut 20th-century German male writers
The Georgia Tech Yellow Jackets college football team represents the Georgia Institute of Technology in the Coastal Division of the Atlantic Coast Conference (ACC). The Yellow Jackets compete as part of the National Collegiate Athletic Association (NCAA) Division I Football Bowl Subdivision. The school has had 21 official head coaches and 3 interim head coaches since first fielding a team in 1892. Brent Key is the current head coach for the team, taking over after Geoff Collins was fired midway through the 2022 season. The team has played in over 1,300 games over 129 seasons. In that time, 10 official and 2 interim head coaches have overseen teams that have competed in postseason bowl games: William Alexander, Bobby Dodd, Bud Carson, Bill Fulcher, Pepper Rodgers, Bill Curry, Bobby Ross, George O'Leary, Mac McWhorter (interim), Chan Gailey, Jon Tenuta (interim), and Paul Johnson. Six coaches have also won a conference championship: John Heisman won three Southern Intercollegiate Athletic Association (SIAA) championships, Alexander won two SIAA, three Southern Conference, and three Southeastern Conference (SEC) championships, Dodd won two SEC championships, and Ross, O'Leary, and Johnson all have one ACC championship. During their tenures, Heisman, Alexander, Dodd, and Ross each won a national championship with the Yellow Jackets. Alexander holds the record for the longest tenure of any Georgia Tech coach, serving in that role for 244 games over 25 seasons. Cyrus W. Strickler has the highest winning percentage among full-time coaches, with a record of 4–0–1 (.900). If interim coaches were counted, McWhorter holds the highest percentage, with a record of 1–0 (1.000). Ernest E. West, Rufus B. Nalley, Harris T. Collier, and Tenuta had no wins during their career at Georgia Tech, with Collier holding the record for most losses without a win at 9. Four coaches (Heisman, Alexander, Dodd, and Johnson) have been inducted into the College Football Hall of Fame. 
Key Coaches Notes References Sources General Specific Georgia Tech Georgia (U.S. state) sports-related lists
```sqlpl
-- path_to_url
-- Regression query for the new query analyzer: a CROSS JOIN followed by a
-- LEFT JOIN whose USING column is also constrained in WHERE must execute
-- without error when enable_analyzer is on.
SET enable_analyzer = 1;
-- tb1: 100 owner ids; pt: three literal type values (cross-joined, so 300
-- rows); merged: the same 100 ids deduplicated via GROUP BY. The WHERE
-- equality on merged.owner_id effectively turns the LEFT JOIN into an
-- inner join before the final GROUP BY.
SELECT
    tb1.owner_id AS owner_id,
    type
FROM
(
    SELECT number AS owner_id
    FROM numbers(100)
) AS tb1
CROSS JOIN values('type varchar', 'type1', 'type2', 'type3') AS pt
LEFT JOIN
(
    SELECT tb2.owner_id AS owner_id
    FROM
    (
        SELECT number AS owner_id
        FROM numbers(100)
        GROUP BY owner_id
    ) AS tb2
) AS merged USING (owner_id)
WHERE tb1.owner_id = merged.owner_id
GROUP BY tb1.owner_id, type
-- Result rows are discarded: only clean execution matters for this test.
FORMAT `Null`;
```
The Pittsburgh and West Virginia Railway was a railroad in the Pittsburgh, Pennsylvania, and Wheeling, West Virginia, areas. Originally built as the Wabash Pittsburgh Terminal Railway, a Pittsburgh extension of George J. Gould's Wabash Railroad, the venture entered receivership in 1908 and the line was cut loose. An extension completed in 1931 connected it to the Western Maryland Railway at Connellsville, Pennsylvania, forming part of the Alphabet Route, a coalition of independent lines between the Northeastern United States and the Midwest. It was leased by the Norfolk and Western Railway in 1964 in conjunction with the N&W acquiring several other sections of the former Alphabet Route, but was leased to the new spinoff Wheeling and Lake Erie Railway in 1990, just months before the N&W was merged into the Norfolk Southern Railway. The original Wabash Pittsburgh Terminal Railway built several massive engineering works, including the Wabash Terminal in downtown Pittsburgh, damaged by two fires in 1946 and demolished in 1953. The Wabash Bridge over the Monongahela River into Pittsburgh was torn down in 1948, and on December 27, 2004, the Wabash Tunnel just southwest of the bridge opened as a high occupancy vehicle roadway through Mount Washington. As of January 2020 the two piers of the long-gone Wabash Bridge remain standing. The line also had a branch to West End, Pennsylvania, that was abandoned in 2011, and a branch to West Mifflin, Pennsylvania, known as the Mifflin Branch. And it also has a small industrial branch located near Belle Vernon, Pennsylvania. At the end of 1960 P&WV operated of road on of track; that year it reported 439 million net ton-miles of revenue freight. History Gould system: 1901 to 1908 Around 1900 George J. Gould was assembling railroads to create a transcontinental system. 
The Western Pacific Railway, Denver and Rio Grande Railroad and Missouri Pacific Railroad formed the line from the Pacific Ocean at San Francisco to the Mississippi River at St. Louis (completed in 1909). Past St. Louis, Gould acquired the Wabash Railroad to Toledo. On February 1, 1901, Gould, along with Joseph Ramsey, Jr., of the Wabash and others, formed the Pittsburgh-Toledo Syndicate, a syndicate intending to extend the system to Pittsburgh. The next month, the syndicate bought the Pittsburgh and Mansfield Railroad, an unbuilt line with a charter to build into downtown Pittsburgh. By May 1 the syndicate gained control of the Wheeling and Lake Erie Railroad, extending the system from Toledo southeast to Zanesville, Ohio, and Wheeling, West Virginia. The extension to Pittsburgh was chartered in three parts—the Cross Creek Railroad April 23, 1900, in Ohio, Pittsburgh, Toledo and Western Railroad April 3, 1901, in West Virginia, and Pittsburgh, Carnegie and Western Railroad July 17, 1901, in Pennsylvania. Work on the line, branching off the P&WV's line to Wheeling at Pittsburgh Junction, Ohio, began June 14, 1901. On May 7, 1904, the three companies were consolidated into the Wabash Pittsburgh Terminal Railway, to which all the properties of the syndicate (including the W&LE) were transferred. The first train passed through the Wabash Tunnel and crossed the |Wabash Bridge over the Monongahela River into Pittsburgh on June 1, and passenger service into the new Wabash Terminal began July 2, with through service over the W&LE and Wabash to Toledo, Chicago, St. Louis and Kansas City. In addition to the Pittsburgh extension Gould planned a line from Zanesville southeast to Belington, West Virginia, built by the Little Kanawha Syndicate. From Belington east to tidewater in Baltimore, the Fuller Syndicate bought the West Virginia Central and Pittsburg Railway and a controlling interest in the Western Maryland Railroad in 1902. 
Another part of the plan was the Philadelphia and Western Railway, a high-speed third rail electric interurban line, which would have run from Philadelphia west to the Western Maryland at York, Pennsylvania. The lines of the Fuller Syndicate were completed to Baltimore, but the Little Kanawha line was not completed and a connection between the main system and the Fuller Syndicate was not built. As Gould's plans affected the Pennsylvania Railroad's business, PRR took measures to fight back. This included the eviction from PRR property of telegraph poles owned by Gould's Western Union. The Panic of 1907 hit Gould hard, due to the high costs of building the line when all the easy routes had been taken, and the Western Maryland Railroad was the first of his properties to fail, entering receivership on March 5, 1908. The Wabash Pittsburgh Terminal Railway entered receivership May 29 of that year, ending through traffic between Pittsburgh and the W&LE and Wabash system. Independence: 1908 to 1929 After years of operation by its receivers, the company was finally sold at foreclosure in August 1916 and reorganized November as the Pittsburgh and West Virginia Railway. The line was again being considered for part of a major system—the "Fifth System" to supplement the four major players, the Pennsylvania Railroad, New York Central Railroad, Baltimore and Ohio Railroad and Erie Railroad—but there was still the issue of the gap between the W&LE/P&WV and the Western Maryland, never filled by the Little Kanawha Syndicate. The existing West Side Belt Railroad provided for the beginning of this extension, crossing the P&WV at the southwest portal of the Wabash Tunnel under Mount Washington and running southeast and east to Clairton on the Monongahela River. After an initial denial, the Interstate Commerce Commission approved the P&WV's plan to acquire the West Side Belt in December 1928. 
Pennsylvania Railroad influence: 1929 to 1964 In 1929, the Pennsylvania Railroad incorporated the Pennroad Corporation as an investment and holding company. This allowed the PRR to indirectly invest in other transportation companies without ICC regulation. Among the initial purchases, 72% of the P&WV was acquired. On February 11, 1931, the extension to Connellsville, Pennsylvania, opened, where the Western Maryland continued east, splitting from the West Side Belt at Pierce. This formed what came to be known as the Alphabet Route, following roughly the same plan as Gould's system, but using the Nickel Plate Road rather than the Wabash to reach both St. Louis and Chicago. The P&WV and Western Maryland never actually physically connected to one another in Connellsville—a short section of Pittsburgh and Lake Erie Railroad trackage was used to connect the P&WV to the WM. The Nickel Plate leased the Wheeling and Lake Erie on December 1, 1949. In March 1950, the Pennroad announced plans to lease the P&WV to the Nickel Plate. In 1962, the Norfolk & Western Railway filed to include the P&WV in the upcoming merger of the Nickel Plate. On October 16, 1964, the Norfolk and Western acquired the Nickel Plate and leased the P&WV. On the other hand, the Western Maryland Railway eventually went to the competing Baltimore and Ohio Railroad and Chesapeake and Ohio Railway in 1967. Following the abandonment of the Western Maryland Railway mainline from Connellsville to Cumberland, Maryland, in 1975, a connection was established between the P&WV and the B&O at a location near Connellsville called Sodem, and the P&WV's connection to the P&LE and WM was abandoned at this same time. This enabled a semblance of the old Alphabet Route to continue under the Chessie System, although on B&O lines east from Connellsville instead of WM lines. 
Norfolk and Western: 1964 to 1990 The Pittsburgh and West Virginia Railroad was organized in 1967 as a real estate investment trust to own the property leased to the N&W. The railroad is now a subsidiary of Power REIT, real estate investment trust that is publicly traded on the NYSE under the symbol "PW". The leased properties consist of a railroad line 112 miles in length, extending from Connellsville, Washington, and Allegheny counties in the Commonwealth of Pennsylvania; Brooke County in the state of West Virginia, and Jefferson and Harrison counties in Ohio. There are also branch lines that total 20 miles in length located in Washington County and Allegheny County in Pennsylvania and Brooke County, West Virginia. The railroad was leased in 1964 to NSC, formerly Norfolk and Western Railway Company, by the company’s predecessor for 99 years with the right of unlimited renewal for additional 99-year period under the same terms and conditions, including annual rent payments. The lease provides that NSC at its own expense and without deduction from the rent, will maintain, manage and operate the leased property and make such improvements thereto as it considers desirable. Such improvements made by NSC become the property of the Pittsburgh & West Virginia Railroad, and the cost thereof constitutes a recorded indebtedness of the company to NSC. The company’s business consists solely of the ownership of the properties subject to the lease, and of collection of rent thereon. Upon termination of the lease, all properties covered by the lease would be returned to Pittsburgh & West Virginia Railroad, together with sufficient cash and other assets to permit operation of the railroad for one year. Wheeling and Lake Erie: 1990 to present On May 17, 1990, Norfolk Southern spun off most of the former W&LE as a new Wheeling and Lake Erie Railway. 
The P&WV lease was transferred to the new W&LE, which has also acquired trackage rights over CSX Transportation lines from Connellsville east to Hagerstown, Maryland. References Further reading Baer, Christopher T. "PRR Chronology" Excerpted from "A General Chronology of the Pennsylvania Railroad Company Predecessors and Successors and Its Historical Context". Pennsylvania Railroad Technical and Historical Society. Accessed 2009-12-14. Earlpleasants.com. "Railroad History Database" External links Pittsburgh and West Virginia Railway Company Drawings, 1948-1950 Archives Center, National Museum of American History, Smithsonian Institution The P&WV Hi-Line Historic photos, past company newsletters and documents, maps and discussion board Alphabet Route - Pittsburgh & West Virginia Railway History Predecessors of the Norfolk and Western Railway Railway companies established in 1916 Railway companies disestablished in 1967 Defunct Ohio railroads Defunct Pennsylvania railroads Defunct West Virginia railroads Wabash Railroad Former Class I railroads in the United States Transportation in Pittsburgh American companies established in 1893 Railway companies established in 1893 American companies disestablished in 1967
Heinrich Wilhelm von Freytag (17 March 1720, Estorf – 2 January 1798, Hannover ) was an officer in the service of the Electorate of Brunswick-Lüneburg (Hanover). Career B.1720 in Estorf, Freytag rose to prominence during the Seven Years' War, organising & commanding a corps of light infantry, the Freytag Jägers. At the Battle of Bergen 17 April 1759 he commanding 9 companies of Jägers & 2 squadrons of Prussian Hussars. Promoted Field Marshal in 1792, he was appointed to raise and command the 3,873 man Hanoverian electoral contingent to the Holy Roman Empire. This force was absorbed into the general army mobilization at the end of 1792. Freytag commanded the Hanoverian troops and the 13–15,000 man Austro-Hanoverian corps under the Duke of York in the Flanders Campaign in 1793, seeing action at Rüme (St.Amand) 1 May, Famars 23rd, the siege of Valenciennes 13 June-28 July, and Cæsar's Camp 7/8 August. In the Siege of Dunkirk he commanded the left wing covering column. On 6 September he was driven back by Houchard at the Battle of Hondschoote, where he was wounded and captured, but rescued the following day by Wallmoden’s counter-attack. He resigned soon after due to poor relations with the Duke of York & was replaced by Wallmoden. Freytag died on 2 January 1798 in Hanover. Assessment He knew the Duke of York from when the Prince studied in Hanover in the 1780s. Relations between the two were seriously strained right from the beginning of the campaign. At St. Amand 1 May York tried to locate him & Bussche to bring their troops forward, "but found neither at home, because they too had ridden out to reconnoitre, which annoyed the Duke very much, because he had expected to find them in their quarters. The Duke then went to Rüme, but first ordered the Light Dragoons to advance. 
The Field Marshal, however, forbade their marching, and it was only after fresh orders that they set out, but arrived too late… The Field Marshal declined to sit by the Duke at his table that day, and left before the joints were served". Fortescue claims that his 21-mile front in advance of Hondschoote was badly chosen, stamped him as a believer in the cordon system, and that a shorter front around the village would have been better. But Burne persuasively challenges this by citing 5 reasons for the weakness of Hondschoote – "it is difficult to see on what grounds Fortescue asserts that ‘the position at Hondschoote would have covered the besiegers quite as efficiently and with less risk’. In my opinion Freytag occupied the best possible position; his mistake was that he was forgetful of the principal of maintenance of the objective – namely, to cover the besieging army – till pulled up sharply and rightly by the Duke... The fact that even without the Duke's aid Freytag very nearly held up the French is a clear indication that with it the victory would almost certainly have gone to the Allies". The Duke of York criticised his actions in the battle thus: "On the 6th of September, the day of the first attack upon the Field Marshal’s Corps, He never would believe that the Enemy had forced the post on His left and turned His left flank in spite of repeated reports that were sent to Him, nor was it till six in the evening, that he consented to retreat, which he did in two Columns. Instead however of sending the Artillery and baggage with General Count Walmoden's Column which was the furthest from the Enemy, He chose to take them in the rear of His own Column. I shall not touch upon the subject of His and my brother Adolphus’s being taken prisoners. I do not imagine there can be two opinions on the subject. I will only say that in consequence of this misfortune, everything would have been lost if it had not been for the presence of mind and coolness of Count Walmoden". 
References Burne, Alfred, The Noble Duke of York: The Military Life of Frederick Duke of York and Albany, London: Staples Press (1949). Coutanceau, Michel Henri Marie, La Campagne de 1794 a l'Armée du Nord, (1903–08 5 Volumes) Paris: Chapelot . Fortescue, Sir John, British Campaigns in Flanders 1690–1794 (extracts from Volume 4 of A History of the British Army), London: Macmillan (1918). Ompteda, CF In the King's German Legion. Memoirs of Baron Ompteda, Colonel in the King's German Legion During the Napoleonic Wars Field marshals of Germany German military personnel of the Seven Years' War German military leaders of the French Revolutionary Wars People from Nienburg (district) 1720 births 1798 deaths
Nang County (; ) is a county under the jurisdiction of Nyingtri City in the Tibet Autonomous Region, China. Geography Nang is located in the south-west of Nyingtri, at the middle and lower reaches of the Yarlung Tsangpo River. The county de jure covers an area of 4,120 square kilometres, including the area claimed but de facto under control of the Arunachal Pradesh, India. The average altitude is 5,000 metres above sea level. Administrative divisions Nang County contains 3 towns and 3 townships. Gallery References External links Official website of Nang County government Counties of Tibet Nyingchi
The Treaty of Peace Between Japan and India (日本国とインドとの間の平和条約) was a peace treaty signed on 9 June 1952, restoring relations between the two nations. The British Empire, of which India was a part, had full diplomatic relations with Japan became involved in World War II. After the war Japan was under American occupation and India gained its independence on 15 August 1947. In 1951, the San Francisco Peace Conference was held with Indian Prime Minister Jawaharlal Nehru refusing to attend the conference, because he considered the provisions of the San Francisco Treaty to be limiting Japanese sovereignty, as seen to this day with the San Francisco System managed by the United States. See also Indian independence movement Treaty of Peace with Japan India–Japan relations References External links Treaty of Peace Between Japan and India George Washington University 日印平和条約(日本国とインドとの間の平和条約) Tokyo University Treaty of Peace between the Governments of India and Japan Ministry of External Affairs, India Hiroshi Sato, "India-Japan Peace Treaty in Japan's Post-War Asian Diplomacy" Journal of the Japanese Association for South Asian Studies, vol. 17(2005) India–Japan relations 1952 in India 1952 in Japan Peace treaties of Japan Peace treaties of India Treaties concluded in 1952 Bilateral treaties of Japan Bilateral treaties of India 1952 in Japanese politics
```java
package com.alrubaye.mytracker;

import android.*;
import android.Manifest;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.net.Uri;
import android.os.Build;
import android.provider.ContactsContract;
import android.support.v4.app.ActivityCompat;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
import android.widget.ListView;
import android.widget.TextView;
import android.widget.Toast;

import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;

import java.util.ArrayList;
import java.util.Map;

/**
 * Screen that lists this user's "trackers" (contacts permitted to follow the
 * user). Tapping a row removes that tracker; the options menu lets the user
 * add a tracker picked from the device contact book. Tracker membership is
 * mirrored to Firebase under Users/&lt;trackerPhone&gt;/Finders/&lt;myPhone&gt;
 * and persisted locally through GlobalInfo.SaveData().
 */
public class MyTrackers extends AppCompatActivity {

    // Backing data for the ListView; one AdapterItems (name + phone) per tracker.
    ArrayList<AdapterItems> listnewsData = new ArrayList<AdapterItems>();
    MyCustomAdapter myadapter;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_my_trackers);

        //listnewsData.add(new AdapterItems("hussein"," d34534"));
        myadapter=new MyCustomAdapter(listnewsData);
        ListView lsNews=(ListView)findViewById(R.id.listView);
        lsNews.setAdapter(myadapter);// attach adapter, then populate via Refesh()
        Refesh();

        // Tapping a row deletes that tracker: from the in-memory map, from
        // Firebase, and from the locally saved state, then the list is rebuilt.
        lsNews.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                GlobalInfo.MyTrackers.remove(listnewsData.get(position).PhoneNumber);
                DatabaseReference mDatabase= FirebaseDatabase.getInstance().getReference();
                // Remove Users/<tracker>/Finders/<me> so the tracker can no
                // longer see this user.
                mDatabase.child("Users").child(listnewsData.get(position).PhoneNumber).child("Finders")
                        .child(GlobalInfo.PhoneNumber).removeValue();
                GlobalInfo globalInfo= new GlobalInfo(getApplicationContext());
                globalInfo.SaveData();
                Refesh();
            }
        });
    }

    // Rebuilds the row list from GlobalInfo.MyTrackers (key = phone number,
    // value = display name, judging by how entries are added below) and
    // notifies the adapter. NOTE(review): name "Refesh" is a typo for
    // "Refresh" — kept as-is to avoid breaking other callers.
    void Refesh(){
        listnewsData.clear();
        for (Map.Entry m:GlobalInfo.MyTrackers.entrySet()){
            listnewsData.add(new AdapterItems( m.getValue().toString() , m.getKey().toString()));
        }
        myadapter.notifyDataSetChanged();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        MenuInflater inflater = getMenuInflater();
        inflater.inflate(R.menu.menu_contact_list, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle item selection
        switch (item.getItemId()) {
            case R.id.goback:
                // Persist the current tracker set before leaving the screen.
                GlobalInfo globalInfo= new GlobalInfo(this);
                globalInfo.SaveData();
                finish();
                return true;
            case R.id.add:
                // Adding a tracker needs READ_CONTACTS; check/request first.
                CheckUserPermsions();
                return true;
            default:
                return super.onOptionsItemSelected(item);
        }
    }

    // On Android 6.0+ (API 23) READ_CONTACTS must be granted at runtime.
    // If it is missing, request it and bail; the picker is launched from
    // onRequestPermissionsResult instead. Otherwise open the picker directly.
    void CheckUserPermsions(){
        if ( Build.VERSION.SDK_INT >= 23){
            if (ActivityCompat.checkSelfPermission(this, Manifest.permission.READ_CONTACTS)
                    != PackageManager.PERMISSION_GRANTED ){
                requestPermissions(new String[]{ Manifest.permission.READ_CONTACTS},
                        REQUEST_CODE_ASK_PERMISSIONS);
                return ;
            }
        }
        PickContact();
    }

    // Request code identifying the READ_CONTACTS permission dialog.
    final private int REQUEST_CODE_ASK_PERMISSIONS = 123;

    @Override
    public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
        switch (requestCode) {
            case REQUEST_CODE_ASK_PERMISSIONS:
                if (grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                    PickContact();
                } else {
                    // Permission denied: just inform the user; no retry logic.
                    Toast.makeText( this,"your message" , Toast.LENGTH_SHORT)
                            .show();
                }
                break;
            default:
                super.onRequestPermissionsResult(requestCode, permissions, grantResults);
        }
    }

    // Launches the system contact picker; the chosen contact URI comes back
    // in onActivityResult with request code PICK_CONTACT.
    void PickContact(){
        Intent intent = new Intent(Intent.ACTION_PICK, ContactsContract.Contacts.CONTENT_URI);
        startActivityForResult(intent, PICK_CONTACT);
    }

    // Request code for the contact-picker activity.
    static final int PICK_CONTACT=1;

    @Override
    public void onActivityResult(int reqCode, int resultCode, Intent data) {
        super.onActivityResult(reqCode, resultCode, data);
        switch (reqCode) {
            case (PICK_CONTACT) :
                if (resultCode == Activity.RESULT_OK) {
                    // Resolve the picked contact into (name, phone number),
                    // register it as a tracker locally and in Firebase.
                    Uri contactData = data.getData();
                    Cursor c = getContentResolver().query(contactData, null, null, null, null);
                    if (c.moveToFirst()) {
                        String id =c.getString(c.getColumnIndexOrThrow(ContactsContract.Contacts._ID));
                        String hasPhone =c.getString(c.getColumnIndex(ContactsContract.Contacts.HAS_PHONE_NUMBER));
                        String cNumber="No number";
                        if (hasPhone.equalsIgnoreCase("1")) {
                            // Contact has at least one number: take the first
                            // row and normalize it via FormatPhoneNumber.
                            Cursor phones = getContentResolver().query(
                                    ContactsContract.CommonDataKinds.Phone.CONTENT_URI,null,
                                    ContactsContract.CommonDataKinds.Phone.CONTACT_ID +" = "+ id, null, null);
                            phones.moveToFirst();
                            cNumber =GlobalInfo.FormatPhoneNumber (phones.getString(phones.getColumnIndex("data1")));
                            System.out.println("number is:"+cNumber);
                            // NOTE(review): neither `phones` nor `c` is ever
                            // closed — cursor leak; confirm and fix upstream.
                        }
                        String name = c.getString(c.getColumnIndex(ContactsContract.Contacts.DISPLAY_NAME));
                        GlobalInfo.MyTrackers.put(cNumber,name);
                        DatabaseReference mDatabase= FirebaseDatabase.getInstance().getReference();
                        // Grant this user visibility: Users/<tracker>/Finders/<me> = true.
                        mDatabase.child("Users").child(cNumber).child("Finders")
                                .child(GlobalInfo.PhoneNumber).setValue(true);
                        GlobalInfo globalInfo= new GlobalInfo(this);
                        globalInfo.SaveData();
                        Refesh();
                        //update firebase and
                        //update list
                        //update database
                    }
                }
                break;
        }
    }

    /** ListView adapter that renders one row (user name + phone) per tracker. */
    private class MyCustomAdapter extends BaseAdapter {

        public ArrayList<AdapterItems> listnewsDataAdpater ;

        public MyCustomAdapter(ArrayList<AdapterItems> listnewsDataAdpater) {
            this.listnewsDataAdpater=listnewsDataAdpater;
        }

        @Override
        public int getCount() {
            return listnewsDataAdpater.size();
        }

        @Override
        public String getItem(int position) {
            return null; // item lookup is unused by this adapter
        }

        @Override
        public long getItemId(int position) {
            return position;
        }

        @Override
        public View getView(int position, View convertView, ViewGroup parent) {
            // NOTE(review): convertView is ignored, so a fresh row is inflated
            // on every call — functional, but defeats ListView recycling.
            LayoutInflater mInflater = getLayoutInflater();
            View myView = mInflater.inflate(R.layout.single_row_conact, null);
            final AdapterItems s = listnewsDataAdpater.get(position);
            TextView tv_user_name=( TextView)myView.findViewById(R.id.tv_user_name);
            tv_user_name.setText(s.UserName);
            TextView tv_phone=( TextView)myView.findViewById(R.id.tv_phone);
            tv_phone.setText(s.PhoneNumber);
            return myView;
        }
    }
}
```
Ambatomarina is a town and commune in Madagascar. It belongs to the district of Manandriana, which is a part of Amoron'i Mania Region. The population of the commune was estimated to be approximately 14,000 in the 2001 commune census. Primary and junior-level secondary education are available in town. Farming and raising livestock provide employment for 49.5% and 49.5% of the working population, respectively. The most important crop is rice, while other important products are beans, maize and cassava. Services provide employment for 1% of the population. References and notes Populated places in Amoron'i Mania
```xml
import * as React from 'react'
import { IMatches } from '../../lib/fuzzy-find'
import { Octicon } from '../octicons'
import * as octicons from '../octicons/octicons.generated'
import { HighlightText } from '../lib/highlight-text'
import { dragAndDropManager } from '../../lib/drag-and-drop-manager'
import { DragType, DropTargetType } from '../../models/drag-drop'
import { TooltippedContent } from '../lib/tooltipped-content'
import { RelativeTime } from '../relative-time'
import classNames from 'classnames'

/** Props for a single row in the branches list. */
interface IBranchListItemProps {
  /** The name of the branch */
  readonly name: string

  /** Specifies whether this item is currently selected */
  readonly isCurrentBranch: boolean

  /** The date may be null if we haven't loaded the tip commit yet. */
  readonly lastCommitDate: Date | null

  /** The characters in the branch name to highlight */
  readonly matches: IMatches

  /** When a drag element has landed on a branch that is not current */
  readonly onDropOntoBranch?: (branchName: string) => void

  /** When a drag element has landed on the current branch */
  readonly onDropOntoCurrentBranch?: () => void
}

interface IBranchListItemState {
  /**
   * Whether or not there's currently a draggable item being dragged
   * on top of the branch item. We use this in order to disable pointer
   * events when dragging.
   */
  readonly isDragInProgress: boolean
}

/**
 * The branch component: renders one branch row (icon, highlighted name,
 * relative tip-commit time) and acts as a drop target for commit drags.
 */
export class BranchListItem extends React.Component<
  IBranchListItemProps,
  IBranchListItemState
> {
  public constructor(props: IBranchListItemProps) {
    super(props)

    this.state = { isDragInProgress: false }
  }

  private onMouseEnter = () => {
    // Entering during any drag highlights this row as a potential drop target.
    if (dragAndDropManager.isDragInProgress) {
      this.setState({ isDragInProgress: true })
    }

    // Only commit drags announce this branch to the drag-and-drop manager.
    if (dragAndDropManager.isDragOfTypeInProgress(DragType.Commit)) {
      dragAndDropManager.emitEnterDropTarget({
        type: DropTargetType.Branch,
        branchName: this.props.name,
      })
    }
  }

  private onMouseLeave = () => {
    this.setState({ isDragInProgress: false })

    if (dragAndDropManager.isDragOfTypeInProgress(DragType.Commit)) {
      dragAndDropManager.emitLeaveDropTarget()
    }
  }

  private onMouseUp = () => {
    const { onDropOntoBranch, onDropOntoCurrentBranch, name, isCurrentBranch } =
      this.props

    this.setState({ isDragInProgress: false })

    // Ignore mouse-up unless a commit drag is being dropped on this row.
    if (!dragAndDropManager.isDragOfTypeInProgress(DragType.Commit)) {
      return
    }

    // Exactly one of the two callbacks fires, depending on whether this
    // row represents the currently checked-out branch.
    if (onDropOntoBranch !== undefined && !isCurrentBranch) {
      onDropOntoBranch(name)
    }

    if (onDropOntoCurrentBranch !== undefined && isCurrentBranch) {
      onDropOntoCurrentBranch()
    }
  }

  public render() {
    const { lastCommitDate, isCurrentBranch, name } = this.props
    // The checked-out branch gets a check mark; all others the branch icon.
    const icon = isCurrentBranch ? octicons.check : octicons.gitBranch

    const className = classNames('branches-list-item', {
      'drop-target': this.state.isDragInProgress,
    })

    return (
      // eslint-disable-next-line jsx-a11y/no-static-element-interactions
      <div
        className={className}
        onMouseEnter={this.onMouseEnter}
        onMouseLeave={this.onMouseLeave}
        onMouseUp={this.onMouseUp}
      >
        <Octicon className="icon" symbol={icon} />
        <TooltippedContent
          className="name"
          tooltip={name}
          onlyWhenOverflowed={true}
          tagName="div"
        >
          <HighlightText text={name} highlight={this.props.matches.title} />
        </TooltippedContent>
        {lastCommitDate && (
          <RelativeTime
            className="description"
            date={lastCommitDate}
            onlyRelative={true}
          />
        )}
      </div>
    )
  }
}
```
```yaml
tr:
  locale_name: Türkçe
  nav:
    about: Hakkında
    contribute: Katkıda Bulun
  index:
    lead: Açık kaynak yazılımlar, tıpkı sizin gibi insanlar tarafından geliştiriliyor. Nasıl proje başlatıp büyüteceğinizi öğrenin.
    opensourcefriday: Bugün cuma! Kullandığınız ve sevdiğiniz yazılıma katkıda bulunmak için birkaç saat ayırın
  article:
    table_of_contents: İçindekiler
    back_to_all_guides: Anasayfaya Geri Dön
    related_guides: İlgili Kılavuzlar
  footer:
    contribute:
      heading: Katkıda Bulun
      description: Bir öneride bulunmak ister misiniz? Bu içerik de açık kaynak. Geliştirmemize yardımcı olun.
      button: Katkıda Bulun
    subscribe:
      heading: İletişimde kalın
      description: GitHub'ın açık kaynak ipuçlarını ve güncel kaynaklarını ilk duyan siz olun.
      label: Email Adresiniz
      button: Abone Olun
    byline:
      # [code], [love], and [github] will be replaced by octicons
      format: "[github] ve [friends] tarafından [love] ile [code]"
      # Label for code octicon
      code_label: code
      # Label for love octicon
      love_label: love
      # Label for the contributors link
      friends_label: gönüllüler
```
Agence Ecofin is an information agency specializing in public management and the African economy. History The Agence Ecofin was founded in 2010 in Geneva to meet a growing need for sectoral and specialized information on African economies. The agency's website was launched in June 2011. Activities The Ecofin Agency presents, on a web platform, several daily news feeds on strategic economic sectors for the African continent: Public management, Finance, Agriculture and Agro-industry, Electricity, Hydrocarbons, Mines, Telecom, Communication, etc. Audience The Ecofin Agency receives an average of 2 million visits per month (Webalizer) for 260,000 unique visitors. The agency's daily letters have 54,000 subscribers. The agency's information is also available on smartphone or tablet applications (Apple and Android), as well as on social networks Facebook and Twitter. References Newspapers published in Africa
Jon Rish (born 1973) is an American former radio personality, best known for his work in the Boston area. Career Rish was a radio host for WEEI-FM in Boston. Rish attended Boston College and began his sportscasting experience on WZBC, the school's 1000-watt FM radio station broadcasting to the Greater Boston area. Rish's broadcasting career began in 1993, when he was an intern on Boston sports radio host Dale Arnold's WEEI program. He subsequently broadcast college sports on several Boston-area stations. From 2006 to 2012, Rish hosted the Boston Red Sox pre and post game show on the Boston Red Sox Radio Network. Rish served in the Red Sox broadcast booth alongside Joe Castiglione since 2008, sharing time with Dale Arnold and Dave O'Brien. Arnold and Rish replaced Glenn Geffner, who left after the 2007 season for the Florida Marlins radio booth. Rish primarily announced games when O'Brien was away on assignment for ESPN. On June 10, 2010, Rish filled in on the NESN TV broadcast for Don Orsillo who fell ill just before the start of the Red Sox game at Cleveland. On April 8, 2013, after being asked to take a major pay cut by WEEI owner Entercom, Rish announced that he was leaving not only WEEI but the radio business as well, saying he had been "accepted into a training program to become a software developer and will begin that career path." His last assignment for Red Sox Radio Network was for the game between the Oakland Athletics and the Red Sox on April 24, 2013. In August 2013, Rish was chosen by NESN to serve as a substitute color analyst for Boston Red Sox games while regular analyst Jerry Remy was on leave following the arrest of his son, Jared Remy, for murder. Rish also served as substitute Boston Red Sox play-by-play announcer on NESN in July 2014 while regular announcer Don Orsillo was on vacation. 
References Year of birth missing (living people) Living people American radio personalities Boston sportscasters Boston Red Sox announcers Major League Baseball broadcasters
```go
/*
 * path_to_url
 * All Rights Reserved.
 *
 *
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing, software
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

package replay

import (
	"context"

	"github.com/rabbitstack/fibratus/internal/bootstrap"
	"github.com/rabbitstack/fibratus/pkg/config"
	"github.com/rabbitstack/fibratus/pkg/util/multierror"
	"github.com/spf13/cobra"
)

// Command is the cobra subcommand that replays the event stream
// from a previously recorded kcap (capture) file.
var Command = &cobra.Command{
	Use:   "replay",
	Short: "Replay event stream from the kcap (capture) file",
	RunE:  replay,
}

var (
	// replay command config, constructed in replay mode
	cfg = config.NewWithOpts(config.WithReplay())
)

// init wires the replay configuration flags/options into the command.
func init() {
	cfg.MustViperize(Command)
}

// replay bootstraps the application in capture-replay mode (with OS signal
// handling), reads the capture file(s) named by the positional args, then
// waits for completion and shuts the app down. If reading fails, the read
// error is combined with any shutdown error so neither is lost.
func replay(cmd *cobra.Command, args []string) error {
	app, err := bootstrap.NewApp(cfg, bootstrap.WithSignals(), bootstrap.WithCaptureReplay())
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := app.ReadCapture(ctx, args); err != nil {
		// Still attempt a clean shutdown; surface both errors together.
		return multierror.Wrap(err, app.Shutdown())
	}
	app.Wait()
	return app.Shutdown()
}
```
```php
<?php

declare(strict_types=1);

namespace Shlinkio\Shlink\Core\Model;

use Shlinkio\Shlink\Common\Paginator\Paginator;

/**
 * Base class for paginated list params where pagination is optional:
 * a missing/invalid page falls back to the first page, and a missing
 * or negative page size means "return all items".
 */
abstract class AbstractInfinitePaginableListParams
{
    private const FIRST_PAGE = 1;

    public readonly int $page;
    public readonly int $itemsPerPage;

    protected function __construct(?int $page, ?int $itemsPerPage)
    {
        $this->page = $this->determinePage($page);
        $this->itemsPerPage = $this->determineItemsPerPage($itemsPerPage);
    }

    private function determinePage(?int $page): int
    {
        // Only strictly positive pages are honored; anything else
        // (null, zero, negative) falls back to the first page.
        if ($page !== null && $page > 0) {
            return $page;
        }

        return self::FIRST_PAGE;
    }

    private function determineItemsPerPage(?int $itemsPerPage): int
    {
        // Zero is a valid (empty-page) size; null or negative values
        // disable the limit entirely.
        if ($itemsPerPage !== null && $itemsPerPage >= 0) {
            return $itemsPerPage;
        }

        return Paginator::ALL_ITEMS;
    }
}
```
Vilana is a white Greek wine grape variety. Vilana may also refer to: Vilana Udagama, a village in Sri Lanka Vilana Pallegama, a village in Sri Lanka Vilana (surname)
Zigzag is a jagged, regular pattern. Zigzag, ZigZag, zig zag or zig-zag may also refer to: Film and television Zigzag (1963 film) Zig Zag (1970 film), a film by Richard A. Colla Zig Zag (2002 film), a film by David S. Goyer Zig Zag (Canadian TV series) Zig Zag, an educational TV series on BBC Schools Zigzag, a character in The Thief and the Cobbler "Zig Zag", an episode of The Outer Limits, season 6 Music and theatre Zig-Zag!, a 1917 musical revue Zig Zag (The Hooters album) (1989) Zig Zag (Tha Mexakinz album) (1994) Zig Zag, a 2003 album by Earl Slick Zig Zags, a heavy metal/punk rock band Culture ZigZag (magazine), a UK rock music magazine Zig Zag (manga), a cartoon series by Yuki Nakaji Computer science ZigZag (software), a data model designed and patented by Ted Nelson Zig-zag product, a method for constructing graphs in computational complexity Zig-Zag, a tree-rotation variant used to balance splay trees Zig-zag entropy coding, a method used in JPEG images to compress data ZigZag encoding, a mapping of the signed integers to unsigned integers that shortens variable-length quantity representations of negative integers Geography Zig Zag Pass, site of the Struggle on Zig Zag Pass in the Philippines during World War II Zigzag, Oregon Zigzag Glacier Zigzag Ranger Station Zigzag River and Little Zigzag River Zigzag Bluff in Antarctica Zigzag Island off the coast of Antarctica Zigzag Pass on the island of South Georgia Mathematics and cryptography Boustrophedon transform, a zigzag reordering Fence (mathematics) or zigzag poset, a partially ordered set Isbell's zigzag theorem Zig-zag lemma, a mathematical lemma in homological algebra Zigzag cipher, a type of cipher, or code Railroads Zig zag (railway), a construction technique railroads use to climb hills; also called a switchback Lapstone Zig Zag, a walking track on the line of an abandoned railway Zig Zag Railway, a heritage railway near Lithgow Zig Zag railway station, a railway station on the CityRail network near 
Lithgow, New South Wales Kalamunda Zig Zag in Western Australia Perry Bridge or Zig Zag Bridge, a 1711 bridge over River Tame in Perry Barr, Birmingham, England Revolvers Mauser Zig-Zag, a 19th-century revolver Zig zag revolver, a 3D-printed revolver developed in Japan Architecture Zig-Zag Chair, designed by Gerrit Rietveld Zig-zag moulding on Norman arches Zigzag moderne, a term used in Art Deco Animals Phasianella zigzag, a species of sea snail Northern zigzag salamander Ozark zigzag salamander Southern zigzag salamander Zigzag barb, a ray-finned fish Zigzag heron, a bird Zigzagiceras, an extinct cephalopod genus Plants The zigzag model of plant-pathogen co-evolution by Jonathan D. G. Jones and Jeffery Dangl Other uses Boustrophedon, writing in alternating directions Zig Zag (1984 video game), a video game by DK'Tronics Zig Zag (1987 video game), a video game by Mirrorsoft Zig-Zag (company), a tobacco products company Zig-zag bridge, a type of pedestrian walkway USS Zigzag (SP-106), a patrol vessel that served in the United States Navy from 1917 to 1919 Agent Zigzag, code name of Eddie Chapman, a British double agent Zig-zag in-line package, a short-lived packaging technology for integrated circuits Zigzag transformer, an engineering device for electrical systems ZigZag, a ticketing scheme in Derbyshire, UK, by bus operator Trent Barton Zig Zag Girl, a stage magic illusion See also Zag (disambiguation) Zig (disambiguation) Zig and Zag (disambiguation) Zik Zak Filmworks
```javascript
// French (Canada, "fr-ca") UI strings for the CKEditor "showblocks" plugin:
// only the toolbar button label is localized.
CKEDITOR.plugins.setLang("showblocks","fr-ca",{toolbar:"Afficher les blocs"});
```
```smalltalk
/****************************************************************************
 *
 * path_to_url
 * path_to_url
 * path_to_url
 ****************************************************************************/

#if UNITY_EDITOR
using UnityEditor;

namespace QFramework
{
    /// <summary>
    /// A collapsible (foldout) tree node for editor IMGUI layouts: a header
    /// line containing a foldout toggle, plus an indented body that is drawn
    /// only while the node is expanded.
    /// </summary>
    public class TreeNode : VerticalLayout
    {
        // Observable expanded/collapsed state; optionally persisted to EditorPrefs.
        public BindableProperty<bool> Spread = null;

        // Foldout label; also used as the EditorPrefs key when autosaving state.
        public string Content;

        // Header row holding the indent spacer and the foldout widget.
        private readonly IMGUIHorizontalLayout mFirstLine = EasyIMGUI.Horizontal();

        // Container for child views shown only when the node is expanded.
        private VerticalLayout mSpreadView = new VerticalLayout();

        /// <param name="spread">Initial expanded state.</param>
        /// <param name="content">Foldout label; doubles as the persistence key.</param>
        /// <param name="indent">Left padding of the header line, in pixels.</param>
        /// <param name="autosaveSpreadState">When true, the expanded state is loaded
        /// from and saved to EditorPrefs under <paramref name="content"/>.</param>
        public TreeNode(bool spread, string content, int indent = 0, bool autosaveSpreadState = false)
        {
            if (autosaveSpreadState)
            {
                // Restore the previously saved state, defaulting to the argument.
                spread = EditorPrefs.GetBool(content, spread);
            }

            Content = content;
            Spread = new BindableProperty<bool>(spread);

            Style = new GUIStyleProperty(() => EditorStyles.foldout);

            mFirstLine.Parent(this);
            mFirstLine.AddChild(EasyIMGUI.Space().Pixel(indent));

            if (autosaveSpreadState)
            {
                // Persist every state change as it happens.
                Spread.Register(value => EditorPrefs.SetBool(content, value));
            }

            // Header: the foldout toggle drives Spread.Value each GUI pass.
            EasyIMGUI.Custom().OnGUI(() =>
                {
                    Spread.Value = EditorGUILayout.Foldout(Spread.Value, Content, true, Style.Value);
                })
                .Parent(mFirstLine);

            // Body: drawn only while expanded.
            EasyIMGUI.Custom().OnGUI(() =>
            {
                if (Spread.Value)
                {
                    mSpreadView.DrawGUI();
                }
            }).Parent(this);
        }

        /// <summary>Appends a view to the header (first) line; returns this for chaining.</summary>
        public TreeNode Add2FirstLine(IMGUIView view)
        {
            view.Parent(mFirstLine);
            return this;
        }

        /// <summary>Draws the header line inside a box; returns this for chaining.</summary>
        public TreeNode FirstLineBox()
        {
            mFirstLine.Box();

            return this;
        }

        /// <summary>Draws the expanded body inside a box; returns this for chaining.</summary>
        public TreeNode SpreadBox()
        {
            mSpreadView.VerticalStyle = "box";

            return this;
        }

        /// <summary>Appends a view to the collapsible body; returns this for chaining.</summary>
        public TreeNode Add2Spread(IMGUIView view)
        {
            view.Parent(mSpreadView);

            return this;
        }
    }
}
#endif
```
```python
#
#
# path_to_url
#
# Unless required by applicable law or agreed to in writing, software
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

from semver import VersionInfo

# Raw semantic-version string for this release; the single source of truth
# that release tooling bumps.
_VERSION = "3.129.1"

version = VersionInfo.parse(_VERSION)
"""Version is the Pulumi SDK's release version."""
```
```java
/*
 *
 * path_to_url
 *
 * Unless required by applicable law or agreed to in writing, software
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
package com.example.android.sunshine;

import android.content.Context;
import android.database.Cursor;
import android.support.annotation.NonNull;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;

import com.example.android.sunshine.utilities.SunshineDateUtils;
import com.example.android.sunshine.utilities.SunshineWeatherUtils;

/**
 * {@link ForecastAdapter} exposes a list of weather forecasts
 * from a {@link android.database.Cursor} to a {@link android.support.v7.widget.RecyclerView}.
 */
class ForecastAdapter extends RecyclerView.Adapter<ForecastAdapter.ForecastAdapterViewHolder> {

    /* The context we use to utility methods, app resources and layout inflaters */
    private final Context mContext;

    /*
     * Below, we've defined an interface to handle clicks on items within this Adapter. In the
     * constructor of our ForecastAdapter, we receive an instance of a class that has implemented
     * said interface. We store that instance in this variable to call the onClick method whenever
     * an item is clicked in the list.
     */
    final private ForecastAdapterOnClickHandler mClickHandler;

    /**
     * The interface that receives onClick messages.
     */
    public interface ForecastAdapterOnClickHandler {
        // TODO (36) Refactor onClick to accept a long as its parameter rather than a String
        void onClick(String weatherForDay);
    }

    // Backing cursor; may be null until swapCursor is first called by the loader.
    private Cursor mCursor;

    /**
     * Creates a ForecastAdapter.
     *
     * @param context      Used to talk to the UI and app resources
     * @param clickHandler The on-click handler for this adapter. This single handler is called
     *                     when an item is clicked.
     */
    public ForecastAdapter(@NonNull Context context, ForecastAdapterOnClickHandler clickHandler) {
        mContext = context;
        mClickHandler = clickHandler;
    }

    /**
     * This gets called when each new ViewHolder is created. This happens when the RecyclerView
     * is laid out. Enough ViewHolders will be created to fill the screen and allow for scrolling.
     *
     * @param viewGroup The ViewGroup that these ViewHolders are contained within.
     * @param viewType  If your RecyclerView has more than one type of item (like ours does) you
     *                  can use this viewType integer to provide a different layout. See
     *                  {@link android.support.v7.widget.RecyclerView.Adapter#getItemViewType(int)}
     *                  for more details.
     * @return A new ForecastAdapterViewHolder that holds the View for each list item
     */
    @Override
    public ForecastAdapterViewHolder onCreateViewHolder(ViewGroup viewGroup, int viewType) {
        View view = LayoutInflater
                .from(mContext)
                .inflate(R.layout.forecast_list_item, viewGroup, false);

        view.setFocusable(true);

        return new ForecastAdapterViewHolder(view);
    }

    /**
     * OnBindViewHolder is called by the RecyclerView to display the data at the specified
     * position. In this method, we update the contents of the ViewHolder to display the weather
     * details for this particular position, using the "position" argument that is conveniently
     * passed into us.
     *
     * @param forecastAdapterViewHolder The ViewHolder which should be updated to represent the
     *                                  contents of the item at the given position in the data set.
     * @param position                  The position of the item within the adapter's data set.
     */
    @Override
    public void onBindViewHolder(ForecastAdapterViewHolder forecastAdapterViewHolder, int position) {
        mCursor.moveToPosition(position);

        /*******************
         * Weather Summary *
         *******************/
        /* Read date from the cursor */
        long dateInMillis = mCursor.getLong(MainActivity.INDEX_WEATHER_DATE);
        /* Get human readable string using our utility method */
        String dateString = SunshineDateUtils.getFriendlyDateString(mContext, dateInMillis, false);
        /* Use the weatherId to obtain the proper description */
        int weatherId = mCursor.getInt(MainActivity.INDEX_WEATHER_CONDITION_ID);
        String description = SunshineWeatherUtils.getStringForWeatherCondition(mContext, weatherId);
        /* Read high temperature from the cursor (in degrees celsius) */
        double highInCelsius = mCursor.getDouble(MainActivity.INDEX_WEATHER_MAX_TEMP);
        /* Read low temperature from the cursor (in degrees celsius) */
        double lowInCelsius = mCursor.getDouble(MainActivity.INDEX_WEATHER_MIN_TEMP);

        String highAndLowTemperature =
                SunshineWeatherUtils.formatHighLows(mContext, highInCelsius, lowInCelsius);

        // Single-line summary shown per row, e.g. "Today - Clear - 17 / 12".
        String weatherSummary = dateString + " - " + description + " - " + highAndLowTemperature;

        forecastAdapterViewHolder.weatherSummary.setText(weatherSummary);
    }

    /**
     * This method simply returns the number of items to display. It is used behind the scenes
     * to help layout our Views and for animations.
     *
     * @return The number of items available in our forecast
     */
    @Override
    public int getItemCount() {
        if (null == mCursor) return 0;
        return mCursor.getCount();
    }

    /**
     * Swaps the cursor used by the ForecastAdapter for its weather data. This method is called by
     * MainActivity after a load has finished, as well as when the Loader responsible for loading
     * the weather data is reset. When this method is called, we assume we have a completely new
     * set of data, so we call notifyDataSetChanged to tell the RecyclerView to update.
     *
     * @param newCursor the new cursor to use as ForecastAdapter's data source
     */
    void swapCursor(Cursor newCursor) {
        mCursor = newCursor;
        notifyDataSetChanged();
    }

    /**
     * A ViewHolder is a required part of the pattern for RecyclerViews. It mostly behaves as
     * a cache of the child views for a forecast item. It's also a convenient place to set an
     * OnClickListener, since it has access to the adapter and the views.
     */
    class ForecastAdapterViewHolder extends RecyclerView.ViewHolder implements View.OnClickListener {

        final TextView weatherSummary;

        ForecastAdapterViewHolder(View view) {
            super(view);

            weatherSummary = (TextView) view.findViewById(R.id.tv_weather_data);

            view.setOnClickListener(this);
        }

        /**
         * This gets called by the child views during a click. We fetch the date that has been
         * selected, and then call the onClick handler registered with this adapter, passing that
         * date.
         *
         * @param v the View that was clicked
         */
        @Override
        public void onClick(View v) {
            // TODO (37) Instead of passing the String for the clicked item, pass the date from the cursor
            String weatherForDay = weatherSummary.getText().toString();
            mClickHandler.onClick(weatherForDay);
        }
    }
}
```
```go // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package machinelearning import ( "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) const opAddTags = "AddTags" // AddTagsRequest generates a "aws/request.Request" representing the // client's request for the AddTags operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AddTags for more information on using the AddTags // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AddTagsRequest method. // req, resp := client.AddTagsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { op := &request.Operation{ Name: opAddTags, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AddTagsInput{} } output = &AddTagsOutput{} req = c.newRequest(op, input, output) return } // AddTags API operation for Amazon Machine Learning. // // Adds one or more tags to an object, up to a limit of 10. Each tag consists // of a key and an optional value. If you add a tag using a key that is already // associated with the ML object, AddTags updates the tag's value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon Machine Learning's // API operation AddTags for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInvalidTagException "InvalidTagException" // // * ErrCodeTagLimitExceededException "TagLimitExceededException" // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A specified resource cannot be located. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { req, out := c.AddTagsRequest(input) return out, req.Send() } // AddTagsWithContext is the same as AddTags with the addition of // the ability to pass a context and additional request options. // // See AddTags for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) AddTagsWithContext(ctx aws.Context, input *AddTagsInput, opts ...request.Option) (*AddTagsOutput, error) { req, out := c.AddTagsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateBatchPrediction = "CreateBatchPrediction" // CreateBatchPredictionRequest generates a "aws/request.Request" representing the // client's request for the CreateBatchPrediction operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// // See CreateBatchPrediction for more information on using the CreateBatchPrediction // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateBatchPredictionRequest method. // req, resp := client.CreateBatchPredictionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) CreateBatchPredictionRequest(input *CreateBatchPredictionInput) (req *request.Request, output *CreateBatchPredictionOutput) { op := &request.Operation{ Name: opCreateBatchPrediction, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateBatchPredictionInput{} } output = &CreateBatchPredictionOutput{} req = c.newRequest(op, input, output) return } // CreateBatchPrediction API operation for Amazon Machine Learning. // // Generates predictions for a group of observations. The observations to process // exist in one or more data files referenced by a DataSource. This operation // creates a new BatchPrediction, and uses an MLModel and the data files referenced // by the DataSource as information sources. // // CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, // Amazon Machine Learning (Amazon ML) immediately returns and sets the BatchPrediction // status to PENDING. After the BatchPrediction completes, Amazon ML sets the // status to COMPLETED. // // You can poll for status updates by using the GetBatchPrediction operation // and checking the Status parameter of the result. After the COMPLETED status // appears, the results are available in the location specified by the OutputUri // parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon Machine Learning's // API operation CreateBatchPrediction for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // // * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" // A second request to use or change an object was not allowed. This can result // from retrying a request using a parameter that was not present in the original // request. // func (c *MachineLearning) CreateBatchPrediction(input *CreateBatchPredictionInput) (*CreateBatchPredictionOutput, error) { req, out := c.CreateBatchPredictionRequest(input) return out, req.Send() } // CreateBatchPredictionWithContext is the same as CreateBatchPrediction with the addition of // the ability to pass a context and additional request options. // // See CreateBatchPrediction for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) CreateBatchPredictionWithContext(ctx aws.Context, input *CreateBatchPredictionInput, opts ...request.Option) (*CreateBatchPredictionOutput, error) { req, out := c.CreateBatchPredictionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateDataSourceFromRDS = "CreateDataSourceFromRDS" // CreateDataSourceFromRDSRequest generates a "aws/request.Request" representing the // client's request for the CreateDataSourceFromRDS operation. 
The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreateDataSourceFromRDS for more information on using the CreateDataSourceFromRDS // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateDataSourceFromRDSRequest method. // req, resp := client.CreateDataSourceFromRDSRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) CreateDataSourceFromRDSRequest(input *CreateDataSourceFromRDSInput) (req *request.Request, output *CreateDataSourceFromRDSOutput) { op := &request.Operation{ Name: opCreateDataSourceFromRDS, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateDataSourceFromRDSInput{} } output = &CreateDataSourceFromRDSOutput{} req = c.newRequest(op, input, output) return } // CreateDataSourceFromRDS API operation for Amazon Machine Learning. // // Creates a DataSource object from an Amazon Relational Database Service (path_to_url // (Amazon RDS). A DataSource references data that can be used to perform CreateMLModel, // CreateEvaluation, or CreateBatchPrediction operations. // // CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, // Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource // status to PENDING. After the DataSource is created and ready for use, Amazon // ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or // PENDING state can be used only to perform >CreateMLModel>, CreateEvaluation, // or CreateBatchPrediction operations. 
// // If Amazon ML cannot accept the input source, it sets the Status parameter // to FAILED and includes an error message in the Message attribute of the GetDataSource // operation response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation CreateDataSourceFromRDS for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // // * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException" // A second request to use or change an object was not allowed. This can result // from retrying a request using a parameter that was not present in the original // request. // func (c *MachineLearning) CreateDataSourceFromRDS(input *CreateDataSourceFromRDSInput) (*CreateDataSourceFromRDSOutput, error) { req, out := c.CreateDataSourceFromRDSRequest(input) return out, req.Send() } // CreateDataSourceFromRDSWithContext is the same as CreateDataSourceFromRDS with the addition of // the ability to pass a context and additional request options. // // See CreateDataSourceFromRDS for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. 
func (c *MachineLearning) CreateDataSourceFromRDSWithContext(ctx aws.Context, input *CreateDataSourceFromRDSInput, opts ...request.Option) (*CreateDataSourceFromRDSOutput, error) {
	req, out := c.CreateDataSourceFromRDSRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateDataSourceFromRedshift = "CreateDataSourceFromRedshift"

// CreateDataSourceFromRedshiftRequest generates a "aws/request.Request" representing the
// client's request for the CreateDataSourceFromRedshift operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateDataSourceFromRedshift for more information on using the CreateDataSourceFromRedshift
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateDataSourceFromRedshiftRequest method.
//    req, resp := client.CreateDataSourceFromRedshiftRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) CreateDataSourceFromRedshiftRequest(input *CreateDataSourceFromRedshiftInput) (req *request.Request, output *CreateDataSourceFromRedshiftOutput) {
	op := &request.Operation{
		Name:       opCreateDataSourceFromRedshift,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateDataSourceFromRedshiftInput{}
	}

	output = &CreateDataSourceFromRedshiftOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateDataSourceFromRedshift API operation for Amazon Machine Learning.
//
// Creates a DataSource from a database hosted on an Amazon Redshift cluster.
// A DataSource references data that can be used to perform either CreateMLModel,
// CreateEvaluation, or CreateBatchPrediction operations.
//
// CreateDataSourceFromRedshift is an asynchronous operation. In response to
// CreateDataSourceFromRedshift, Amazon Machine Learning (Amazon ML) immediately
// returns and sets the DataSource status to PENDING. After the DataSource is
// created and ready for use, Amazon ML sets the Status parameter to COMPLETED.
// DataSource in COMPLETED or PENDING states can be used to perform only CreateMLModel,
// CreateEvaluation, or CreateBatchPrediction operations.
//
// If Amazon ML can't accept the input source, it sets the Status parameter
// to FAILED and includes an error message in the Message attribute of the GetDataSource
// operation response.
//
// The observations should be contained in the database hosted on an Amazon
// Redshift cluster and should be specified by a SelectSqlQuery query. Amazon
// ML executes an Unload command in Amazon Redshift to transfer the result set
// of the SelectSqlQuery query to S3StagingLocation.
//
// After the DataSource has been created, it's ready for use in evaluations
// and batch predictions. If you plan to use the DataSource to train an MLModel,
// the DataSource also requires a recipe. A recipe describes how each input
// variable will be used in training an MLModel. Will the variable be included
// or excluded from training? Will the variable be manipulated; for example,
// will it be combined with another variable or will it be split apart into
// word combinations? The recipe provides answers to these questions.
//
// You can't change an existing datasource, but you can copy and modify the
// settings from an existing Amazon Redshift datasource to create a new datasource.
// To do so, call GetDataSource for an existing datasource and copy the values
// to a CreateDataSource call. Change the settings that you want to change and
// make sure that all required fields have the appropriate values.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation CreateDataSourceFromRedshift for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
//   * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
//   A second request to use or change an object was not allowed. This can result
//   from retrying a request using a parameter that was not present in the original
//   request.
//
func (c *MachineLearning) CreateDataSourceFromRedshift(input *CreateDataSourceFromRedshiftInput) (*CreateDataSourceFromRedshiftOutput, error) {
	req, out := c.CreateDataSourceFromRedshiftRequest(input)
	return out, req.Send()
}

// CreateDataSourceFromRedshiftWithContext is the same as CreateDataSourceFromRedshift with the addition of
// the ability to pass a context and additional request options.
//
// See CreateDataSourceFromRedshift for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) CreateDataSourceFromRedshiftWithContext(ctx aws.Context, input *CreateDataSourceFromRedshiftInput, opts ...request.Option) (*CreateDataSourceFromRedshiftOutput, error) {
	req, out := c.CreateDataSourceFromRedshiftRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateDataSourceFromS3 = "CreateDataSourceFromS3"

// CreateDataSourceFromS3Request generates a "aws/request.Request" representing the
// client's request for the CreateDataSourceFromS3 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateDataSourceFromS3 for more information on using the CreateDataSourceFromS3
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateDataSourceFromS3Request method.
//    req, resp := client.CreateDataSourceFromS3Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) CreateDataSourceFromS3Request(input *CreateDataSourceFromS3Input) (req *request.Request, output *CreateDataSourceFromS3Output) {
	op := &request.Operation{
		Name:       opCreateDataSourceFromS3,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateDataSourceFromS3Input{}
	}

	output = &CreateDataSourceFromS3Output{}
	req = c.newRequest(op, input, output)
	return
}

// CreateDataSourceFromS3 API operation for Amazon Machine Learning.
//
// Creates a DataSource object. A DataSource references data that can be used
// to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
//
// CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3,
// Amazon Machine Learning (Amazon ML) immediately returns and sets the DataSource
// status to PENDING. After the DataSource has been created and is ready for
// use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the
// COMPLETED or PENDING state can be used to perform only CreateMLModel, CreateEvaluation
// or CreateBatchPrediction operations.
//
// If Amazon ML can't accept the input source, it sets the Status parameter
// to FAILED and includes an error message in the Message attribute of the GetDataSource
// operation response.
//
// The observation data used in a DataSource should be ready to use; that is,
// it should have a consistent structure, and missing data values should be
// kept to a minimum. The observation data must reside in one or more .csv files
// in an Amazon Simple Storage Service (Amazon S3) location, along with a schema
// that describes the data items by name and type. The same schema must be used
// for all of the data files referenced by the DataSource.
//
// After the DataSource has been created, it's ready to use in evaluations and
// batch predictions. If you plan to use the DataSource to train an MLModel,
// the DataSource also needs a recipe. A recipe describes how each input variable
// will be used in training an MLModel. Will the variable be included or excluded
// from training? Will the variable be manipulated; for example, will it be
// combined with another variable or will it be split apart into word combinations?
// The recipe provides answers to these questions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation CreateDataSourceFromS3 for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
//   * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
//   A second request to use or change an object was not allowed. This can result
//   from retrying a request using a parameter that was not present in the original
//   request.
//
func (c *MachineLearning) CreateDataSourceFromS3(input *CreateDataSourceFromS3Input) (*CreateDataSourceFromS3Output, error) {
	req, out := c.CreateDataSourceFromS3Request(input)
	return out, req.Send()
}

// CreateDataSourceFromS3WithContext is the same as CreateDataSourceFromS3 with the addition of
// the ability to pass a context and additional request options.
//
// See CreateDataSourceFromS3 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) CreateDataSourceFromS3WithContext(ctx aws.Context, input *CreateDataSourceFromS3Input, opts ...request.Option) (*CreateDataSourceFromS3Output, error) {
	req, out := c.CreateDataSourceFromS3Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateEvaluation = "CreateEvaluation"

// CreateEvaluationRequest generates a "aws/request.Request" representing the
// client's request for the CreateEvaluation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateEvaluation for more information on using the CreateEvaluation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateEvaluationRequest method.
//    req, resp := client.CreateEvaluationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) CreateEvaluationRequest(input *CreateEvaluationInput) (req *request.Request, output *CreateEvaluationOutput) {
	op := &request.Operation{
		Name:       opCreateEvaluation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateEvaluationInput{}
	}

	output = &CreateEvaluationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateEvaluation API operation for Amazon Machine Learning.
//
// Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set
// of observations associated to a DataSource. Like a DataSource for an MLModel,
// the DataSource for an Evaluation contains values for the Target Variable.
// The Evaluation compares the predicted result for each observation to the
// actual outcome and provides a summary so that you know how effective the
// MLModel functions on the test data. Evaluation generates a relevant performance
// metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on
// the corresponding MLModelType: BINARY, REGRESSION or MULTICLASS.
//
// CreateEvaluation is an asynchronous operation. In response to CreateEvaluation,
// Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation
// status to PENDING. After the Evaluation is created and ready for use, Amazon
// ML sets the status to COMPLETED.
//
// You can use the GetEvaluation operation to check progress of the evaluation
// during the creation operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation CreateEvaluation for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
//   * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
//   A second request to use or change an object was not allowed. This can result
//   from retrying a request using a parameter that was not present in the original
//   request.
//
func (c *MachineLearning) CreateEvaluation(input *CreateEvaluationInput) (*CreateEvaluationOutput, error) {
	req, out := c.CreateEvaluationRequest(input)
	return out, req.Send()
}

// CreateEvaluationWithContext is the same as CreateEvaluation with the addition of
// the ability to pass a context and additional request options.
//
// See CreateEvaluation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) CreateEvaluationWithContext(ctx aws.Context, input *CreateEvaluationInput, opts ...request.Option) (*CreateEvaluationOutput, error) {
	req, out := c.CreateEvaluationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateMLModel = "CreateMLModel"

// CreateMLModelRequest generates a "aws/request.Request" representing the
// client's request for the CreateMLModel operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateMLModel for more information on using the CreateMLModel
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateMLModelRequest method.
//    req, resp := client.CreateMLModelRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) CreateMLModelRequest(input *CreateMLModelInput) (req *request.Request, output *CreateMLModelOutput) {
	op := &request.Operation{
		Name:       opCreateMLModel,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateMLModelInput{}
	}

	output = &CreateMLModelOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateMLModel API operation for Amazon Machine Learning.
//
// Creates a new MLModel using the DataSource and the recipe as information
// sources.
//
// An MLModel is nearly immutable. Users can update only the MLModelName and
// the ScoreThreshold in an MLModel without creating a new MLModel.
//
// CreateMLModel is an asynchronous operation. In response to CreateMLModel,
// Amazon Machine Learning (Amazon ML) immediately returns and sets the MLModel
// status to PENDING. After the MLModel has been created and is ready for use,
// Amazon ML sets the status to COMPLETED.
//
// You can use the GetMLModel operation to check the progress of the MLModel
// during the creation operation.
//
// CreateMLModel requires a DataSource with computed statistics, which can be
// created by setting ComputeStatistics to true in CreateDataSourceFromRDS,
// CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation CreateMLModel for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
//   * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
//   A second request to use or change an object was not allowed. This can result
//   from retrying a request using a parameter that was not present in the original
//   request.
//
func (c *MachineLearning) CreateMLModel(input *CreateMLModelInput) (*CreateMLModelOutput, error) {
	req, out := c.CreateMLModelRequest(input)
	return out, req.Send()
}

// CreateMLModelWithContext is the same as CreateMLModel with the addition of
// the ability to pass a context and additional request options.
//
// See CreateMLModel for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) CreateMLModelWithContext(ctx aws.Context, input *CreateMLModelInput, opts ...request.Option) (*CreateMLModelOutput, error) {
	req, out := c.CreateMLModelRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateRealtimeEndpoint = "CreateRealtimeEndpoint"

// CreateRealtimeEndpointRequest generates a "aws/request.Request" representing the
// client's request for the CreateRealtimeEndpoint operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateRealtimeEndpoint for more information on using the CreateRealtimeEndpoint
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateRealtimeEndpointRequest method.
//    req, resp := client.CreateRealtimeEndpointRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) CreateRealtimeEndpointRequest(input *CreateRealtimeEndpointInput) (req *request.Request, output *CreateRealtimeEndpointOutput) {
	op := &request.Operation{
		Name:       opCreateRealtimeEndpoint,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateRealtimeEndpointInput{}
	}

	output = &CreateRealtimeEndpointOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateRealtimeEndpoint API operation for Amazon Machine Learning.
//
// Creates a real-time endpoint for the MLModel. The endpoint contains the URI
// of the MLModel; that is, the location to send real-time prediction requests
// for the specified MLModel.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation CreateRealtimeEndpoint for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) CreateRealtimeEndpoint(input *CreateRealtimeEndpointInput) (*CreateRealtimeEndpointOutput, error) {
	req, out := c.CreateRealtimeEndpointRequest(input)
	return out, req.Send()
}

// CreateRealtimeEndpointWithContext is the same as CreateRealtimeEndpoint with the addition of
// the ability to pass a context and additional request options.
//
// See CreateRealtimeEndpoint for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) CreateRealtimeEndpointWithContext(ctx aws.Context, input *CreateRealtimeEndpointInput, opts ...request.Option) (*CreateRealtimeEndpointOutput, error) {
	req, out := c.CreateRealtimeEndpointRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBatchPrediction = "DeleteBatchPrediction"

// DeleteBatchPredictionRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBatchPrediction operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteBatchPrediction for more information on using the DeleteBatchPrediction
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteBatchPredictionRequest method.
//    req, resp := client.DeleteBatchPredictionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) DeleteBatchPredictionRequest(input *DeleteBatchPredictionInput) (req *request.Request, output *DeleteBatchPredictionOutput) {
	op := &request.Operation{
		Name:       opDeleteBatchPrediction,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteBatchPredictionInput{}
	}

	output = &DeleteBatchPredictionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteBatchPrediction API operation for Amazon Machine Learning.
//
// Assigns the DELETED status to a BatchPrediction, rendering it unusable.
//
// After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction
// operation to verify that the status of the BatchPrediction changed to DELETED.
//
// Caution: The result of the DeleteBatchPrediction operation is irreversible.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation DeleteBatchPrediction for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) DeleteBatchPrediction(input *DeleteBatchPredictionInput) (*DeleteBatchPredictionOutput, error) {
	req, out := c.DeleteBatchPredictionRequest(input)
	return out, req.Send()
}

// DeleteBatchPredictionWithContext is the same as DeleteBatchPrediction with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBatchPrediction for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) DeleteBatchPredictionWithContext(ctx aws.Context, input *DeleteBatchPredictionInput, opts ...request.Option) (*DeleteBatchPredictionOutput, error) {
	req, out := c.DeleteBatchPredictionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteDataSource = "DeleteDataSource"

// DeleteDataSourceRequest generates a "aws/request.Request" representing the
// client's request for the DeleteDataSource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteDataSource for more information on using the DeleteDataSource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteDataSourceRequest method.
//    req, resp := client.DeleteDataSourceRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req *request.Request, output *DeleteDataSourceOutput) {
	op := &request.Operation{
		Name:       opDeleteDataSource,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteDataSourceInput{}
	}

	output = &DeleteDataSourceOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteDataSource API operation for Amazon Machine Learning.
//
// Assigns the DELETED status to a DataSource, rendering it unusable.
//
// After using the DeleteDataSource operation, you can use the GetDataSource
// operation to verify that the status of the DataSource changed to DELETED.
//
// Caution: The results of the DeleteDataSource operation are irreversible.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation DeleteDataSource for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) DeleteDataSource(input *DeleteDataSourceInput) (*DeleteDataSourceOutput, error) {
	req, out := c.DeleteDataSourceRequest(input)
	return out, req.Send()
}

// DeleteDataSourceWithContext is the same as DeleteDataSource with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteDataSource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) DeleteDataSourceWithContext(ctx aws.Context, input *DeleteDataSourceInput, opts ...request.Option) (*DeleteDataSourceOutput, error) {
	req, out := c.DeleteDataSourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteEvaluation = "DeleteEvaluation"

// DeleteEvaluationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteEvaluation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteEvaluation for more information on using the DeleteEvaluation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteEvaluationRequest method.
//    req, resp := client.DeleteEvaluationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) DeleteEvaluationRequest(input *DeleteEvaluationInput) (req *request.Request, output *DeleteEvaluationOutput) {
	op := &request.Operation{
		Name:       opDeleteEvaluation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteEvaluationInput{}
	}

	output = &DeleteEvaluationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteEvaluation API operation for Amazon Machine Learning.
//
// Assigns the DELETED status to an Evaluation, rendering it unusable.
//
// After invoking the DeleteEvaluation operation, you can use the GetEvaluation
// operation to verify that the status of the Evaluation changed to DELETED.
//
// Caution: The results of the DeleteEvaluation operation are irreversible.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation DeleteEvaluation for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) DeleteEvaluation(input *DeleteEvaluationInput) (*DeleteEvaluationOutput, error) {
	req, out := c.DeleteEvaluationRequest(input)
	return out, req.Send()
}

// DeleteEvaluationWithContext is the same as DeleteEvaluation with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteEvaluation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) DeleteEvaluationWithContext(ctx aws.Context, input *DeleteEvaluationInput, opts ...request.Option) (*DeleteEvaluationOutput, error) {
	req, out := c.DeleteEvaluationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteMLModel = "DeleteMLModel"

// DeleteMLModelRequest generates a "aws/request.Request" representing the
// client's request for the DeleteMLModel operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteMLModel for more information on using the DeleteMLModel
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteMLModelRequest method.
//    req, resp := client.DeleteMLModelRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) DeleteMLModelRequest(input *DeleteMLModelInput) (req *request.Request, output *DeleteMLModelOutput) {
	op := &request.Operation{
		Name:       opDeleteMLModel,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteMLModelInput{}
	}

	output = &DeleteMLModelOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteMLModel API operation for Amazon Machine Learning.
//
// Assigns the DELETED status to an MLModel, rendering it unusable.
//
// After using the DeleteMLModel operation, you can use the GetMLModel operation
// to verify that the status of the MLModel changed to DELETED.
//
// Caution: The result of the DeleteMLModel operation is irreversible.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation DeleteMLModel for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) DeleteMLModel(input *DeleteMLModelInput) (*DeleteMLModelOutput, error) {
	req, out := c.DeleteMLModelRequest(input)
	return out, req.Send()
}

// DeleteMLModelWithContext is the same as DeleteMLModel with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteMLModel for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) DeleteMLModelWithContext(ctx aws.Context, input *DeleteMLModelInput, opts ...request.Option) (*DeleteMLModelOutput, error) {
	req, out := c.DeleteMLModelRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteRealtimeEndpoint = "DeleteRealtimeEndpoint"

// DeleteRealtimeEndpointRequest generates a "aws/request.Request" representing the
// client's request for the DeleteRealtimeEndpoint operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteRealtimeEndpoint for more information on using the DeleteRealtimeEndpoint
// API call, and error handling.
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteRealtimeEndpointRequest method. // req, resp := client.DeleteRealtimeEndpointRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) DeleteRealtimeEndpointRequest(input *DeleteRealtimeEndpointInput) (req *request.Request, output *DeleteRealtimeEndpointOutput) { op := &request.Operation{ Name: opDeleteRealtimeEndpoint, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteRealtimeEndpointInput{} } output = &DeleteRealtimeEndpointOutput{} req = c.newRequest(op, input, output) return } // DeleteRealtimeEndpoint API operation for Amazon Machine Learning. // // Deletes a real time endpoint of an MLModel. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DeleteRealtimeEndpoint for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A specified resource cannot be located. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. 
//
func (c *MachineLearning) DeleteRealtimeEndpoint(input *DeleteRealtimeEndpointInput) (*DeleteRealtimeEndpointOutput, error) {
	req, out := c.DeleteRealtimeEndpointRequest(input)
	return out, req.Send()
}

// DeleteRealtimeEndpointWithContext is the same as DeleteRealtimeEndpoint with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteRealtimeEndpoint for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) DeleteRealtimeEndpointWithContext(ctx aws.Context, input *DeleteRealtimeEndpointInput, opts ...request.Option) (*DeleteRealtimeEndpointOutput, error) {
	req, out := c.DeleteRealtimeEndpointRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteTags = "DeleteTags"

// DeleteTagsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteTags operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteTags for more information on using the DeleteTags
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteTagsRequest method.
// req, resp := client.DeleteTagsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { op := &request.Operation{ Name: opDeleteTags, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteTagsInput{} } output = &DeleteTagsOutput{} req = c.newRequest(op, input, output) return } // DeleteTags API operation for Amazon Machine Learning. // // Deletes the specified tags associated with an ML object. After this operation // is complete, you can't recover deleted tags. // // If you specify a tag that doesn't exist, Amazon ML ignores it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DeleteTags for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInvalidTagException "InvalidTagException" // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A specified resource cannot be located. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { req, out := c.DeleteTagsRequest(input) return out, req.Send() } // DeleteTagsWithContext is the same as DeleteTags with the addition of // the ability to pass a context and additional request options. // // See DeleteTags for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) {
	req, out := c.DeleteTagsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDescribeBatchPredictions = "DescribeBatchPredictions"

// DescribeBatchPredictionsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeBatchPredictions operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeBatchPredictions for more information on using the DescribeBatchPredictions
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeBatchPredictionsRequest method.
// req, resp := client.DescribeBatchPredictionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) DescribeBatchPredictionsRequest(input *DescribeBatchPredictionsInput) (req *request.Request, output *DescribeBatchPredictionsOutput) { op := &request.Operation{ Name: opDescribeBatchPredictions, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeBatchPredictionsInput{} } output = &DescribeBatchPredictionsOutput{} req = c.newRequest(op, input, output) return } // DescribeBatchPredictions API operation for Amazon Machine Learning. // // Returns a list of BatchPrediction operations that match the search criteria // in the request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DescribeBatchPredictions for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) DescribeBatchPredictions(input *DescribeBatchPredictionsInput) (*DescribeBatchPredictionsOutput, error) { req, out := c.DescribeBatchPredictionsRequest(input) return out, req.Send() } // DescribeBatchPredictionsWithContext is the same as DescribeBatchPredictions with the addition of // the ability to pass a context and additional request options. // // See DescribeBatchPredictions for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) DescribeBatchPredictionsWithContext(ctx aws.Context, input *DescribeBatchPredictionsInput, opts ...request.Option) (*DescribeBatchPredictionsOutput, error) { req, out := c.DescribeBatchPredictionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeBatchPredictionsPages iterates over the pages of a DescribeBatchPredictions operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeBatchPredictions method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeBatchPredictions operation. // pageNum := 0 // err := client.DescribeBatchPredictionsPages(params, // func(page *DescribeBatchPredictionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MachineLearning) DescribeBatchPredictionsPages(input *DescribeBatchPredictionsInput, fn func(*DescribeBatchPredictionsOutput, bool) bool) error { return c.DescribeBatchPredictionsPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeBatchPredictionsPagesWithContext same as DescribeBatchPredictionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. 
func (c *MachineLearning) DescribeBatchPredictionsPagesWithContext(ctx aws.Context, input *DescribeBatchPredictionsInput, fn func(*DescribeBatchPredictionsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *DescribeBatchPredictionsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeBatchPredictionsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*DescribeBatchPredictionsOutput), !p.HasNextPage())
	}
	return p.Err()
}

const opDescribeDataSources = "DescribeDataSources"

// DescribeDataSourcesRequest generates a "aws/request.Request" representing the
// client's request for the DescribeDataSources operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeDataSources for more information on using the DescribeDataSources
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeDataSourcesRequest method.
// req, resp := client.DescribeDataSourcesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) DescribeDataSourcesRequest(input *DescribeDataSourcesInput) (req *request.Request, output *DescribeDataSourcesOutput) { op := &request.Operation{ Name: opDescribeDataSources, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeDataSourcesInput{} } output = &DescribeDataSourcesOutput{} req = c.newRequest(op, input, output) return } // DescribeDataSources API operation for Amazon Machine Learning. // // Returns a list of DataSource that match the search criteria in the request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DescribeDataSources for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) DescribeDataSources(input *DescribeDataSourcesInput) (*DescribeDataSourcesOutput, error) { req, out := c.DescribeDataSourcesRequest(input) return out, req.Send() } // DescribeDataSourcesWithContext is the same as DescribeDataSources with the addition of // the ability to pass a context and additional request options. // // See DescribeDataSources for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) DescribeDataSourcesWithContext(ctx aws.Context, input *DescribeDataSourcesInput, opts ...request.Option) (*DescribeDataSourcesOutput, error) { req, out := c.DescribeDataSourcesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeDataSourcesPages iterates over the pages of a DescribeDataSources operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeDataSources method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeDataSources operation. // pageNum := 0 // err := client.DescribeDataSourcesPages(params, // func(page *DescribeDataSourcesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MachineLearning) DescribeDataSourcesPages(input *DescribeDataSourcesInput, fn func(*DescribeDataSourcesOutput, bool) bool) error { return c.DescribeDataSourcesPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeDataSourcesPagesWithContext same as DescribeDataSourcesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. 
func (c *MachineLearning) DescribeDataSourcesPagesWithContext(ctx aws.Context, input *DescribeDataSourcesInput, fn func(*DescribeDataSourcesOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *DescribeDataSourcesInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeDataSourcesRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*DescribeDataSourcesOutput), !p.HasNextPage())
	}
	return p.Err()
}

const opDescribeEvaluations = "DescribeEvaluations"

// DescribeEvaluationsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeEvaluations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeEvaluations for more information on using the DescribeEvaluations
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeEvaluationsRequest method.
// req, resp := client.DescribeEvaluationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) DescribeEvaluationsRequest(input *DescribeEvaluationsInput) (req *request.Request, output *DescribeEvaluationsOutput) { op := &request.Operation{ Name: opDescribeEvaluations, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeEvaluationsInput{} } output = &DescribeEvaluationsOutput{} req = c.newRequest(op, input, output) return } // DescribeEvaluations API operation for Amazon Machine Learning. // // Returns a list of DescribeEvaluations that match the search criteria in the // request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DescribeEvaluations for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) DescribeEvaluations(input *DescribeEvaluationsInput) (*DescribeEvaluationsOutput, error) { req, out := c.DescribeEvaluationsRequest(input) return out, req.Send() } // DescribeEvaluationsWithContext is the same as DescribeEvaluations with the addition of // the ability to pass a context and additional request options. // // See DescribeEvaluations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) DescribeEvaluationsWithContext(ctx aws.Context, input *DescribeEvaluationsInput, opts ...request.Option) (*DescribeEvaluationsOutput, error) { req, out := c.DescribeEvaluationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeEvaluationsPages iterates over the pages of a DescribeEvaluations operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeEvaluations method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeEvaluations operation. // pageNum := 0 // err := client.DescribeEvaluationsPages(params, // func(page *DescribeEvaluationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MachineLearning) DescribeEvaluationsPages(input *DescribeEvaluationsInput, fn func(*DescribeEvaluationsOutput, bool) bool) error { return c.DescribeEvaluationsPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeEvaluationsPagesWithContext same as DescribeEvaluationsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. 
func (c *MachineLearning) DescribeEvaluationsPagesWithContext(ctx aws.Context, input *DescribeEvaluationsInput, fn func(*DescribeEvaluationsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *DescribeEvaluationsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeEvaluationsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*DescribeEvaluationsOutput), !p.HasNextPage())
	}
	return p.Err()
}

const opDescribeMLModels = "DescribeMLModels"

// DescribeMLModelsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeMLModels operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeMLModels for more information on using the DescribeMLModels
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeMLModelsRequest method.
// req, resp := client.DescribeMLModelsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) DescribeMLModelsRequest(input *DescribeMLModelsInput) (req *request.Request, output *DescribeMLModelsOutput) { op := &request.Operation{ Name: opDescribeMLModels, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeMLModelsInput{} } output = &DescribeMLModelsOutput{} req = c.newRequest(op, input, output) return } // DescribeMLModels API operation for Amazon Machine Learning. // // Returns a list of MLModel that match the search criteria in the request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DescribeMLModels for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) DescribeMLModels(input *DescribeMLModelsInput) (*DescribeMLModelsOutput, error) { req, out := c.DescribeMLModelsRequest(input) return out, req.Send() } // DescribeMLModelsWithContext is the same as DescribeMLModels with the addition of // the ability to pass a context and additional request options. // // See DescribeMLModels for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) DescribeMLModelsWithContext(ctx aws.Context, input *DescribeMLModelsInput, opts ...request.Option) (*DescribeMLModelsOutput, error) { req, out := c.DescribeMLModelsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeMLModelsPages iterates over the pages of a DescribeMLModels operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeMLModels method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeMLModels operation. // pageNum := 0 // err := client.DescribeMLModelsPages(params, // func(page *DescribeMLModelsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MachineLearning) DescribeMLModelsPages(input *DescribeMLModelsInput, fn func(*DescribeMLModelsOutput, bool) bool) error { return c.DescribeMLModelsPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeMLModelsPagesWithContext same as DescribeMLModelsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. 
func (c *MachineLearning) DescribeMLModelsPagesWithContext(ctx aws.Context, input *DescribeMLModelsInput, fn func(*DescribeMLModelsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *DescribeMLModelsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeMLModelsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*DescribeMLModelsOutput), !p.HasNextPage())
	}
	return p.Err()
}

const opDescribeTags = "DescribeTags"

// DescribeTagsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeTags operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeTags for more information on using the DescribeTags
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeTagsRequest method.
//    req, resp := client.DescribeTagsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) {
	op := &request.Operation{
		Name:       opDescribeTags,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeTagsInput{}
	}

	output = &DescribeTagsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DescribeTags API operation for Amazon Machine Learning.
//
// Describes one or more of the tags for your Amazon ML object.
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation DescribeTags for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A specified resource cannot be located. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. // func (c *MachineLearning) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { req, out := c.DescribeTagsRequest(input) return out, req.Send() } // DescribeTagsWithContext is the same as DescribeTags with the addition of // the ability to pass a context and additional request options. // // See DescribeTags for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See path_to_url // for more information on using Contexts. func (c *MachineLearning) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { req, out := c.DescribeTagsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetBatchPrediction = "GetBatchPrediction" // GetBatchPredictionRequest generates a "aws/request.Request" representing the // client's request for the GetBatchPrediction operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See GetBatchPrediction for more information on using the GetBatchPrediction // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the GetBatchPredictionRequest method. // req, resp := client.GetBatchPredictionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *MachineLearning) GetBatchPredictionRequest(input *GetBatchPredictionInput) (req *request.Request, output *GetBatchPredictionOutput) { op := &request.Operation{ Name: opGetBatchPrediction, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &GetBatchPredictionInput{} } output = &GetBatchPredictionOutput{} req = c.newRequest(op, input, output) return } // GetBatchPrediction API operation for Amazon Machine Learning. // // Returns a BatchPrediction that includes detailed metadata, status, and data // file information for a Batch Prediction request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Machine Learning's // API operation GetBatchPrediction for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidInputException "InvalidInputException" // An error on the client occurred. Typically, the cause is an invalid input // value. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // A specified resource cannot be located. // // * ErrCodeInternalServerException "InternalServerException" // An error on the server occurred when trying to process a request. 
//
func (c *MachineLearning) GetBatchPrediction(input *GetBatchPredictionInput) (*GetBatchPredictionOutput, error) {
	req, out := c.GetBatchPredictionRequest(input)
	return out, req.Send()
}

// GetBatchPredictionWithContext is the same as GetBatchPrediction with the addition of
// the ability to pass a context and additional request options.
//
// See GetBatchPrediction for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) GetBatchPredictionWithContext(ctx aws.Context, input *GetBatchPredictionInput, opts ...request.Option) (*GetBatchPredictionOutput, error) {
	req, out := c.GetBatchPredictionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetDataSource = "GetDataSource"

// GetDataSourceRequest generates a "aws/request.Request" representing the
// client's request for the GetDataSource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetDataSource for more information on using the GetDataSource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the GetDataSourceRequest method.
//    req, resp := client.GetDataSourceRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) GetDataSourceRequest(input *GetDataSourceInput) (req *request.Request, output *GetDataSourceOutput) {
	op := &request.Operation{
		Name:       opGetDataSource,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetDataSourceInput{}
	}

	output = &GetDataSourceOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetDataSource API operation for Amazon Machine Learning.
//
// Returns a DataSource that includes metadata and data file information, as
// well as the current status of the DataSource.
//
// GetDataSource provides results in normal or verbose format. The verbose format
// adds the schema description and the list of files pointed to by the DataSource
// to the normal format.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation GetDataSource for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) GetDataSource(input *GetDataSourceInput) (*GetDataSourceOutput, error) {
	req, out := c.GetDataSourceRequest(input)
	return out, req.Send()
}

// GetDataSourceWithContext is the same as GetDataSource with the addition of
// the ability to pass a context and additional request options.
//
// See GetDataSource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) GetDataSourceWithContext(ctx aws.Context, input *GetDataSourceInput, opts ...request.Option) (*GetDataSourceOutput, error) {
	req, out := c.GetDataSourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetEvaluation = "GetEvaluation"

// GetEvaluationRequest generates a "aws/request.Request" representing the
// client's request for the GetEvaluation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetEvaluation for more information on using the GetEvaluation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the GetEvaluationRequest method.
//    req, resp := client.GetEvaluationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) GetEvaluationRequest(input *GetEvaluationInput) (req *request.Request, output *GetEvaluationOutput) {
	op := &request.Operation{
		Name:       opGetEvaluation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetEvaluationInput{}
	}

	output = &GetEvaluationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetEvaluation API operation for Amazon Machine Learning.
//
// Returns an Evaluation that includes metadata as well as the current status
// of the Evaluation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation GetEvaluation for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) GetEvaluation(input *GetEvaluationInput) (*GetEvaluationOutput, error) {
	req, out := c.GetEvaluationRequest(input)
	return out, req.Send()
}

// GetEvaluationWithContext is the same as GetEvaluation with the addition of
// the ability to pass a context and additional request options.
//
// See GetEvaluation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) GetEvaluationWithContext(ctx aws.Context, input *GetEvaluationInput, opts ...request.Option) (*GetEvaluationOutput, error) {
	req, out := c.GetEvaluationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetMLModel = "GetMLModel"

// GetMLModelRequest generates a "aws/request.Request" representing the
// client's request for the GetMLModel operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetMLModel for more information on using the GetMLModel
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the GetMLModelRequest method.
//    req, resp := client.GetMLModelRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) GetMLModelRequest(input *GetMLModelInput) (req *request.Request, output *GetMLModelOutput) {
	op := &request.Operation{
		Name:       opGetMLModel,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetMLModelInput{}
	}

	output = &GetMLModelOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetMLModel API operation for Amazon Machine Learning.
//
// Returns an MLModel that includes detailed metadata, data source information,
// and the current status of the MLModel.
//
// GetMLModel provides results in normal or verbose format.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation GetMLModel for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) GetMLModel(input *GetMLModelInput) (*GetMLModelOutput, error) {
	req, out := c.GetMLModelRequest(input)
	return out, req.Send()
}

// GetMLModelWithContext is the same as GetMLModel with the addition of
// the ability to pass a context and additional request options.
//
// See GetMLModel for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) GetMLModelWithContext(ctx aws.Context, input *GetMLModelInput, opts ...request.Option) (*GetMLModelOutput, error) {
	req, out := c.GetMLModelRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opPredict = "Predict"

// PredictRequest generates a "aws/request.Request" representing the
// client's request for the Predict operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See Predict for more information on using the Predict
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the PredictRequest method.
//    req, resp := client.PredictRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) PredictRequest(input *PredictInput) (req *request.Request, output *PredictOutput) {
	op := &request.Operation{
		Name:       opPredict,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PredictInput{}
	}

	output = &PredictOutput{}
	req = c.newRequest(op, input, output)
	return
}

// Predict API operation for Amazon Machine Learning.
//
// Generates a prediction for the observation using the specified ML Model.
//
// Note: Not all response parameters will be populated. Whether a response parameter
// is populated depends on the type of model requested.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation Predict for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeLimitExceededException "LimitExceededException"
//   The subscriber exceeded the maximum number of operations. This exception
//   can occur when listing objects such as DataSource.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
//   * ErrCodePredictorNotMountedException "PredictorNotMountedException"
//   The exception is thrown when a predict request is made to an unmounted MLModel.
//
func (c *MachineLearning) Predict(input *PredictInput) (*PredictOutput, error) {
	req, out := c.PredictRequest(input)
	return out, req.Send()
}

// PredictWithContext is the same as Predict with the addition of
// the ability to pass a context and additional request options.
//
// See Predict for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) PredictWithContext(ctx aws.Context, input *PredictInput, opts ...request.Option) (*PredictOutput, error) {
	req, out := c.PredictRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateBatchPrediction = "UpdateBatchPrediction"

// UpdateBatchPredictionRequest generates a "aws/request.Request" representing the
// client's request for the UpdateBatchPrediction operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateBatchPrediction for more information on using the UpdateBatchPrediction
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateBatchPredictionRequest method.
//    req, resp := client.UpdateBatchPredictionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) UpdateBatchPredictionRequest(input *UpdateBatchPredictionInput) (req *request.Request, output *UpdateBatchPredictionOutput) {
	op := &request.Operation{
		Name:       opUpdateBatchPrediction,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateBatchPredictionInput{}
	}

	output = &UpdateBatchPredictionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateBatchPrediction API operation for Amazon Machine Learning.
//
// Updates the BatchPredictionName of a BatchPrediction.
//
// You can use the GetBatchPrediction operation to view the contents of the
// updated data element.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation UpdateBatchPrediction for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) UpdateBatchPrediction(input *UpdateBatchPredictionInput) (*UpdateBatchPredictionOutput, error) {
	req, out := c.UpdateBatchPredictionRequest(input)
	return out, req.Send()
}

// UpdateBatchPredictionWithContext is the same as UpdateBatchPrediction with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateBatchPrediction for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) UpdateBatchPredictionWithContext(ctx aws.Context, input *UpdateBatchPredictionInput, opts ...request.Option) (*UpdateBatchPredictionOutput, error) {
	req, out := c.UpdateBatchPredictionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateDataSource = "UpdateDataSource"

// UpdateDataSourceRequest generates a "aws/request.Request" representing the
// client's request for the UpdateDataSource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateDataSource for more information on using the UpdateDataSource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateDataSourceRequest method.
//    req, resp := client.UpdateDataSourceRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req *request.Request, output *UpdateDataSourceOutput) {
	op := &request.Operation{
		Name:       opUpdateDataSource,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateDataSourceInput{}
	}

	output = &UpdateDataSourceOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateDataSource API operation for Amazon Machine Learning.
//
// Updates the DataSourceName of a DataSource.
//
// You can use the GetDataSource operation to view the contents of the updated
// data element.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation UpdateDataSource for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) UpdateDataSource(input *UpdateDataSourceInput) (*UpdateDataSourceOutput, error) {
	req, out := c.UpdateDataSourceRequest(input)
	return out, req.Send()
}

// UpdateDataSourceWithContext is the same as UpdateDataSource with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateDataSource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) UpdateDataSourceWithContext(ctx aws.Context, input *UpdateDataSourceInput, opts ...request.Option) (*UpdateDataSourceOutput, error) {
	req, out := c.UpdateDataSourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateEvaluation = "UpdateEvaluation"

// UpdateEvaluationRequest generates a "aws/request.Request" representing the
// client's request for the UpdateEvaluation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateEvaluation for more information on using the UpdateEvaluation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateEvaluationRequest method.
//    req, resp := client.UpdateEvaluationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) UpdateEvaluationRequest(input *UpdateEvaluationInput) (req *request.Request, output *UpdateEvaluationOutput) {
	op := &request.Operation{
		Name:       opUpdateEvaluation,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateEvaluationInput{}
	}

	output = &UpdateEvaluationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateEvaluation API operation for Amazon Machine Learning.
//
// Updates the EvaluationName of an Evaluation.
//
// You can use the GetEvaluation operation to view the contents of the updated
// data element.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation UpdateEvaluation for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) UpdateEvaluation(input *UpdateEvaluationInput) (*UpdateEvaluationOutput, error) {
	req, out := c.UpdateEvaluationRequest(input)
	return out, req.Send()
}

// UpdateEvaluationWithContext is the same as UpdateEvaluation with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateEvaluation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) UpdateEvaluationWithContext(ctx aws.Context, input *UpdateEvaluationInput, opts ...request.Option) (*UpdateEvaluationOutput, error) {
	req, out := c.UpdateEvaluationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateMLModel = "UpdateMLModel"

// UpdateMLModelRequest generates a "aws/request.Request" representing the
// client's request for the UpdateMLModel operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See UpdateMLModel for more information on using the UpdateMLModel
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateMLModelRequest method.
//    req, resp := client.UpdateMLModelRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *MachineLearning) UpdateMLModelRequest(input *UpdateMLModelInput) (req *request.Request, output *UpdateMLModelOutput) {
	op := &request.Operation{
		Name:       opUpdateMLModel,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateMLModelInput{}
	}

	output = &UpdateMLModelOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateMLModel API operation for Amazon Machine Learning.
//
// Updates the MLModelName and the ScoreThreshold of an MLModel.
//
// You can use the GetMLModel operation to view the contents of the updated
// data element.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Machine Learning's
// API operation UpdateMLModel for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidInputException "InvalidInputException"
//   An error on the client occurred. Typically, the cause is an invalid input
//   value.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   A specified resource cannot be located.
//
//   * ErrCodeInternalServerException "InternalServerException"
//   An error on the server occurred when trying to process a request.
//
func (c *MachineLearning) UpdateMLModel(input *UpdateMLModelInput) (*UpdateMLModelOutput, error) {
	req, out := c.UpdateMLModelRequest(input)
	return out, req.Send()
}

// UpdateMLModelWithContext is the same as UpdateMLModel with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateMLModel for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See path_to_url
// for more information on using Contexts.
func (c *MachineLearning) UpdateMLModelWithContext(ctx aws.Context, input *UpdateMLModelInput, opts ...request.Option) (*UpdateMLModelOutput, error) {
	req, out := c.UpdateMLModelRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type AddTagsInput struct {
	_ struct{} `type:"structure"`

	// The ID of the ML object to tag. For example, exampleModelId.
	//
	// ResourceId is a required field
	ResourceId *string `min:"1" type:"string" required:"true"`

	// The type of the ML object to tag.
	//
	// ResourceType is a required field
	ResourceType *string `type:"string" required:"true" enum:"TaggableResourceType"`

	// The key-value pairs to use to create tags. If you specify a key without specifying
	// a value, Amazon ML creates a tag with the specified key and a value of null.
	//
	// Tags is a required field
	Tags []*Tag `type:"list" required:"true"`
}

// String returns the string representation
func (s AddTagsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AddTagsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AddTagsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } if s.ResourceId != nil && len(*s.ResourceId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) } if s.ResourceType == nil { invalidParams.Add(request.NewErrParamRequired("ResourceType")) } if s.Tags == nil { invalidParams.Add(request.NewErrParamRequired("Tags")) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetResourceId sets the ResourceId field's value. func (s *AddTagsInput) SetResourceId(v string) *AddTagsInput { s.ResourceId = &v return s } // SetResourceType sets the ResourceType field's value. func (s *AddTagsInput) SetResourceType(v string) *AddTagsInput { s.ResourceType = &v return s } // SetTags sets the Tags field's value. func (s *AddTagsInput) SetTags(v []*Tag) *AddTagsInput { s.Tags = v return s } // Amazon ML returns the following elements. type AddTagsOutput struct { _ struct{} `type:"structure"` // The ID of the ML object that was tagged. ResourceId *string `min:"1" type:"string"` // The type of the ML object that was tagged. ResourceType *string `type:"string" enum:"TaggableResourceType"` } // String returns the string representation func (s AddTagsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AddTagsOutput) GoString() string { return s.String() } // SetResourceId sets the ResourceId field's value. func (s *AddTagsOutput) SetResourceId(v string) *AddTagsOutput { s.ResourceId = &v return s } // SetResourceType sets the ResourceType field's value. 
func (s *AddTagsOutput) SetResourceType(v string) *AddTagsOutput {
	s.ResourceType = &v
	return s
}

// Represents the output of a GetBatchPrediction operation.
//
// The content consists of the detailed metadata, the status, and the data file
// information of a Batch Prediction.
type BatchPrediction struct {
	_ struct{} `type:"structure"`

	// The ID of the DataSource that points to the group of observations to predict.
	BatchPredictionDataSourceId *string `min:"1" type:"string"`

	// The ID assigned to the BatchPrediction at creation. This value should be
	// identical to the value of the BatchPredictionID in the request.
	BatchPredictionId *string `min:"1" type:"string"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the BatchPrediction was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account that invoked the BatchPrediction. The account type can
	// be either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string `type:"string"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// Long integer type that is a 64-bit signed number.
	InvalidRecordCount *int64 `type:"long"`

	// The time of the most recent edit to the BatchPrediction. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The ID of the MLModel that generated predictions for the BatchPrediction
	// request.
	MLModelId *string `min:"1" type:"string"`

	// A description of the most recent details about processing the batch prediction
	// request.
	Message *string `type:"string"`

	// A user-supplied name or description of the BatchPrediction.
	Name *string `type:"string"`

	// The location of an Amazon S3 bucket or directory to receive the operation
	// results. The following substrings are not allowed in the s3 key portion of
	// the outputURI field: ':', '//', '/./', '/../'.
	OutputUri *string `type:"string"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The status of the BatchPrediction. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    generate predictions for a batch of observations.
	//    * INPROGRESS - The process is underway.
	//    * FAILED - The request to perform a batch prediction did not run to completion.
	//    It is not usable.
	//    * COMPLETED - The batch prediction process completed successfully.
	//    * DELETED - The BatchPrediction is marked as deleted. It is not usable.
	Status *string `type:"string" enum:"EntityStatus"`

	// Long integer type that is a 64-bit signed number.
	TotalRecordCount *int64 `type:"long"`
}

// String returns the string representation
func (s BatchPrediction) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s BatchPrediction) GoString() string {
	return s.String()
}

// SetBatchPredictionDataSourceId sets the BatchPredictionDataSourceId field's value.
func (s *BatchPrediction) SetBatchPredictionDataSourceId(v string) *BatchPrediction {
	s.BatchPredictionDataSourceId = &v
	return s
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *BatchPrediction) SetBatchPredictionId(v string) *BatchPrediction {
	s.BatchPredictionId = &v
	return s
}

// SetComputeTime sets the ComputeTime field's value.
func (s *BatchPrediction) SetComputeTime(v int64) *BatchPrediction {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
// Setters below return the receiver so calls can be chained fluently.
func (s *BatchPrediction) SetCreatedAt(v time.Time) *BatchPrediction {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *BatchPrediction) SetCreatedByIamUser(v string) *BatchPrediction {
	s.CreatedByIamUser = &v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *BatchPrediction) SetFinishedAt(v time.Time) *BatchPrediction {
	s.FinishedAt = &v
	return s
}

// SetInputDataLocationS3 sets the InputDataLocationS3 field's value.
func (s *BatchPrediction) SetInputDataLocationS3(v string) *BatchPrediction {
	s.InputDataLocationS3 = &v
	return s
}

// SetInvalidRecordCount sets the InvalidRecordCount field's value.
func (s *BatchPrediction) SetInvalidRecordCount(v int64) *BatchPrediction {
	s.InvalidRecordCount = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *BatchPrediction) SetLastUpdatedAt(v time.Time) *BatchPrediction {
	s.LastUpdatedAt = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *BatchPrediction) SetMLModelId(v string) *BatchPrediction {
	s.MLModelId = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *BatchPrediction) SetMessage(v string) *BatchPrediction {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *BatchPrediction) SetName(v string) *BatchPrediction {
	s.Name = &v
	return s
}

// SetOutputUri sets the OutputUri field's value.
func (s *BatchPrediction) SetOutputUri(v string) *BatchPrediction {
	s.OutputUri = &v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *BatchPrediction) SetStartedAt(v time.Time) *BatchPrediction {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *BatchPrediction) SetStatus(v string) *BatchPrediction {
	s.Status = &v
	return s
}

// SetTotalRecordCount sets the TotalRecordCount field's value.
func (s *BatchPrediction) SetTotalRecordCount(v int64) *BatchPrediction {
	s.TotalRecordCount = &v
	return s
}

// CreateBatchPredictionInput is the request shape for the CreateBatchPrediction
// operation.
type CreateBatchPredictionInput struct {
	_ struct{} `type:"structure"`

	// The ID of the DataSource that points to the group of observations to predict.
	//
	// BatchPredictionDataSourceId is a required field
	BatchPredictionDataSourceId *string `min:"1" type:"string" required:"true"`

	// A user-supplied ID that uniquely identifies the BatchPrediction.
	//
	// BatchPredictionId is a required field
	BatchPredictionId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the BatchPrediction. BatchPredictionName
	// can only use the UTF-8 character set.
	BatchPredictionName *string `type:"string"`

	// The ID of the MLModel that will generate predictions for the group of observations.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`

	// The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory
	// to store the batch prediction results. The following substrings are not allowed
	// in the s3 key portion of the outputURI field: ':', '//', '/./', '/../'.
	//
	// Amazon ML needs permissions to store and retrieve the logs on your behalf.
	// For information about how to set permissions, see the Amazon Machine Learning
	// Developer Guide (path_to_url
	//
	// OutputUri is a required field
	OutputUri *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CreateBatchPredictionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateBatchPredictionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateBatchPredictionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateBatchPredictionInput"}
	if s.BatchPredictionDataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("BatchPredictionDataSourceId"))
	}
	if s.BatchPredictionDataSourceId != nil && len(*s.BatchPredictionDataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("BatchPredictionDataSourceId", 1))
	}
	if s.BatchPredictionId == nil {
		invalidParams.Add(request.NewErrParamRequired("BatchPredictionId"))
	}
	if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1))
	}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}
	if s.OutputUri == nil {
		invalidParams.Add(request.NewErrParamRequired("OutputUri"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBatchPredictionDataSourceId sets the BatchPredictionDataSourceId field's value.
func (s *CreateBatchPredictionInput) SetBatchPredictionDataSourceId(v string) *CreateBatchPredictionInput {
	s.BatchPredictionDataSourceId = &v
	return s
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *CreateBatchPredictionInput) SetBatchPredictionId(v string) *CreateBatchPredictionInput {
	s.BatchPredictionId = &v
	return s
}

// SetBatchPredictionName sets the BatchPredictionName field's value.
func (s *CreateBatchPredictionInput) SetBatchPredictionName(v string) *CreateBatchPredictionInput {
	s.BatchPredictionName = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *CreateBatchPredictionInput) SetMLModelId(v string) *CreateBatchPredictionInput {
	s.MLModelId = &v
	return s
}

// SetOutputUri sets the OutputUri field's value.
func (s *CreateBatchPredictionInput) SetOutputUri(v string) *CreateBatchPredictionInput {
	s.OutputUri = &v
	return s
}

// Represents the output of a CreateBatchPrediction operation, and is an acknowledgement
// that Amazon ML received the request.
//
// The CreateBatchPrediction operation is asynchronous. You can poll for status
// updates by using the GetBatchPrediction operation and checking the Status
// parameter of the result.
type CreateBatchPredictionOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the BatchPrediction. This value
	// is identical to the value of the BatchPredictionId in the request.
	BatchPredictionId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s CreateBatchPredictionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateBatchPredictionOutput) GoString() string {
	return s.String()
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *CreateBatchPredictionOutput) SetBatchPredictionId(v string) *CreateBatchPredictionOutput {
	s.BatchPredictionId = &v
	return s
}

// CreateDataSourceFromRDSInput is the request shape for the CreateDataSourceFromRDS
// operation.
type CreateDataSourceFromRDSInput struct {
	_ struct{} `type:"structure"`

	// The compute statistics for a DataSource. The statistics are generated from
	// the observation data referenced by a DataSource. Amazon ML uses the statistics
	// internally during MLModel training. This parameter must be set to true if
	// the DataSource needs to be used for MLModel training.
	ComputeStatistics *bool `type:"boolean"`

	// A user-supplied ID that uniquely identifies the DataSource. Typically, an
	// Amazon Resource Number (ARN) becomes the ID for a DataSource.
	//
	// DataSourceId is a required field
	DataSourceId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the DataSource.
	DataSourceName *string `type:"string"`

	// The data specification of an Amazon RDS DataSource:
	//
	// RDSData is a required field
	RDSData *RDSDataSpec `type:"structure" required:"true"`

	// The role that Amazon ML assumes on behalf of the user to create and activate
	// a data pipeline in the user's account and copy data using the SelectSqlQuery
	// query from Amazon RDS to Amazon S3.
	//
	// RoleARN is a required field
	RoleARN *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s CreateDataSourceFromRDSInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDataSourceFromRDSInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateDataSourceFromRDSInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceFromRDSInput"}
	if s.DataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
	}
	if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
	}
	if s.RDSData == nil {
		invalidParams.Add(request.NewErrParamRequired("RDSData"))
	}
	if s.RoleARN == nil {
		invalidParams.Add(request.NewErrParamRequired("RoleARN"))
	}
	if s.RoleARN != nil && len(*s.RoleARN) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
	}
	if s.RDSData != nil {
		// The nested data spec carries its own required fields; surface them
		// under the "RDSData" prefix.
		if err := s.RDSData.Validate(); err != nil {
			invalidParams.AddNested("RDSData", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetComputeStatistics sets the ComputeStatistics field's value.
func (s *CreateDataSourceFromRDSInput) SetComputeStatistics(v bool) *CreateDataSourceFromRDSInput {
	s.ComputeStatistics = &v
	return s
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *CreateDataSourceFromRDSInput) SetDataSourceId(v string) *CreateDataSourceFromRDSInput {
	s.DataSourceId = &v
	return s
}

// SetDataSourceName sets the DataSourceName field's value.
func (s *CreateDataSourceFromRDSInput) SetDataSourceName(v string) *CreateDataSourceFromRDSInput {
	s.DataSourceName = &v
	return s
}

// SetRDSData sets the RDSData field's value.
func (s *CreateDataSourceFromRDSInput) SetRDSData(v *RDSDataSpec) *CreateDataSourceFromRDSInput {
	s.RDSData = v
	return s
}

// SetRoleARN sets the RoleARN field's value.
func (s *CreateDataSourceFromRDSInput) SetRoleARN(v string) *CreateDataSourceFromRDSInput {
	s.RoleARN = &v
	return s
}

// Represents the output of a CreateDataSourceFromRDS operation, and is an acknowledgement
// that Amazon ML received the request.
//
// The CreateDataSourceFromRDS operation is asynchronous. You can poll for
// updates by using the GetBatchPrediction operation and checking the Status
// parameter. You can inspect the Message when Status shows up as FAILED. You
// can also check the progress of the copy operation by going to the DataPipeline
// console and looking up the pipeline using the pipelineId from the describe
// call.
//
// NOTE(review): the upstream service documentation says GetBatchPrediction
// here, but for a DataSource the status-polling operation is presumably
// GetDataSource — confirm against the AWS ML API reference.
type CreateDataSourceFromRDSOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the datasource. This value should
	// be identical to the value of the DataSourceID in the request.
	DataSourceId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s CreateDataSourceFromRDSOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDataSourceFromRDSOutput) GoString() string {
	return s.String()
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *CreateDataSourceFromRDSOutput) SetDataSourceId(v string) *CreateDataSourceFromRDSOutput {
	s.DataSourceId = &v
	return s
}

// CreateDataSourceFromRedshiftInput is the request shape for the
// CreateDataSourceFromRedshift operation.
type CreateDataSourceFromRedshiftInput struct {
	_ struct{} `type:"structure"`

	// The compute statistics for a DataSource. The statistics are generated from
	// the observation data referenced by a DataSource. Amazon ML uses the statistics
	// internally during MLModel training. This parameter must be set to true if
	// the DataSource needs to be used for MLModel training.
	ComputeStatistics *bool `type:"boolean"`

	// A user-supplied ID that uniquely identifies the DataSource.
	//
	// DataSourceId is a required field
	DataSourceId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the DataSource.
	DataSourceName *string `type:"string"`

	// The data specification of an Amazon Redshift DataSource:
	//
	//    * DatabaseInformation - DatabaseName - The name of the Amazon Redshift
	//    database. ClusterIdentifier - The unique ID for the Amazon Redshift cluster.
	//
	//    * DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials
	//    that are used to connect to the Amazon Redshift database.
	//
	//    * SelectSqlQuery - The query that is used to retrieve the observation
	//    data for the Datasource.
	//
	//    * S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location
	//    for staging Amazon Redshift data. The data retrieved from Amazon Redshift
	//    using the SelectSqlQuery query is stored in this location.
	//
	//    * DataSchemaUri - The Amazon S3 location of the DataSchema.
	//
	//    * DataSchema - A JSON string representing the schema. This is not required
	//    if DataSchemaUri is specified.
	//
	//    * DataRearrangement - A JSON string that represents the splitting and
	//    rearrangement requirements for the DataSource.
	//
	// Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"
	//
	// DataSpec is a required field
	DataSpec *RedshiftDataSpec `type:"structure" required:"true"`

	// A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the
	// role on behalf of the user to create the following:
	//
	// A security group to allow Amazon ML to execute the SelectSqlQuery query on
	// an Amazon Redshift cluster
	//
	// An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the
	// S3StagingLocation
	//
	// RoleARN is a required field
	RoleARN *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s CreateDataSourceFromRedshiftInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDataSourceFromRedshiftInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateDataSourceFromRedshiftInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceFromRedshiftInput"}
	if s.DataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
	}
	if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
	}
	if s.DataSpec == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSpec"))
	}
	if s.RoleARN == nil {
		invalidParams.Add(request.NewErrParamRequired("RoleARN"))
	}
	if s.RoleARN != nil && len(*s.RoleARN) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
	}
	if s.DataSpec != nil {
		// Nested spec errors are reported under the "DataSpec" prefix.
		if err := s.DataSpec.Validate(); err != nil {
			invalidParams.AddNested("DataSpec", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetComputeStatistics sets the ComputeStatistics field's value.
func (s *CreateDataSourceFromRedshiftInput) SetComputeStatistics(v bool) *CreateDataSourceFromRedshiftInput {
	s.ComputeStatistics = &v
	return s
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *CreateDataSourceFromRedshiftInput) SetDataSourceId(v string) *CreateDataSourceFromRedshiftInput {
	s.DataSourceId = &v
	return s
}

// SetDataSourceName sets the DataSourceName field's value.
func (s *CreateDataSourceFromRedshiftInput) SetDataSourceName(v string) *CreateDataSourceFromRedshiftInput {
	s.DataSourceName = &v
	return s
}

// SetDataSpec sets the DataSpec field's value.
func (s *CreateDataSourceFromRedshiftInput) SetDataSpec(v *RedshiftDataSpec) *CreateDataSourceFromRedshiftInput {
	s.DataSpec = v
	return s
}

// SetRoleARN sets the RoleARN field's value.
func (s *CreateDataSourceFromRedshiftInput) SetRoleARN(v string) *CreateDataSourceFromRedshiftInput {
	s.RoleARN = &v
	return s
}

// Represents the output of a CreateDataSourceFromRedshift operation, and is
// an acknowledgement that Amazon ML received the request.
//
// The CreateDataSourceFromRedshift operation is asynchronous. You can poll
// for updates by using the GetBatchPrediction operation and checking the Status
// parameter.
type CreateDataSourceFromRedshiftOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the datasource. This value should
	// be identical to the value of the DataSourceID in the request.
	DataSourceId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s CreateDataSourceFromRedshiftOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDataSourceFromRedshiftOutput) GoString() string {
	return s.String()
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *CreateDataSourceFromRedshiftOutput) SetDataSourceId(v string) *CreateDataSourceFromRedshiftOutput {
	s.DataSourceId = &v
	return s
}

// CreateDataSourceFromS3Input is the request shape for the CreateDataSourceFromS3
// operation.
type CreateDataSourceFromS3Input struct {
	_ struct{} `type:"structure"`

	// The compute statistics for a DataSource. The statistics are generated from
	// the observation data referenced by a DataSource. Amazon ML uses the statistics
	// internally during MLModel training. This parameter must be set to true if
	// the DataSource needs to be used for MLModel training.
	ComputeStatistics *bool `type:"boolean"`

	// A user-supplied identifier that uniquely identifies the DataSource.
	//
	// DataSourceId is a required field
	DataSourceId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the DataSource.
	DataSourceName *string `type:"string"`

	// The data specification of a DataSource:
	//
	//    * DataLocationS3 - The Amazon S3 location of the observation data.
	//
	//    * DataSchemaLocationS3 - The Amazon S3 location of the DataSchema.
	//
	//    * DataSchema - A JSON string representing the schema. This is not required
	//    if DataSchemaUri is specified.
	//
	//    * DataRearrangement - A JSON string that represents the splitting and
	//    rearrangement requirements for the Datasource.
	//
	// Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"
	//
	// DataSpec is a required field
	DataSpec *S3DataSpec `type:"structure" required:"true"`
}

// String returns the string representation
func (s CreateDataSourceFromS3Input) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDataSourceFromS3Input) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateDataSourceFromS3Input) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceFromS3Input"}
	if s.DataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
	}
	if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
	}
	if s.DataSpec == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSpec"))
	}
	if s.DataSpec != nil {
		// Nested spec errors are reported under the "DataSpec" prefix.
		if err := s.DataSpec.Validate(); err != nil {
			invalidParams.AddNested("DataSpec", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetComputeStatistics sets the ComputeStatistics field's value.
func (s *CreateDataSourceFromS3Input) SetComputeStatistics(v bool) *CreateDataSourceFromS3Input {
	s.ComputeStatistics = &v
	return s
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *CreateDataSourceFromS3Input) SetDataSourceId(v string) *CreateDataSourceFromS3Input {
	s.DataSourceId = &v
	return s
}

// SetDataSourceName sets the DataSourceName field's value.
func (s *CreateDataSourceFromS3Input) SetDataSourceName(v string) *CreateDataSourceFromS3Input {
	s.DataSourceName = &v
	return s
}

// SetDataSpec sets the DataSpec field's value.
func (s *CreateDataSourceFromS3Input) SetDataSpec(v *S3DataSpec) *CreateDataSourceFromS3Input {
	s.DataSpec = v
	return s
}

// Represents the output of a CreateDataSourceFromS3 operation, and is an acknowledgement
// that Amazon ML received the request.
//
// The CreateDataSourceFromS3 operation is asynchronous. You can poll for updates
// by using the GetBatchPrediction operation and checking the Status parameter.
type CreateDataSourceFromS3Output struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the DataSource. This value should
	// be identical to the value of the DataSourceID in the request.
	DataSourceId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s CreateDataSourceFromS3Output) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDataSourceFromS3Output) GoString() string {
	return s.String()
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *CreateDataSourceFromS3Output) SetDataSourceId(v string) *CreateDataSourceFromS3Output {
	s.DataSourceId = &v
	return s
}

// CreateEvaluationInput is the request shape for the CreateEvaluation operation.
type CreateEvaluationInput struct {
	_ struct{} `type:"structure"`

	// The ID of the DataSource for the evaluation. The schema of the DataSource
	// must match the schema used to create the MLModel.
	//
	// EvaluationDataSourceId is a required field
	EvaluationDataSourceId *string `min:"1" type:"string" required:"true"`

	// A user-supplied ID that uniquely identifies the Evaluation.
	//
	// EvaluationId is a required field
	EvaluationId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the Evaluation.
	EvaluationName *string `type:"string"`

	// The ID of the MLModel to evaluate.
	//
	// The schema used in creating the MLModel must match the schema of the DataSource
	// used in the Evaluation.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s CreateEvaluationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateEvaluationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateEvaluationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateEvaluationInput"}
	if s.EvaluationDataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("EvaluationDataSourceId"))
	}
	if s.EvaluationDataSourceId != nil && len(*s.EvaluationDataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("EvaluationDataSourceId", 1))
	}
	if s.EvaluationId == nil {
		invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
	}
	if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
	}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEvaluationDataSourceId sets the EvaluationDataSourceId field's value.
func (s *CreateEvaluationInput) SetEvaluationDataSourceId(v string) *CreateEvaluationInput {
	s.EvaluationDataSourceId = &v
	return s
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *CreateEvaluationInput) SetEvaluationId(v string) *CreateEvaluationInput {
	s.EvaluationId = &v
	return s
}

// SetEvaluationName sets the EvaluationName field's value.
func (s *CreateEvaluationInput) SetEvaluationName(v string) *CreateEvaluationInput {
	s.EvaluationName = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *CreateEvaluationInput) SetMLModelId(v string) *CreateEvaluationInput {
	s.MLModelId = &v
	return s
}

// Represents the output of a CreateEvaluation operation, and is an acknowledgement
// that Amazon ML received the request.
//
// CreateEvaluation operation is asynchronous. You can poll for status updates
// by using the GetEvaluation operation and checking the Status parameter.
type CreateEvaluationOutput struct {
	_ struct{} `type:"structure"`

	// The user-supplied ID that uniquely identifies the Evaluation. This value
	// should be identical to the value of the EvaluationId in the request.
	EvaluationId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s CreateEvaluationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateEvaluationOutput) GoString() string {
	return s.String()
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *CreateEvaluationOutput) SetEvaluationId(v string) *CreateEvaluationOutput {
	s.EvaluationId = &v
	return s
}

// CreateMLModelInput is the request shape for the CreateMLModel operation.
type CreateMLModelInput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the MLModel.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the MLModel.
	MLModelName *string `type:"string"`

	// The category of supervised learning that this MLModel will address. Choose
	// from the following types:
	//
	//    * Choose REGRESSION if the MLModel will be used to predict a numeric value.
	//
	//    * Choose BINARY if the MLModel result has two possible values.
	//    * Choose MULTICLASS if the MLModel result has a limited number of values.
	//
	// For more information, see the Amazon Machine Learning Developer Guide (path_to_url
	//
	// MLModelType is a required field
	MLModelType *string `type:"string" required:"true" enum:"MLModelType"`

	// A list of the training parameters in the MLModel. The list is implemented
	// as a map of key-value pairs.
	//
	// The following is the current set of training parameters:
	//
	//    * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
	//    on the input data, the size of the model might affect its performance.
	//
	// The value is an integer that ranges from 100000 to 2147483648. The default
	// value is 33554432.
	//
	//    * sgd.maxPasses - The number of times that the training process traverses
	//    the observations to build the MLModel. The value is an integer that ranges
	//    from 1 to 10000. The default value is 10.
	//
	//    * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
	//    the data improves a model's ability to find the optimal solution for a
	//    variety of data types. The valid values are auto and none. The default
	//    value is none. We strongly recommend that you shuffle your data.
	//
	//    * sgd.l1RegularizationAmount - The coefficient regularization L1 norm.
	//    It controls overfitting the data by penalizing large coefficients. This
	//    tends to drive coefficients to zero, resulting in a sparse feature set.
	//    If you use this parameter, start by specifying a small value, such as
	//    1.0E-08.
	//
	// The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	// not use L1 normalization. This parameter can't be used when L2 is specified.
	// Use this parameter sparingly.
	//
	//    * sgd.l2RegularizationAmount - The coefficient regularization L2 norm.
	//    It controls overfitting the data by penalizing large coefficients. This
	//    tends to drive coefficients to small, nonzero values. If you use this
	//    parameter, start by specifying a small value, such as 1.0E-08.
	//
	// The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	// not use L2 normalization. This parameter can't be used when L1 is specified.
	// Use this parameter sparingly.
	Parameters map[string]*string `type:"map"`

	// The data recipe for creating the MLModel. You must specify either the recipe
	// or its URI. If you don't specify a recipe or its URI, Amazon ML creates a
	// default.
	Recipe *string `type:"string"`

	// The Amazon Simple Storage Service (Amazon S3) location and file name that
	// contains the MLModel recipe. You must specify either the recipe or its URI.
	// If you don't specify a recipe or its URI, Amazon ML creates a default.
	RecipeUri *string `type:"string"`

	// The DataSource that points to the training data.
	//
	// TrainingDataSourceId is a required field
	TrainingDataSourceId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s CreateMLModelInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateMLModelInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateMLModelInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateMLModelInput"}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}
	if s.MLModelType == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelType"))
	}
	if s.TrainingDataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("TrainingDataSourceId"))
	}
	if s.TrainingDataSourceId != nil && len(*s.TrainingDataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("TrainingDataSourceId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMLModelId sets the MLModelId field's value.
func (s *CreateMLModelInput) SetMLModelId(v string) *CreateMLModelInput {
	s.MLModelId = &v
	return s
}

// SetMLModelName sets the MLModelName field's value.
func (s *CreateMLModelInput) SetMLModelName(v string) *CreateMLModelInput {
	s.MLModelName = &v
	return s
}

// SetMLModelType sets the MLModelType field's value.
func (s *CreateMLModelInput) SetMLModelType(v string) *CreateMLModelInput {
	s.MLModelType = &v
	return s
}

// SetParameters sets the Parameters field's value.
func (s *CreateMLModelInput) SetParameters(v map[string]*string) *CreateMLModelInput {
	s.Parameters = v
	return s
}

// SetRecipe sets the Recipe field's value.
func (s *CreateMLModelInput) SetRecipe(v string) *CreateMLModelInput {
	s.Recipe = &v
	return s
}

// SetRecipeUri sets the RecipeUri field's value.
func (s *CreateMLModelInput) SetRecipeUri(v string) *CreateMLModelInput {
	s.RecipeUri = &v
	return s
}

// SetTrainingDataSourceId sets the TrainingDataSourceId field's value.
// The receiver is returned to allow chained setter calls.
func (s *CreateMLModelInput) SetTrainingDataSourceId(v string) *CreateMLModelInput {
	s.TrainingDataSourceId = &v
	return s
}

// Represents the output of a CreateMLModel operation, and is an acknowledgement
// that Amazon ML received the request.
//
// The CreateMLModel operation is asynchronous. You can poll for status updates
// by using the GetMLModel operation and checking the Status parameter.
type CreateMLModelOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the MLModel. This value should
	// be identical to the value of the MLModelId in the request.
	MLModelId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s CreateMLModelOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateMLModelOutput) GoString() string {
	return s.String()
}

// SetMLModelId sets the MLModelId field's value.
func (s *CreateMLModelOutput) SetMLModelId(v string) *CreateMLModelOutput {
	s.MLModelId = &v
	return s
}

// CreateRealtimeEndpointInput identifies, by ID, the MLModel for which a
// real-time endpoint is requested.
type CreateRealtimeEndpointInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the MLModel during creation.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s CreateRealtimeEndpointInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateRealtimeEndpointInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateRealtimeEndpointInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateRealtimeEndpointInput"}
	// Both checks mirror the struct tags: required:"true" and min:"1".
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMLModelId sets the MLModelId field's value.
func (s *CreateRealtimeEndpointInput) SetMLModelId(v string) *CreateRealtimeEndpointInput {
	s.MLModelId = &v
	return s
}

// Represents the output of a CreateRealtimeEndpoint operation.
//
// The result contains the MLModelId and the endpoint information for the MLModel.
//
// The endpoint information includes the URI of the MLModel; that is, the location
// to send online prediction requests for the specified MLModel.
type CreateRealtimeEndpointOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the MLModel. This value should
	// be identical to the value of the MLModelId in the request.
	MLModelId *string `min:"1" type:"string"`

	// The endpoint information of the MLModel.
	RealtimeEndpointInfo *RealtimeEndpointInfo `type:"structure"`
}

// String returns the string representation
func (s CreateRealtimeEndpointOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateRealtimeEndpointOutput) GoString() string {
	return s.String()
}

// SetMLModelId sets the MLModelId field's value.
func (s *CreateRealtimeEndpointOutput) SetMLModelId(v string) *CreateRealtimeEndpointOutput {
	s.MLModelId = &v
	return s
}

// SetRealtimeEndpointInfo sets the RealtimeEndpointInfo field's value.
func (s *CreateRealtimeEndpointOutput) SetRealtimeEndpointInfo(v *RealtimeEndpointInfo) *CreateRealtimeEndpointOutput {
	s.RealtimeEndpointInfo = v
	return s
}

// Represents the output of the GetDataSource operation.
//
// The content consists of the detailed metadata and data file information and
// the current status of the DataSource.
type DataSource struct {
	_ struct{} `type:"structure"`

	// The parameter is true if statistics need to be generated from the observation
	// data.
	ComputeStatistics *bool `type:"boolean"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the DataSource was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account from which the DataSource was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string `type:"string"`

	// The location and name of the data in Amazon Simple Storage Service (Amazon
	// S3) that is used by a DataSource.
	DataLocationS3 *string `type:"string"`

	// A JSON string that represents the splitting and rearrangement requirement
	// used when this DataSource was created.
	DataRearrangement *string `type:"string"`

	// The total number of observations contained in the data files that the DataSource
	// references.
	//
	// NOTE(review): the field name suggests a size in bytes while the service
	// doc text above describes an observation count — confirm against the
	// Amazon ML API model before relying on either reading.
	DataSizeInBytes *int64 `type:"long"`

	// The ID that is assigned to the DataSource during creation.
	DataSourceId *string `min:"1" type:"string"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The time of the most recent edit to the DataSource. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// A description of the most recent details about creating the DataSource.
	Message *string `type:"string"`

	// A user-supplied name or description of the DataSource.
	Name *string `type:"string"`

	// The number of data files referenced by the DataSource.
	NumberOfFiles *int64 `type:"long"`

	// The datasource details that are specific to Amazon RDS.
	RDSMetadata *RDSMetadata `type:"structure"`

	// Describes the DataSource details specific to Amazon Redshift.
	RedshiftMetadata *RedshiftMetadata `type:"structure"`

	// The Amazon Resource Name (ARN) of an AWS IAM Role (path_to_url#roles-about-termsandconcepts),
	// such as the following: arn:aws:iam::account:role/rolename.
	RoleARN *string `min:"1" type:"string"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The current status of the DataSource. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    create a DataSource.
	//    * INPROGRESS - The creation process is underway.
	//    * FAILED - The request to create a DataSource did not run to completion.
	//    It is not usable.
	//    * COMPLETED - The creation process completed successfully.
	//    * DELETED - The DataSource is marked as deleted. It is not usable.
	Status *string `type:"string" enum:"EntityStatus"`
}

// String returns the string representation
func (s DataSource) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DataSource) GoString() string {
	return s.String()
}

// SetComputeStatistics sets the ComputeStatistics field's value.
func (s *DataSource) SetComputeStatistics(v bool) *DataSource {
	s.ComputeStatistics = &v
	return s
}

// SetComputeTime sets the ComputeTime field's value.
func (s *DataSource) SetComputeTime(v int64) *DataSource {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *DataSource) SetCreatedAt(v time.Time) *DataSource {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *DataSource) SetCreatedByIamUser(v string) *DataSource {
	s.CreatedByIamUser = &v
	return s
}

// SetDataLocationS3 sets the DataLocationS3 field's value.
func (s *DataSource) SetDataLocationS3(v string) *DataSource {
	s.DataLocationS3 = &v
	return s
}

// SetDataRearrangement sets the DataRearrangement field's value.
func (s *DataSource) SetDataRearrangement(v string) *DataSource {
	s.DataRearrangement = &v
	return s
}

// SetDataSizeInBytes sets the DataSizeInBytes field's value.
func (s *DataSource) SetDataSizeInBytes(v int64) *DataSource {
	s.DataSizeInBytes = &v
	return s
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *DataSource) SetDataSourceId(v string) *DataSource {
	s.DataSourceId = &v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *DataSource) SetFinishedAt(v time.Time) *DataSource {
	s.FinishedAt = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *DataSource) SetLastUpdatedAt(v time.Time) *DataSource {
	s.LastUpdatedAt = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *DataSource) SetMessage(v string) *DataSource {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *DataSource) SetName(v string) *DataSource {
	s.Name = &v
	return s
}

// SetNumberOfFiles sets the NumberOfFiles field's value.
func (s *DataSource) SetNumberOfFiles(v int64) *DataSource {
	s.NumberOfFiles = &v
	return s
}

// SetRDSMetadata sets the RDSMetadata field's value.
// Note: unlike the scalar setters, the pointer argument is stored directly,
// so the caller and the struct share the same RDSMetadata value.
func (s *DataSource) SetRDSMetadata(v *RDSMetadata) *DataSource {
	s.RDSMetadata = v
	return s
}

// SetRedshiftMetadata sets the RedshiftMetadata field's value.
func (s *DataSource) SetRedshiftMetadata(v *RedshiftMetadata) *DataSource {
	s.RedshiftMetadata = v
	return s
}

// SetRoleARN sets the RoleARN field's value.
func (s *DataSource) SetRoleARN(v string) *DataSource {
	s.RoleARN = &v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *DataSource) SetStartedAt(v time.Time) *DataSource {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *DataSource) SetStatus(v string) *DataSource {
	s.Status = &v
	return s
}

// DeleteBatchPredictionInput identifies, by ID, the BatchPrediction that the
// DeleteBatchPrediction operation should mark as deleted.
type DeleteBatchPredictionInput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the BatchPrediction.
	//
	// BatchPredictionId is a required field
	BatchPredictionId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteBatchPredictionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteBatchPredictionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBatchPredictionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteBatchPredictionInput"}
	if s.BatchPredictionId == nil {
		invalidParams.Add(request.NewErrParamRequired("BatchPredictionId"))
	}
	if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *DeleteBatchPredictionInput) SetBatchPredictionId(v string) *DeleteBatchPredictionInput {
	s.BatchPredictionId = &v
	return s
}

// Represents the output of a DeleteBatchPrediction operation.
//
// You can use the GetBatchPrediction operation and check the value of the Status
// parameter to see whether a BatchPrediction is marked as DELETED.
type DeleteBatchPredictionOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the BatchPrediction. This value
	// should be identical to the value of the BatchPredictionID in the request.
	BatchPredictionId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DeleteBatchPredictionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteBatchPredictionOutput) GoString() string {
	return s.String()
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *DeleteBatchPredictionOutput) SetBatchPredictionId(v string) *DeleteBatchPredictionOutput {
	s.BatchPredictionId = &v
	return s
}

// DeleteDataSourceInput identifies, by ID, the DataSource that the
// DeleteDataSource operation should mark as deleted.
type DeleteDataSourceInput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the DataSource.
	//
	// DataSourceId is a required field
	DataSourceId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteDataSourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteDataSourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteDataSourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteDataSourceInput"}
	if s.DataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
	}
	if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *DeleteDataSourceInput) SetDataSourceId(v string) *DeleteDataSourceInput {
	s.DataSourceId = &v
	return s
}

// Represents the output of a DeleteDataSource operation.
type DeleteDataSourceOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the DataSource. This value should
	// be identical to the value of the DataSourceID in the request.
	DataSourceId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DeleteDataSourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteDataSourceOutput) GoString() string {
	return s.String()
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *DeleteDataSourceOutput) SetDataSourceId(v string) *DeleteDataSourceOutput {
	s.DataSourceId = &v
	return s
}

// DeleteEvaluationInput identifies, by ID, the Evaluation that the
// DeleteEvaluation operation should mark as deleted.
type DeleteEvaluationInput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the Evaluation to delete.
	//
	// EvaluationId is a required field
	EvaluationId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteEvaluationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteEvaluationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteEvaluationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteEvaluationInput"}
	if s.EvaluationId == nil {
		invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
	}
	if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *DeleteEvaluationInput) SetEvaluationId(v string) *DeleteEvaluationInput {
	s.EvaluationId = &v
	return s
}

// Represents the output of a DeleteEvaluation operation. The output indicates
// that Amazon Machine Learning (Amazon ML) received the request.
//
// You can use the GetEvaluation operation and check the value of the Status
// parameter to see whether an Evaluation is marked as DELETED.
type DeleteEvaluationOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the Evaluation. This value should
	// be identical to the value of the EvaluationId in the request.
	EvaluationId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DeleteEvaluationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteEvaluationOutput) GoString() string {
	return s.String()
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *DeleteEvaluationOutput) SetEvaluationId(v string) *DeleteEvaluationOutput {
	s.EvaluationId = &v
	return s
}

// DeleteMLModelInput identifies, by ID, the MLModel that the DeleteMLModel
// operation should mark as deleted.
type DeleteMLModelInput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the MLModel.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteMLModelInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteMLModelInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteMLModelInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteMLModelInput"}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMLModelId sets the MLModelId field's value.
func (s *DeleteMLModelInput) SetMLModelId(v string) *DeleteMLModelInput {
	s.MLModelId = &v
	return s
}

// Represents the output of a DeleteMLModel operation.
//
// You can use the GetMLModel operation and check the value of the Status parameter
// to see whether an MLModel is marked as DELETED.
type DeleteMLModelOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the MLModel. This value should
	// be identical to the value of the MLModelID in the request.
	MLModelId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DeleteMLModelOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteMLModelOutput) GoString() string {
	return s.String()
}

// SetMLModelId sets the MLModelId field's value.
func (s *DeleteMLModelOutput) SetMLModelId(v string) *DeleteMLModelOutput {
	s.MLModelId = &v
	return s
}

// DeleteRealtimeEndpointInput identifies, by ID, the MLModel whose real-time
// endpoint should be deleted.
type DeleteRealtimeEndpointInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the MLModel during creation.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteRealtimeEndpointInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteRealtimeEndpointInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteRealtimeEndpointInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteRealtimeEndpointInput"}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMLModelId sets the MLModelId field's value.
func (s *DeleteRealtimeEndpointInput) SetMLModelId(v string) *DeleteRealtimeEndpointInput {
	s.MLModelId = &v
	return s
}

// Represents the output of a DeleteRealtimeEndpoint operation.
//
// The result contains the MLModelId and the endpoint information for the MLModel.
type DeleteRealtimeEndpointOutput struct {
	_ struct{} `type:"structure"`

	// A user-supplied ID that uniquely identifies the MLModel. This value should
	// be identical to the value of the MLModelId in the request.
	MLModelId *string `min:"1" type:"string"`

	// The endpoint information of the MLModel.
	RealtimeEndpointInfo *RealtimeEndpointInfo `type:"structure"`
}

// String returns the string representation
func (s DeleteRealtimeEndpointOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteRealtimeEndpointOutput) GoString() string {
	return s.String()
}

// SetMLModelId sets the MLModelId field's value.
func (s *DeleteRealtimeEndpointOutput) SetMLModelId(v string) *DeleteRealtimeEndpointOutput {
	s.MLModelId = &v
	return s
}

// SetRealtimeEndpointInfo sets the RealtimeEndpointInfo field's value.
func (s *DeleteRealtimeEndpointOutput) SetRealtimeEndpointInfo(v *RealtimeEndpointInfo) *DeleteRealtimeEndpointOutput {
	s.RealtimeEndpointInfo = v
	return s
}

// DeleteTagsInput specifies the ML object and the set of tag keys that the
// DeleteTags operation should remove.
type DeleteTagsInput struct {
	_ struct{} `type:"structure"`

	// The ID of the tagged ML object. For example, exampleModelId.
	//
	// ResourceId is a required field
	ResourceId *string `min:"1" type:"string" required:"true"`

	// The type of the tagged ML object.
	//
	// ResourceType is a required field
	ResourceType *string `type:"string" required:"true" enum:"TaggableResourceType"`

	// One or more tags to delete.
	//
	// TagKeys is a required field
	TagKeys []*string `type:"list" required:"true"`
}

// String returns the string representation
func (s DeleteTagsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteTagsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteTagsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"}
	if s.ResourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
	}
	if s.ResourceId != nil && len(*s.ResourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1))
	}
	if s.ResourceType == nil {
		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
	}
	if s.TagKeys == nil {
		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetResourceId sets the ResourceId field's value.
func (s *DeleteTagsInput) SetResourceId(v string) *DeleteTagsInput {
	s.ResourceId = &v
	return s
}

// SetResourceType sets the ResourceType field's value.
func (s *DeleteTagsInput) SetResourceType(v string) *DeleteTagsInput {
	s.ResourceType = &v
	return s
}

// SetTagKeys sets the TagKeys field's value.
// The slice is stored directly (not copied), so later mutation of v by the
// caller is visible through the struct.
func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput {
	s.TagKeys = v
	return s
}

// Amazon ML returns the following elements.
type DeleteTagsOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the ML object from which tags were deleted.
	ResourceId *string `min:"1" type:"string"`

	// The type of the ML object from which tags were deleted.
	ResourceType *string `type:"string" enum:"TaggableResourceType"`
}

// String returns the string representation
func (s DeleteTagsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteTagsOutput) GoString() string {
	return s.String()
}

// SetResourceId sets the ResourceId field's value.
func (s *DeleteTagsOutput) SetResourceId(v string) *DeleteTagsOutput {
	s.ResourceId = &v
	return s
}

// SetResourceType sets the ResourceType field's value.
func (s *DeleteTagsOutput) SetResourceType(v string) *DeleteTagsOutput {
	s.ResourceType = &v
	return s
}

// DescribeBatchPredictionsInput carries the filter, sort, and pagination
// parameters for a DescribeBatchPredictions request.
type DescribeBatchPredictionsInput struct {
	_ struct{} `type:"structure"`

	// The equal to operator. The BatchPrediction results will have FilterVariable
	// values that exactly match the value specified with EQ.
	EQ *string `type:"string"`

	// Use one of the following variables to filter a list of BatchPrediction:
	//
	//    * CreatedAt - Sets the search criteria to the BatchPrediction creation
	//    date.
	//    * Status - Sets the search criteria to the BatchPrediction status.
	//    * Name - Sets the search criteria to the contents of the BatchPredictionName.
	//    * IAMUser - Sets the search criteria to the user account that invoked
	//    the BatchPrediction creation.
	//    * MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
	//    * DataSourceId - Sets the search criteria to the DataSource used in the
	//    BatchPrediction.
	//    * DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction.
	//    The URL can identify either a file or an Amazon Simple Storage Solution
	//    (Amazon S3) bucket or directory.
	FilterVariable *string `type:"string" enum:"BatchPredictionFilterVariable"`

	// The greater than or equal to operator. The BatchPrediction results will have
	// FilterVariable values that are greater than or equal to the value specified
	// with GE.
	GE *string `type:"string"`

	// The greater than operator. The BatchPrediction results will have FilterVariable
	// values that are greater than the value specified with GT.
	GT *string `type:"string"`

	// The less than or equal to operator. The BatchPrediction results will have
	// FilterVariable values that are less than or equal to the value specified
	// with LE.
	LE *string `type:"string"`

	// The less than operator. The BatchPrediction results will have FilterVariable
	// values that are less than the value specified with LT.
	LT *string `type:"string"`

	// The number of pages of information to include in the result. The range of
	// acceptable values is 1 through 100. The default value is 100.
	Limit *int64 `min:"1" type:"integer"`

	// The not equal to operator. The BatchPrediction results will have FilterVariable
	// values not equal to the value specified with NE.
	NE *string `type:"string"`

	// An ID of the page in the paginated results.
	NextToken *string `type:"string"`

	// A string that is found at the beginning of a variable, such as Name or Id.
	//
	// For example, a Batch Prediction operation could have the Name2014-09-09-HolidayGiftMailer.
	// To search for this BatchPrediction, select Name for the FilterVariable and
	// any of the following strings for the Prefix:
	//
	//    * 2014-09
	//
	//    * 2014-09-09
	//
	//    * 2014-09-09-Holiday
	Prefix *string `type:"string"`

	// A two-value parameter that determines the sequence of the resulting list
	// of MLModels.
	//
	//    * asc - Arranges the list in ascending order (A-Z, 0-9).
	//    * dsc - Arranges the list in descending order (Z-A, 9-0).
	// Results are sorted by FilterVariable.
	SortOrder *string `type:"string" enum:"SortOrder"`
}

// String returns the string representation
func (s DescribeBatchPredictionsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeBatchPredictionsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only Limit carries a client-side constraint (min:"1"); the service enforces
// the upper bound and the remaining field semantics.
func (s *DescribeBatchPredictionsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeBatchPredictionsInput"}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEQ sets the EQ field's value.
func (s *DescribeBatchPredictionsInput) SetEQ(v string) *DescribeBatchPredictionsInput {
	s.EQ = &v
	return s
}

// SetFilterVariable sets the FilterVariable field's value.
func (s *DescribeBatchPredictionsInput) SetFilterVariable(v string) *DescribeBatchPredictionsInput {
	s.FilterVariable = &v
	return s
}

// SetGE sets the GE field's value.
func (s *DescribeBatchPredictionsInput) SetGE(v string) *DescribeBatchPredictionsInput {
	s.GE = &v
	return s
}

// SetGT sets the GT field's value.
func (s *DescribeBatchPredictionsInput) SetGT(v string) *DescribeBatchPredictionsInput {
	s.GT = &v
	return s
}

// SetLE sets the LE field's value.
func (s *DescribeBatchPredictionsInput) SetLE(v string) *DescribeBatchPredictionsInput {
	s.LE = &v
	return s
}

// SetLT sets the LT field's value.
func (s *DescribeBatchPredictionsInput) SetLT(v string) *DescribeBatchPredictionsInput {
	s.LT = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeBatchPredictionsInput) SetLimit(v int64) *DescribeBatchPredictionsInput {
	s.Limit = &v
	return s
}

// SetNE sets the NE field's value.
func (s *DescribeBatchPredictionsInput) SetNE(v string) *DescribeBatchPredictionsInput {
	s.NE = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeBatchPredictionsInput) SetNextToken(v string) *DescribeBatchPredictionsInput {
	s.NextToken = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *DescribeBatchPredictionsInput) SetPrefix(v string) *DescribeBatchPredictionsInput {
	s.Prefix = &v
	return s
}

// SetSortOrder sets the SortOrder field's value.
func (s *DescribeBatchPredictionsInput) SetSortOrder(v string) *DescribeBatchPredictionsInput {
	s.SortOrder = &v
	return s
}

// Represents the output of a DescribeBatchPredictions operation. The content
// is essentially a list of BatchPredictions.
type DescribeBatchPredictionsOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the next page in the paginated results that indicates at least
	// one more page follows.
	NextToken *string `type:"string"`

	// A list of BatchPrediction objects that meet the search criteria.
	Results []*BatchPrediction `type:"list"`
}

// String returns the string representation
func (s DescribeBatchPredictionsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeBatchPredictionsOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeBatchPredictionsOutput) SetNextToken(v string) *DescribeBatchPredictionsOutput {
	s.NextToken = &v
	return s
}

// SetResults sets the Results field's value.
func (s *DescribeBatchPredictionsOutput) SetResults(v []*BatchPrediction) *DescribeBatchPredictionsOutput {
	s.Results = v
	return s
}

// DescribeDataSourcesInput carries the filter, sort, and pagination parameters
// for a DescribeDataSources request.
type DescribeDataSourcesInput struct {
	_ struct{} `type:"structure"`

	// The equal to operator. The DataSource results will have FilterVariable values
	// that exactly match the value specified with EQ.
	EQ *string `type:"string"`

	// Use one of the following variables to filter a list of DataSource:
	//
	//    * CreatedAt - Sets the search criteria to DataSource creation dates.
	//    * Status - Sets the search criteria to DataSource statuses.
	//    * Name - Sets the search criteria to the contents of DataSourceName.
	//    * DataUri - Sets the search criteria to the URI of data files used to
	//    create the DataSource. The URI can identify either a file or an Amazon
	//    Simple Storage Service (Amazon S3) bucket or directory.
	//    * IAMUser - Sets the search criteria to the user account that invoked
	//    the DataSource creation.
	FilterVariable *string `type:"string" enum:"DataSourceFilterVariable"`

	// The greater than or equal to operator. The DataSource results will have FilterVariable
	// values that are greater than or equal to the value specified with GE.
	GE *string `type:"string"`

	// The greater than operator. The DataSource results will have FilterVariable
	// values that are greater than the value specified with GT.
	GT *string `type:"string"`

	// The less than or equal to operator. The DataSource results will have FilterVariable
	// values that are less than or equal to the value specified with LE.
	LE *string `type:"string"`

	// The less than operator. The DataSource results will have FilterVariable values
	// that are less than the value specified with LT.
	LT *string `type:"string"`

	// The maximum number of DataSource to include in the result.
	Limit *int64 `min:"1" type:"integer"`

	// The not equal to operator. The DataSource results will have FilterVariable
	// values not equal to the value specified with NE.
	NE *string `type:"string"`

	// The ID of the page in the paginated results.
	NextToken *string `type:"string"`

	// A string that is found at the beginning of a variable, such as Name or Id.
	//
	// For example, a DataSource could have the Name2014-09-09-HolidayGiftMailer.
	// To search for this DataSource, select Name for the FilterVariable and any
	// of the following strings for the Prefix:
	//
	//    * 2014-09
	//
	//    * 2014-09-09
	//
	//    * 2014-09-09-Holiday
	Prefix *string `type:"string"`

	// A two-value parameter that determines the sequence of the resulting list
	// of DataSource.
	//
	//    * asc - Arranges the list in ascending order (A-Z, 0-9).
	//    * dsc - Arranges the list in descending order (Z-A, 9-0).
	// Results are sorted by FilterVariable.
	SortOrder *string `type:"string" enum:"SortOrder"`
}

// String returns the string representation
func (s DescribeDataSourcesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeDataSourcesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only Limit carries a client-side constraint (min:"1").
func (s *DescribeDataSourcesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeDataSourcesInput"}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEQ sets the EQ field's value.
func (s *DescribeDataSourcesInput) SetEQ(v string) *DescribeDataSourcesInput {
	s.EQ = &v
	return s
}

// SetFilterVariable sets the FilterVariable field's value.
func (s *DescribeDataSourcesInput) SetFilterVariable(v string) *DescribeDataSourcesInput {
	s.FilterVariable = &v
	return s
}

// SetGE sets the GE field's value.
func (s *DescribeDataSourcesInput) SetGE(v string) *DescribeDataSourcesInput {
	s.GE = &v
	return s
}

// SetGT sets the GT field's value.
func (s *DescribeDataSourcesInput) SetGT(v string) *DescribeDataSourcesInput {
	s.GT = &v
	return s
}

// SetLE sets the LE field's value.
func (s *DescribeDataSourcesInput) SetLE(v string) *DescribeDataSourcesInput {
	s.LE = &v
	return s
}

// SetLT sets the LT field's value.
func (s *DescribeDataSourcesInput) SetLT(v string) *DescribeDataSourcesInput {
	s.LT = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeDataSourcesInput) SetLimit(v int64) *DescribeDataSourcesInput {
	s.Limit = &v
	return s
}

// SetNE sets the NE field's value.
func (s *DescribeDataSourcesInput) SetNE(v string) *DescribeDataSourcesInput {
	s.NE = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeDataSourcesInput) SetNextToken(v string) *DescribeDataSourcesInput {
	s.NextToken = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *DescribeDataSourcesInput) SetPrefix(v string) *DescribeDataSourcesInput {
	s.Prefix = &v
	return s
}

// SetSortOrder sets the SortOrder field's value.
func (s *DescribeDataSourcesInput) SetSortOrder(v string) *DescribeDataSourcesInput {
	s.SortOrder = &v
	return s
}

// Represents the query results from a DescribeDataSources operation. The content
// is essentially a list of DataSource.
type DescribeDataSourcesOutput struct {
	_ struct{} `type:"structure"`

	// An ID of the next page in the paginated results that indicates at least one
	// more page follows.
	NextToken *string `type:"string"`

	// A list of DataSource that meet the search criteria.
	Results []*DataSource `type:"list"`
}

// String returns the string representation
func (s DescribeDataSourcesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeDataSourcesOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeDataSourcesOutput) SetNextToken(v string) *DescribeDataSourcesOutput {
	s.NextToken = &v
	return s
}

// SetResults sets the Results field's value.
func (s *DescribeDataSourcesOutput) SetResults(v []*DataSource) *DescribeDataSourcesOutput {
	s.Results = v
	return s
}

// Input shape for the DescribeEvaluations operation: optional comparison
// operators applied to FilterVariable, plus pagination and sort controls.
type DescribeEvaluationsInput struct {
	_ struct{} `type:"structure"`

	// The equal to operator. The Evaluation results will have FilterVariable values
	// that exactly match the value specified with EQ.
	EQ *string `type:"string"`

	// Use one of the following variable to filter a list of Evaluation objects:
	//
	//    * CreatedAt - Sets the search criteria to the Evaluation creation date.
	//
	//    * Status - Sets the search criteria to the Evaluation status.
	//    * Name - Sets the search criteria to the contents of EvaluationName.
	//    * IAMUser - Sets the search criteria to the user account that invoked
	//    an Evaluation.
	//    * MLModelId - Sets the search criteria to the MLModel that was evaluated.
	//
	//    * DataSourceId - Sets the search criteria to the DataSource used in Evaluation.
	//
	//    * DataUri - Sets the search criteria to the data file(s) used in Evaluation.
	//    The URL can identify either a file or an Amazon Simple Storage Solution
	//    (Amazon S3) bucket or directory.
	FilterVariable *string `type:"string" enum:"EvaluationFilterVariable"`

	// The greater than or equal to operator. The Evaluation results will have FilterVariable
	// values that are greater than or equal to the value specified with GE.
	GE *string `type:"string"`

	// The greater than operator. The Evaluation results will have FilterVariable
	// values that are greater than the value specified with GT.
	GT *string `type:"string"`

	// The less than or equal to operator. The Evaluation results will have FilterVariable
	// values that are less than or equal to the value specified with LE.
	LE *string `type:"string"`

	// The less than operator. The Evaluation results will have FilterVariable values
	// that are less than the value specified with LT.
	LT *string `type:"string"`

	// The maximum number of Evaluation to include in the result.
	Limit *int64 `min:"1" type:"integer"`

	// The not equal to operator. The Evaluation results will have FilterVariable
	// values not equal to the value specified with NE.
	NE *string `type:"string"`

	// The ID of the page in the paginated results.
	NextToken *string `type:"string"`

	// A string that is found at the beginning of a variable, such as Name or Id.
	//
	// For example, an Evaluation could have the Name2014-09-09-HolidayGiftMailer.
	// To search for this Evaluation, select Name for the FilterVariable and any
	// of the following strings for the Prefix:
	//
	//    * 2014-09
	//
	//    * 2014-09-09
	//
	//    * 2014-09-09-Holiday
	Prefix *string `type:"string"`

	// A two-value parameter that determines the sequence of the resulting list
	// of Evaluation.
	//
	//    * asc - Arranges the list in ascending order (A-Z, 0-9).
	//    * dsc - Arranges the list in descending order (Z-A, 9-0).
	// Results are sorted by FilterVariable.
	SortOrder *string `type:"string" enum:"SortOrder"`
}

// String returns the string representation
func (s DescribeEvaluationsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeEvaluationsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeEvaluationsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeEvaluationsInput"}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEQ sets the EQ field's value.
// Returns the receiver so calls can be chained.
func (s *DescribeEvaluationsInput) SetEQ(v string) *DescribeEvaluationsInput {
	s.EQ = &v
	return s
}

// SetFilterVariable sets the FilterVariable field's value.
func (s *DescribeEvaluationsInput) SetFilterVariable(v string) *DescribeEvaluationsInput {
	s.FilterVariable = &v
	return s
}

// SetGE sets the GE field's value.
func (s *DescribeEvaluationsInput) SetGE(v string) *DescribeEvaluationsInput {
	s.GE = &v
	return s
}

// SetGT sets the GT field's value.
func (s *DescribeEvaluationsInput) SetGT(v string) *DescribeEvaluationsInput {
	s.GT = &v
	return s
}

// SetLE sets the LE field's value.
func (s *DescribeEvaluationsInput) SetLE(v string) *DescribeEvaluationsInput {
	s.LE = &v
	return s
}

// SetLT sets the LT field's value.
func (s *DescribeEvaluationsInput) SetLT(v string) *DescribeEvaluationsInput {
	s.LT = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeEvaluationsInput) SetLimit(v int64) *DescribeEvaluationsInput {
	s.Limit = &v
	return s
}

// SetNE sets the NE field's value.
func (s *DescribeEvaluationsInput) SetNE(v string) *DescribeEvaluationsInput {
	s.NE = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeEvaluationsInput) SetNextToken(v string) *DescribeEvaluationsInput {
	s.NextToken = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *DescribeEvaluationsInput) SetPrefix(v string) *DescribeEvaluationsInput {
	s.Prefix = &v
	return s
}

// SetSortOrder sets the SortOrder field's value.
func (s *DescribeEvaluationsInput) SetSortOrder(v string) *DescribeEvaluationsInput {
	s.SortOrder = &v
	return s
}

// Represents the query results from a DescribeEvaluations operation. The content
// is essentially a list of Evaluation.
type DescribeEvaluationsOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the next page in the paginated results that indicates at least
	// one more page follows.
	NextToken *string `type:"string"`

	// A list of Evaluation that meet the search criteria.
	Results []*Evaluation `type:"list"`
}

// String returns the string representation
func (s DescribeEvaluationsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeEvaluationsOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeEvaluationsOutput) SetNextToken(v string) *DescribeEvaluationsOutput {
	s.NextToken = &v
	return s
}

// SetResults sets the Results field's value.
func (s *DescribeEvaluationsOutput) SetResults(v []*Evaluation) *DescribeEvaluationsOutput {
	s.Results = v
	return s
}

// Input shape for the DescribeMLModels operation: optional comparison
// operators applied to FilterVariable, plus pagination and sort controls.
type DescribeMLModelsInput struct {
	_ struct{} `type:"structure"`

	// The equal to operator. The MLModel results will have FilterVariable values
	// that exactly match the value specified with EQ.
	EQ *string `type:"string"`

	// Use one of the following variables to filter a list of MLModel:
	//
	//    * CreatedAt - Sets the search criteria to MLModel creation date.
	//    * Status - Sets the search criteria to MLModel status.
	//    * Name - Sets the search criteria to the contents of MLModelName.
	//    * IAMUser - Sets the search criteria to the user account that invoked
	//    the MLModel creation.
	//    * TrainingDataSourceId - Sets the search criteria to the DataSource used
	//    to train one or more MLModel.
	//    * RealtimeEndpointStatus - Sets the search criteria to the MLModel real-time
	//    endpoint status.
	//    * MLModelType - Sets the search criteria to MLModel type: binary, regression,
	//    or multi-class.
	//    * Algorithm - Sets the search criteria to the algorithm that the MLModel
	//    uses.
	//    * TrainingDataURI - Sets the search criteria to the data file(s) used
	//    in training a MLModel. The URL can identify either a file or an Amazon
	//    Simple Storage Service (Amazon S3) bucket or directory.
	FilterVariable *string `type:"string" enum:"MLModelFilterVariable"`

	// The greater than or equal to operator. The MLModel results will have FilterVariable
	// values that are greater than or equal to the value specified with GE.
	GE *string `type:"string"`

	// The greater than operator. The MLModel results will have FilterVariable values
	// that are greater than the value specified with GT.
	GT *string `type:"string"`

	// The less than or equal to operator. The MLModel results will have FilterVariable
	// values that are less than or equal to the value specified with LE.
	LE *string `type:"string"`

	// The less than operator. The MLModel results will have FilterVariable values
	// that are less than the value specified with LT.
	LT *string `type:"string"`

	// The number of pages of information to include in the result. The range of
	// acceptable values is 1 through 100. The default value is 100.
	Limit *int64 `min:"1" type:"integer"`

	// The not equal to operator. The MLModel results will have FilterVariable values
	// not equal to the value specified with NE.
	NE *string `type:"string"`

	// The ID of the page in the paginated results.
	NextToken *string `type:"string"`

	// A string that is found at the beginning of a variable, such as Name or Id.
	//
	// For example, an MLModel could have the Name2014-09-09-HolidayGiftMailer.
	// To search for this MLModel, select Name for the FilterVariable and any of
	// the following strings for the Prefix:
	//
	//    * 2014-09
	//
	//    * 2014-09-09
	//
	//    * 2014-09-09-Holiday
	Prefix *string `type:"string"`

	// A two-value parameter that determines the sequence of the resulting list
	// of MLModel.
	//
	//    * asc - Arranges the list in ascending order (A-Z, 0-9).
	//    * dsc - Arranges the list in descending order (Z-A, 9-0).
	// Results are sorted by FilterVariable.
	SortOrder *string `type:"string" enum:"SortOrder"`
}

// String returns the string representation
func (s DescribeMLModelsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeMLModelsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the min constraint on Limit declared in the struct tags is checked
// client-side.
func (s *DescribeMLModelsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeMLModelsInput"}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEQ sets the EQ field's value.
func (s *DescribeMLModelsInput) SetEQ(v string) *DescribeMLModelsInput {
	s.EQ = &v
	return s
}

// SetFilterVariable sets the FilterVariable field's value.
func (s *DescribeMLModelsInput) SetFilterVariable(v string) *DescribeMLModelsInput {
	s.FilterVariable = &v
	return s
}

// SetGE sets the GE field's value.
func (s *DescribeMLModelsInput) SetGE(v string) *DescribeMLModelsInput {
	s.GE = &v
	return s
}

// SetGT sets the GT field's value.
func (s *DescribeMLModelsInput) SetGT(v string) *DescribeMLModelsInput {
	s.GT = &v
	return s
}

// SetLE sets the LE field's value.
func (s *DescribeMLModelsInput) SetLE(v string) *DescribeMLModelsInput {
	s.LE = &v
	return s
}

// SetLT sets the LT field's value.
func (s *DescribeMLModelsInput) SetLT(v string) *DescribeMLModelsInput {
	s.LT = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeMLModelsInput) SetLimit(v int64) *DescribeMLModelsInput {
	s.Limit = &v
	return s
}

// SetNE sets the NE field's value.
func (s *DescribeMLModelsInput) SetNE(v string) *DescribeMLModelsInput {
	s.NE = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeMLModelsInput) SetNextToken(v string) *DescribeMLModelsInput {
	s.NextToken = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *DescribeMLModelsInput) SetPrefix(v string) *DescribeMLModelsInput {
	s.Prefix = &v
	return s
}

// SetSortOrder sets the SortOrder field's value.
func (s *DescribeMLModelsInput) SetSortOrder(v string) *DescribeMLModelsInput {
	s.SortOrder = &v
	return s
}

// Represents the output of a DescribeMLModels operation. The content is essentially
// a list of MLModel.
type DescribeMLModelsOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the next page in the paginated results that indicates at least
	// one more page follows.
	NextToken *string `type:"string"`

	// A list of MLModel that meet the search criteria.
	Results []*MLModel `type:"list"`
}

// String returns the string representation
func (s DescribeMLModelsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeMLModelsOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeMLModelsOutput) SetNextToken(v string) *DescribeMLModelsOutput {
	s.NextToken = &v
	return s
}

// SetResults sets the Results field's value.
func (s *DescribeMLModelsOutput) SetResults(v []*MLModel) *DescribeMLModelsOutput {
	s.Results = v
	return s
}

// Input shape for the DescribeTags operation. Both fields are required.
type DescribeTagsInput struct {
	_ struct{} `type:"structure"`

	// The ID of the ML object. For example, exampleModelId.
	//
	// ResourceId is a required field
	ResourceId *string `min:"1" type:"string" required:"true"`

	// The type of the ML object.
	//
	// ResourceType is a required field
	ResourceType *string `type:"string" required:"true" enum:"TaggableResourceType"`
}

// String returns the string representation
func (s DescribeTagsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTagsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Checks the required/min constraints declared in the struct tags.
func (s *DescribeTagsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"}
	if s.ResourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
	}
	if s.ResourceId != nil && len(*s.ResourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1))
	}
	if s.ResourceType == nil {
		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetResourceId sets the ResourceId field's value.
func (s *DescribeTagsInput) SetResourceId(v string) *DescribeTagsInput {
	s.ResourceId = &v
	return s
}

// SetResourceType sets the ResourceType field's value.
func (s *DescribeTagsInput) SetResourceType(v string) *DescribeTagsInput {
	s.ResourceType = &v
	return s
}

// Amazon ML returns the following elements.
type DescribeTagsOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the tagged ML object.
	ResourceId *string `min:"1" type:"string"`

	// The type of the tagged ML object.
	ResourceType *string `type:"string" enum:"TaggableResourceType"`

	// A list of tags associated with the ML object.
	Tags []*Tag `type:"list"`
}

// String returns the string representation
func (s DescribeTagsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTagsOutput) GoString() string {
	return s.String()
}

// SetResourceId sets the ResourceId field's value.
func (s *DescribeTagsOutput) SetResourceId(v string) *DescribeTagsOutput {
	s.ResourceId = &v
	return s
}

// SetResourceType sets the ResourceType field's value.
func (s *DescribeTagsOutput) SetResourceType(v string) *DescribeTagsOutput {
	s.ResourceType = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *DescribeTagsOutput) SetTags(v []*Tag) *DescribeTagsOutput {
	s.Tags = v
	return s
}

// Represents the output of GetEvaluation operation.
//
// The content consists of the detailed metadata and data file information and
// the current status of the Evaluation.
type Evaluation struct {
	_ struct{} `type:"structure"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the Evaluation was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account that invoked the evaluation. The account type can be
	// either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string `type:"string"`

	// The ID of the DataSource that is used to evaluate the MLModel.
	EvaluationDataSourceId *string `min:"1" type:"string"`

	// The ID that is assigned to the Evaluation at creation.
	EvaluationId *string `min:"1" type:"string"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The location and name of the data in Amazon Simple Storage Server (Amazon
	// S3) that is used in the evaluation.
	InputDataLocationS3 *string `type:"string"`

	// The time of the most recent edit to the Evaluation. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The ID of the MLModel that is the focus of the evaluation.
	MLModelId *string `min:"1" type:"string"`

	// A description of the most recent details about evaluating the MLModel.
	Message *string `type:"string"`

	// A user-supplied name or description of the Evaluation.
	Name *string `type:"string"`

	// Measurements of how well the MLModel performed, using observations referenced
	// by the DataSource. One of the following metrics is returned, based on the
	// type of the MLModel:
	//
	//    * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
	//    to measure performance.
	//
	//    * RegressionRMSE: A regression MLModel uses the Root Mean Square Error
	//    (RMSE) technique to measure performance. RMSE measures the difference
	//    between predicted and actual values for a single variable.
	//
	//    * MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
	//    to measure performance.
	//
	// For more information about performance metrics, please see the Amazon Machine
	// Learning Developer Guide (path_to_url
	PerformanceMetrics *PerformanceMetrics `type:"structure"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The status of the evaluation. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    evaluate an MLModel.
	//    * INPROGRESS - The evaluation is underway.
	//    * FAILED - The request to evaluate an MLModel did not run to completion.
	//    It is not usable.
	//    * COMPLETED - The evaluation process completed successfully.
	//    * DELETED - The Evaluation is marked as deleted. It is not usable.
	Status *string `type:"string" enum:"EntityStatus"`
}

// String returns the string representation
func (s Evaluation) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Evaluation) GoString() string {
	return s.String()
}

// SetComputeTime sets the ComputeTime field's value.
// Returns the receiver so calls can be chained.
func (s *Evaluation) SetComputeTime(v int64) *Evaluation {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *Evaluation) SetCreatedAt(v time.Time) *Evaluation {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *Evaluation) SetCreatedByIamUser(v string) *Evaluation {
	s.CreatedByIamUser = &v
	return s
}

// SetEvaluationDataSourceId sets the EvaluationDataSourceId field's value.
func (s *Evaluation) SetEvaluationDataSourceId(v string) *Evaluation {
	s.EvaluationDataSourceId = &v
	return s
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *Evaluation) SetEvaluationId(v string) *Evaluation {
	s.EvaluationId = &v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *Evaluation) SetFinishedAt(v time.Time) *Evaluation {
	s.FinishedAt = &v
	return s
}

// SetInputDataLocationS3 sets the InputDataLocationS3 field's value.
func (s *Evaluation) SetInputDataLocationS3(v string) *Evaluation {
	s.InputDataLocationS3 = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *Evaluation) SetLastUpdatedAt(v time.Time) *Evaluation {
	s.LastUpdatedAt = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *Evaluation) SetMLModelId(v string) *Evaluation {
	s.MLModelId = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *Evaluation) SetMessage(v string) *Evaluation {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *Evaluation) SetName(v string) *Evaluation {
	s.Name = &v
	return s
}

// SetPerformanceMetrics sets the PerformanceMetrics field's value.
func (s *Evaluation) SetPerformanceMetrics(v *PerformanceMetrics) *Evaluation {
	s.PerformanceMetrics = v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *Evaluation) SetStartedAt(v time.Time) *Evaluation {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *Evaluation) SetStatus(v string) *Evaluation {
	s.Status = &v
	return s
}

// Input shape for the GetBatchPrediction operation.
type GetBatchPredictionInput struct {
	_ struct{} `type:"structure"`

	// An ID assigned to the BatchPrediction at creation.
	//
	// BatchPredictionId is a required field
	BatchPredictionId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBatchPredictionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBatchPredictionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBatchPredictionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetBatchPredictionInput"} if s.BatchPredictionId == nil { invalidParams.Add(request.NewErrParamRequired("BatchPredictionId")) } if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBatchPredictionId sets the BatchPredictionId field's value. func (s *GetBatchPredictionInput) SetBatchPredictionId(v string) *GetBatchPredictionInput { s.BatchPredictionId = &v return s } // Represents the output of a GetBatchPrediction operation and describes a BatchPrediction. type GetBatchPredictionOutput struct { _ struct{} `type:"structure"` // The ID of the DataSource that was used to create the BatchPrediction. BatchPredictionDataSourceId *string `min:"1" type:"string"` // An ID assigned to the BatchPrediction at creation. This value should be identical // to the value of the BatchPredictionID in the request. BatchPredictionId *string `min:"1" type:"string"` // The approximate CPU time in milliseconds that Amazon Machine Learning spent // processing the BatchPrediction, normalized and scaled on computation resources. // ComputeTime is only available if the BatchPrediction is in the COMPLETED // state. ComputeTime *int64 `type:"long"` // The time when the BatchPrediction was created. The time is expressed in epoch // time. CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"` // The AWS user account that invoked the BatchPrediction. The account type can // be either an AWS root account or an AWS Identity and Access Management (IAM) // user account. CreatedByIamUser *string `type:"string"` // The epoch time when Amazon Machine Learning marked the BatchPrediction as // COMPLETED or FAILED. FinishedAt is only available when the BatchPrediction // is in the COMPLETED or FAILED state. 
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// The number of invalid records that Amazon Machine Learning saw while processing
	// the BatchPrediction.
	InvalidRecordCount *int64 `type:"long"`

	// The time of the most recent edit to BatchPrediction. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// A link to the file that contains logs of the CreateBatchPrediction operation.
	LogUri *string `type:"string"`

	// The ID of the MLModel that generated predictions for the BatchPrediction
	// request.
	MLModelId *string `min:"1" type:"string"`

	// A description of the most recent details about processing the batch prediction
	// request.
	Message *string `type:"string"`

	// A user-supplied name or description of the BatchPrediction.
	Name *string `type:"string"`

	// The location of an Amazon S3 bucket or directory to receive the operation
	// results.
	OutputUri *string `type:"string"`

	// The epoch time when Amazon Machine Learning marked the BatchPrediction as
	// INPROGRESS. StartedAt isn't available if the BatchPrediction is in the PENDING
	// state.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The status of the BatchPrediction, which can be one of the following values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    generate batch predictions.
	//    * INPROGRESS - The batch predictions are in progress.
	//    * FAILED - The request to perform a batch prediction did not run to completion.
	//    It is not usable.
	//    * COMPLETED - The batch prediction process completed successfully.
	//    * DELETED - The BatchPrediction is marked as deleted. It is not usable.
	Status *string `type:"string" enum:"EntityStatus"`

	// The number of total records that Amazon Machine Learning saw while processing
	// the BatchPrediction.
	TotalRecordCount *int64 `type:"long"`
}

// String returns the string representation
func (s GetBatchPredictionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBatchPredictionOutput) GoString() string {
	return s.String()
}

// The setters below follow the AWS SDK convention for optional fields: each
// copies its value argument and stores the copy's address, then returns the
// receiver so calls can be chained.

// SetBatchPredictionDataSourceId sets the BatchPredictionDataSourceId field's value.
func (s *GetBatchPredictionOutput) SetBatchPredictionDataSourceId(v string) *GetBatchPredictionOutput {
	s.BatchPredictionDataSourceId = &v
	return s
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *GetBatchPredictionOutput) SetBatchPredictionId(v string) *GetBatchPredictionOutput {
	s.BatchPredictionId = &v
	return s
}

// SetComputeTime sets the ComputeTime field's value.
func (s *GetBatchPredictionOutput) SetComputeTime(v int64) *GetBatchPredictionOutput {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *GetBatchPredictionOutput) SetCreatedAt(v time.Time) *GetBatchPredictionOutput {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *GetBatchPredictionOutput) SetCreatedByIamUser(v string) *GetBatchPredictionOutput {
	s.CreatedByIamUser = &v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *GetBatchPredictionOutput) SetFinishedAt(v time.Time) *GetBatchPredictionOutput {
	s.FinishedAt = &v
	return s
}

// SetInputDataLocationS3 sets the InputDataLocationS3 field's value.
func (s *GetBatchPredictionOutput) SetInputDataLocationS3(v string) *GetBatchPredictionOutput {
	s.InputDataLocationS3 = &v
	return s
}

// SetInvalidRecordCount sets the InvalidRecordCount field's value.
func (s *GetBatchPredictionOutput) SetInvalidRecordCount(v int64) *GetBatchPredictionOutput {
	s.InvalidRecordCount = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *GetBatchPredictionOutput) SetLastUpdatedAt(v time.Time) *GetBatchPredictionOutput {
	s.LastUpdatedAt = &v
	return s
}

// SetLogUri sets the LogUri field's value.
func (s *GetBatchPredictionOutput) SetLogUri(v string) *GetBatchPredictionOutput {
	s.LogUri = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *GetBatchPredictionOutput) SetMLModelId(v string) *GetBatchPredictionOutput {
	s.MLModelId = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *GetBatchPredictionOutput) SetMessage(v string) *GetBatchPredictionOutput {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetBatchPredictionOutput) SetName(v string) *GetBatchPredictionOutput {
	s.Name = &v
	return s
}

// SetOutputUri sets the OutputUri field's value.
func (s *GetBatchPredictionOutput) SetOutputUri(v string) *GetBatchPredictionOutput {
	s.OutputUri = &v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *GetBatchPredictionOutput) SetStartedAt(v time.Time) *GetBatchPredictionOutput {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *GetBatchPredictionOutput) SetStatus(v string) *GetBatchPredictionOutput {
	s.Status = &v
	return s
}

// SetTotalRecordCount sets the TotalRecordCount field's value.
func (s *GetBatchPredictionOutput) SetTotalRecordCount(v int64) *GetBatchPredictionOutput {
	s.TotalRecordCount = &v
	return s
}

// GetDataSourceInput is the request shape for the GetDataSource operation.
type GetDataSourceInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the DataSource at creation.
	//
	// DataSourceId is a required field
	DataSourceId *string `min:"1" type:"string" required:"true"`

	// Specifies whether the GetDataSource operation should return DataSourceSchema.
	//
	// If true, DataSourceSchema is returned.
	//
	// If false, DataSourceSchema is not returned.
	Verbose *bool `type:"boolean"`
}

// String returns the string representation
func (s GetDataSourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetDataSourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It checks that DataSourceId is present and meets its minimum length; all
// violations are accumulated into a single ErrInvalidParams error.
func (s *GetDataSourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetDataSourceInput"}
	if s.DataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
	}
	if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *GetDataSourceInput) SetDataSourceId(v string) *GetDataSourceInput {
	s.DataSourceId = &v
	return s
}

// SetVerbose sets the Verbose field's value.
func (s *GetDataSourceInput) SetVerbose(v bool) *GetDataSourceInput {
	s.Verbose = &v
	return s
}

// Represents the output of a GetDataSource operation and describes a DataSource.
type GetDataSourceOutput struct {
	_ struct{} `type:"structure"`

	// The parameter is true if statistics need to be generated from the observation
	// data.
	ComputeStatistics *bool `type:"boolean"`

	// The approximate CPU time in milliseconds that Amazon Machine Learning spent
	// processing the DataSource, normalized and scaled on computation resources.
	// ComputeTime is only available if the DataSource is in the COMPLETED state
	// and the ComputeStatistics is set to true.
	ComputeTime *int64 `type:"long"`

	// The time that the DataSource was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account from which the DataSource was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string `type:"string"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	DataLocationS3 *string `type:"string"`

	// A JSON string that represents the splitting and rearrangement requirement
	// used when this DataSource was created.
	DataRearrangement *string `type:"string"`

	// The total size of observations in the data files.
	DataSizeInBytes *int64 `type:"long"`

	// The ID assigned to the DataSource at creation. This value should be identical
	// to the value of the DataSourceId in the request.
	DataSourceId *string `min:"1" type:"string"`

	// The schema used by all of the data files of this DataSource.
	//
	// Note: This parameter is provided as part of the verbose format.
	DataSourceSchema *string `type:"string"`

	// The epoch time when Amazon Machine Learning marked the DataSource as COMPLETED
	// or FAILED. FinishedAt is only available when the DataSource is in the COMPLETED
	// or FAILED state.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The time of the most recent edit to the DataSource. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// A link to the file containing logs of CreateDataSourceFrom* operations.
	LogUri *string `type:"string"`

	// The user-supplied description of the most recent details about creating the
	// DataSource.
	Message *string `type:"string"`

	// A user-supplied name or description of the DataSource.
	Name *string `type:"string"`

	// The number of data files referenced by the DataSource.
	NumberOfFiles *int64 `type:"long"`

	// The datasource details that are specific to Amazon RDS.
	RDSMetadata *RDSMetadata `type:"structure"`

	// Describes the DataSource details specific to Amazon Redshift.
	RedshiftMetadata *RedshiftMetadata `type:"structure"`

	// The Amazon Resource Name (ARN) of an AWS IAM Role (path_to_url#roles-about-termsandconcepts),
	// such as the following: arn:aws:iam::account:role/rolename.
	RoleARN *string `min:"1" type:"string"`

	// The epoch time when Amazon Machine Learning marked the DataSource as INPROGRESS.
	// StartedAt isn't available if the DataSource is in the PENDING state.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The current status of the DataSource. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon ML submitted a request to create a DataSource.
	//    * INPROGRESS - The creation process is underway.
	//    * FAILED - The request to create a DataSource did not run to completion.
	//    It is not usable.
	//    * COMPLETED - The creation process completed successfully.
	//    * DELETED - The DataSource is marked as deleted. It is not usable.
	Status *string `type:"string" enum:"EntityStatus"`
}

// String returns the string representation
func (s GetDataSourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetDataSourceOutput) GoString() string {
	return s.String()
}

// The setters below copy their value argument and store the copy's address
// (or, for struct fields, store the pointer directly), returning the receiver
// so calls can be chained.

// SetComputeStatistics sets the ComputeStatistics field's value.
func (s *GetDataSourceOutput) SetComputeStatistics(v bool) *GetDataSourceOutput {
	s.ComputeStatistics = &v
	return s
}

// SetComputeTime sets the ComputeTime field's value.
func (s *GetDataSourceOutput) SetComputeTime(v int64) *GetDataSourceOutput {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *GetDataSourceOutput) SetCreatedAt(v time.Time) *GetDataSourceOutput {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *GetDataSourceOutput) SetCreatedByIamUser(v string) *GetDataSourceOutput {
	s.CreatedByIamUser = &v
	return s
}

// SetDataLocationS3 sets the DataLocationS3 field's value.
func (s *GetDataSourceOutput) SetDataLocationS3(v string) *GetDataSourceOutput {
	s.DataLocationS3 = &v
	return s
}

// SetDataRearrangement sets the DataRearrangement field's value.
func (s *GetDataSourceOutput) SetDataRearrangement(v string) *GetDataSourceOutput {
	s.DataRearrangement = &v
	return s
}

// SetDataSizeInBytes sets the DataSizeInBytes field's value.
func (s *GetDataSourceOutput) SetDataSizeInBytes(v int64) *GetDataSourceOutput {
	s.DataSizeInBytes = &v
	return s
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *GetDataSourceOutput) SetDataSourceId(v string) *GetDataSourceOutput {
	s.DataSourceId = &v
	return s
}

// SetDataSourceSchema sets the DataSourceSchema field's value.
func (s *GetDataSourceOutput) SetDataSourceSchema(v string) *GetDataSourceOutput {
	s.DataSourceSchema = &v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *GetDataSourceOutput) SetFinishedAt(v time.Time) *GetDataSourceOutput {
	s.FinishedAt = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *GetDataSourceOutput) SetLastUpdatedAt(v time.Time) *GetDataSourceOutput {
	s.LastUpdatedAt = &v
	return s
}

// SetLogUri sets the LogUri field's value.
func (s *GetDataSourceOutput) SetLogUri(v string) *GetDataSourceOutput {
	s.LogUri = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *GetDataSourceOutput) SetMessage(v string) *GetDataSourceOutput {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetDataSourceOutput) SetName(v string) *GetDataSourceOutput {
	s.Name = &v
	return s
}

// SetNumberOfFiles sets the NumberOfFiles field's value.
func (s *GetDataSourceOutput) SetNumberOfFiles(v int64) *GetDataSourceOutput {
	s.NumberOfFiles = &v
	return s
}

// SetRDSMetadata sets the RDSMetadata field's value.
func (s *GetDataSourceOutput) SetRDSMetadata(v *RDSMetadata) *GetDataSourceOutput {
	s.RDSMetadata = v
	return s
}

// SetRedshiftMetadata sets the RedshiftMetadata field's value.
func (s *GetDataSourceOutput) SetRedshiftMetadata(v *RedshiftMetadata) *GetDataSourceOutput {
	s.RedshiftMetadata = v
	return s
}

// SetRoleARN sets the RoleARN field's value.
func (s *GetDataSourceOutput) SetRoleARN(v string) *GetDataSourceOutput {
	s.RoleARN = &v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *GetDataSourceOutput) SetStartedAt(v time.Time) *GetDataSourceOutput {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *GetDataSourceOutput) SetStatus(v string) *GetDataSourceOutput {
	s.Status = &v
	return s
}

// GetEvaluationInput is the request shape for the GetEvaluation operation.
type GetEvaluationInput struct {
	_ struct{} `type:"structure"`

	// The ID of the Evaluation to retrieve. The evaluation of each MLModel is recorded
	// and cataloged. The ID provides the means to access the information.
	//
	// EvaluationId is a required field
	EvaluationId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s GetEvaluationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetEvaluationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It checks that EvaluationId is present and meets its minimum length; all
// violations are accumulated into a single ErrInvalidParams error.
func (s *GetEvaluationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetEvaluationInput"}
	if s.EvaluationId == nil {
		invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
	}
	if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *GetEvaluationInput) SetEvaluationId(v string) *GetEvaluationInput {
	s.EvaluationId = &v
	return s
}

// Represents the output of a GetEvaluation operation and describes an Evaluation.
type GetEvaluationOutput struct {
	_ struct{} `type:"structure"`

	// The approximate CPU time in milliseconds that Amazon Machine Learning spent
	// processing the Evaluation, normalized and scaled on computation resources.
	// ComputeTime is only available if the Evaluation is in the COMPLETED state.
	ComputeTime *int64 `type:"long"`

	// The time that the Evaluation was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account that invoked the evaluation. The account type can be
	// either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string `type:"string"`

	// The DataSource used for this evaluation.
	EvaluationDataSourceId *string `min:"1" type:"string"`

	// The evaluation ID which is same as the EvaluationId in the request.
	EvaluationId *string `min:"1" type:"string"`

	// The epoch time when Amazon Machine Learning marked the Evaluation as COMPLETED
	// or FAILED. FinishedAt is only available when the Evaluation is in the COMPLETED
	// or FAILED state.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// The time of the most recent edit to the Evaluation. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// A link to the file that contains logs of the CreateEvaluation operation.
	LogUri *string `type:"string"`

	// The ID of the MLModel that was the focus of the evaluation.
	MLModelId *string `min:"1" type:"string"`

	// A description of the most recent details about evaluating the MLModel.
	Message *string `type:"string"`

	// A user-supplied name or description of the Evaluation.
	Name *string `type:"string"`

	// Measurements of how well the MLModel performed using observations referenced
	// by the DataSource. One of the following metrics is returned based on the
	// type of the MLModel:
	//
	//    * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
	//    to measure performance.
	//
	//    * RegressionRMSE: A regression MLModel uses the Root Mean Square Error
	//    (RMSE) technique to measure performance. RMSE measures the difference
	//    between predicted and actual values for a single variable.
	//
	//    * MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique
	//    to measure performance.
	//
	// For more information about performance metrics, please see the Amazon Machine
	// Learning Developer Guide (path_to_url
	PerformanceMetrics *PerformanceMetrics `type:"structure"`

	// The epoch time when Amazon Machine Learning marked the Evaluation as INPROGRESS.
	// StartedAt isn't available if the Evaluation is in the PENDING state.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The status of the evaluation. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    evaluate an MLModel.
	//    * INPROGRESS - The evaluation is underway.
	//    * FAILED - The request to evaluate an MLModel did not run to completion.
	//    It is not usable.
	//    * COMPLETED - The evaluation process completed successfully.
	//    * DELETED - The Evaluation is marked as deleted. It is not usable.
	Status *string `type:"string" enum:"EntityStatus"`
}

// String returns the string representation
func (s GetEvaluationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetEvaluationOutput) GoString() string {
	return s.String()
}

// The setters below copy their value argument and store the copy's address
// (or, for struct fields, store the pointer directly), returning the receiver
// so calls can be chained.

// SetComputeTime sets the ComputeTime field's value.
func (s *GetEvaluationOutput) SetComputeTime(v int64) *GetEvaluationOutput {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *GetEvaluationOutput) SetCreatedAt(v time.Time) *GetEvaluationOutput {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *GetEvaluationOutput) SetCreatedByIamUser(v string) *GetEvaluationOutput {
	s.CreatedByIamUser = &v
	return s
}

// SetEvaluationDataSourceId sets the EvaluationDataSourceId field's value.
func (s *GetEvaluationOutput) SetEvaluationDataSourceId(v string) *GetEvaluationOutput {
	s.EvaluationDataSourceId = &v
	return s
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *GetEvaluationOutput) SetEvaluationId(v string) *GetEvaluationOutput {
	s.EvaluationId = &v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *GetEvaluationOutput) SetFinishedAt(v time.Time) *GetEvaluationOutput {
	s.FinishedAt = &v
	return s
}

// SetInputDataLocationS3 sets the InputDataLocationS3 field's value.
func (s *GetEvaluationOutput) SetInputDataLocationS3(v string) *GetEvaluationOutput {
	s.InputDataLocationS3 = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *GetEvaluationOutput) SetLastUpdatedAt(v time.Time) *GetEvaluationOutput {
	s.LastUpdatedAt = &v
	return s
}

// SetLogUri sets the LogUri field's value.
func (s *GetEvaluationOutput) SetLogUri(v string) *GetEvaluationOutput {
	s.LogUri = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *GetEvaluationOutput) SetMLModelId(v string) *GetEvaluationOutput {
	s.MLModelId = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *GetEvaluationOutput) SetMessage(v string) *GetEvaluationOutput {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetEvaluationOutput) SetName(v string) *GetEvaluationOutput {
	s.Name = &v
	return s
}

// SetPerformanceMetrics sets the PerformanceMetrics field's value.
func (s *GetEvaluationOutput) SetPerformanceMetrics(v *PerformanceMetrics) *GetEvaluationOutput {
	s.PerformanceMetrics = v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *GetEvaluationOutput) SetStartedAt(v time.Time) *GetEvaluationOutput {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *GetEvaluationOutput) SetStatus(v string) *GetEvaluationOutput {
	s.Status = &v
	return s
}

// GetMLModelInput is the request shape for the GetMLModel operation.
type GetMLModelInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the MLModel at creation.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`

	// Specifies whether the GetMLModel operation should return Recipe.
	//
	// If true, Recipe is returned.
	//
	// If false, Recipe is not returned.
	Verbose *bool `type:"boolean"`
}

// String returns the string representation
func (s GetMLModelInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetMLModelInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It checks that MLModelId is present and meets its minimum length; all
// violations are accumulated into a single ErrInvalidParams error.
func (s *GetMLModelInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetMLModelInput"}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMLModelId sets the MLModelId field's value.
func (s *GetMLModelInput) SetMLModelId(v string) *GetMLModelInput {
	s.MLModelId = &v
	return s
}

// SetVerbose sets the Verbose field's value.
func (s *GetMLModelInput) SetVerbose(v bool) *GetMLModelInput {
	s.Verbose = &v
	return s
}

// Represents the output of a GetMLModel operation, and provides detailed information
// about a MLModel.
type GetMLModelOutput struct {
	_ struct{} `type:"structure"`

	// The approximate CPU time in milliseconds that Amazon Machine Learning spent
	// processing the MLModel, normalized and scaled on computation resources. ComputeTime
	// is only available if the MLModel is in the COMPLETED state.
	ComputeTime *int64 `type:"long"`

	// The time that the MLModel was created. The time is expressed in epoch time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account from which the MLModel was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string `type:"string"`

	// The current endpoint of the MLModel
	EndpointInfo *RealtimeEndpointInfo `type:"structure"`

	// The epoch time when Amazon Machine Learning marked the MLModel as COMPLETED
	// or FAILED. FinishedAt is only available when the MLModel is in the COMPLETED
	// or FAILED state.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// The time of the most recent edit to the MLModel. The time is expressed in
	// epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// A link to the file that contains logs of the CreateMLModel operation.
	LogUri *string `type:"string"`

	// The MLModel ID, which is same as the MLModelId in the request.
	MLModelId *string `min:"1" type:"string"`

	// Identifies the MLModel category. The following are the available types:
	//
	//    * REGRESSION -- Produces a numeric result. For example, "What price should
	//    a house be listed at?"
	//    * BINARY -- Produces one of two possible results. For example, "Is this
	//    an e-commerce website?"
	//    * MULTICLASS -- Produces one of several possible results. For example,
	//    "Is this a HIGH, LOW or MEDIUM risk trade?"
	MLModelType *string `type:"string" enum:"MLModelType"`

	// A description of the most recent details about accessing the MLModel.
	Message *string `type:"string"`

	// A user-supplied name or description of the MLModel.
	Name *string `type:"string"`

	// The recipe to use when training the MLModel. The Recipe provides detailed
	// information about the observation data to use during training, and manipulations
	// to perform on the observation data during training.
	//
	// Note: This parameter is provided as part of the verbose format.
	Recipe *string `type:"string"`

	// The schema used by all of the data files referenced by the DataSource.
	//
	// Note: This parameter is provided as part of the verbose format.
	Schema *string `type:"string"`

	// The scoring threshold is used in binary classification MLModel models. It
	// marks the boundary between a positive prediction and a negative prediction.
	//
	// Output values greater than or equal to the threshold receive a positive result
	// from the MLModel, such as true. Output values less than the threshold receive
	// a negative response from the MLModel, such as false.
	ScoreThreshold *float64 `type:"float"`

	// The time of the most recent edit to the ScoreThreshold. The time is expressed
	// in epoch time.
	ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// Long integer type that is a 64-bit signed number.
	SizeInBytes *int64 `type:"long"`

	// The epoch time when Amazon Machine Learning marked the MLModel as INPROGRESS.
	// StartedAt isn't available if the MLModel is in the PENDING state.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The current status of the MLModel. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    describe a MLModel.
	//    * INPROGRESS - The request is processing.
	//    * FAILED - The request did not run to completion. The ML model isn't usable.
	//
	//    * COMPLETED - The request completed successfully.
	//    * DELETED - The MLModel is marked as deleted. It isn't usable.
	Status *string `type:"string" enum:"EntityStatus"`

	// The ID of the training DataSource.
	TrainingDataSourceId *string `min:"1" type:"string"`

	// A list of the training parameters in the MLModel. The list is implemented
	// as a map of key-value pairs.
	//
	// The following is the current set of training parameters:
	//
	//    * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
	//    on the input data, the size of the model might affect its performance.
	//
	// The value is an integer that ranges from 100000 to 2147483648. The default
	// value is 33554432.
	//
	//    * sgd.maxPasses - The number of times that the training process traverses
	//    the observations to build the MLModel. The value is an integer that ranges
	//    from 1 to 10000. The default value is 10.
	//
	//    * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
	//    data improves a model's ability to find the optimal solution for a variety
	//    of data types. The valid values are auto and none. The default value is
	//    none. We strongly recommend that you shuffle your data.
	//
	//    * sgd.l1RegularizationAmount - The coefficient regularization L1 norm.
	//    It controls overfitting the data by penalizing large coefficients. This
	//    tends to drive coefficients to zero, resulting in a sparse feature set.
	//    If you use this parameter, start by specifying a small value, such as
	//    1.0E-08.
	//
	// The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	// not use L1 normalization. This parameter can't be used when L2 is specified.
	// Use this parameter sparingly.
	//
	//    * sgd.l2RegularizationAmount - The coefficient regularization L2 norm.
	//    It controls overfitting the data by penalizing large coefficients. This
	//    tends to drive coefficients to small, nonzero values. If you use this
	//    parameter, start by specifying a small value, such as 1.0E-08.
	//
	// The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	// not use L2 normalization. This parameter can't be used when L1 is specified.
	// Use this parameter sparingly.
	TrainingParameters map[string]*string `type:"map"`
}

// String returns the string representation
func (s GetMLModelOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetMLModelOutput) GoString() string {
	return s.String()
}

// The setters below copy their value argument and store the copy's address
// (or, for struct/map fields, store the value directly), returning the
// receiver so calls can be chained.

// SetComputeTime sets the ComputeTime field's value.
func (s *GetMLModelOutput) SetComputeTime(v int64) *GetMLModelOutput {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *GetMLModelOutput) SetCreatedAt(v time.Time) *GetMLModelOutput {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *GetMLModelOutput) SetCreatedByIamUser(v string) *GetMLModelOutput {
	s.CreatedByIamUser = &v
	return s
}

// SetEndpointInfo sets the EndpointInfo field's value.
func (s *GetMLModelOutput) SetEndpointInfo(v *RealtimeEndpointInfo) *GetMLModelOutput {
	s.EndpointInfo = v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *GetMLModelOutput) SetFinishedAt(v time.Time) *GetMLModelOutput {
	s.FinishedAt = &v
	return s
}

// SetInputDataLocationS3 sets the InputDataLocationS3 field's value.
func (s *GetMLModelOutput) SetInputDataLocationS3(v string) *GetMLModelOutput {
	s.InputDataLocationS3 = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *GetMLModelOutput) SetLastUpdatedAt(v time.Time) *GetMLModelOutput {
	s.LastUpdatedAt = &v
	return s
}

// SetLogUri sets the LogUri field's value.
func (s *GetMLModelOutput) SetLogUri(v string) *GetMLModelOutput {
	s.LogUri = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *GetMLModelOutput) SetMLModelId(v string) *GetMLModelOutput {
	s.MLModelId = &v
	return s
}

// SetMLModelType sets the MLModelType field's value.
func (s *GetMLModelOutput) SetMLModelType(v string) *GetMLModelOutput {
	s.MLModelType = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *GetMLModelOutput) SetMessage(v string) *GetMLModelOutput {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetMLModelOutput) SetName(v string) *GetMLModelOutput {
	s.Name = &v
	return s
}

// SetRecipe sets the Recipe field's value.
func (s *GetMLModelOutput) SetRecipe(v string) *GetMLModelOutput {
	s.Recipe = &v
	return s
}

// SetSchema sets the Schema field's value.
func (s *GetMLModelOutput) SetSchema(v string) *GetMLModelOutput {
	s.Schema = &v
	return s
}

// SetScoreThreshold sets the ScoreThreshold field's value.
func (s *GetMLModelOutput) SetScoreThreshold(v float64) *GetMLModelOutput {
	s.ScoreThreshold = &v
	return s
}

// SetScoreThresholdLastUpdatedAt sets the ScoreThresholdLastUpdatedAt field's value.
func (s *GetMLModelOutput) SetScoreThresholdLastUpdatedAt(v time.Time) *GetMLModelOutput {
	s.ScoreThresholdLastUpdatedAt = &v
	return s
}

// SetSizeInBytes sets the SizeInBytes field's value.
func (s *GetMLModelOutput) SetSizeInBytes(v int64) *GetMLModelOutput {
	s.SizeInBytes = &v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *GetMLModelOutput) SetStartedAt(v time.Time) *GetMLModelOutput {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *GetMLModelOutput) SetStatus(v string) *GetMLModelOutput {
	s.Status = &v
	return s
}

// SetTrainingDataSourceId sets the TrainingDataSourceId field's value.
func (s *GetMLModelOutput) SetTrainingDataSourceId(v string) *GetMLModelOutput {
	s.TrainingDataSourceId = &v
	return s
}

// SetTrainingParameters sets the TrainingParameters field's value.
func (s *GetMLModelOutput) SetTrainingParameters(v map[string]*string) *GetMLModelOutput {
	s.TrainingParameters = v
	return s
}

// Represents the output of a GetMLModel operation.
//
// The content consists of the detailed metadata and the current status of the
// MLModel.
type MLModel struct {
	_ struct{} `type:"structure"`

	// The algorithm used to train the MLModel.
	// The following algorithm is supported:
	//
	//    * SGD -- Stochastic gradient descent. The goal of SGD is to minimize
	//    the gradient of the loss function.
	Algorithm *string `type:"string" enum:"Algorithm"`

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64 `type:"long"`

	// The time that the MLModel was created. The time is expressed in epoch time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The AWS user account from which the MLModel was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string `type:"string"`

	// The current endpoint of the MLModel.
	EndpointInfo *RealtimeEndpointInfo `type:"structure"`

	// A timestamp represented in epoch time.
	FinishedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string `type:"string"`

	// The time of the most recent edit to the MLModel. The time is expressed in
	// epoch time.
	LastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The ID assigned to the MLModel at creation.
	MLModelId *string `min:"1" type:"string"`

	// Identifies the MLModel category. The following are the available types:
	//
	//    * REGRESSION - Produces a numeric result. For example, "What price should
	//    a house be listed at?"
	//    * BINARY - Produces one of two possible results. For example, "Is this
	//    a child-friendly web site?".
	//    * MULTICLASS - Produces one of several possible results. For example,
	//    "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
	MLModelType *string `type:"string" enum:"MLModelType"`

	// A description of the most recent details about accessing the MLModel.
	Message *string `type:"string"`

	// A user-supplied name or description of the MLModel.
	Name *string `type:"string"`

	// The scoring threshold for a binary classification model; see the
	// GetMLModelOutput.ScoreThreshold documentation for details.
	ScoreThreshold *float64 `type:"float"`

	// The time of the most recent edit to the ScoreThreshold. The time is expressed
	// in epoch time.
	ScoreThresholdLastUpdatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// Long integer type that is a 64-bit signed number.
	SizeInBytes *int64 `type:"long"`

	// A timestamp represented in epoch time.
	StartedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The current status of an MLModel. This element can have one of the following
	// values:
	//
	//    * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	//    create an MLModel.
	//    * INPROGRESS - The creation process is underway.
	//    * FAILED - The request to create an MLModel didn't run to completion.
	//    The model isn't usable.
	//    * COMPLETED - The creation process completed successfully.
	//    * DELETED - The MLModel is marked as deleted. It isn't usable.
	Status *string `type:"string" enum:"EntityStatus"`

	// The ID of the training DataSource. The CreateMLModel operation uses the TrainingDataSourceId.
	TrainingDataSourceId *string `min:"1" type:"string"`

	// A list of the training parameters in the MLModel. The list is implemented
	// as a map of key-value pairs.
	//
	// The following is the current set of training parameters:
	//
	//    * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending
	//    on the input data, the size of the model might affect its performance.
	//
	// The value is an integer that ranges from 100000 to 2147483648. The default
	// value is 33554432.
	//
	//    * sgd.maxPasses - The number of times that the training process traverses
	//    the observations to build the MLModel. The value is an integer that ranges
	//    from 1 to 10000. The default value is 10.
	//
	//    * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
	//    the data improves a model's ability to find the optimal solution for a
	//    variety of data types. The valid values are auto and none. The default
	//    value is none.
	//
	//    * sgd.l1RegularizationAmount - The coefficient regularization L1 norm,
	//    which controls overfitting the data by penalizing large coefficients.
	//    This parameter tends to drive coefficients to zero, resulting in sparse
	//    feature set. If you use this parameter, start by specifying a small value,
	//    such as 1.0E-08.
	//
	// The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	// not use L1 normalization. This parameter can't be used when L2 is specified.
	// Use this parameter sparingly.
	//
	//    * sgd.l2RegularizationAmount - The coefficient regularization L2 norm,
	//    which controls overfitting the data by penalizing large coefficients.
	//    This tends to drive coefficients to small, nonzero values. If you use
	//    this parameter, start by specifying a small value, such as 1.0E-08.
	//
	// The value is a double that ranges from 0 to MAX_DOUBLE. The default is to
	// not use L2 normalization. This parameter can't be used when L1 is specified.
	// Use this parameter sparingly.
	TrainingParameters map[string]*string `type:"map"`
}

// String returns the string representation
func (s MLModel) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s MLModel) GoString() string {
	return s.String()
}

// The setters below copy their value argument and store the copy's address
// (or, for struct fields, store the pointer directly), returning the receiver
// so calls can be chained.

// SetAlgorithm sets the Algorithm field's value.
func (s *MLModel) SetAlgorithm(v string) *MLModel {
	s.Algorithm = &v
	return s
}

// SetComputeTime sets the ComputeTime field's value.
func (s *MLModel) SetComputeTime(v int64) *MLModel {
	s.ComputeTime = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *MLModel) SetCreatedAt(v time.Time) *MLModel {
	s.CreatedAt = &v
	return s
}

// SetCreatedByIamUser sets the CreatedByIamUser field's value.
func (s *MLModel) SetCreatedByIamUser(v string) *MLModel {
	s.CreatedByIamUser = &v
	return s
}

// SetEndpointInfo sets the EndpointInfo field's value.
func (s *MLModel) SetEndpointInfo(v *RealtimeEndpointInfo) *MLModel {
	s.EndpointInfo = v
	return s
}

// SetFinishedAt sets the FinishedAt field's value.
func (s *MLModel) SetFinishedAt(v time.Time) *MLModel {
	s.FinishedAt = &v
	return s
}

// SetInputDataLocationS3 sets the InputDataLocationS3 field's value.
func (s *MLModel) SetInputDataLocationS3(v string) *MLModel {
	s.InputDataLocationS3 = &v
	return s
}

// SetLastUpdatedAt sets the LastUpdatedAt field's value.
func (s *MLModel) SetLastUpdatedAt(v time.Time) *MLModel {
	s.LastUpdatedAt = &v
	return s
}

// SetMLModelId sets the MLModelId field's value.
func (s *MLModel) SetMLModelId(v string) *MLModel {
	s.MLModelId = &v
	return s
}

// SetMLModelType sets the MLModelType field's value.
func (s *MLModel) SetMLModelType(v string) *MLModel {
	s.MLModelType = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *MLModel) SetMessage(v string) *MLModel {
	s.Message = &v
	return s
}

// SetName sets the Name field's value.
func (s *MLModel) SetName(v string) *MLModel {
	s.Name = &v
	return s
}

// SetScoreThreshold sets the ScoreThreshold field's value.
func (s *MLModel) SetScoreThreshold(v float64) *MLModel {
	s.ScoreThreshold = &v
	return s
}

// SetScoreThresholdLastUpdatedAt sets the ScoreThresholdLastUpdatedAt field's value.
func (s *MLModel) SetScoreThresholdLastUpdatedAt(v time.Time) *MLModel {
	s.ScoreThresholdLastUpdatedAt = &v
	return s
}

// SetSizeInBytes sets the SizeInBytes field's value.
func (s *MLModel) SetSizeInBytes(v int64) *MLModel {
	s.SizeInBytes = &v
	return s
}

// SetStartedAt sets the StartedAt field's value.
func (s *MLModel) SetStartedAt(v time.Time) *MLModel {
	s.StartedAt = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *MLModel) SetStatus(v string) *MLModel {
	s.Status = &v
	return s
}

// SetTrainingDataSourceId sets the TrainingDataSourceId field's value.
func (s *MLModel) SetTrainingDataSourceId(v string) *MLModel {
	s.TrainingDataSourceId = &v
	return s
}

// SetTrainingParameters sets the TrainingParameters field's value.
func (s *MLModel) SetTrainingParameters(v map[string]*string) *MLModel {
	s.TrainingParameters = v
	return s
}

// Measurements of how well the MLModel performed on known observations. One
// of the following metrics is returned, based on the type of the MLModel:
//
// * BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique
// to measure performance.
//
// * RegressionRMSE: The regression MLModel uses the Root Mean Square Error
// (RMSE) technique to measure performance. RMSE measures the difference
// between predicted and actual values for a single variable.
//
// * MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique
// to measure performance.
//
// For more information about performance metrics, please see the Amazon Machine
// Learning Developer Guide (path_to_url
type PerformanceMetrics struct {
	_ struct{} `type:"structure"`

	// Key-value pairs describing the measured performance metrics (see the
	// metric list in the type comment above).
	Properties map[string]*string `type:"map"`
}

// String returns the string representation
func (s PerformanceMetrics) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PerformanceMetrics) GoString() string {
	return s.String()
}

// SetProperties sets the Properties field's value.
func (s *PerformanceMetrics) SetProperties(v map[string]*string) *PerformanceMetrics {
	s.Properties = v
	return s
}

// PredictInput carries the parameters for a Predict request: the MLModel to
// use, the endpoint to send the request to, and the observation to score.
type PredictInput struct {
	_ struct{} `type:"structure"`

	// A unique identifier of the MLModel.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`

	// The endpoint to which the prediction request is sent (presumably the
	// EndpointUrl of the MLModel's real-time endpoint — confirm against the
	// service documentation).
	//
	// PredictEndpoint is a required field
	PredictEndpoint *string `type:"string" required:"true"`

	// A map of variable name-value pairs that represent an observation.
	//
	// Record is a required field
	Record map[string]*string `type:"map" required:"true"`
}

// String returns the string representation
func (s PredictInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PredictInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the required/min struct tags declared on PredictInput.
func (s *PredictInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PredictInput"}
	if s.MLModelId == nil {
		invalidParams.Add(request.NewErrParamRequired("MLModelId"))
	}
	if s.MLModelId != nil && len(*s.MLModelId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1))
	}
	if s.PredictEndpoint == nil {
		invalidParams.Add(request.NewErrParamRequired("PredictEndpoint"))
	}
	if s.Record == nil {
		invalidParams.Add(request.NewErrParamRequired("Record"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMLModelId sets the MLModelId field's value.
func (s *PredictInput) SetMLModelId(v string) *PredictInput {
	s.MLModelId = &v
	return s
}

// SetPredictEndpoint sets the PredictEndpoint field's value.
func (s *PredictInput) SetPredictEndpoint(v string) *PredictInput {
	s.PredictEndpoint = &v
	return s
}

// SetRecord sets the Record field's value.
func (s *PredictInput) SetRecord(v map[string]*string) *PredictInput {
	s.Record = v
	return s
}

// PredictOutput wraps the single Prediction returned by a Predict request.
type PredictOutput struct {
	_ struct{} `type:"structure"`

	// The output from a Predict operation:
	//
	// * Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE
	// - REGRESSION | BINARY | MULTICLASS; DetailsAttributes.ALGORITHM - SGD
	//
	// * PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.
	//
	// * PredictedScores - Contains the raw classification score corresponding
	// to each label.
	//
	// * PredictedValue - Present for a REGRESSION MLModel request.
	Prediction *Prediction `type:"structure"`
}

// String returns the string representation
func (s PredictOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PredictOutput) GoString() string {
	return s.String()
}

// SetPrediction sets the Prediction field's value.
func (s *PredictOutput) SetPrediction(v *Prediction) *PredictOutput {
	s.Prediction = v
	return s
}

// The output from a Predict operation:
//
// * Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE
// - REGRESSION | BINARY | MULTICLASS; DetailsAttributes.ALGORITHM - SGD
//
// * PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.
//
// * PredictedScores - Contains the raw classification score corresponding
// to each label.
//
// * PredictedValue - Present for a REGRESSION MLModel request.
type Prediction struct {
	_ struct{} `type:"structure"`

	// Provides any additional details regarding the prediction.
	Details map[string]*string `locationName:"details" type:"map"`

	// The prediction label for either a BINARY or MULTICLASS MLModel.
	PredictedLabel *string `locationName:"predictedLabel" min:"1" type:"string"`

	// Provides the raw classification score corresponding to each label.
	PredictedScores map[string]*float64 `locationName:"predictedScores" type:"map"`

	// The prediction value for a REGRESSION MLModel.
	PredictedValue *float64 `locationName:"predictedValue" type:"float"`
}

// String returns the string representation
func (s Prediction) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Prediction) GoString() string {
	return s.String()
}

// SetDetails sets the Details field's value.
func (s *Prediction) SetDetails(v map[string]*string) *Prediction {
	s.Details = v
	return s
}

// SetPredictedLabel sets the PredictedLabel field's value.
func (s *Prediction) SetPredictedLabel(v string) *Prediction {
	s.PredictedLabel = &v
	return s
}

// SetPredictedScores sets the PredictedScores field's value.
func (s *Prediction) SetPredictedScores(v map[string]*float64) *Prediction {
	s.PredictedScores = v
	return s
}

// SetPredictedValue sets the PredictedValue field's value.
func (s *Prediction) SetPredictedValue(v float64) *Prediction {
	s.PredictedValue = &v
	return s
}

// The data specification of an Amazon Relational Database Service (Amazon RDS)
// DataSource.
type RDSDataSpec struct {
	_ struct{} `type:"structure"`

	// A JSON string that represents the splitting and rearrangement processing
	// to be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource.
	//
	// There are multiple parameters that control what data is used to create a
	// datasource:
	//
	// * percentBegin
	//
	// Use percentBegin to indicate the beginning of the range of the data used
	// to create the Datasource. If you do not include percentBegin and percentEnd,
	// Amazon ML includes all of the data when creating the datasource.
	//
	// * percentEnd
	//
	// Use percentEnd to indicate the end of the range of the data used to create
	// the Datasource. If you do not include percentBegin and percentEnd, Amazon
	// ML includes all of the data when creating the datasource.
	//
	// * complement
	//
	// The complement parameter instructs Amazon ML to use the data that is not
	// included in the range of percentBegin to percentEnd to create a datasource.
	// The complement parameter is useful if you need to create complementary
	// datasources for training and evaluation. To create a complementary datasource,
	// use the same values for percentBegin and percentEnd, along with the complement
	// parameter.
	//
	// For example, the following two datasources do not share any data, and can
	// be used to train and evaluate a model. The first datasource has 25 percent
	// of the data, and the second one has 75 percent of the data.
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}}
	//
	// Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25,
	// "complement":"true"}}
	//
	// * strategy
	//
	// To change how Amazon ML splits the data for a datasource, use the strategy
	// parameter.
	//
	// The default value for the strategy parameter is sequential, meaning that
	// Amazon ML takes all of the data records between the percentBegin and percentEnd
	// parameters for the datasource, in the order that the records appear in
	// the input data.
	//
	// The following two DataRearrangement lines are examples of sequentially ordered
	// training and evaluation datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"sequential"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"sequential", "complement":"true"}}
	//
	// To randomly split the input data into the proportions indicated by the percentBegin
	// and percentEnd parameters, set the strategy parameter to random and provide
	// a string that is used as the seed value for the random data splitting
	// (for example, you can use the S3 path to your data as the random seed
	// string). If you choose the random split strategy, Amazon ML assigns each
	// row of data a pseudo-random number between 0 and 100, and then selects
	// the rows that have an assigned number between percentBegin and percentEnd.
	// Pseudo-random numbers are assigned using both the input seed string value
	// and the byte offset as a seed, so changing the data results in a different
	// split. Any existing ordering is preserved. The random splitting strategy
	// ensures that variables in the training and evaluation data are distributed
	// similarly. It is useful in the cases where the input data may have an
	// implicit sort order, which would otherwise result in training and evaluation
	// datasources containing non-similar data records.
	//
	// The following two DataRearrangement lines are examples of non-sequentially
	// ordered training and evaluation datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
	DataRearrangement *string `type:"string"`

	// A JSON string that represents the schema for an Amazon RDS DataSource. The
	// DataSchema defines the structure of the observation data in the data file(s)
	// referenced in the DataSource.
	//
	// A DataSchema is not required if you specify a DataSchemaUri
	//
	// Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
	// have an array of key-value pairs for their value. Use the following format
	// to define your DataSchema.
	//
	// { "version": "1.0",
	//
	// "recordAnnotationFieldName": "F1",
	//
	// "recordWeightFieldName": "F2",
	//
	// "targetFieldName": "F3",
	//
	// "dataFormat": "CSV",
	//
	// "dataFileContainsHeader": true,
	//
	// "attributes": [
	//
	// { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
	// "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
	// "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
	// }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
	// "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
	// } ],
	//
	// "excludedVariableNames": [ "F6" ] }
	DataSchema *string `type:"string"`

	// The Amazon S3 location of the DataSchema.
	DataSchemaUri *string `type:"string"`

	// The AWS Identity and Access Management (IAM) credentials that are used to
	// connect to the Amazon RDS database.
	//
	// DatabaseCredentials is a required field
	DatabaseCredentials *RDSDatabaseCredentials `type:"structure" required:"true"`

	// Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
	//
	// DatabaseInformation is a required field
	DatabaseInformation *RDSDatabase `type:"structure" required:"true"`

	// The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute
	// Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS
	// to an Amazon S3 task. For more information, see Role templates (path_to_url
	// for data pipelines.
	//
	// ResourceRole is a required field
	ResourceRole *string `min:"1" type:"string" required:"true"`

	// The Amazon S3 location for staging Amazon RDS data. The data retrieved from
	// Amazon RDS using SelectSqlQuery is stored in this location.
	//
	// S3StagingLocation is a required field
	S3StagingLocation *string `type:"string" required:"true"`

	// The security group IDs to be used to access a VPC-based RDS DB instance.
	// Ensure that there are appropriate ingress rules set up to allow access to
	// the RDS DB instance. This attribute is used by Data Pipeline to carry out
	// the copy operation from Amazon RDS to an Amazon S3 task.
	//
	// SecurityGroupIds is a required field
	SecurityGroupIds []*string `type:"list" required:"true"`

	// The query that is used to retrieve the observation data for the DataSource.
	//
	// SelectSqlQuery is a required field
	SelectSqlQuery *string `min:"1" type:"string" required:"true"`

	// The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to
	// monitor the progress of the copy task from Amazon RDS to Amazon S3. For more
	// information, see Role templates (path_to_url
	// for data pipelines.
	//
	// ServiceRole is a required field
	ServiceRole *string `min:"1" type:"string" required:"true"`

	// The subnet ID to be used to access a VPC-based RDS DB instance. This attribute
	// is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon
	// S3.
	//
	// SubnetId is a required field
	SubnetId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RDSDataSpec) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RDSDataSpec) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the required/min struct tags above; the nested
// DatabaseCredentials and DatabaseInformation specs are validated recursively
// and their errors attached via AddNested.
func (s *RDSDataSpec) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RDSDataSpec"}
	if s.DatabaseCredentials == nil {
		invalidParams.Add(request.NewErrParamRequired("DatabaseCredentials"))
	}
	if s.DatabaseInformation == nil {
		invalidParams.Add(request.NewErrParamRequired("DatabaseInformation"))
	}
	if s.ResourceRole == nil {
		invalidParams.Add(request.NewErrParamRequired("ResourceRole"))
	}
	if s.ResourceRole != nil && len(*s.ResourceRole) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ResourceRole", 1))
	}
	if s.S3StagingLocation == nil {
		invalidParams.Add(request.NewErrParamRequired("S3StagingLocation"))
	}
	if s.SecurityGroupIds == nil {
		invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds"))
	}
	if s.SelectSqlQuery == nil {
		invalidParams.Add(request.NewErrParamRequired("SelectSqlQuery"))
	}
	if s.SelectSqlQuery != nil && len(*s.SelectSqlQuery) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SelectSqlQuery", 1))
	}
	if s.ServiceRole == nil {
		invalidParams.Add(request.NewErrParamRequired("ServiceRole"))
	}
	if s.ServiceRole != nil && len(*s.ServiceRole) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ServiceRole", 1))
	}
	if s.SubnetId == nil {
		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
	}
	if s.SubnetId != nil && len(*s.SubnetId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1))
	}
	if s.DatabaseCredentials != nil {
		if err := s.DatabaseCredentials.Validate(); err != nil {
			invalidParams.AddNested("DatabaseCredentials", err.(request.ErrInvalidParams))
		}
	}
	if s.DatabaseInformation != nil {
		if err := s.DatabaseInformation.Validate(); err != nil {
			invalidParams.AddNested("DatabaseInformation", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataRearrangement sets the DataRearrangement field's value.
func (s *RDSDataSpec) SetDataRearrangement(v string) *RDSDataSpec {
	s.DataRearrangement = &v
	return s
}

// SetDataSchema sets the DataSchema field's value.
func (s *RDSDataSpec) SetDataSchema(v string) *RDSDataSpec {
	s.DataSchema = &v
	return s
}

// SetDataSchemaUri sets the DataSchemaUri field's value.
func (s *RDSDataSpec) SetDataSchemaUri(v string) *RDSDataSpec {
	s.DataSchemaUri = &v
	return s
}

// SetDatabaseCredentials sets the DatabaseCredentials field's value.
func (s *RDSDataSpec) SetDatabaseCredentials(v *RDSDatabaseCredentials) *RDSDataSpec {
	s.DatabaseCredentials = v
	return s
}

// SetDatabaseInformation sets the DatabaseInformation field's value.
func (s *RDSDataSpec) SetDatabaseInformation(v *RDSDatabase) *RDSDataSpec {
	s.DatabaseInformation = v
	return s
}

// SetResourceRole sets the ResourceRole field's value.
func (s *RDSDataSpec) SetResourceRole(v string) *RDSDataSpec {
	s.ResourceRole = &v
	return s
}

// SetS3StagingLocation sets the S3StagingLocation field's value.
func (s *RDSDataSpec) SetS3StagingLocation(v string) *RDSDataSpec {
	s.S3StagingLocation = &v
	return s
}

// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *RDSDataSpec) SetSecurityGroupIds(v []*string) *RDSDataSpec {
	s.SecurityGroupIds = v
	return s
}

// SetSelectSqlQuery sets the SelectSqlQuery field's value.
func (s *RDSDataSpec) SetSelectSqlQuery(v string) *RDSDataSpec {
	s.SelectSqlQuery = &v
	return s
}

// SetServiceRole sets the ServiceRole field's value.
func (s *RDSDataSpec) SetServiceRole(v string) *RDSDataSpec {
	s.ServiceRole = &v
	return s
}

// SetSubnetId sets the SubnetId field's value.
func (s *RDSDataSpec) SetSubnetId(v string) *RDSDataSpec {
	s.SubnetId = &v
	return s
}

// The database details of an Amazon RDS database.
type RDSDatabase struct {
	_ struct{} `type:"structure"`

	// The name of a database hosted on an RDS DB instance.
	//
	// DatabaseName is a required field
	DatabaseName *string `min:"1" type:"string" required:"true"`

	// The ID of an RDS DB instance.
	//
	// InstanceIdentifier is a required field
	InstanceIdentifier *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RDSDatabase) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RDSDatabase) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the required/min struct tags declared on RDSDatabase.
func (s *RDSDatabase) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RDSDatabase"}
	if s.DatabaseName == nil {
		invalidParams.Add(request.NewErrParamRequired("DatabaseName"))
	}
	if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1))
	}
	if s.InstanceIdentifier == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceIdentifier"))
	}
	if s.InstanceIdentifier != nil && len(*s.InstanceIdentifier) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("InstanceIdentifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDatabaseName sets the DatabaseName field's value.
func (s *RDSDatabase) SetDatabaseName(v string) *RDSDatabase {
	s.DatabaseName = &v
	return s
}

// SetInstanceIdentifier sets the InstanceIdentifier field's value.
func (s *RDSDatabase) SetInstanceIdentifier(v string) *RDSDatabase {
	s.InstanceIdentifier = &v
	return s
}

// The database credentials to connect to a database on an RDS DB instance.
type RDSDatabaseCredentials struct {
	_ struct{} `type:"structure"`

	// The password to be used by Amazon ML to connect to a database on an RDS DB
	// instance. The password should have sufficient permissions to execute the
	// RDSSelectQuery query.
	//
	// Password is a required field
	Password *string `min:"8" type:"string" required:"true"`

	// The username to be used by Amazon ML to connect to database on an Amazon
	// RDS instance. The username should have sufficient permissions to execute
	// an RDSSelectSqlQuery query.
	//
	// Username is a required field
	Username *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RDSDatabaseCredentials) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RDSDatabaseCredentials) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the struct tags above: both fields are required, the
// password must be at least 8 characters, and the username at least 1.
func (s *RDSDatabaseCredentials) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RDSDatabaseCredentials"}
	if s.Password == nil {
		invalidParams.Add(request.NewErrParamRequired("Password"))
	}
	if s.Password != nil && len(*s.Password) < 8 {
		invalidParams.Add(request.NewErrParamMinLen("Password", 8))
	}
	if s.Username == nil {
		invalidParams.Add(request.NewErrParamRequired("Username"))
	}
	if s.Username != nil && len(*s.Username) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Username", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPassword sets the Password field's value.
func (s *RDSDatabaseCredentials) SetPassword(v string) *RDSDatabaseCredentials {
	s.Password = &v
	return s
}

// SetUsername sets the Username field's value.
func (s *RDSDatabaseCredentials) SetUsername(v string) *RDSDatabaseCredentials {
	s.Username = &v
	return s
}

// The datasource details that are specific to Amazon RDS.
type RDSMetadata struct {
	_ struct{} `type:"structure"`

	// The ID of the Data Pipeline instance that is used to copy data from
	// Amazon RDS to Amazon S3. You can use the ID to find details about the instance
	// in the Data Pipeline console.
	DataPipelineId *string `min:"1" type:"string"`

	// The database details required to connect to an Amazon RDS.
	Database *RDSDatabase `type:"structure"`

	// The username to be used by Amazon ML to connect to database on an Amazon
	// RDS instance. The username should have sufficient permissions to execute
	// an RDSSelectSqlQuery query.
	DatabaseUserName *string `min:"1" type:"string"`

	// The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance
	// to carry out the copy task from Amazon RDS to Amazon S3. For more information,
	// see Role templates (path_to_url
	// for data pipelines.
	ResourceRole *string `min:"1" type:"string"`

	// The SQL query that is supplied during CreateDataSourceFromRDS. Returns only
	// if Verbose is true in GetDataSourceInput.
	SelectSqlQuery *string `min:"1" type:"string"`

	// The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to
	// monitor the progress of the copy task from Amazon RDS to Amazon S3. For more
	// information, see Role templates (path_to_url
	// for data pipelines.
	ServiceRole *string `min:"1" type:"string"`
}

// String returns the string representation
func (s RDSMetadata) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RDSMetadata) GoString() string {
	return s.String()
}

// SetDataPipelineId sets the DataPipelineId field's value.
func (s *RDSMetadata) SetDataPipelineId(v string) *RDSMetadata {
	s.DataPipelineId = &v
	return s
}

// SetDatabase sets the Database field's value.
func (s *RDSMetadata) SetDatabase(v *RDSDatabase) *RDSMetadata {
	s.Database = v
	return s
}

// SetDatabaseUserName sets the DatabaseUserName field's value.
func (s *RDSMetadata) SetDatabaseUserName(v string) *RDSMetadata {
	s.DatabaseUserName = &v
	return s
}

// SetResourceRole sets the ResourceRole field's value.
func (s *RDSMetadata) SetResourceRole(v string) *RDSMetadata {
	s.ResourceRole = &v
	return s
}

// SetSelectSqlQuery sets the SelectSqlQuery field's value.
func (s *RDSMetadata) SetSelectSqlQuery(v string) *RDSMetadata {
	s.SelectSqlQuery = &v
	return s
}

// SetServiceRole sets the ServiceRole field's value.
func (s *RDSMetadata) SetServiceRole(v string) *RDSMetadata {
	s.ServiceRole = &v
	return s
}

// Describes the real-time endpoint information for an MLModel.
type RealtimeEndpointInfo struct {
	_ struct{} `type:"structure"`

	// The time that the request to create the real-time endpoint for the MLModel
	// was received. The time is expressed in epoch time.
	CreatedAt *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The current status of the real-time endpoint for the MLModel. This element
	// can have one of the following values:
	//
	//    * NONE - Endpoint does not exist or was previously deleted.
	//
	//    * READY - Endpoint is ready to be used for real-time predictions.
	//
	//    * UPDATING - Updating/creating the endpoint.
	EndpointStatus *string `type:"string" enum:"RealtimeEndpointStatus"`

	// The URI that specifies where to send real-time prediction requests for the
	// MLModel.
	//
	// Note: the application must wait until the real-time endpoint is ready before
	// using this URI.
	EndpointUrl *string `type:"string"`

	// The maximum processing rate for the real-time endpoint for MLModel, measured
	// in incoming requests per second.
	PeakRequestsPerSecond *int64 `type:"integer"`
}

// String returns the string representation
func (s RealtimeEndpointInfo) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RealtimeEndpointInfo) GoString() string {
	return s.String()
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *RealtimeEndpointInfo) SetCreatedAt(v time.Time) *RealtimeEndpointInfo {
	s.CreatedAt = &v
	return s
}

// SetEndpointStatus sets the EndpointStatus field's value.
func (s *RealtimeEndpointInfo) SetEndpointStatus(v string) *RealtimeEndpointInfo {
	s.EndpointStatus = &v
	return s
}

// SetEndpointUrl sets the EndpointUrl field's value.
func (s *RealtimeEndpointInfo) SetEndpointUrl(v string) *RealtimeEndpointInfo {
	s.EndpointUrl = &v
	return s
}

// SetPeakRequestsPerSecond sets the PeakRequestsPerSecond field's value.
func (s *RealtimeEndpointInfo) SetPeakRequestsPerSecond(v int64) *RealtimeEndpointInfo {
	s.PeakRequestsPerSecond = &v
	return s
}

// Describes the data specification of an Amazon Redshift DataSource.
type RedshiftDataSpec struct {
	_ struct{} `type:"structure"`

	// A JSON string that represents the splitting and rearrangement processing
	// to be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource.
	//
	// There are multiple parameters that control what data is used to create a
	// datasource:
	//
	//    * percentBegin
	//
	//    Use percentBegin to indicate the beginning of the range of the data used
	//    to create the Datasource. If you do not include percentBegin and percentEnd,
	//    Amazon ML includes all of the data when creating the datasource.
	//
	//    * percentEnd
	//
	//    Use percentEnd to indicate the end of the range of the data used to create
	//    the Datasource. If you do not include percentBegin and percentEnd, Amazon
	//    ML includes all of the data when creating the datasource.
	//
	//    * complement
	//
	//    The complement parameter instructs Amazon ML to use the data that is not
	//    included in the range of percentBegin to percentEnd to create a datasource.
	//    The complement parameter is useful if you need to create complementary
	//    datasources for training and evaluation. To create a complementary datasource,
	//    use the same values for percentBegin and percentEnd, along with the complement
	//    parameter.
	//
	//    For example, the following two datasources do not share any data, and can
	//    be used to train and evaluate a model. The first datasource has 25 percent
	//    of the data, and the second one has 75 percent of the data.
	//
	//    Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}}
	//
	//    Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25,
	//    "complement":"true"}}
	//
	//    * strategy
	//
	//    To change how Amazon ML splits the data for a datasource, use the strategy
	//    parameter.
	//
	//    The default value for the strategy parameter is sequential, meaning that
	//    Amazon ML takes all of the data records between the percentBegin and percentEnd
	//    parameters for the datasource, in the order that the records appear in
	//    the input data.
	//
	//    The following two DataRearrangement lines are examples of sequentially
	//    ordered training and evaluation datasources:
	//
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential"}}
	//
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential", "complement":"true"}}
	//
	//    To randomly split the input data into the proportions indicated by the
	//    percentBegin and percentEnd parameters, set the strategy parameter to random
	//    and provide a string that is used as the seed value for the random data
	//    splitting (for example, you can use the S3 path to your data as the random
	//    seed string). If you choose the random split strategy, Amazon ML assigns
	//    each row of data a pseudo-random number between 0 and 100, and then selects
	//    the rows that have an assigned number between percentBegin and percentEnd.
	//    Pseudo-random numbers are assigned using both the input seed string value
	//    and the byte offset as a seed, so changing the data results in a different
	//    split. Any existing ordering is preserved. The random splitting strategy
	//    ensures that variables in the training and evaluation data are distributed
	//    similarly. It is useful in the cases where the input data may have an
	//    implicit sort order, which would otherwise result in training and evaluation
	//    datasources containing non-similar data records.
	//
	//    The following two DataRearrangement lines are examples of non-sequentially
	//    ordered training and evaluation datasources:
	//
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
	//
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
	DataRearrangement *string `type:"string"`

	// A JSON string that represents the schema for an Amazon Redshift DataSource.
	// The DataSchema defines the structure of the observation data in the data
	// file(s) referenced in the DataSource.
	//
	// A DataSchema is not required if you specify a DataSchemaUri.
	//
	// Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
	// have an array of key-value pairs for their value. Use the following format
	// to define your DataSchema.
	//
	// { "version": "1.0",
	//
	// "recordAnnotationFieldName": "F1",
	//
	// "recordWeightFieldName": "F2",
	//
	// "targetFieldName": "F3",
	//
	// "dataFormat": "CSV",
	//
	// "dataFileContainsHeader": true,
	//
	// "attributes": [
	//
	// { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
	// "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
	// "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
	// }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
	// "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
	// } ],
	//
	// "excludedVariableNames": [ "F6" ] }
	DataSchema *string `type:"string"`

	// Describes the schema location for an Amazon Redshift DataSource.
	DataSchemaUri *string `type:"string"`

	// Describes AWS Identity and Access Management (IAM) credentials that are used
	// to connect to the Amazon Redshift database.
	//
	// DatabaseCredentials is a required field
	DatabaseCredentials *RedshiftDatabaseCredentials `type:"structure" required:"true"`

	// Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource.
	//
	// DatabaseInformation is a required field
	DatabaseInformation *RedshiftDatabase `type:"structure" required:"true"`

	// Describes an Amazon S3 location to store the result set of the SelectSqlQuery
	// query.
	//
	// S3StagingLocation is a required field
	S3StagingLocation *string `type:"string" required:"true"`

	// Describes the SQL Query to execute on an Amazon Redshift database for an
	// Amazon Redshift DataSource.
	//
	// SelectSqlQuery is a required field
	SelectSqlQuery *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RedshiftDataSpec) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RedshiftDataSpec) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// This is client-side validation only: it checks required fields, minimum
// lengths, and nested structures before the request is sent.
func (s *RedshiftDataSpec) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RedshiftDataSpec"}
	if s.DatabaseCredentials == nil {
		invalidParams.Add(request.NewErrParamRequired("DatabaseCredentials"))
	}
	if s.DatabaseInformation == nil {
		invalidParams.Add(request.NewErrParamRequired("DatabaseInformation"))
	}
	if s.S3StagingLocation == nil {
		invalidParams.Add(request.NewErrParamRequired("S3StagingLocation"))
	}
	if s.SelectSqlQuery == nil {
		invalidParams.Add(request.NewErrParamRequired("SelectSqlQuery"))
	}
	if s.SelectSqlQuery != nil && len(*s.SelectSqlQuery) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SelectSqlQuery", 1))
	}
	if s.DatabaseCredentials != nil {
		if err := s.DatabaseCredentials.Validate(); err != nil {
			invalidParams.AddNested("DatabaseCredentials", err.(request.ErrInvalidParams))
		}
	}
	if s.DatabaseInformation != nil {
		if err := s.DatabaseInformation.Validate(); err != nil {
			invalidParams.AddNested("DatabaseInformation", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataRearrangement sets the DataRearrangement field's value.
func (s *RedshiftDataSpec) SetDataRearrangement(v string) *RedshiftDataSpec {
	s.DataRearrangement = &v
	return s
}

// SetDataSchema sets the DataSchema field's value.
func (s *RedshiftDataSpec) SetDataSchema(v string) *RedshiftDataSpec {
	s.DataSchema = &v
	return s
}

// SetDataSchemaUri sets the DataSchemaUri field's value.
func (s *RedshiftDataSpec) SetDataSchemaUri(v string) *RedshiftDataSpec {
	s.DataSchemaUri = &v
	return s
}

// SetDatabaseCredentials sets the DatabaseCredentials field's value.
func (s *RedshiftDataSpec) SetDatabaseCredentials(v *RedshiftDatabaseCredentials) *RedshiftDataSpec {
	s.DatabaseCredentials = v
	return s
}

// SetDatabaseInformation sets the DatabaseInformation field's value.
func (s *RedshiftDataSpec) SetDatabaseInformation(v *RedshiftDatabase) *RedshiftDataSpec {
	s.DatabaseInformation = v
	return s
}

// SetS3StagingLocation sets the S3StagingLocation field's value.
func (s *RedshiftDataSpec) SetS3StagingLocation(v string) *RedshiftDataSpec {
	s.S3StagingLocation = &v
	return s
}

// SetSelectSqlQuery sets the SelectSqlQuery field's value.
func (s *RedshiftDataSpec) SetSelectSqlQuery(v string) *RedshiftDataSpec {
	s.SelectSqlQuery = &v
	return s
}

// Describes the database details required to connect to an Amazon Redshift
// database.
type RedshiftDatabase struct {
	_ struct{} `type:"structure"`

	// The ID of an Amazon Redshift cluster.
	//
	// ClusterIdentifier is a required field
	ClusterIdentifier *string `min:"1" type:"string" required:"true"`

	// The name of a database hosted on an Amazon Redshift cluster.
	//
	// DatabaseName is a required field
	DatabaseName *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RedshiftDatabase) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RedshiftDatabase) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDatabase) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RedshiftDatabase"}
	if s.ClusterIdentifier == nil {
		invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier"))
	}
	if s.ClusterIdentifier != nil && len(*s.ClusterIdentifier) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClusterIdentifier", 1))
	}
	if s.DatabaseName == nil {
		invalidParams.Add(request.NewErrParamRequired("DatabaseName"))
	}
	if s.DatabaseName != nil && len(*s.DatabaseName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClusterIdentifier sets the ClusterIdentifier field's value.
func (s *RedshiftDatabase) SetClusterIdentifier(v string) *RedshiftDatabase {
	s.ClusterIdentifier = &v
	return s
}

// SetDatabaseName sets the DatabaseName field's value.
func (s *RedshiftDatabase) SetDatabaseName(v string) *RedshiftDatabase {
	s.DatabaseName = &v
	return s
}

// Describes the database credentials for connecting to a database on an Amazon
// Redshift cluster.
type RedshiftDatabaseCredentials struct {
	_ struct{} `type:"structure"`

	// A password to be used by Amazon ML to connect to a database on an Amazon
	// Redshift cluster. The password should have sufficient permissions to execute
	// a RedshiftSelectSqlQuery query. The password should be valid for an Amazon
	// Redshift USER (path_to_url
	//
	// Password is a required field
	Password *string `min:"8" type:"string" required:"true"`

	// A username to be used by Amazon Machine Learning (Amazon ML) to connect to
	// a database on an Amazon Redshift cluster. The username should have sufficient
	// permissions to execute the RedshiftSelectSqlQuery query. The username should
	// be valid for an Amazon Redshift USER (path_to_url
	//
	// Username is a required field
	Username *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RedshiftDatabaseCredentials) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RedshiftDatabaseCredentials) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDatabaseCredentials) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RedshiftDatabaseCredentials"}
	if s.Password == nil {
		invalidParams.Add(request.NewErrParamRequired("Password"))
	}
	if s.Password != nil && len(*s.Password) < 8 {
		invalidParams.Add(request.NewErrParamMinLen("Password", 8))
	}
	if s.Username == nil {
		invalidParams.Add(request.NewErrParamRequired("Username"))
	}
	if s.Username != nil && len(*s.Username) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Username", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPassword sets the Password field's value.
func (s *RedshiftDatabaseCredentials) SetPassword(v string) *RedshiftDatabaseCredentials {
	s.Password = &v
	return s
}

// SetUsername sets the Username field's value.
func (s *RedshiftDatabaseCredentials) SetUsername(v string) *RedshiftDatabaseCredentials {
	s.Username = &v
	return s
}

// Describes the DataSource details specific to Amazon Redshift.
type RedshiftMetadata struct {
	_ struct{} `type:"structure"`

	// A username to be used by Amazon Machine Learning (Amazon ML) to connect to
	// a database on an Amazon Redshift cluster. The username should have sufficient
	// permissions to execute the RedshiftSelectSqlQuery query. The username should
	// be valid for an Amazon Redshift USER (path_to_url
	DatabaseUserName *string `min:"1" type:"string"`

	// Describes the database details required to connect to an Amazon Redshift
	// database.
	RedshiftDatabase *RedshiftDatabase `type:"structure"`

	// The SQL query that is specified during CreateDataSourceFromRedshift. Returns
	// only if Verbose is true in GetDataSourceInput.
	SelectSqlQuery *string `min:"1" type:"string"`
}

// String returns the string representation
func (s RedshiftMetadata) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RedshiftMetadata) GoString() string {
	return s.String()
}

// SetDatabaseUserName sets the DatabaseUserName field's value.
func (s *RedshiftMetadata) SetDatabaseUserName(v string) *RedshiftMetadata {
	s.DatabaseUserName = &v
	return s
}

// SetRedshiftDatabase sets the RedshiftDatabase field's value.
func (s *RedshiftMetadata) SetRedshiftDatabase(v *RedshiftDatabase) *RedshiftMetadata {
	s.RedshiftDatabase = v
	return s
}

// SetSelectSqlQuery sets the SelectSqlQuery field's value.
func (s *RedshiftMetadata) SetSelectSqlQuery(v string) *RedshiftMetadata {
	s.SelectSqlQuery = &v
	return s
}

// Describes the data specification of a DataSource.
type S3DataSpec struct {
	_ struct{} `type:"structure"`

	// The location of the data file(s) used by a DataSource. The URI specifies
	// a data file or an Amazon Simple Storage Service (Amazon S3) directory or
	// bucket containing data files.
	//
	// DataLocationS3 is a required field
	DataLocationS3 *string `type:"string" required:"true"`

	// A JSON string that represents the splitting and rearrangement processing
	// to be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource.
	//
	// There are multiple parameters that control what data is used to create a
	// datasource:
	//
	//    * percentBegin
	//
	//    Use percentBegin to indicate the beginning of the range of the data used
	//    to create the Datasource. If you do not include percentBegin and percentEnd,
	//    Amazon ML includes all of the data when creating the datasource.
	//
	//    * percentEnd
	//
	//    Use percentEnd to indicate the end of the range of the data used to create
	//    the Datasource. If you do not include percentBegin and percentEnd, Amazon
	//    ML includes all of the data when creating the datasource.
	//
	//    * complement
	//
	//    The complement parameter instructs Amazon ML to use the data that is not
	//    included in the range of percentBegin to percentEnd to create a datasource.
	//    The complement parameter is useful if you need to create complementary
	//    datasources for training and evaluation. To create a complementary datasource,
	//    use the same values for percentBegin and percentEnd, along with the complement
	//    parameter.
	//
	//    For example, the following two datasources do not share any data, and can
	//    be used to train and evaluate a model. The first datasource has 25 percent
	//    of the data, and the second one has 75 percent of the data.
	//
	//    Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}}
	//
	//    Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25,
	//    "complement":"true"}}
	//
	//    * strategy
	//
	//    To change how Amazon ML splits the data for a datasource, use the strategy
	//    parameter.
	//
	//    The default value for the strategy parameter is sequential, meaning that
	//    Amazon ML takes all of the data records between the percentBegin and percentEnd
	//    parameters for the datasource, in the order that the records appear in
	//    the input data.
	//
	//    The following two DataRearrangement lines are examples of sequentially
	//    ordered training and evaluation datasources:
	//
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential"}}
	//
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"sequential", "complement":"true"}}
	//
	//    To randomly split the input data into the proportions indicated by the
	//    percentBegin and percentEnd parameters, set the strategy parameter to random
	//    and provide a string that is used as the seed value for the random data
	//    splitting (for example, you can use the S3 path to your data as the random
	//    seed string). If you choose the random split strategy, Amazon ML assigns
	//    each row of data a pseudo-random number between 0 and 100, and then selects
	//    the rows that have an assigned number between percentBegin and percentEnd.
	//    Pseudo-random numbers are assigned using both the input seed string value
	//    and the byte offset as a seed, so changing the data results in a different
	//    split. Any existing ordering is preserved. The random splitting strategy
	//    ensures that variables in the training and evaluation data are distributed
	//    similarly. It is useful in the cases where the input data may have an
	//    implicit sort order, which would otherwise result in training and evaluation
	//    datasources containing non-similar data records.
	//
	//    The following two DataRearrangement lines are examples of non-sequentially
	//    ordered training and evaluation datasources:
	//
	//    Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
	//
	//    Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	//    "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
	DataRearrangement *string `type:"string"`

	// A JSON string that represents the schema for an Amazon S3 DataSource. The
	// DataSchema defines the structure of the observation data in the data file(s)
	// referenced in the DataSource.
	//
	// You must provide either the DataSchema or the DataSchemaLocationS3.
	//
	// Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames
	// have an array of key-value pairs for their value. Use the following format
	// to define your DataSchema.
	//
	// { "version": "1.0",
	//
	// "recordAnnotationFieldName": "F1",
	//
	// "recordWeightFieldName": "F2",
	//
	// "targetFieldName": "F3",
	//
	// "dataFormat": "CSV",
	//
	// "dataFileContainsHeader": true,
	//
	// "attributes": [
	//
	// { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType":
	// "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName":
	// "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL"
	// }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType":
	// "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE"
	// } ],
	//
	// "excludedVariableNames": [ "F6" ] }
	DataSchema *string `type:"string"`

	// Describes the schema location in Amazon S3. You must provide either the DataSchema
	// or the DataSchemaLocationS3.
	DataSchemaLocationS3 *string `type:"string"`
}

// String returns the string representation
func (s S3DataSpec) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s S3DataSpec) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *S3DataSpec) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "S3DataSpec"}
	if s.DataLocationS3 == nil {
		invalidParams.Add(request.NewErrParamRequired("DataLocationS3"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataLocationS3 sets the DataLocationS3 field's value.
func (s *S3DataSpec) SetDataLocationS3(v string) *S3DataSpec {
	s.DataLocationS3 = &v
	return s
}

// SetDataRearrangement sets the DataRearrangement field's value.
func (s *S3DataSpec) SetDataRearrangement(v string) *S3DataSpec {
	s.DataRearrangement = &v
	return s
}

// SetDataSchema sets the DataSchema field's value.
func (s *S3DataSpec) SetDataSchema(v string) *S3DataSpec {
	s.DataSchema = &v
	return s
}

// SetDataSchemaLocationS3 sets the DataSchemaLocationS3 field's value.
func (s *S3DataSpec) SetDataSchemaLocationS3(v string) *S3DataSpec {
	s.DataSchemaLocationS3 = &v
	return s
}

// A custom key-value pair associated with an ML object, such as an ML model.
type Tag struct {
	_ struct{} `type:"structure"`

	// A unique identifier for the tag. Valid characters include Unicode letters,
	// digits, white space, _, ., /, =, +, -, %, and @.
	Key *string `min:"1" type:"string"`

	// An optional string, typically used to describe or define the tag. Valid characters
	// include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.
	Value *string `type:"string"`
}

// String returns the string representation
func (s Tag) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Tag) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Tag) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Tag"}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKey sets the Key field's value.
func (s *Tag) SetKey(v string) *Tag {
	s.Key = &v
	return s
}

// SetValue sets the Value field's value.
func (s *Tag) SetValue(v string) *Tag {
	s.Value = &v
	return s
}

type UpdateBatchPredictionInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the BatchPrediction during creation.
	//
	// BatchPredictionId is a required field
	BatchPredictionId *string `min:"1" type:"string" required:"true"`

	// A new user-supplied name or description of the BatchPrediction.
	//
	// BatchPredictionName is a required field
	BatchPredictionName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateBatchPredictionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateBatchPredictionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateBatchPredictionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateBatchPredictionInput"}
	if s.BatchPredictionId == nil {
		invalidParams.Add(request.NewErrParamRequired("BatchPredictionId"))
	}
	if s.BatchPredictionId != nil && len(*s.BatchPredictionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("BatchPredictionId", 1))
	}
	if s.BatchPredictionName == nil {
		invalidParams.Add(request.NewErrParamRequired("BatchPredictionName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *UpdateBatchPredictionInput) SetBatchPredictionId(v string) *UpdateBatchPredictionInput {
	s.BatchPredictionId = &v
	return s
}

// SetBatchPredictionName sets the BatchPredictionName field's value.
func (s *UpdateBatchPredictionInput) SetBatchPredictionName(v string) *UpdateBatchPredictionInput {
	s.BatchPredictionName = &v
	return s
}

// Represents the output of an UpdateBatchPrediction operation.
//
// You can see the updated content by using the GetBatchPrediction operation.
type UpdateBatchPredictionOutput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the BatchPrediction during creation. This value should
	// be identical to the value of the BatchPredictionId in the request.
	BatchPredictionId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s UpdateBatchPredictionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateBatchPredictionOutput) GoString() string {
	return s.String()
}

// SetBatchPredictionId sets the BatchPredictionId field's value.
func (s *UpdateBatchPredictionOutput) SetBatchPredictionId(v string) *UpdateBatchPredictionOutput {
	s.BatchPredictionId = &v
	return s
}

type UpdateDataSourceInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the DataSource during creation.
	//
	// DataSourceId is a required field
	DataSourceId *string `min:"1" type:"string" required:"true"`

	// A new user-supplied name or description of the DataSource that will replace
	// the current description.
	//
	// DataSourceName is a required field
	DataSourceName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateDataSourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateDataSourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateDataSourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourceInput"}
	if s.DataSourceId == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceId"))
	}
	if s.DataSourceId != nil && len(*s.DataSourceId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1))
	}
	if s.DataSourceName == nil {
		invalidParams.Add(request.NewErrParamRequired("DataSourceName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *UpdateDataSourceInput) SetDataSourceId(v string) *UpdateDataSourceInput {
	s.DataSourceId = &v
	return s
}

// SetDataSourceName sets the DataSourceName field's value.
func (s *UpdateDataSourceInput) SetDataSourceName(v string) *UpdateDataSourceInput {
	s.DataSourceName = &v
	return s
}

// Represents the output of an UpdateDataSource operation.
//
// You can see the updated content by using the GetDataSource operation.
type UpdateDataSourceOutput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the DataSource during creation. This value should be identical
	// to the value of the DataSourceID in the request.
	DataSourceId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s UpdateDataSourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateDataSourceOutput) GoString() string {
	return s.String()
}

// SetDataSourceId sets the DataSourceId field's value.
func (s *UpdateDataSourceOutput) SetDataSourceId(v string) *UpdateDataSourceOutput {
	s.DataSourceId = &v
	return s
}

type UpdateEvaluationInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the Evaluation during creation.
	//
	// EvaluationId is a required field
	EvaluationId *string `min:"1" type:"string" required:"true"`

	// A new user-supplied name or description of the Evaluation that will replace
	// the current content.
	//
	// EvaluationName is a required field
	EvaluationName *string `type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateEvaluationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateEvaluationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateEvaluationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateEvaluationInput"}
	if s.EvaluationId == nil {
		invalidParams.Add(request.NewErrParamRequired("EvaluationId"))
	}
	if s.EvaluationId != nil && len(*s.EvaluationId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("EvaluationId", 1))
	}
	if s.EvaluationName == nil {
		invalidParams.Add(request.NewErrParamRequired("EvaluationName"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *UpdateEvaluationInput) SetEvaluationId(v string) *UpdateEvaluationInput {
	s.EvaluationId = &v
	return s
}

// SetEvaluationName sets the EvaluationName field's value.
func (s *UpdateEvaluationInput) SetEvaluationName(v string) *UpdateEvaluationInput {
	s.EvaluationName = &v
	return s
}

// Represents the output of an UpdateEvaluation operation.
//
// You can see the updated content by using the GetEvaluation operation.
type UpdateEvaluationOutput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the Evaluation during creation. This value should be identical
	// to the value of the EvaluationId in the request.
	EvaluationId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s UpdateEvaluationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateEvaluationOutput) GoString() string {
	return s.String()
}

// SetEvaluationId sets the EvaluationId field's value.
func (s *UpdateEvaluationOutput) SetEvaluationId(v string) *UpdateEvaluationOutput {
	s.EvaluationId = &v
	return s
}

type UpdateMLModelInput struct {
	_ struct{} `type:"structure"`

	// The ID assigned to the MLModel during creation.
	//
	// MLModelId is a required field
	MLModelId *string `min:"1" type:"string" required:"true"`

	// A user-supplied name or description of the MLModel.
	MLModelName *string `type:"string"`

	// The ScoreThreshold used in binary classification MLModel that marks the boundary
	// between a positive prediction and a negative prediction.
	//
	// Output values greater than or equal to the ScoreThreshold receive a positive
	// result from the MLModel, such as true. Output values less than the ScoreThreshold
	// receive a negative response from the MLModel, such as false.
	ScoreThreshold *float64 `type:"float"`
}

// String returns the string representation
func (s UpdateMLModelInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateMLModelInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateMLModelInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateMLModelInput"} if s.MLModelId == nil { invalidParams.Add(request.NewErrParamRequired("MLModelId")) } if s.MLModelId != nil && len(*s.MLModelId) < 1 { invalidParams.Add(request.NewErrParamMinLen("MLModelId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetMLModelId sets the MLModelId field's value. func (s *UpdateMLModelInput) SetMLModelId(v string) *UpdateMLModelInput { s.MLModelId = &v return s } // SetMLModelName sets the MLModelName field's value. func (s *UpdateMLModelInput) SetMLModelName(v string) *UpdateMLModelInput { s.MLModelName = &v return s } // SetScoreThreshold sets the ScoreThreshold field's value. func (s *UpdateMLModelInput) SetScoreThreshold(v float64) *UpdateMLModelInput { s.ScoreThreshold = &v return s } // Represents the output of an UpdateMLModel operation. // // You can see the updated content by using the GetMLModel operation. type UpdateMLModelOutput struct { _ struct{} `type:"structure"` // The ID assigned to the MLModel during creation. This value should be identical // to the value of the MLModelID in the request. MLModelId *string `min:"1" type:"string"` } // String returns the string representation func (s UpdateMLModelOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateMLModelOutput) GoString() string { return s.String() } // SetMLModelId sets the MLModelId field's value. func (s *UpdateMLModelOutput) SetMLModelId(v string) *UpdateMLModelOutput { s.MLModelId = &v return s } // The function used to train an MLModel. Training choices supported by Amazon // ML include the following: // // * SGD - Stochastic Gradient Descent. // * RandomForest - Random forest of decision trees. const ( // AlgorithmSgd is a Algorithm enum value AlgorithmSgd = "sgd" ) // A list of the variables to use in searching or filtering BatchPrediction. 
// // * CreatedAt - Sets the search criteria to BatchPrediction creation date. // // * Status - Sets the search criteria to BatchPrediction status. // * Name - Sets the search criteria to the contents of BatchPredictionName. // // * IAMUser - Sets the search criteria to the user account that invoked // the BatchPrediction creation. // * MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction. // // * DataSourceId - Sets the search criteria to the DataSource used in the // BatchPrediction. // * DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. // The URL can identify either a file or an Amazon Simple Storage Service // (Amazon S3) bucket or directory. const ( // BatchPredictionFilterVariableCreatedAt is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableCreatedAt = "CreatedAt" // BatchPredictionFilterVariableLastUpdatedAt is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableLastUpdatedAt = "LastUpdatedAt" // BatchPredictionFilterVariableStatus is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableStatus = "Status" // BatchPredictionFilterVariableName is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableName = "Name" // BatchPredictionFilterVariableIamuser is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableIamuser = "IAMUser" // BatchPredictionFilterVariableMlmodelId is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableMlmodelId = "MLModelId" // BatchPredictionFilterVariableDataSourceId is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableDataSourceId = "DataSourceId" // BatchPredictionFilterVariableDataUri is a BatchPredictionFilterVariable enum value BatchPredictionFilterVariableDataUri = "DataURI" ) // A list of the variables to use in searching or filtering DataSource. // // * CreatedAt - Sets the search criteria to DataSource creation date. 
//
//    * Status - Sets the search criteria to DataSource status.
//
//    * Name - Sets the search criteria to the contents of DataSourceName.
//
//    * DataUri - Sets the search criteria to the URI of data files used to
//    create the DataSource. The URI can identify either a file or an Amazon
//    Simple Storage Service (Amazon S3) bucket or directory.
//
//    * IAMUser - Sets the search criteria to the user account that invoked
//    the DataSource creation.
//
// Note: The variable names should match the variable names in the DataSource.
const (
	// DataSourceFilterVariableCreatedAt is a DataSourceFilterVariable enum value
	DataSourceFilterVariableCreatedAt = "CreatedAt"

	// DataSourceFilterVariableLastUpdatedAt is a DataSourceFilterVariable enum value
	DataSourceFilterVariableLastUpdatedAt = "LastUpdatedAt"

	// DataSourceFilterVariableStatus is a DataSourceFilterVariable enum value
	DataSourceFilterVariableStatus = "Status"

	// DataSourceFilterVariableName is a DataSourceFilterVariable enum value
	DataSourceFilterVariableName = "Name"

	// DataSourceFilterVariableDataLocationS3 is a DataSourceFilterVariable enum value
	DataSourceFilterVariableDataLocationS3 = "DataLocationS3"

	// DataSourceFilterVariableIamuser is a DataSourceFilterVariable enum value
	DataSourceFilterVariableIamuser = "IAMUser"
)

// Contains the key values of DetailsMap:
//
//    * PredictiveModelType - Indicates the type of the MLModel.
//
//    * Algorithm - Indicates the algorithm that was used for the MLModel.
//
// (The second bullet above had lost its comment marker in the original text,
// which left bare identifiers at file scope and would not compile.)
const (
	// DetailsAttributesPredictiveModelType is a DetailsAttributes enum value
	DetailsAttributesPredictiveModelType = "PredictiveModelType"

	// DetailsAttributesAlgorithm is a DetailsAttributes enum value
	DetailsAttributesAlgorithm = "Algorithm"
)

// Object status with the following possible values:
//
//    * PENDING
//
//    * INPROGRESS
//
//    * FAILED
//
//    * COMPLETED
//
//    * DELETED
const (
	// EntityStatusPending is a EntityStatus enum value
	EntityStatusPending = "PENDING"

	// EntityStatusInprogress is a EntityStatus enum value
	EntityStatusInprogress = "INPROGRESS"

	// EntityStatusFailed is a EntityStatus enum value
	EntityStatusFailed = "FAILED"

	// EntityStatusCompleted is a EntityStatus enum value
	EntityStatusCompleted = "COMPLETED"

	// EntityStatusDeleted is a EntityStatus enum value
	EntityStatusDeleted = "DELETED"
)

// A list of the variables to use in searching or filtering Evaluation.
//
//    * CreatedAt - Sets the search criteria to Evaluation creation date.
//
//    * Status - Sets the search criteria to Evaluation status.
//
//    * Name - Sets the search criteria to the contents of EvaluationName.
//
//    * IAMUser - Sets the search criteria to the user account that invoked
//    an evaluation.
//
//    * MLModelId - Sets the search criteria to the Predictor that was evaluated.
//
//    * DataSourceId - Sets the search criteria to the DataSource used in evaluation.
//
//    * DataUri - Sets the search criteria to the data file(s) used in evaluation.
//    The URL can identify either a file or an Amazon Simple Storage Service
//    (Amazon S3) bucket or directory.
const (
	// EvaluationFilterVariableCreatedAt is a EvaluationFilterVariable enum value
	EvaluationFilterVariableCreatedAt = "CreatedAt"

	// EvaluationFilterVariableLastUpdatedAt is a EvaluationFilterVariable enum value
	EvaluationFilterVariableLastUpdatedAt = "LastUpdatedAt"

	// EvaluationFilterVariableStatus is a EvaluationFilterVariable enum value
	EvaluationFilterVariableStatus = "Status"

	// EvaluationFilterVariableName is a EvaluationFilterVariable enum value
	EvaluationFilterVariableName = "Name"

	// EvaluationFilterVariableIamuser is a EvaluationFilterVariable enum value
	EvaluationFilterVariableIamuser = "IAMUser"

	// EvaluationFilterVariableMlmodelId is a EvaluationFilterVariable enum value
	EvaluationFilterVariableMlmodelId = "MLModelId"

	// EvaluationFilterVariableDataSourceId is a EvaluationFilterVariable enum value
	EvaluationFilterVariableDataSourceId = "DataSourceId"

	// EvaluationFilterVariableDataUri is a EvaluationFilterVariable enum value
	EvaluationFilterVariableDataUri = "DataURI"
)

// A list of the variables to use in searching or filtering MLModel.
const (
	// MLModelFilterVariableCreatedAt is a MLModelFilterVariable enum value
	MLModelFilterVariableCreatedAt = "CreatedAt"

	// MLModelFilterVariableLastUpdatedAt is a MLModelFilterVariable enum value
	MLModelFilterVariableLastUpdatedAt = "LastUpdatedAt"

	// MLModelFilterVariableStatus is a MLModelFilterVariable enum value
	MLModelFilterVariableStatus = "Status"

	// MLModelFilterVariableName is a MLModelFilterVariable enum value
	MLModelFilterVariableName = "Name"

	// MLModelFilterVariableIamuser is a MLModelFilterVariable enum value
	MLModelFilterVariableIamuser = "IAMUser"

	// MLModelFilterVariableTrainingDataSourceId is a MLModelFilterVariable enum value
	MLModelFilterVariableTrainingDataSourceId = "TrainingDataSourceId"

	// MLModelFilterVariableRealtimeEndpointStatus is a MLModelFilterVariable enum value
	MLModelFilterVariableRealtimeEndpointStatus = "RealtimeEndpointStatus"

	// MLModelFilterVariableMlmodelType is a MLModelFilterVariable enum value
	MLModelFilterVariableMlmodelType = "MLModelType"

	// MLModelFilterVariableAlgorithm is a MLModelFilterVariable enum value
	MLModelFilterVariableAlgorithm = "Algorithm"

	// MLModelFilterVariableTrainingDataUri is a MLModelFilterVariable enum value
	MLModelFilterVariableTrainingDataUri = "TrainingDataURI"
)

// The category of supervised learning that an MLModel belongs to.
const (
	// MLModelTypeRegression is a MLModelType enum value
	MLModelTypeRegression = "REGRESSION"

	// MLModelTypeBinary is a MLModelType enum value
	MLModelTypeBinary = "BINARY"

	// MLModelTypeMulticlass is a MLModelType enum value
	MLModelTypeMulticlass = "MULTICLASS"
)

// The status of an MLModel's real-time endpoint.
const (
	// RealtimeEndpointStatusNone is a RealtimeEndpointStatus enum value
	RealtimeEndpointStatusNone = "NONE"

	// RealtimeEndpointStatusReady is a RealtimeEndpointStatus enum value
	RealtimeEndpointStatusReady = "READY"

	// RealtimeEndpointStatusUpdating is a RealtimeEndpointStatus enum value
	RealtimeEndpointStatusUpdating = "UPDATING"

	// RealtimeEndpointStatusFailed is a RealtimeEndpointStatus enum value
	RealtimeEndpointStatusFailed = "FAILED"
)

// The sort order specified in a listing condition. Possible values include
// the following:
//
//    * asc - Present the information in ascending order (from A-Z).
//
//    * dsc - Present the information in descending order (from Z-A).
const (
	// SortOrderAsc is a SortOrder enum value
	SortOrderAsc = "asc"

	// SortOrderDsc is a SortOrder enum value
	SortOrderDsc = "dsc"
)

// The resource types that can carry tags.
const (
	// TaggableResourceTypeBatchPrediction is a TaggableResourceType enum value
	TaggableResourceTypeBatchPrediction = "BatchPrediction"

	// TaggableResourceTypeDataSource is a TaggableResourceType enum value
	TaggableResourceTypeDataSource = "DataSource"

	// TaggableResourceTypeEvaluation is a TaggableResourceType enum value
	TaggableResourceTypeEvaluation = "Evaluation"

	// TaggableResourceTypeMlmodel is a TaggableResourceType enum value
	TaggableResourceTypeMlmodel = "MLModel"
)
```
```php
<?php

namespace App\Enums;

use Exception;
use Illuminate\Validation\Rule;

/**
 * Enum-style value object describing where a payment originated.
 *
 * Instances are interned: values() builds one object per source and every
 * accessor (bank(), fromSource(), ...) returns those shared instances.
 */
class PaymentSource
{
    private const BANK = 'bank';
    private const CASH = 'cash';
    private const INTERCOMPANY = 'intercompany'; // Mellemregning (intercompany account)
    private const EXPENSES = 'expenses'; // Udlæg (out-of-pocket expenses)

    /**
     * Lazily built cache of all instances, keyed by source constant.
     *
     * @var PaymentSource[]|null
     */
    private static $values = null;

    /**
     * Machine-readable value, e.g. 'bank'.
     *
     * @var string
     */
    private $source;

    /**
     * Human-readable label, e.g. 'Bank'.
     * NOTE(review): may be null when constructed directly; getDisplayValue()
     * declares a non-nullable string return — confirm callers never rely on
     * the null default.
     *
     * @var string|null
     */
    private $displayValue;

    public function __construct(string $source, string $displayValue = null)
    {
        $this->source = $source;
        $this->displayValue = $displayValue;
    }

    /**
     * Resolve an instance from its machine value.
     *
     * @param string $source
     * @return PaymentSource
     * @throws Exception when $source matches no known payment source
     */
    public static function fromSource(string $source): PaymentSource
    {
        foreach (self::values() as $paymentSource) {
            if ($paymentSource->getSource() === $source) {
                return $paymentSource;
            }
        }

        // Fixed copy-paste: the message previously said "control status".
        throw new Exception('Unknown payment source: ' . $source);
    }

    /**
     * Resolve an instance from its display label.
     *
     * @param string $displayValue
     * @return PaymentSource
     * @throws Exception when $displayValue matches no known payment source
     */
    public static function fromDisplayValue($displayValue)
    {
        foreach (self::values() as $paymentSource) {
            if ($paymentSource->getDisplayValue() === $displayValue) {
                return $paymentSource;
            }
        }

        // Fixed copy-paste: the message previously said "control status".
        throw new Exception('Unknown payment source display value: ' . $displayValue);
    }

    /**
     * All known payment sources, keyed by source constant.
     *
     * @return PaymentSource[]
     */
    public static function values(): array
    {
        if (is_null(self::$values)) {
            self::$values = [
                self::BANK => new PaymentSource(self::BANK, 'Bank'),
                self::CASH => new PaymentSource(self::CASH, 'Cash'),
                self::EXPENSES => new PaymentSource(self::EXPENSES, 'Expenses'),
                self::INTERCOMPANY => new PaymentSource(self::INTERCOMPANY, 'Intercompany')
            ];
        }

        return self::$values;
    }

    /**
     * @return PaymentSource
     */
    public static function bank(): PaymentSource
    {
        return self::values()[self::BANK];
    }

    /**
     * @return PaymentSource
     */
    public static function cash(): PaymentSource
    {
        return self::values()[self::CASH];
    }

    /**
     * @return PaymentSource
     */
    public static function intercompany(): PaymentSource
    {
        return self::values()[self::INTERCOMPANY];
    }

    /**
     * @return PaymentSource
     */
    public static function expenses(): PaymentSource
    {
        return self::values()[self::EXPENSES];
    }

    /**
     * Validation rule accepting any known source value.
     *
     * BUG FIX: array_column() can only pull public object properties (or use
     * __get/__isset, which this class does not define), so the previous
     * array_column(self::values(), "source") yielded an empty list and
     * Rule::in() rejected every input. Read through the public getter instead.
     */
    public static function validationRules()
    {
        $sources = array_map(
            static function (PaymentSource $paymentSource) {
                return $paymentSource->getSource();
            },
            array_values(self::values())
        );

        return Rule::in($sources);
    }

    /**
     * @return string
     */
    public function getSource(): string
    {
        return $this->source;
    }

    /**
     * @return string
     */
    public function getDisplayValue(): string
    {
        return $this->displayValue;
    }

    /**
     * @return string
     */
    public function __toString()
    {
        return (string) $this->source;
    }
}
```
```javascript var _slicedToArray = function () { function sliceIterator(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"]) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } return function (arr, i) { if (Array.isArray(arr)) { return arr; } else if (Symbol.iterator in Object(arr)) { return sliceIterator(arr, i); } else { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } }; }(); var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } import { Map, List, fromJS } from 'immutable'; import { Group, Layer, Hole, Vertex } from './export'; import { IDBroker, NameGenerator, GeometryUtils, SnapUtils, SnapSceneUtils } from '../utils/export'; import { MODE_IDLE, MODE_WAITING_DRAWING_LINE, MODE_DRAWING_LINE, MODE_DRAGGING_LINE } from '../constants'; var Line = function () { function Line() { _classCallCheck(this, Line); } _createClass(Line, null, [{ key: 'create', value: function create(state, layerID, type, x0, y0, x1, y1, properties) { var lineID = IDBroker.acquireID(); var _Vertex$add = Vertex.add(state, layerID, x0, y0, 'lines', 
lineID), stateV0 = _Vertex$add.updatedState, v0 = _Vertex$add.vertex; var _Vertex$add2 = Vertex.add(stateV0, layerID, x1, y1, 'lines', lineID), stateV1 = _Vertex$add2.updatedState, v1 = _Vertex$add2.vertex; state = stateV1; var line = state.catalog.factoryElement(type, { id: lineID, name: NameGenerator.generateName('lines', state.catalog.getIn(['elements', type, 'info', 'title'])), vertices: new List([v0.id, v1.id]), type: type }, properties); state = state.setIn(['scene', 'layers', layerID, 'lines', lineID], line); return { updatedState: state, line: line }; } }, { key: 'select', value: function select(state, layerID, lineID) { state = Layer.select(state, layerID).updatedState; var line = state.getIn(['scene', 'layers', layerID, 'lines', lineID]); state = Layer.selectElement(state, layerID, 'lines', lineID).updatedState; state = Layer.selectElement(state, layerID, 'vertices', line.vertices.get(0)).updatedState; state = Layer.selectElement(state, layerID, 'vertices', line.vertices.get(1)).updatedState; return { updatedState: state }; } }, { key: 'remove', value: function remove(state, layerID, lineID) { var line = state.getIn(['scene', 'layers', layerID, 'lines', lineID]); if (line) { state = this.unselect(state, layerID, lineID).updatedState; line.holes.forEach(function (holeID) { return state = Hole.remove(state, layerID, holeID).updatedState; }); state = Layer.removeElement(state, layerID, 'lines', lineID).updatedState; line.vertices.forEach(function (vertexID) { return state = Vertex.remove(state, layerID, vertexID, 'lines', lineID).updatedState; }); state.getIn(['scene', 'groups']).forEach(function (group) { return state = Group.removeElement(state, group.id, layerID, 'lines', lineID).updatedState; }); } return { updatedState: state }; } }, { key: 'unselect', value: function unselect(state, layerID, lineID) { var line = state.getIn(['scene', 'layers', layerID, 'lines', lineID]); if (line) { state = Layer.unselect(state, layerID, 'vertices', 
line.vertices.get(0)).updatedState; state = Layer.unselect(state, layerID, 'vertices', line.vertices.get(1)).updatedState; state = Layer.unselect(state, layerID, 'lines', lineID).updatedState; } return { updatedState: state }; } }, { key: 'split', value: function split(state, layerID, lineID, x, y) { var line = state.getIn(['scene', 'layers', layerID, 'lines', lineID]); var v0 = state.getIn(['scene', 'layers', layerID, 'vertices', line.vertices.get(0)]); var v1 = state.getIn(['scene', 'layers', layerID, 'vertices', line.vertices.get(1)]); var x0 = v0.x, y0 = v0.y; var x1 = v1.x, y1 = v1.y; var _Line$create = Line.create(state, layerID, line.type, x0, y0, x, y, line.get('properties')), stateL1 = _Line$create.updatedState, line0 = _Line$create.line; var _Line$create2 = Line.create(stateL1, layerID, line.type, x1, y1, x, y, line.get('properties')), stateL2 = _Line$create2.updatedState, line1 = _Line$create2.line; state = stateL2; var splitPointOffset = GeometryUtils.pointPositionOnLineSegment(x0, y0, x1, y1, x, y); var minVertex = GeometryUtils.minVertex(v0, v1); line.holes.forEach(function (holeID) { var hole = state.getIn(['scene', 'layers', layerID, 'holes', holeID]); var holeOffset = hole.offset; if (minVertex.x === x1 && minVertex.y === y1) { splitPointOffset = 1 - splitPointOffset; holeOffset = 1 - hole.offset; } if (holeOffset < splitPointOffset) { var offset = holeOffset / splitPointOffset; if (minVertex.x === x1 && minVertex.y === y1) { offset = 1 - offset; } state = Hole.create(state, layerID, hole.type, line0.id, offset, hole.properties).updatedState; } else { var _offset = (holeOffset - splitPointOffset) / (1 - splitPointOffset); if (minVertex.x === x1 && minVertex.y === y1) { _offset = 1 - _offset; } state = Hole.create(state, layerID, hole.type, line1.id, _offset, hole.properties).updatedState; } }); //add splitted lines to the original line's group var lineGroups = state.getIn(['scene', 'groups']).filter(function (group) { var lines = 
group.getIn(['elements', layerID, 'lines']); return lines && lines.contains(lineID); }); lineGroups.forEach(function (group) { state = Group.addElement(state, group.id, layerID, 'lines', line0.id).updatedState; state = Group.addElement(state, group.id, layerID, 'lines', line1.id).updatedState; }); state = Line.remove(state, layerID, lineID).updatedState; return { updatedState: state, lines: new List([line0, line1]) }; } }, { key: 'addFromPoints', value: function addFromPoints(state, layerID, type, points, properties, holes) { var _this = this; points = new List(points).sort(function (_ref, _ref2) { var x1 = _ref.x, y1 = _ref.y; var x2 = _ref2.x, y2 = _ref2.y; return x1 === x2 ? y1 - y2 : x1 - x2; }); var pointsPair = points.zip(points.skip(1)).filterNot(function (_ref3) { var _ref4 = _slicedToArray(_ref3, 2), _ref4$ = _ref4[0], x1 = _ref4$.x, y1 = _ref4$.y, _ref4$2 = _ref4[1], x2 = _ref4$2.x, y2 = _ref4$2.y; return x1 === x2 && y1 === y2; }); var lines = []; pointsPair.forEach(function (_ref5) { var _ref6 = _slicedToArray(_ref5, 2), _ref6$ = _ref6[0], x1 = _ref6$.x, y1 = _ref6$.y, _ref6$2 = _ref6[1], x2 = _ref6$2.x, y2 = _ref6$2.y; var _create = _this.create(state, layerID, type, x1, y1, x2, y2, properties), stateL = _create.updatedState, line = _create.line; state = stateL; if (holes) { holes.forEach(function (holeWithOffsetPoint) { var _holeWithOffsetPoint$ = holeWithOffsetPoint.offsetPosition, xp = _holeWithOffsetPoint$.x, yp = _holeWithOffsetPoint$.y; if (GeometryUtils.isPointOnLineSegment(x1, y1, x2, y2, xp, yp)) { var newOffset = GeometryUtils.pointPositionOnLineSegment(x1, y1, x2, y2, xp, yp); if (newOffset >= 0 && newOffset <= 1) { state = Hole.create(state, layerID, holeWithOffsetPoint.hole.type, line.id, newOffset, holeWithOffsetPoint.hole.properties).updatedState; } } }); } lines.push(line); }); return { updatedState: state, lines: new List(lines) }; } }, { key: 'createAvoidingIntersections', value: function createAvoidingIntersections(state, layerID, 
type, x0, y0, x1, y1, oldProperties, oldHoles) { var _this2 = this; var points = [{ x: x0, y: y0 }, { x: x1, y: y1 }]; state = state.getIn(['scene', 'layers', layerID, 'lines']).reduce(function (reducedState, line) { var _line$vertices$map$to = line.vertices.map(function (vertexID) { return reducedState.getIn(['scene', 'layers', layerID, 'vertices']).get(vertexID); }).toArray(), _line$vertices$map$to2 = _slicedToArray(_line$vertices$map$to, 2), v0 = _line$vertices$map$to2[0], v1 = _line$vertices$map$to2[1]; var hasCommonEndpoint = GeometryUtils.samePoints(v0, points[0]) || GeometryUtils.samePoints(v0, points[1]) || GeometryUtils.samePoints(v1, points[0]) || GeometryUtils.samePoints(v1, points[1]); var intersection = GeometryUtils.twoLineSegmentsIntersection(points[0], points[1], v0, v1); if (intersection.type === 'colinear') { if (!oldHoles) { oldHoles = []; } var orderedVertices = GeometryUtils.orderVertices(points); reducedState.getIn(['scene', 'layers', layerID, 'lines', line.id, 'holes']).forEach(function (holeID) { var hole = reducedState.getIn(['scene', 'layers', layerID, 'holes', holeID]); var oldLineLength = GeometryUtils.pointsDistance(v0.x, v0.y, v1.x, v1.y); var offset = GeometryUtils.samePoints(orderedVertices[1], line.vertices.get(1)) ? 
1 - hole.offset : hole.offset; var offsetPosition = GeometryUtils.extendLine(v0.x, v0.y, v1.x, v1.y, oldLineLength * offset); oldHoles.push({ hole: hole, offsetPosition: offsetPosition }); }); reducedState = _this2.remove(reducedState, layerID, line.id).updatedState; points.push(v0, v1); } if (intersection.type === 'intersecting' && !hasCommonEndpoint) { reducedState = _this2.split(reducedState, layerID, line.id, intersection.point.x, intersection.point.y).updatedState; points.push(intersection.point); } return reducedState; }, state); var _Line$addFromPoints = Line.addFromPoints(state, layerID, type, points, oldProperties, oldHoles), updatedState = _Line$addFromPoints.updatedState, lines = _Line$addFromPoints.lines; return { updatedState: updatedState, lines: lines }; } }, { key: 'replaceVertex', value: function replaceVertex(state, layerID, lineID, vertexIndex, x, y) { var vertexID = state.getIn(['scene', 'layers', layerID, 'lines', lineID, 'vertices', vertexIndex]); state = Vertex.remove(state, layerID, vertexID, 'lines', lineID).updatedState; var _Vertex$add3 = Vertex.add(state, layerID, x, y, 'lines', lineID), stateV = _Vertex$add3.updatedState, vertex = _Vertex$add3.vertex; state = stateV; state = state.setIn(['scene', 'layers', layerID, 'lines', lineID, 'vertices', vertexIndex], vertex.id); state = state.setIn(['scene', 'layers', layerID, 'lines', lineID], state.getIn(['scene', 'layers', layerID, 'lines', lineID])); return { updatedState: state, line: state.getIn(['scene', 'layers', layerID, 'lines', lineID]), vertex: vertex }; } }, { key: 'selectToolDrawingLine', value: function selectToolDrawingLine(state, sceneComponentType) { state = state.merge({ mode: MODE_WAITING_DRAWING_LINE, drawingSupport: new Map({ type: sceneComponentType }) }); return { updatedState: state }; } }, { key: 'beginDrawingLine', value: function beginDrawingLine(state, layerID, x, y) { var snapElements = SnapSceneUtils.sceneSnapElements(state.scene, new List(), state.snapMask); var 
snap = null; if (state.snapMask && !state.snapMask.isEmpty()) { snap = SnapUtils.nearestSnap(snapElements, x, y, state.snapMask); if (snap) { ; var _snap$point = snap.point; x = _snap$point.x; y = _snap$point.y; }snapElements = snapElements.withMutations(function (snapElements) { var a = void 0, b = void 0, c = void 0; var _GeometryUtils$horizo = GeometryUtils.horizontalLine(y); a = _GeometryUtils$horizo.a; b = _GeometryUtils$horizo.b; c = _GeometryUtils$horizo.c; SnapUtils.addLineSnap(snapElements, a, b, c, 10, 3, null); var _GeometryUtils$vertic = GeometryUtils.verticalLine(x); a = _GeometryUtils$vertic.a; b = _GeometryUtils$vertic.b; c = _GeometryUtils$vertic.c; SnapUtils.addLineSnap(snapElements, a, b, c, 10, 3, null); }); } var drawingSupport = state.get('drawingSupport').set('layerID', layerID); state = Layer.unselectAll(state, layerID).updatedState; var _Line$create3 = Line.create(state, layerID, drawingSupport.get('type'), x, y, x, y), stateL = _Line$create3.updatedState, line = _Line$create3.line; state = Line.select(stateL, layerID, line.id).updatedState; state = state.merge({ mode: MODE_DRAWING_LINE, snapElements: snapElements, activeSnapElement: snap ? 
snap.snap : null, drawingSupport: drawingSupport }); return { updatedState: state }; } }, { key: 'updateDrawingLine', value: function updateDrawingLine(state, x, y) { var snap = null; if (state.snapMask && !state.snapMask.isEmpty()) { snap = SnapUtils.nearestSnap(state.snapElements, x, y, state.snapMask); if (snap) { ; var _snap$point2 = snap.point; x = _snap$point2.x; y = _snap$point2.y; } } var layerID = state.getIn(['drawingSupport', 'layerID']); var lineID = state.getIn(['scene', 'layers', layerID, 'selected', 'lines']).first(); var _Line$replaceVertex = Line.replaceVertex(state, layerID, lineID, 1, x, y), stateLV = _Line$replaceVertex.updatedState, vertex = _Line$replaceVertex.vertex; state = stateLV; state = this.select(state, layerID, lineID).updatedState; state = state.merge({ activeSnapElement: snap ? snap.snap : null }); return { updatedState: state }; } }, { key: 'endDrawingLine', value: function endDrawingLine(state, x, y) { if (state.snapMask && !state.snapMask.isEmpty()) { var snap = SnapUtils.nearestSnap(state.snapElements, x, y, state.snapMask); if (snap) { ; var _snap$point3 = snap.point; x = _snap$point3.x; y = _snap$point3.y; } } var layerID = state.getIn(['drawingSupport', 'layerID']); var layer = state.getIn(['scene', 'layers', layerID]); var lineID = state.getIn(['scene', 'layers', layerID, 'selected', 'lines']).first(); var line = state.getIn(['scene', 'layers', layerID, 'lines', lineID]); var v0 = layer.vertices.get(line.vertices.get(0)); state = Line.remove(state, layerID, lineID).updatedState; state = Line.createAvoidingIntersections(state, layerID, line.type, v0.x, v0.y, x, y).updatedState; state = Layer.detectAndUpdateAreas(state, layerID).updatedState; state = state.merge({ mode: MODE_WAITING_DRAWING_LINE, snapElements: new List(), activeSnapElement: null }); return { updatedState: state }; } }, { key: 'beginDraggingLine', value: function beginDraggingLine(state, layerID, lineID, x, y) { var snapElements = 
SnapSceneUtils.sceneSnapElements(state.scene, new List(), state.snapMask); var layer = state.scene.layers.get(layerID); var line = layer.lines.get(lineID); var vertex0 = layer.vertices.get(line.vertices.get(0)); var vertex1 = layer.vertices.get(line.vertices.get(1)); state = state.merge({ mode: MODE_DRAGGING_LINE, snapElements: snapElements, draggingSupport: Map({ layerID: layerID, lineID: lineID, startPointX: x, startPointY: y, startVertex0X: vertex0.x, startVertex0Y: vertex0.y, startVertex1X: vertex1.x, startVertex1Y: vertex1.y }) }); return { updatedState: state }; } }, { key: 'updateDraggingLine', value: function updateDraggingLine(state, x, y) { var draggingSupport = state.draggingSupport; var snapElements = state.snapElements; var layerID = draggingSupport.get('layerID'); var lineID = draggingSupport.get('lineID'); var diffX = x - draggingSupport.get('startPointX'); var diffY = y - draggingSupport.get('startPointY'); var newVertex0X = draggingSupport.get('startVertex0X') + diffX; var newVertex0Y = draggingSupport.get('startVertex0Y') + diffY; var newVertex1X = draggingSupport.get('startVertex1X') + diffX; var newVertex1Y = draggingSupport.get('startVertex1Y') + diffY; var activeSnapElement = null; var curSnap0 = null, curSnap1 = null; if (state.snapMask && !state.snapMask.isEmpty()) { curSnap0 = SnapUtils.nearestSnap(snapElements, newVertex0X, newVertex0Y, state.snapMask); curSnap1 = SnapUtils.nearestSnap(snapElements, newVertex1X, newVertex1Y, state.snapMask); } var deltaX = 0, deltaY = 0; if (curSnap0 && curSnap1) { if (curSnap0.point.distance < curSnap1.point.distance) { deltaX = curSnap0.point.x - newVertex0X; deltaY = curSnap0.point.y - newVertex0Y; activeSnapElement = curSnap0.snap; } else { deltaX = curSnap1.point.x - newVertex1X; deltaY = curSnap1.point.y - newVertex1Y; activeSnapElement = curSnap1.snap; } } else { if (curSnap0) { deltaX = curSnap0.point.x - newVertex0X; deltaY = curSnap0.point.y - newVertex0Y; activeSnapElement = curSnap0.snap; } if 
(curSnap1) { deltaX = curSnap1.point.x - newVertex1X; deltaY = curSnap1.point.y - newVertex1Y; activeSnapElement = curSnap1.snap; } } newVertex0X += deltaX; newVertex0Y += deltaY; newVertex1X += deltaX; newVertex1Y += deltaY; state = state.merge({ activeSnapElement: activeSnapElement, scene: state.scene.updateIn(['layers', layerID], function (layer) { return layer.withMutations(function (layer) { var lineVertices = layer.getIn(['lines', lineID, 'vertices']); layer.updateIn(['vertices', lineVertices.get(0)], function (vertex) { return vertex.merge({ x: newVertex0X, y: newVertex0Y }); }); layer.updateIn(['vertices', lineVertices.get(1)], function (vertex) { return vertex.merge({ x: newVertex1X, y: newVertex1Y }); }); return layer; }); }) }); return { updatedState: state }; } }, { key: 'endDraggingLine', value: function endDraggingLine(state, x, y) { var _state = state, draggingSupport = _state.draggingSupport; var layerID = draggingSupport.get('layerID'); var layer = state.scene.layers.get(layerID); var lineID = draggingSupport.get('lineID'); var line = layer.lines.get(lineID); var vertex0 = layer.vertices.get(line.vertices.get(0)); var vertex1 = layer.vertices.get(line.vertices.get(1)); var maxV = GeometryUtils.maxVertex(vertex0, vertex1); var minV = GeometryUtils.minVertex(vertex0, vertex1); var lineLength = GeometryUtils.verticesDistance(minV, maxV); var alpha = Math.atan2(maxV.y - minV.y, maxV.x - minV.x); var holesWithOffsetPosition = []; layer.lines.get(lineID).holes.forEach(function (holeID) { var hole = layer.holes.get(holeID); var pointOnLine = lineLength * hole.offset; var offsetPosition = { x: pointOnLine * Math.cos(alpha) + minV.x, y: pointOnLine * Math.sin(alpha) + minV.y }; holesWithOffsetPosition.push({ hole: hole, offsetPosition: offsetPosition }); }); var diffX = x - draggingSupport.get('startPointX'); var diffY = y - draggingSupport.get('startPointY'); var newVertex0X = draggingSupport.get('startVertex0X') + diffX; var newVertex0Y = 
draggingSupport.get('startVertex0Y') + diffY; var newVertex1X = draggingSupport.get('startVertex1X') + diffX; var newVertex1Y = draggingSupport.get('startVertex1Y') + diffY; if (state.snapMask && !state.snapMask.isEmpty()) { var curSnap0 = SnapUtils.nearestSnap(state.snapElements, newVertex0X, newVertex0Y, state.snapMask); var curSnap1 = SnapUtils.nearestSnap(state.snapElements, newVertex1X, newVertex1Y, state.snapMask); var deltaX = 0, deltaY = 0; if (curSnap0 && curSnap1) { if (curSnap0.point.distance < curSnap1.point.distance) { deltaX = curSnap0.point.x - newVertex0X; deltaY = curSnap0.point.y - newVertex0Y; } else { deltaX = curSnap1.point.x - newVertex1X; deltaY = curSnap1.point.y - newVertex1Y; } } else { if (curSnap0) { deltaX = curSnap0.point.x - newVertex0X; deltaY = curSnap0.point.y - newVertex0Y; } if (curSnap1) { deltaX = curSnap1.point.x - newVertex1X; deltaY = curSnap1.point.y - newVertex1Y; } } newVertex0X += deltaX; newVertex0Y += deltaY; newVertex1X += deltaX; newVertex1Y += deltaY; } var lineGroups = state //get groups membership if present .getIn(['scene', 'groups']).filter(function (group) { var lines = group.getIn(['elements', layerID, 'lines']); return lines && lines.contains(lineID); }); state = Layer.mergeEqualsVertices(state, layerID, line.vertices.get(0)).updatedState; state = Layer.mergeEqualsVertices(state, layerID, line.vertices.get(1)).updatedState; state = Line.remove(state, layerID, lineID).updatedState; if (!GeometryUtils.samePoints({ newVertex0X: newVertex0X, newVertex0Y: newVertex0Y }, { newVertex1X: newVertex1X, newVertex1Y: newVertex1Y })) { var ret = Line.createAvoidingIntersections(state, layerID, line.type, newVertex0X, newVertex0Y, newVertex1X, newVertex1Y, line.properties, holesWithOffsetPosition); state = ret.updatedState; //re-add to old line's groups if present ret.lines.forEach(function (addedLine) { lineGroups.forEach(function (oldLineGroup) { state = Group.addElement(state, oldLineGroup.id, layerID, 'lines', 
addedLine.id).updatedState; }); }); } state = Layer.detectAndUpdateAreas(state, layerID).updatedState; state = state.merge({ mode: MODE_IDLE, draggingSupport: null, activeSnapElement: null, snapElements: new List() }); return { updatedState: state }; } }, { key: 'setProperties', value: function setProperties(state, layerID, lineID, properties) { state = state.mergeIn(['scene', 'layers', layerID, 'lines', lineID, 'properties'], properties); return { updatedState: state }; } }, { key: 'setJsProperties', value: function setJsProperties(state, layerID, lineID, properties) { return this.setProperties(state, layerID, lineID, fromJS(properties)); } }, { key: 'updateProperties', value: function updateProperties(state, layerID, lineID, properties) { properties.forEach(function (v, k) { if (state.hasIn(['scene', 'layers', layerID, 'lines', lineID, 'properties', k])) state = state.mergeIn(['scene', 'layers', layerID, 'lines', lineID, 'properties', k], v); }); return { updatedState: state }; } }, { key: 'updateJsProperties', value: function updateJsProperties(state, layerID, lineID, properties) { return this.updateProperties(state, layerID, lineID, fromJS(properties)); } }, { key: 'setAttributes', value: function setAttributes(state, layerID, lineID, lineAttributes) { var lAttr = lineAttributes.toJS(); var vertexOne = lAttr.vertexOne, vertexTwo = lAttr.vertexTwo, lineLength = lAttr.lineLength; delete lAttr['vertexOne']; delete lAttr['vertexTwo']; delete lAttr['lineLength']; state = state.mergeIn(['scene', 'layers', layerID, 'lines', lineID], fromJS(lAttr)).mergeIn(['scene', 'layers', layerID, 'vertices', vertexOne.id], { x: vertexOne.x, y: vertexOne.y }).mergeIn(['scene', 'layers', layerID, 'vertices', vertexTwo.id], { x: vertexTwo.x, y: vertexTwo.y }).mergeIn(['scene', 'layers', layerID, 'lines', lineID, 'misc'], new Map({ '_unitLength': lineLength._unit })); state = Layer.mergeEqualsVertices(state, layerID, vertexOne.id).updatedState; if (vertexOne.x != vertexTwo.x && 
vertexOne.y != vertexTwo.y) { state = Layer.mergeEqualsVertices(state, layerID, vertexTwo.id).updatedState; } state = Layer.detectAndUpdateAreas(state, layerID).updatedState; return { updatedState: state }; } }, { key: 'setVerticesCoords', value: function setVerticesCoords(state, layerID, lineID, x1, y1, x2, y2) { var line = state.getIn(['scene', 'layers', layerID, 'lines', lineID]); state = Vertex.setAttributes(state, layerID, line.vertices.get(0), new Map({ x: x1, y: y1 })).updatedState; state = Vertex.setAttributes(state, layerID, line.vertices.get(1), new Map({ x: x2, y: y2 })).updatedState; return { updatedState: state }; } }]); return Line; }(); export { Line as default }; ```