text
stringlengths
454
608k
url
stringlengths
17
896
dump
stringclasses
91 values
source
stringclasses
1 value
word_count
int64
101
114k
flesch_reading_ease
float64
50
104
1 2 /* Soot - a J*va Optimization Framework3 * Copyright (C) 2003 Ondrej Lhotak17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,18 * Boston, MA 02111-1307, USA.19 */20 21 /* THIS FILE IS AUTO-GENERATED FROM soot_options.xml. DO NOT MODIFY. */22 23 package soot.options;24 import java.util.*;25 26 /** Option parser for Call Graph Constructor. */27 public class CGOptions28 {29 private Map options;30 31 public CGOptions( Map options ) {32 this.options = options;33 }34 35 /** Enabled --36 37 * .38 39 * 40 */41 public boolean enabled() {42 return soot.PhaseOptions.getBoolean( options, "enabled" );43 }44 45 /** Safe forName --46 47 * Handle Class.forName() calls conservatively.48 49 * When a program calls Class.forName(), the named class is 50 * resolved, and its static initializer executed. In many cases, it 51 * cannot be determined statically which class will be loaded, and 52 * which static initializer executed. When this option is set to 53 * true, Soot will conservatively assume that any static 54 * initializer could be executed. This may make the call graph very 55 * large. When this option is set to false, any calls to 56 * Class.forName() for which the class cannot be determined 57 * statically are assumed to call no static initializers. 58 */59 public boolean safe_forname() {60 return soot.PhaseOptions.getBoolean( options, "safe-forname" );61 }62 63 /** Safe newInstance --64 65 * Handle Class.newInstance() calls conservatively.66 67 * When a program calls Class.newInstance(), a new object is 68 * created and its constructor executed. Soot does not determine 69 * statically which type of object will be created, and which 70 * constructor executed. When this option is set to true, Soot will 71 * conservatively assume that any constructor could be executed. 72 * This may make the call graph very large. When this option is set 73 * to false, any calls to Class.newInstance() are assumed not to 74 * call the constructor of the created object. 
75 */76 public boolean safe_newinstance() {77 return soot.PhaseOptions.getBoolean( options, "safe-newinstance" );78 }79 80 /** Verbose --81 82 * Print warnings about where the call graph may be incomplete.83 84 * Due to the effects of native methods and reflection, it may not 85 * always be possible to construct a fully conservative call graph. 86 * Setting this option to true causes Soot to point out the parts 87 * of the call graph that may be incomplete, so that they can be 88 * checked by hand. 89 */90 public boolean verbose() {91 return soot.PhaseOptions.getBoolean( options, "verbose" );92 }93 94 /** All Application Class Methods Reachable --95 96 * Assume all methods of application classes are reachable..97 98 * When this option is false, the call graph is built starting at a 99 * set of entry points, and only methods reachable from those entry 100 * points are processed. Unreachable methods will not have any call 101 * graph edges generated out of them. Setting this option to true 102 * makes Soot consider all methods of application classes to be 103 * reachable, so call edges are generated for all of them. This 104 * leads to a larger call graph. For program visualization 105 * purposes, it is sometimes desirable to include edges from 106 * unreachable methods; although these methods are unreachable in 107 * the version being analyzed, they may become reachable if the 108 * program is modified.109 */110 public boolean all_reachable() {111 return soot.PhaseOptions.getBoolean( options, "all-reachable" );112 }113 114 /** Implicit Entry Points --115 116 * Include methods called implicitly by the VM as entry points.117 118 * When this option is true, methods that are called implicitly by 119 * the VM are considered entry points of the call graph. 
When it is 120 * false, these methods are not considered entry points, leading to 121 * a possibly incomplete call graph.122 */123 public boolean implicit_entry() {124 return soot.PhaseOptions.getBoolean( options, "implicit-entry" );125 }126 127 /** Trim Static Initializer Edges --128 129 * Removes redundant static initializer calls.130 131 * The call graph contains an edge from each statement that could 132 * trigger execution of a static initializer to that static 133 * initializer. However, each static initializer is triggered only 134 * once. When this option is enabled, after the call graph is 135 * built, an intra-procedural analysis is performed to detect 136 * static initializer edges leading to methods that must have 137 * already been executed. Since these static initializers cannot be 138 * executed again, the corresponding call graph edges are removed 139 * from the call graph. 140 */141 public boolean trim_clinit() {142 return soot.PhaseOptions.getBoolean( options, "trim-clinit" );143 }144 145 /** JDK version --146 147 * JDK version for native methods.148 149 * This option sets the JDK version of the standard library being 150 * analyzed so that Soot can simulate the native methods in the 151 * specific version of the library. The default, 3, refers to Java 152 * 1.3.x.153 */154 public int jdkver() {155 return soot.PhaseOptions.getInt( options, "jdkver" );156 }157 158 }159 Java API By Example, From Geeks To Geeks. | Our Blog | Conditions of Use | About Us_ |
http://kickjava.com/src/soot/options/CGOptions.java.htm
CC-MAIN-2016-44
refinedweb
839
57.47
. To. For improved performance, include: #define PERL_NO_GET_CONTEXT as shown below. For more details, see perlguts.: #define PERL_NO_GET_CONTEXT #include "EXTERN.h" #include "perl.h" #include "XSUB.h" ! The list of output parameters occurs at the very end of the function, just) with ``XXXXX''. If you check the first part of the typemap file (or section), you'll see that doubles are of type T_DOUBLE. In the INPUT part of the typemap, because the default Perl doesn't currently support the "const char *" type. Include a new TYPEMAP section in your XS code before the above function: TYPEMAP: <<END const char * T_PV END. Note that in contrast with ``EXAMPLE 1'', ``EXAMPLE 2'' and ``EXAMPLE 3'', this description does not contain the actual code for what. You may intersperse documentation and Perl code within the .pm file. In fact, if you want to use method autoloading, you must do this, as the comment inside the .pm file explains. See perlpod for more information about the pod format. INIT: Mytest.t while incrementing the ``9'' tests to ``11'': @a = &Mytest::statfs("/blech"); ok( scalar(@a) == 1 && $a[0] == 2 ); @a = &Mytest::statfs("/"); is( scalar(@a),. SV * multi_statfs(paths) SV * paths INIT: AV * results; SSize_t numpaths = 0, n; int i; struct statfs buf; SvGETMAGIC(paths); if ((!SvROK(paths)) || (SvTYPE(SvRV(paths)) != SVt_PVAV) || ((numpaths = av_top_index(); Suppose that for some strange reason we need a wrapper around the standard C library function "fputs()". This is all we need: #define PERLIO_NOT_STDIO 0 #define PERL_NO_GET_CONTEXT #include "EXTERN.h" #include "perl.h" #include "XSUB.h" #include <stdio.h> int fputs(s, stream) char * s FILE * stream The real work is done in the standard typemap. But you lose()". use lib './blib'; BEGIN { unshift(@INC, "./blib") }
http://linuxhowtos.org/manpages/1/perlxstut.htm
CC-MAIN-2019-43
refinedweb
285
77.33
Common definitions This section defines various terms and concepts that are common to many functions or build rules below. Contents Bourne shell tokenization Certain string attributes of some rules are split into multiple words according to the tokenization rules of the Bourne shell: unquoted spaces delimit separate words, and single- and double-quotes characters and backslashes are used to prevent tokenization. Those attributes that are subject to this tokenization are explicitly indicated as such in their definitions in this document. Attributes subject to "Make" variable expansion and Bourne shell tokenization are typically used for passing arbitrary options to compilers and other tools. Examples of such attributes are cc_library.copts and java_library.javacopts. Together these substitutions allow a single string variable to expand into a configuration-specific list of option words. Label expansion Some string attributes of a very few rules are subject to label expansion: if those strings contain a valid label as a substring, such as //mypkg:target, and that label is a declared prerequisite of the current rule, it is expanded into the pathname of the file represented by the target //mypkg:target. Example attributes include genrule.cmd and cc_binary.linkopts. The details may vary significantly in each case, over such issues as: whether relative labels are expanded; how labels that expand to multiple files are treated, etc. Consult the rule attribute documentation for specifics. Attributes common to all build rules This section describes attributes that are common to all build rules. Please note that it is an error to list the same label twice in a list of labels attribute. Attributes common to all test rules (*_test) This section describes attributes that are common to all test rules. Attributes common to all binary rules (*_binary) This section describes attributes that are common to all binary rules. Configurable attributes Most attributes are . 
Implicit output targets When you define a build rule in a BUILD file, you are explicitly declaring a new, named rule target in a package. Many build rule functions also implicitly entail one or more output file targets, whose contents and meaning are rule-specific. For example, when you explicitly declare a java_binary(name='foo', ...) rule, you are also implicitly declaring an output file target foo_deploy.jar as a member of the same package. (This particular target is a self-contained Java archive suitable for deployment.) Implicit output targets are first-class members of the global target graph. Just like other targets, they are built on demand, either when specified in the top-level built command, or when they are necessary prerequisites for other build targets. They can be referenced as dependencies in BUILD files, and can be observed in the output of analysis tools such as bazel query. For each kind of build rule, the rule's documentation contains a special section detailing the names and contents of any implicit outputs entailed by a declaration of that kind of rule. An important but somewhat subtle distinction between the two namespaces used by the build system: labels identify targets, which may be rules or files, and file targets may be divided into either source (or input) file targets and derived (or output) file targets. These are the things you can mention in BUILD files, build from the command-line, or examine using bazel query; this is the target namespace. Each file target corresponds to one actual file on disk (the "file system namespace"); each rule target may correspond to zero, one or more actual files on disk. There may be files on disk that have no corresponding target; for example, .o object files produced during C++ compilation cannot be referenced from within BUILD files or from the command line. In this way, the build tool may hide certain implementation details of how it does its job. 
This is explained more fully in the BUILD Concept Reference.
https://docs.bazel.build/versions/0.24.0/be/common-definitions.html
CC-MAIN-2020-34
refinedweb
640
52.9
Mac-Forums Mac-Forums Forums Mac-friendly or favorite CMS's? I need a little help with some CSS Web pages to slide show to dvd? easy? Best offers with this reviews?? Converting .MOV to .SWF... Need Some Input IP addresses of popular website severs Expert iWeb Help Requested set up free website Website feedback? iWeb and Other Web Designing Programmes Configuring SSL on Mac OS Snow Leopard Can I import a theme into iWeb? CSS woes iWeb Font question Rapidweaver vs. Dreamweaver Creating a download page for application in iWeb?? Creating a download page for application in iWeb?? Some Useful Info For iWeb Users! re-associating web files to iweb iWeb question CSS problem iWeb Album not showing up in FF3 Issues with iweb 09? Setting up my own Website/Business. Help. adding a email subscription in iweb iWeb 09 MobileMe and Facebook Suggestion for an HTML editor Links in iweb Photo Album Quicktime Kioskmode Disabling in Firefox with Firebug Plug-In iWeb alternative? dreamweaver site, own domain & mobileme hosting problems looking for best Mac supporting hosting service iweb 2006 FROM Html to iweb from MS Publisher to iWeb iWeb 09 handmade navigation problem Question about IWeb. Local Host only showing source code... Adding Photographs to an iweb site the iMac MAMP - Can't Start Servers question on makeing a kids game WampServer for Mac OS X Small Business, E-Commerce on Snow Leopard Server Tracking MySpace Traffic What would be the Mac equivelant to Front Page? Is RapidWeaver a good alternative to FrontPage? Browser compatibility... Synchronising Drupal website from Mac to Windows Server flex site not working well with mac Google xml sitemap problem Removing the hover from images Dreamweaver CS4 and Flash Buttons Drupal or Joomla?? buying a flash web design, what do I need to ad my info and photos Using iweb features without mobile me. Analytics and iweb/mobileme help Good FTP Program/ Poor Man's LDAP? 
New Laptop Apple MB990LL/A MacBook 13.3" LED $320 Coda and TextMate How to register your website with Google? sites made with iweb 09 Easy iweb publisher help!! Iweb not publishing whole site HTML help.. iweb not saving changes Another problem with comments in my iweb blog FTP Uploading question Iweb iweb HTML problem iweb photo albums won't display the way I want iWeb error interactive website creation on mac Website Builder Will Cyberduck answer my ftp needs? Change web site title for google results MySpace Formatting Error actionscript counting end of movie clips Apache Redirecting Designing a website for a bar/hotel Good retail wed design software? Setting up home web server using OS X Server 10.6 Looking For Speed Test Application For New Hosting Site.. iweb 06 and easy iweb publisher import site from iWeb into Rapidweaver? iWeb vs Rapidweaver vs Wordpress free website builder? Placing a picture within a text field with the text around it in iWeb? Mouse text scrolling in flash with macs. Hyperlink won't work when my iWeb site is published Text field won't change size in iWeb? Where can I purchase photos for my website? Blog ? in iWeb 09 moving iweb sites to another computer Adding simple Database for contacts Learing Dreamweaver & Flash... web hosting with 24/7 customer support 2 questions about my iWeb 09' website can't locate my Webserver folder Thinking of starting forum meta tags Image Rotation Help Helpdesk, crm, or Ticket system type script in PHP? NameVirtualHost Problem redesigning my website. Recommend useful iPod/iPhone Manager Tools for Winodws and Mac users Creating Pop up menu in DreamWeaver CS3 iweb site looks worse than pc version How to add photo album to existing photo page? rapidweaver iweb go daddy ftp fire fox cyber duck AP Divs NOT showing up! iWeb Question Keeping Content Fixed while Dragging Browser.. iWeb problems Center site in all browsers PDF :Link Issues Publish IWeb How to keep DW active when not active? 
Safari requiring basic auth twice How to change homepage to index.html in iWeb? Fireworks CSS exporting question.... Flash in IWEB How to reset Apache WebServer Rate my site FTP Server uploading How Should People Enter My Website? Looking for an IWeb designer to rebuild mywebsite Website Feedback :) Header Text / iWeb Creating a myspace icon link on Iweb page Mac Mini Server Movies don't publish on iWeb Apache File Permissions? Disable the ability of browsers to cache my website iWeb-Change title of webpage $25 Free @ HostGator - ~2 free months shared hosting hosting a jtv channel? Design studio fail How to consolidate iWeb sites on two Macs I need a simple contact form script code iWeb & using flash My first HTML website! Hyperlink to MyBookFace Easy iWeb publisher photo sizes coding, pic gallery & blog help needed for iweb page functionality .htaccess, apache and Firefox. Is it war? Mac and web design Bluefish HTML Editor Install Looking for an online magazine publishing tool Adding visitor access to iWeb site Another Iweb problem :( Images won't appear? DREAMWEAVER CS4 - question about AP Div layers versus Div Tag Recommend an FTP site .htaccess headaches redesign my site i:Web. Cannot use or upgrade MIME type issues Password protecting websites? centre a sliced image from photoshop into dreamweaver cs4 Best way to incease web traffic? How to delete thread post cart snippet shows up in search results Books to learn HTML? Website Design Question Rapid Weaver Question My Personal Website- some questions Getting html into iWeb09 Web Pages With Multiple WebCams Crashes Safari automated redirect from one domain to another Ftp Web Design Publishing GoDaddy domain name on Mobile Me I have no idea how to use my hosting thing. imovie in iweb shows on one page not another How To Remove An Old iWeb Site? Internet Brand Issues with my most recent page update Problem uploading pictures on iWeb Look at my website! 
Little problem Need advice for creating a website another iweb question Creating Simple Web Page What do you think of this site? I want to create a website and I have no clue where to start colors alter when importing photoshop file to Flash CS4 Self-activating slideshow on website? iWeb, need help publishing affiliate marketing on the internet Matthew Rodbourne web site trouble imovie in iweb loads slow How to do an Image Overlay? imovie in iweb loading time iWeb page not showing up Setting up Apache Need Help With Setting Up My New Site I need advice on starting a website Online Database Calendar iweb moblie me mulitple domain hosting Automatic FTP backup (From FTP) Quick HTML Question iweb alternative for more complex site? iweb08 Opacity and Reflection settings not displaying properly Good webhosting? File sharing, drops halfway through: "do not have sufficient privileges" Links Forms - submitting information Anybody know how to flip pdf pages? Monitor Server Loads Need information on cirtexhosting How do I transfer blog to new .com domain? shout box/ wall on your web page Accounting Software Ruby on Rails Possible Firefox Flaw? Or height CSS issue Changing Apache's server signature newbie question PHP mail(); won't work on XAMPP Dreamweaver CS3 and Safari 4.0 Help with FTP Flash slideshow maker for Mac? Erratic behavior with Iweb I have two problems in iWeb Google page ranking Uploading - Servers vs. Mobile Me Wisiwig How do websites get those scrolling signs? So I'm Building a WebPage Uploading ease - mac or server with the domain name ? games in iweb Website 'boreders' Web hosting Any feedback welcome newbie needs iweb help Help with blog imovie in iweb Creating a web server iweb to dreamweaver iWeb - updates are not showing Website only Partially Published Using iWeb and FTP settings Px, Pt or Percentage? 123-reg & DNS records weebly Textmate over sshfs or sftp iWeb FLV won't access idisk Picture not showing up on iWeb Mac Newbie: Simple FTP Question
http://www.mac-forums.com/mac-forums-sitemap.php?forumid=37&page=1
CC-MAIN-2014-49
refinedweb
1,310
62.38
The weight sensor is a very important element in many Arduino projects. By changing the weight, you can recognize the mass and track and record changes to the object and then perform some actions. In this article, we will learn how to connect a weight sensor to an Arduino using load cells of various denominations and the HX711 chip as an analog-to-digital converter. Load Cell Working Principle The weight sensor’s work is based on changing any physical parameter proportional to the weight of the object being measured. The parameter depends on which element is used in the sensor. So when the load on the piezo-ceramic plate changes, the voltage removed from the electrodes at the ends of the piezo-sensor changes. When the capacitive sensor is used, the capacitance of the variable capacitor changes. This design uses a weight sensor made on an elastic resistor, and when the weight changes, its resistance changes, and therefore the voltage removed from the bridge circuit. The sensor is a rectangular bar made of aluminum alloy, with a hole in the center. On its side surfaces are applied thin-film resistors connected by a bridge circuit, so the resistive sensor has 4 flexible outputs. All sensor elements are filled with an epoxy compound. Threaded holes are provided on the bar to fix it to the base and mount the plate under the measured load. The end side of the sensor is marked to indicate the maximum weight of the measured cargo. In order for the resistors to change their resistance, the strain gauge sensor must be fixed at one end on the base and at the other end of the load must act so that the deformation of the bar and, accordingly, the film resistors. In order to convert the analog signal from the output of the tensor sensor into binary code, an analog-to-digital converter (ADC) HX711 is used. Load Cell and HX711 Integral chip HX711 is an analog-to-digital converter with a sample rate of 24 bits and a built-in low noise operational amplifier. 
The multiplexer allows selecting one of the two available input channels. Channel A has a programmable gain selection that can be 64 or 128. Channel B operates at a preset gain of 32. An integrated voltage stabilizer is included in the chip, eliminating the need for an external stabilizer. Any pulse signal from an external source can be fed to the synchronization input; however, the ADC also allows operation from a built-in oscillator. Main technical characteristics of HX711: - ADC resolution – 24 bits - Input amplification A – 64 or 128 - Input amplification B – 32 - Frequency of measurement – 10 or 80 times per second - Supply voltage – 2.6-5.5 V - Current consumption – less than 10 mA - Input voltage – ± 40 mV There are two connectors on the board with the ADC – J1 and JP2, which have the following designations: J1 - E -, E + strain-gauge bridge power supply - A -, A + differential input of channel A - B -, B + differential input of channel B JP2 - GND, VCC power supply - DT, SCK – information buses Connecting HX711 to Arduino Since the strain-gauge resistors are connected in a bridge circuit, 4 conductors with different color markings depart from the device. The reference voltage is fed to two arms of the bridge, and the output voltage is taken from the other two arms and fed to the input of the operational amplifier of the HX711 chip. Connection by wire colors is performed as follows: - Red – E + - Black – E – - White – A – - Green – A + For further processing and transfer of information, the HX711 is connected to an Arduino UNO. For this purpose, the GND and VCC power contacts of the HX711 are connected to the GND and 5V points of the Arduino UNO POWER connector, and contacts DT and SCK are connected to points A1 and A0 of the ANALOG IN connector. Through the Arduino UNO controller, the HX711 load sensor can be connected to an LCD 1602 or a computer, using the USB port and standard Arduino libraries. 
HX711 Calibration Code Since the voltage changes at the output of the measuring bridge, it is it that converts to binary code. The range of monitored voltages depends on the selected gain. If the ratio is 128, the range of measured voltages varies from – 20 mV to + 20 mV, the choice of the gain 64 determines the measurement limits from – 40 mV to + 40 mV and at a ratio of 32, the measurement limits are 80 mV and + 80 mV. These data will be correct only at +5 V supply voltage. If the input voltage exceeds the lower limit of the range, the ADC will give the code 800000h, and if the upper limit is 7FFFFh. The following codes can be used for calibration and measurement: // code for calibration #include "HX711.h" HX711 scale(A1, A0); // DT, CLK float Calibration_Factor_Of_Load_cell = -3.7; // this calibration factor is adjusted according to the load sensor float U; float O; scale to 0 long zero_factor = scale.read_average(); // Get basic reading Serial.print("Zero factor: "); // This can be used to eliminate the need to tare the scale. Useful in constant scale projects. Serial.println(zero_factor); } void loop() { scale.set_scale(Calibration_Factor_Of_Load_cell); // Adjust this calibration factor Serial.print("Reading: "); U = scale.get_units(); if (U < 0) { U = 0.00; } O = U * 0.035274; Serial.print(O); Serial.print(" grams"); Serial.print(" Calibration_Factor_Of_Load_cell: "); Serial.print(Calibration_Factor_Of_Load_cell); Serial.println(); if (Serial.available()) { char temp = Serial.read(); if (temp == '+' || temp == 'a') Calibration_Factor_Of_Load_cell += 1; else if (temp == '-' || temp == 'z') Calibration_Factor_Of_Load_cell -= 1; } } Explanation of the code: - A correction factor will be displayed on the port monitor, which should be used further when working with the scale sensor; - If correction of the correction factor is necessary, a “+” or “-” should be sent to the Arduino board via the port monitor. 
HX711 Load Cell Code #include <HX711.h> HX711 scale(2, 3); // ports DT, CLK float calibration_factor = -3.7; // sensor calibration float units; float ounces; void setup() { Serial.begin(9600); scale.set_scale(); scale.tare(); // reset the sensor to 0 scale.set_scale(calibration_factor); // apply calibration } void loop() { Serial.print("Reading: "); for(int i=0; i<10; i++) units =+ scale.get_units(), 10; // make measurements 10 times units / 10; // divide values by 10 ounces = units * 0.035274; // convert values into grams Serial.print(ounces); Serial.print(" grams"); Serial.println(); } Explanation of the code:
https://nerdytechy.com/connecting-hx711-load-cell-to-arduino/
CC-MAIN-2021-31
refinedweb
1,053
54.12
In April I had already written about the nice surprises XML and XQuery have for developers. XQuery and XPath operate on the tree representation of a document and in most cases those trees have more than one branch. This is something to keep in mind, always! This morning I was reminded about that article and that problem again when I was contacted with a question about two versions of seemingly the same query against the DB2 sample database: xquery declare default element namespace ""; for $d in db2-fn:xmlcolumn("PURCHASEORDER.PORDER") where $d/PurchaseOrder/item[quantity = 1] return $d/PurchaseOrder/item/name and xquery declare default element namespace ""; db2-fn:xmlcolumn ("PURCHASEORDER.PORDER")/PurchaseOrder/item[quantity = 1]/name In both cases we iterate over the PurchaseOrders and seem to select only items with a quantity of one. But the first query returns 4 records, the second query 3 records. Why? This is because the element we are qualifying, i.e., PurchaseOrder, can have (and actually has!) multiple item elements; a PurchaseOrder qualifies if there is at least one such item with a quantity of 1 (existential semantics), and the return clause then yields the names of all of its items, not just the matching ones. The second query directly filters at the item level. Hence the difference in the results. The first query can be rewritten to return the same result as the second query: xquery declare default element namespace ""; for $d in db2-fn:xmlcolumn("PURCHASEORDER.PORDER")/PurchaseOrder/item where $d[quantity = 1] return $d/name What do we learn? Regardless of SQL or XQuery: Think green! Remember the trees! And enjoy pureXML...
http://blog.4loeser.net/2009/06/lessons-from-field-again-remember-trees.html
CC-MAIN-2017-13
refinedweb
255
57.77
digitalmars.D - Outer names, binding - bearophile <bearophileHUGS lycos.com> May 30 2009 A blog post that shows some of the good things done by split (among the things it does, it supports the only attribute that is the "unique" Bartosz talks about): One of the attributes supported by Split suggests me something like: void foo(string s) outer out x { x = s.length; } After "outer" there's the list of the names of the enclosing namespace that are used inside foo(). In this "outer out x" means that foo() overwrites x. Knowing/stating what globals (or the outer scope) a function/method uses sounds good. -----------------------JavaFX lets you bind, or link, attributes so that when one attribute changes, all attributes bound to it will automatically change as well.<: In Python "Cellulose" is vaguely similar (but this isn't a built-in features of the language, it's a module).< Bye, bearophile May 30 2009
http://www.digitalmars.com/d/archives/digitalmars/D/Outer_names_binding_91437.html
CC-MAIN-2015-32
refinedweb
154
78.59
31290 1 Posted April 7, 2015 Hi everyone, hope you're fine I've almost finished my 700+ lines script but I'm facing a major issue here... Sometime in the script, I have to copy a file from a network location to my local HDD, with absolute need of a progress bar. Here's what I'm using: ;LanDesk Agent Copy Function Func LanDeskAgentCopy ($fromfile, $tofile) Local $FOF_RESPOND_YES = 16 Local $FOF_SIMPLEPROGRESS = 256 $winShell = ObjCreate ("shell.application") $winShell.namespace ($tofile).CopyHere ($fromFile, $FOF_RESPOND_YES) EndFunc Global $LDASource = "\\sealedair\ldrep\ldswd\LANDesk Agents\LD9.5_Prod\SAC Default Windows Workstation Agent_with_status.exe" Global $LDADest = "C:\LanDeskAgent\SAC Default Windows Workstation Agent_with_status.exe" Later on in the script, I do this: LanDeskAgentCopy($LDASource, $LDADest) But this is not working over the network. When using big local files, it IS working and the progress bar displays.... An other important you might know is that I can't map any network drive on the target machine (or even mine). Am I missing or doing something wrong? Thanks a lot for the future help you could provide See you. -31290- ~~~ Doom Shall Never Die, Only The Players ~~~ Share this post Link to post Share on other sites
https://www.autoitscript.com/forum/topic/169500-network-copy-is-not-working/
CC-MAIN-2018-51
refinedweb
200
56.25
So my dear friend Lucy-S's day log last month got me thinking. I've been doing way too much work for "me" recently (server deployment improvements, code cleanups, etc), and not enough work for you guys, so we're going to change that this month. So here's how this is going to work; I'm going to trade you. I'm going to do some work for me up front, and then we're going to work on some stuff for you guys. Does that sound good? A couple of big things are done up front. I've upgraded the database as of last night to mysql 5.5. This brings us inline with the development environment and closes a bit of odd technical debt that might make it hard to debug something with the split database (5.5 in dev, 5.1 in production). I just needed an evening to get it done, and now it is. I've turned on page compression. This is a bit humbling; as I thought it was on, but upon deeper inspection into my soul, I have no idea why I would think it would be on. If we ever graduate to a real CDN, then we'll move it off of being inside of the page, but for now, it's inside of the engine itself. We were already doing this for stylesheets, so it's about time. There are probably better ways to do it inside of the engine, but I did it on the quick and dirty, and according to safari, it works great. This should mean faster pageloads for people, especially our lovely UK noders. So let's talk features. There's two that we're going to get done this month, that are going to require a good deal of work, but that I think is going to start to make things a bit interesting around here. The first one is that we're going to retire Node Heaven. Okay, so that's a bit dishonest. That's a feature for both of us; I need the archived node format to change some. Drafts Beta will give you an idea of what the new Drafts page is going to look like after I push it live. Removed/nuked drafts will live in their own section. I need to do a bit more work on it, but that's kind of what that looks like. 
So instead of looking into node heaven, I'm going to put everything that was ever nuked, and is not obviously a maintenance node, into your drafts. You can permanently remove them from there if you so choose, or leave them there to rot. No worries either way Okay, so onto the real feature. We're going to create a writeup feedback mechanism that's very similar to user group debates, if you have seen those. Here's the general overview of it, and once I have it working in a beta capacity, then I'll put together a feature spec for people to comment on (possibly using the system, if it's in production). I'm calling the feature "Writeup Feedback", but I'm bad at naming. Here's the gist: The goal here is to help collaboration when writing here, and to make it a place where you can truly work on a piece with the community. I have larger plans for this feature, but that is certainly the first cut, and I'll have some more details on it soon as I work it up in dev. Diving deeper into the code, we've done a lot of symbol pollution cleanup. This means moving a lot of Everything::HTML functions into Everything::Application so that we can properly encapsulate the logic. E2 used to have a large, flat namespace where people could call various functions and it was used liberally inside of the eval()'d execution context. Now, since we're moving to templates, we're going to want the logic in a place where we can get to it. Everything::HTML will continue to exist for a while as we get the last of the display and document code out of the database. The goal is to move it into a combination of Everything::Application and Everything::Request. Everything::Request is our first Moose-based object in the code. Strengthening the models has been going well off in the branch in which I am working. It appears that there are some small, deep bugs with how it works; most notably the return type is bogus when looking for either the search results or "not found". 
Once I have that and the rebless code for the current drafts implementation done, we can start to take the newly cleaned namespace of Everything::Application, and wind that into the various nodetypes. So that's where we stand. Progress continues, but is hard to see because it is so deeply under the covers. We are changing the tires on the car while it is still running. Log in or register to write something here or to contact authors. Need help? accounthelp@everything2.com
https://everything2.com/title/root+log%253A+October+2014
CC-MAIN-2017-30
refinedweb
862
78.89
webbrowser – Displays web pages¶ The webbrowser module includes functions to open URLs in interactive browser applications. The module includes a registry of available browsers, in case multiple options are available on the system. It can also be controlled with the BROWSER environment variable. Simple Example¶ To open a page in the browser, use the open() function. import webbrowser webbrowser.open('') The URL is opened in a browser window, and that window is raised to the top of the window stack. The documentation says that an existing window will be reused, if possible, but the actual behavior may depend on your browser’s settings. Using Firefox on my Mac, a new window was always created. Windows vs. Tabs¶ If you always want a new window used, use open_new(). import webbrowser webbrowser.open_new('') If you would rather create a new tab, use open_new_tab() instead. Using a specific browser¶ If for some reason your application needs to use a specific browser, you can access the set of registered browser controllers using the get() function. The browser controller has methods to open(), open_new(), and open_new_tab(). This example forces the use of the lynx browser: import webbrowser b = webbrowser.get('lynx') b.open('') Refer to the module documentation for a list of available browser types. BROWSER variable¶ Users can control the module from outside your application by setting the environment variable BROWSER to the browser names or commands to try. The value should consist of a series of browser names separated by os.pathsep. If the name includes %s, the name is interpreted as a literal command and executed directly with the %s replaced by the URL. Otherwise, the name is passed to get() to obtain a controller object from the registry. For example, this command opens the web page in lynx, assuming it is available, no matter what other browsers are registered. 
$ BROWSER=lynx python webbrowser_open.py If none of the names in BROWSER work, webbrowser falls back to its default behavior. Command Line Interface¶ All of the features of the webbrowser module are available via the command line as well as from within your Python program. $ python -m webbrowser Usage: /Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/webbrowser.py [-n | -t] url -n: open new window -t: open new tab See also - webbrowser - Standard library documentation for this module.
https://pymotw.com/2/webbrowser/index.html
CC-MAIN-2018-51
refinedweb
389
56.45
Cleaner Data Analysis with Pandas Using Pipes By Soner Yıldırım, Data Science Enthusiast Pandas is a widely-used data analysis and manipulation library for Python. It provides numerous functions and methods to provide a robust and efficient data analysis process. In a typical data analysis or cleaning process, we are likely to perform many operations. As the number of operations increases, the code starts to look messy and harder to maintain. One way to overcome this issue is using the pipe function of Pandas. What the pipe function does is to allow combining many operations in a chain-like fashion. In this article, we will go over examples to understand how the pipe function can be used to produce cleaner and more maintainable code. We will first do some data cleaning and manipulation on a sample dataframe in separate steps. After that, we will combine these steps using the pipe function. Let’s start by importing libraries and creating the dataframe. import numpy as np import pandas as pd marketing = pd.read_csv("/content/DirectMarketing.csv") marketing.head() The dataset contains information about a marketing campaign. It is available here on Kaggle. The first operation I want to do is to drop columns that have lots of missing values. thresh = len(marketing) * 0.6 marketing.dropna(axis=1, thresh=thresh, inplace=True) The code above drops the columns with 40 percent or more missing values. The value we pass to the thresh parameter of dropna function indicates the minimum number of required non-missing values. I also want to remove some outliers. In the salary column, I want to keep the values between the 5th and 95th quantiles. low = np.quantile(marketing.Salary, 0.05) high = np.quantile(marketing.Salary, 0.95) marketing = marketing[marketing.Salary.between(low, high)] We find the lower and upper limits of the desired range by using the quantile function of numpy. These values are then used to filter the dataframe. 
It is important to note that there are many different ways to detect outliers. In fact, the way we have used is kind of superficial. There exist more realistic alternatives. However, the focus here is the pipe function. Thus, you can implement the operation that fits best for your task. The dataframe contains many categorical variables. If the number of categories are few compared to the total number values, it is better to use the category data type instead of object. It saves a great amount of memory depending on the data size. The following code will go over columns with object data type. If the number of categories are less than 5 percent of the total number of values, the data type of the column will be changed to category. cols = marketing.select_dtypes(include='object').columns for col in cols: ratio = len(marketing[col].value_counts()) / len(marketing) if ratio < 0.05: marketing[col] = marketing[col].astype('category') We have done three steps of data cleaning and manipulation. Depending on the task, the number of steps might be more. Let’s create a pipe that accomplish all these tasks. The pipe function takes functions as inputs. These functions need to take a dataframe as input and return a dataframe. Thus, we need to define functions for each task. def drop_missing(df): thresh = len(df) * 0.6 df.dropna(axis=1, thresh=thresh, inplace=True) return df def remove_outliers(df, column_name): low = np.quantile(df[column_name], 0.05) high = np.quantile(df[column_name], 0.95) return df[df[column_name].between(low, high, inclusive=True)] def to_category(df): cols = df.select_dtypes(include='object').columns for col in cols: ratio = len(df[col].value_counts()) / len(df) if ratio < 0.05: df[col] = df[col].astype('category') return df You may argue that what the point is if we need to define functions. It does not seem like simplifying the workflow. You are right for one particular task but we need to think more generally. 
Consider you are doing the same operations many times. In such case, creating a pipe makes the process easier and also provides cleaner code. We have mentioned that the pipe function takes a function as input. If the function we pass to the pipe function has any arguments, we can pass it to the pipe function along with the function. It makes the pipe function even more efficient. For instance, the remove_outliers function takes a column name as argument. The function removes the outliers in that column. We can now create our pipe. marketing_cleaned = (marketing. pipe(drop_missing). pipe(remove_outliers, 'Salary'). pipe(to_category)) It looks neat and clean. We can add as many steps as needed. The only criterion is that the functions in the pipe should take a dataframe as argument and return a dataframe. Just like with the remove_outliers function, we can pass the arguments of the functions to the pipe function as an argument. This flexibility makes the pipes more useful. One important thing to mention is that the pipe function modifies the original dataframe. We should avoid changing the original dataset if possible. To overcome this issue, we can use a copy of the original dataframe in the pipe. Furthermore, we can add a step that makes a copy of the dataframe in the beginning of the pipe. def copy_df(df): return df.copy() marketing_cleaned = (marketing. pipe(copy_df). pipe(drop_missing). pipe(remove_outliers, 'Salary'). pipe(to_category)) Our pipeline is complete now. Let’s compare the original dataframe with the cleaned to confirm it is working. 
marketing.shape (1000,10) marketing.dtypes Age object Gender object OwnHome object Married object Location object Salary int64 Children int64 History object Catalogs int64 AmountSpent int64 marketing_cleaned.shape (900,10) marketing_cleaned.dtypes Age category Gender category OwnHome category Married category Location category Salary int64 Children int64 History category Catalogs int64 AmountSpent int64 The pipeline is working as expected. Conclusion The pipes provide cleaner and more maintainable syntax for data analysis. Another advantage is that they automate the steps of data cleaning and manipulation. If you are doing the same operations over and over, you should definitely consider creating a pipeline. Thank you for reading. Please let me know if you have any feedback. Bio: Soner Yıldırım is a Data Science Enthusiast. See his portfolio here. Original. Reposted with permission. Related: - Data Cleaning: The secret ingredient to the success of any Data Science Project - Data Cleaning and Wrangling in SQL - Merging Pandas DataFrames in Python
https://www.kdnuggets.com/2021/01/cleaner-data-analysis-pandas-pipes.html
CC-MAIN-2021-10
refinedweb
1,065
51.65
Abstract base class for slice shape nodes. More... #include <VolumeViz/nodes/SoSlice.h> This class defines common properties for the SoOrthoSlice, SoObliqueSlice, SoFenceSlice, SoVolumeSkin and SoHeightFieldRender nodes. The alphaUse field controls how the voxel's alpha component is used when drawing the slice. The enableBumpMapping and bumpScale fields control a rendering effect called bump mapping that simulates displacement of the slice surface using the gradient vectors (computed on the GPU) as normal vectors. The largeSliceSupport field enables direct loading of full resolution slice data from the volume reader, without (necessarily) loading complete tiles. SoObliqueSlice, SoOrthoSlice, SoFenceSlice, SoVolumeSkin Returns the type identifier for this class. Reimplemented from SoVolumeShape. Reimplemented in ObliqueSliceBorder, OrthoSliceBorder, SoFenceSlice, SoHeightFieldRender, SoObliqueSlice, SoOrthoSlice, and SoVolumeSkin. Returns the type identifier for this specific instance. Reimplemented from SoVolumeShape. Reimplemented in ObliqueSliceBorder, OrthoSliceBorder, SoFenceSlice, SoHeightFieldRender, SoObliqueSlice, SoOrthoSlice, and SoVolumeSkin. Specifies how to use the alpha component of each voxel's RGBA value. Use enum AlphaUse. Default is ALPHA_OPAQUE. Compatibility Note: The default value changed with the release of Open Inventor version 10. In previous versions the default value was ALPHA_BINARY. The alpha component will typically come from the transfer function, but (since Open Inventor 8.5) could come from direct rendering of an RGBA volume. The alpha values used for rendering the slice will vary depending on the value of this field. The following table shows how the alpha value is computed according to this field and the incoming voxel alpha values: Note that this field only affects alpha values coming from the transfer function or an RGBA volume. Global transparency assigned by an SoMaterial node is not affected. 
This field optionally holds a subgraph containing only core Open Inventor nodes that can be used for rendering when VolumeViz is not available. For example, the alternate representation for an SoOrthoSlice could be a small scene graph containing an SoTexture2 and an SoIndexedFaceSet. Such a scene graph could be converted and exported as a PDF3D file. Default is NULL. But an alternateRep scene graph is automatically created during a write action if SoVolumeRendering::setWriteAlternateRep(TRUE) was called.NOTE: field available since Open Inventor 4.0 Specifies the intensity of the bump mapping effect. Default value is 1.0. Valid values range from 0 (no bump mapping) to no upper limit. (50 is a fairly large value.)NOTE: field available since Open Inventor 6.0 Specifies if a bump mapping effect will be applied to the slice. Default is FALSE. Bump mapping is a rendering effect that simulates displacement of the slice surface using the gradient vectors (computed on the GPU) as normal vectors. Normally lighting should be enabled to maximize the visual effect of bump mapping. This mode requires a graphics card supporting the OpenGL GLSL shading language. If your graphics card does not have the necessary support, the slice will be rendered without bump mapping. Bump mapping is not available for the SoHeightFieldRender node.NOTE: field available since Open Inventor 6.0 Activate/deactivate direct loading of full resolution slice data. Default is FALSE. When TRUE, from).NOTE: field available since Open Inventor 8.0 Limitations : Specifies whether to create RGBA textures. Default is FALSE. If useRGBA is FALSE, indexed textures will be used if the graphics board supports them. If useRGBA is TRUE, RGBA textures will be used instead of indexed textures. RGBA textures allow higher precision but are slower to load and use more texture memory.NOTE: If the dataSet is RGBA then this field is ignored and RGBA textures are used.
https://developer.openinventor.com/refmans/latest/RefManCpp/class_so_slice.html
CC-MAIN-2022-05
refinedweb
583
51.55
Introduction: DIY RC Android Sumobot (Sumo Robot) Quadcopters, Hovercrafts, Drones, Helicopter, you name it, they are getting much more common these days. So I thought of building something new and unique for the DIY community. Sumobots! Weighing 1-3KG with a 19x16cm chassis, they push each other out of the ring with brutal force! An intense tournament setting, guaranteed to make you sweat from adrenaline! It's already considered a sport. ______________________________________________________________________ We have plans on raising a sumobot booth for our school's entrepreneurial fair (simulation), to be held this September. Also, we have high hopes of attracting customers, raising funds for charity! :D Challange Mode: Pay 10 php ($0.20), beat the unbeatable autonomous FAC3PALM with CALSONIC, the RC Bluetooth Sumobot. Winner gets a prize (ex. a cup of Icecream, soda, t-shirt & etc.) Two Player Mode: Pay 5 php ($0.10), and play with your friends, for fun! =D (includes 3 rounds) ______________________________________________________________________ From our past experiences in competitions and local school fairs, the crowd's energy tends to be high all the time, everybody wants to give it a try. People are attracted to the extreme collision and violence :)) just like in real-life wrestling and boxing. Imagine a science fair, with your booth gathering all of the attention of students and teachers. It somehow becomes the center of attraction/ main event :D The bot was designed to compete in the autonomous division, I tweaked it just for fun and added a $8 bluetooth module. It's an Arduino-Android based, open source robot, you can pretty much do anything with it! The app is free from google-play (Bluetooth RC Car). It includes: 8 directional movements, a speed control slider, 3 toggle switches and can use the accelerometer for steering. With the proper tools & materials you can pretty much finish the bot within an hour. 
You can save $41 by making your own DIY Arduino Motor Shield [for only $8] _______________________________________________________________________ What is a sumobot? It is a sport in which two robots attempt to push each other out of the ring. The engineering challenges are for the robot to find its opponent by using infrared or ultra-sonic sensors; this falls in the autonomous division, meaning RC devices are not allowed in this division, AI runs the sumobot. For the RC division, competitors are allowed to use remote controlled devices. Aren't Sumobots Old? What's so unique about them? Yes they are old indeed, it started around the year 2008. But full-cover tutorials aren't easy to find. These sumobots' fabrication methods were hidden from hobbyists. People who compete internationally in "Robotics Competitions" tried everything they could to hide their tech from other competitors. That's the reason why these sumobots aren't popular, it's a game of secrecy in technology. How fun are they? After building at least two of these babies and trying to gather a large group of friends, I bet you all are going to have an awesome time, shouting, cheering and sweating while fighting with these bots. Since it is an OpenSource bot, you can customize it, change the motors, the body's design, the sensors. This gives everyone's bot personalities! ______________________________________________________________________________________ SPECS: - Has a 19x16cm Acrylic Chassis (50mm Thick) - Arduino UNO based (programmed with C++) - 500-1200 RPM Metal Gearbox (High Speed & Torque) - 30 Ampere Pololu Motor Shield (High Performance) - 15 meter range bluetooth module (easy to use TX, RX) - 3x Sharp Proximity Sensors + 2 Ground Sensors The project is so easy even a high-school student can make one :D Please don't forget to vote! Cheers! _______________________________________________________________________________________ My Testdrive/ Dry-run Video (Enjoy!) [Don't Miss This! Must Watch!] 
Step 1: Tools & Materials Here's the list of my exact parts. Try to wait for a Christmas sale in Pololu.com, I saved 35% on their parts. In steps 2 and 3 alternatives will be given. Click on the material so see link. Materials: - Arduino UNO R3/ Arduino Leonardo -----------------------[$17 @Amazon, clone = $12] - Pololu Dual VNH5019 Motor Shield (2x30A) -----------[$39 on Christmas/ reg=$49] - 2x 500-1200 rpm 37D Metal Gearbox (19:1) -----------[$48, DX version = $27] - JY-MCU Bluetooth Module (10-15m Range) -----------[$8.20] - Ground/ Line Digital Sensors (Schmitt Trigger) -------[$4.60] - 3x Sharp Proximity Sensors (Not Included in $90)---[$30 Optional] - 2x Recyled Tamiya RC Wheels (Free - Inventory) ---[Recycled Free!] - DIY/ Custom Gearbox, Aluminum Bracket -------------[Recycled Free! <Alternative Link $7] - Recyled 4cell 1.3Ah LiPo (From Bloated Turningy)--[Recycled Free! <Alternative Link] - 19x16 Acrylic (Thickness 50mm) --------------------------[$5 - Local Hardware Store] - Super Glue Bottle (Gorilla 10ml) ----------------------------[$5 - Free If You have one] Tools: - Leatherman - Dremel Rotary Tool - Jigsaw (Blade for Acrylic) Step 2: Choosing Your Metal Gearbox In my opinion, Pololu is the best place to buy high quality metal gearboxes, they are expensive though. My second choice is DX.com, they sell a lot of cheap ones, but they only sell those with "high-torque: low-speed". Links: - Pololu List: Metal Gearboxes - DealExtreme List: Metal Gearboxes - I have the pololu gearbox 37D (19:1, 500RPM) Ratio Dictionary: - Torque = Provides a high output power. (ex. A truck needs high torque when climbing up a hill with a heavy load "1st gear") - Speed = The RPM rating, swiftness of an object (ex. On a flat surface, race cars need a lot of speed to catch up "7th gear") - When using the same motor setup, high-torque gearboxes have a lower speeds, while high-speed gearboxes have lower torques. 
Step 3: Choosing Your Motor Driver I had the Pololu Dual VNH5019 (High-Performance 30A Motor driver) for about a year now, it performs really well and doesn't even sweat/ heat-up. The Arduino R3 Motor Shield is also a great choice, but can only support 4A motors. DealExtreme on the other hand sells cloned versions for half the price, I haven't tried them yet but the reviews are good. My Preferred Choices: 1st.) Pololu Dual VNH5019 (2x30 Amperes) - $49 ($35 Christmas) 2nd.) DX Dual VNH Clone (2x30 Amperes) - $24 3rd.) Arduino R3 Motor Shield (2x4 Amperes) - $25 4th.) DX, R3 Motor Shield Clone (2x4 Amperes) - $13 Want to save money? Make one for only $8.00! Read more about this in the next page! Or go directly to my instructable guide: DIY Arduino Motor Shield [for only $8!] Step 4: OPTIONAL: Make Your Own Motor Driver (for Only $8!) THIS STEP IS OPTIONAL, YOU CAN SKIP IT RIGHT AWAY! Since most hobbyist prefer to be more practical, you can consider making one for only $8.00! You can read my full instructable guide here: DIY Arduino Motor Shield [for only $8!] (L298N 2x4A) The Bluetooth module is now 100% compatible with the DIY Motor Driver Shield. I've used it on my first ever 500g sumobot. Good to say it's completely reliable, considering the fact of achieving 7th place in the nationals (robotics competition). __________________________________ Specs: Voltage Range: 5- 50 volts Current Range: 2-4 Amperes Power: 25w @75°C Working Temparature: -40°C to 150°C Board Compatibility: Arduino Uno Motor Outputs: 2 Motors (Left & Right) Possible Robot Movements: Left, Right, Forward, Backward & etc.. PWM Pins: [12&11] [6&5] Step 5: Mounting the Motors Use your Dremel, or Jigsaw to cut a 19x16cm piece of acrylic (thickness 50mm), this will be used for your chassis. I found these cool L-shaped aluminum brackets from trash, I used it to fabricate my own "Gearbox Mounts". It's free! 
=D You can ask for some L-shaped, aluminum brackets from a nearby construction crew (aluminum scrap inventory). Step 6: Gluing the Side Panels It took me a lot of sweat to cut these side panels. I did it the hard way, by using a file and hacksaw. In my experience, using rotary tools on acrylic is a big no, since I shattered a lot of them before. For the wheels, I needed an adapter/ hubs for the Tamiya RC wheels, so I fabricated mine using a lathe machine. If you want the ones, ready made, go to BaneBots.com, they sell a wheel+hub package. Step 7: Sharpening the Scoop's Wedge Sumobots need to have a sharp wedge to scoop out the competitor, in head to head combat, the one with the sharpest wedge usually prevails. The scoop's blade was designed for combat, therefore it must be perfect. Again, rotary tools rotate too fast, therefore you don't have that much control over it. I used the trusty old file to do the job. Step 8: Installing the Batteries The battery's position plays a huge role in balance. If placed on the back then there's a bigger chance of having a wheelie effect, this increases traction, but if your batteries are located near the front, traction will decrease while the wheelie effect decreases too. We all know that Lithium-Polymers bloat, and when they bloat they start to become useless (fear of exploding). I debloated them by carefully punching a hole, small enough for the hydrogen gasses to escape, then I patched them. Warning: Debloating batteries is not advisable, do it at your own risk! If you want to buy a new one, the best, quality brand I know is Turningy (4Cell, 1300mAh LiPo) they also feature a 1 year warranty. Pic1: My Current Design (CALSONIC) Pic2: My Previous Design (FAC3PALM) Pic3: My Previous Design (FAC3PALM) Step 9: Installing the Arduino Shields & Sensors As we all know, it's easy to assemble the shields. Just snap in the "Motor Driver" above the Arduino UNO. Then wire up your sensors to your board. 
The Bluetooth connection will be shown in the next step. I found this cool looking PC videocard fan. I installed it since it makes your bot look more intimidating. For the scoop, I hacksawed 4 identical pillars then glued them with super glue and reinforced by clear epoxy. The sensors are optional, I installed them since the robot is intended to fight in the autonomous division. Step 10: Installing & Knowing Your Bluetooth Module You can buy your Bluetooth Module from DX.com, for $8, don't worry about the shipping because it's free. There's a graphic diagram below. Now let's get started, first your package should come with a wire (extender), connect your +5v to your duino, then your ground wire. Second the TX from your Bluetooth should go to your Arduino's RX and the RX of your bluetooth to your Arduino's TX. REMEMBER: Before programming, disconnect your Bluetooth's TX & RX from your Arduino to avoid errors. How it works: So far, I know that the APP sends letters to your Arduino board via bluetooth. Your Arduino, then decodes these by using statements and condition, which will be discussed on the next page. Step 11: The Arduino Codes (C++) [UPDATED! 9/4/13] I reconstructed the codes since they were intended for a different motor shield. I also changed the executions and added more math, divisional processes, thus making the movements more fluid and dynamic. The toggle switches are disabled, you can enable them by removing the "// (comment sign)". Updates: 8/31/13 - Pololu Dual VNH5019 (codes) [Status: now compatible!] 9/04/13 - DIY Arduino Motor Driver (codes) [Status: now compatible!] soon.... - Arduino R3 Motor Shield (codes) [Status: Beta stage (Unstable)] soon.... - DX Dual VNH5019 Clone (codes) [Status: queued experimentation] soon.... - DX R3 Motor Clone Shield (codes) [Status: queued experimentation] Attention: Choose your motor driver codes below! 
- Pololu Dual VNH5019 -------- [Download: Arduino Bluetooth (for Pololu Dual VNH5019).rar] - DIY Arduino Motor Shield ---[ Download: Arduino Bluetooth (for DIY Motor Shield).rar] - The original codes ------------[Download: Bluetooth RC Codes] _________________________________________________________________________________________ // Reconstructed By: Angelo Casimiro (ASCAS) #include "DualVNH5019MotorShield.h" DualVNH5019MotorShield md; char dataIn='S';char determinant;char det;int vel = 0; //Bluetooth Stuff //int power = 4; // Remove This To Enable Tollge Switch #2 int overdrive = 13; //Press Toggle Switch #1, the pin13 LED will light up void setup(){ Serial.begin(9600);md.init(); /*pinMode(power , OUTPUT);*/ //toggle switch function pinMode(overdrive, OUTPUT); } void loop(){ det = check(); // You'l need to reconstruct this if your not using the Pololu Dual VNH5019 while (det == 'F') // F, move forward {md.setSpeeds(vel,vel);det = check();} while (det == 'B') // B, move back {md.setSpeeds(-vel,-vel);det = check();} while (det == 'L') // L, move wheels left {md.setSpeeds(vel/4,vel);det = check();} while (det == 'R') // R, move wheels right {md.setSpeeds(vel,vel/4);det = check();} while (det == 'I') // I, turn right forward {md.setSpeeds(vel,vel/2);det = check();} while (det == 'J') // J, turn right back {md.setSpeeds(-vel,-vel/2);det = check();} while (det == 'G') // G, turn left forward {md.setSpeeds(vel/2,vel);det = check();} while (det == 'H') // H, turn left back {md.setSpeeds(-vel/2,-vel);det = check();} while (det == 'S') // S, stop {md.setSpeeds(0,0);det = check();} //---------------------Toggle switch code------------------// /* while (det == 'U') {digitalWrite(power, HIGH);det = check();} while (det == 'u') {digitalWrite(power, LOW);det = check();}*/ //---------------------Mains Power------------------// while (det == 'W') {digitalWrite(overdrive, HIGH);det = check();} while (det == 'w') {digitalWrite(overdrive, LOW);det = check();} } int check() {if 
(Serial.available() > 0) {dataIn = Serial.read(); if (dataIn == 'F'){determinant = 'F';} else if (dataIn == 'B'){determinant = 'B';}else if (dataIn == 'L'){determinant = 'L';} else if (dataIn == 'R'){determinant = 'R';}else if (dataIn == 'I'){determinant = 'I';} else if (dataIn == 'J'){determinant = 'J';}else if (dataIn == 'G'){determinant = 'G';} else if (dataIn == 'H'){determinant = 'H';}else if (dataIn == 'S'){determinant = 'S';} else if (dataIn == '0'){vel = 400;}else if (dataIn == '1'){vel = 380;} else if (dataIn == '2'){vel = 340;}else if (dataIn == '3'){vel = 320;} else if (dataIn == '4'){vel = 280;}else if (dataIn == '5'){vel = 240;} else if (dataIn == '6'){vel = 200;}else if (dataIn == '7'){vel = 160;} else if (dataIn == '8'){vel = 120;}else if (dataIn == '9'){vel = 80;} else if (dataIn == 'q'){vel = 40;}else if (dataIn == 'U'){determinant = 'U';} else if (dataIn == 'u'){determinant = 'u';}else if (dataIn == 'W'){determinant = 'W';} else if (dataIn == 'w'){determinant = 'w';}}return determinant;} Step 12: Installing the Hood (Takes a Lot of Thinking) It took me a lot of time to design my hood. Since this robot was intended for combat (@National Competition), it is essential to have a highly accessible hood, in case problems start to show up during the competition. My first hood version used a sheet of 1/8 illustration board. The second one used a bent metal sheet, I used the sliding folder's clip as the mount. Step 13: The Android APP Interface APP Download: Arduino Bluetooth RC Car (Android Devices Only) Steps: 1st.) Download App in google play 2nd.) Turn your bluetooth on, and open the app 3rd.) Establish a bluetooth connection betweeen your phone Go to Settings> Press Connect> Press LINVOR (Your BT Device) 4th.) Wait for the red light to turn green. And Your'e Done! Troubleshooting: If robot doesn't respond, move the speed slider (Left to Right), this bug shows up sometimes. Step 14: Your'e Done! Enjoy Your, Heavy Combat Sumobot! Your'e done! Cheers! 
You've finished your Android Controlled Sumobot. Cheers! =D Don't forget to vote and leave a comment. =D Step 15: All My Sumobot Videos (RC & Autonomous) CALSONIC 1KG Hybrid (Mrk.14 Bluetooth Mode) FAC3PALM 400g Mini Sumobot (Mrk.13 Autonomous Mode) National Competition (Philippines) FAC3PALM 500g Mini Sumobot (Mrk.3 Autonomous Mode) Monster 500g Mini Sumobot (Mrk.2 Autonomous Mode) Step 16: Printable PDF File Step 17: Update: (11/1/2013) Black Edition. Painted the chassis off with a mat black finish :DD Second Prize in the Remote Control Contest Participated in the Microcontroller Contest Participated in the Arduino Contest Be the First to Share Recommendations 36 Discussions 7 years ago on Introduction Hello there, its me again. That looks superb! Well said and well documented. You got one hell of skill there in craftsmanship. I can't believe you hacksawed the acrylic, the cuts are so clean, for one second there I thought this was another CNC related post. Good Job! You definitely got my vote! You have a bright future ahead of you. I have one question, how do you add a wireless camera to your bot, and what's the best android "RC +Camera" app available? Reply 7 years ago on Introduction Hello there classmate. Thanks for the comment and vote! For the acrylic, I used a regular file and manually filed it until the edges are clean. Oh, if you want to add a camera, the best thing to use are IP cams, but I don't think google play has a RC app with a camera displayed on it. Instead you could get 2 android devices, one for the IP cam viewer, and one for the remote, more like a dual screen/ device setup. This way you can have a First-Person-View of your sumobattle. Question 1 year ago how we fix 'det' was not declared in this scope 3 years ago Thanks for sharing bro :), can i have a copy of your full schematic diagram ? I'm planning to create this project for our control system hohohoh. 
Can I have a copy of it? Thanks for sharing again :) God Bless :) 3 years ago Good day ASCAS, do you already have the code for the DX Dual VNH clone? We don't have the original Pololu motor driver, but we have the clone version. Thanks and more power. 3 years ago Good day, sir ASCAS. Do you already have code for the DX Dual VNH5019 clone? Because we don't have the Pololu motor driver, we've just got a clone. Thanks and more power. 5 years ago Can you please upload the full circuit diagram? 5 years ago on Step 1 Sir, nice job sharing the materials used in this project. We also have a project to make a sumobot. However, Arduino programming is our problem. Can you please share the program used in your robot? Thanks, sir. 5 years ago on Introduction Hi ASCAS! I'm from the Philippines too. Regarding the buttons on the app, will the robot keep going forward even if I just pressed the forward button once? I'm a little bit confused about the program. Please help. Thanks. 6 years ago Hi Angelo! This is Justin of ABS-CBN NEWS Channel 2. We would like to invite you for a sit-down interview with Kuya Kim this week. It's about your latest invention (electricity-generating footwear). Would you be available? Please send me a message if so. Thank you and more power! 6 years ago on Introduction Hey, hi there! Umm, what changes do I have to make if I replace the motor driver with an L293D IC? What changes will I have to do in the code and wiring? Please reply fast... need help! Reply 6 years ago on Introduction Just replace the codes in the curly braces { } with the sample codes of the L293D. Why are you in such a hurry? Reply 6 years ago on Introduction Oh, OK, thanks! Got it! Yep, I was in a hurry for an exhibition. 6 years ago How do you recreate the coding if you have a different type of motor shield? 6 years ago on Introduction Is this compatible with a regular 2.4 GHz (I think it's GHz, not sure...) RC car remote?
If it is, will it be possible for it to be both RC and Animatronic? I would like to know because I'm doing this project for school (I'm planning to make the code myself using a different program for credit) and need to know. That way I can figure out whether or not I'm going to make it RC or Animatronic or both (if possible). Thanks! ~Tarry 6 years ago on Introduction Do you have any plans, designs, dimensions, or cut files for the chassis, hood, and structure? Great project! Reply 6 years ago on Introduction Sorry, the blueprints and cut files were not encoded digitally. I drew the cut-layout with my drafting equipment. I'll try to find my cut-layout, scan it, then upload it. Thanks! Reply 6 years ago on Introduction If you do that, I will convert the drawings and send them to you for upload. Thanks! Reply 6 years ago on Introduction Really? Thanks! I'm still digging deep in my file cabinet. 6 years ago on Introduction Voted!
https://www.instructables.com/Android-Controlled-Bluetooth-Sumobot-Ultimate-DIY-/
CC-MAIN-2020-45
refinedweb
3,401
73.07
Advance Praise for Head First Python “Head First Python is a great introduction to not just the Python language, but Python as it’s used in the real world. The book goes beyond the syntax to teach you how to create applications for Android phones, Google’s App Engine, and more.” — David Griffiths, author and Agile coach “Where other books start with theory and progress to examples, Head First Python jumps right in with code and explains the theory as you read along. This is a much more effective learning environment, because it engages the reader to do from the very beginning. It was also just a joy to read. It was fun without being flippant and informative without being condescending. The breadth of examples and explanation covered the majority of what you’ll use in your job every day. I’ll recommend this book to anyone starting out on Python.” — Jeremy Jones, coauthor of Python for Unix and Linux System Administration Download from Wow! eBook <> “Head First Python is a terrific book for getting a grounding in a language that is increasing in relevance day by day.” — Phil Hartley, University of Advancing Technology Praise for other Head First books “Kathy and Bert’s Head First Java transforms the printed page into the closest thing to a GUI you’ve ever seen. 
In a wry, hip manner, the authors make learning Java an engaging ‘what’re they gonna do next?’ experience.” — Warren Keuffel, Software Development Magazine “Beyond the engaging style that drags you forward from know-nothing into exalted Java warrior status, Head First Java covers a huge amount of practical matters that other texts leave as the dreaded ‘exercise “I feel like a thousand pounds of books have just been lifted off of my head.” — Ward Cunningham, inventor of the Wiki and founder of the Hillside Group .” — Erich Gamma, IBM Distinguished Engineer, and coauthor of Design Patterns “One of the funniest and smartest books on software design I’ve ever read.” — Aaron LaBerge, VP Technology, ESPN.com “What used to be a long trial and error learning process has now been reduced neatly into an engaging paperback.” — Mike Davidson, CEO, Newsvine, Inc. “Elegant design is at the core of every chapter here, each concept conveyed with equal doses of pragmatism and wit.” — Ken Goldstein, Executive Vice President, Disney Online “I ♥ Head First HTML with CSS & XHTML—it teaches you everything you need to learn in a ‘fun-coated’ format.” — Sally Applin, UI Designer and Artist “Usually when reading through a book or article on design patterns, I’d have to occasionally stick myself in the eye with something just to make sure I was paying attention. Not with this book. Odd as it may sound, this book makes learning about design patterns fun. “While other books on design patterns are saying ‘Bueller…Bueller…Bueller…’ this book is on the float belting out ‘Shake it up, baby!’” — Eric Wuehler “I literally love this book. 
In fact, I kissed this book in front of my wife.” — Satish Kumar Other related books from O’Reilly Learning Python Programming Python Python in a Nutshell Python Cookbook Python for Unix and Linux System Administration Other books in O’Reilly’s Head First series Head First Algebra Head First Ajax Head First C#, Second Edition Head First Design Patterns Head First EJB Head First Excel Head First 2D Geometry Head First HTML with CSS & XHTML Head First iPhone Development Head First Java Head First JavaScript Head First Object-Oriented Analysis & Design (OOA&D) Head First PHP & MySQL Head First Physics Head First PMP, Second Edition Head First Programming Head First Rails Head First Servlets & JSP, Second Edition Head First Software Development Head First SQL Head First Statistics Head First Web Design Head First WordPress Head First Python Wouldn’t it be dreamy if there were a Python book that didn’t make you wish you were anywhere other than stuck in front of your computer writing code? I guess it’s just a fantasy... Paul Barry Beijing • Cambridge • Farnham • Kln • Sebastopol • Tokyo Head First Python by Paul Barry Copyright © 2011 Paul Bar corporate@oreilly.com. Series Creators: Kathy Sierra, Bert Bates Editor: Brian Sawyer Cover Designer: Karen Montgomery Production Editor: Rachel Monaghan Proofreader: Nancy Reinhardt Indexer: Angela Howard Page Viewers: Deirdre, Joseph, Aaron, and Aideen Deirdre Aideen Printing History: November 2010: First Edition. Joseph Aaron The O’Reilly logo is a registered trademark of O’Reilly Media, Inc. The Head First series designations, Head First Python, author assume no responsibility for errors or omissions, or for damages resulting from the use of the information contained herein. No athletes were pushed too hard in the making of this book. TM This book uses RepKover™, a durable and flexible lay-flat binding. 
ISBN: 978-1-449-38267-4 [M] I dedicate this book to all those generous people in the Python community who have helped to make this great little language the first-rate programming technology it is. And to those that made learning Python and its technologies just complex enough that people need a book like this to learn it. the author Author of Head First Python Paul Barry recently worked out that he has been Paul was there), completed a B.Sc. and M.Sc. in Computing, written or cowritten three other books, as well as a bunch of technical articles for Linux Journal (where he’s a Contributing Editor). When Paul first saw Head First HTML with CSS & XHTML, he loved it so much he knew immediately that the Head First approach would be a great way to teach programming. He was only too delighted then, together with David Griffiths, to create Head First Programming in an attempt to prove his hunch correct. Paul’s day job is working as a lecturer at The Institute of Technology, Carlow, in Ireland. As part of the Department of Computing and Networking, Paul gets to spend his day exploring, learning, and teaching programming technologies to his students, including Python. Paul recently completed a post-graduate certificate in Learning and Teaching and was more than a bit relieved to discover that most of what he does conforms to current thirdlevel best practice. viii table of contents Table of Contents (Summary) Intro xxiii 1 Meet Python: Everyone Loves Lists 1 2 Sharing Your Code: Modules of Functions 33 3 Files and Exceptions: Dealing with Errors 73 4 Persistence: Saving Data to Files 105 5 Comprehending Data: Work That Data! 
139 6 Custom Data Objects: Bundling Code with Data 173 7 Web Development: Putting It All Together 213 8 Mobile App Development: Small Devices 255 9 Manage Your Data: Handling Input 293 10 Scaling Your Webapp: Getting Real 351 11 Dealing with Complexity: Data Wrangling 397 i Leftovers: The Top Ten Things (We Didn’t Cover) 435 Table of Contents (the real thing) Intro Your brain on Python. Python? Who is this book for? We know what you’re thinking Metacognition Bend your brain into submission Read me The technical review team Acknowledgments xxiv xxv xxvii xxix xxx xxxii xxxiii ix table of contents 1 meet python Everyone loves lists… What’s to like about Python? 2 Install Python 3 3 Use IDLE to help learn Python 4 Work effectively with IDLE 5 Deal with complex data 6 Create simple Python lists 7 Lists are like arrays 9 Add more data to your list 11 Work with your list data 15 For loops work with lists of any size 16 Store lists within lists 18 Check a list for a list 20 Complex data is hard to process 23 Handle many levels of nested lists 24 Don’t repeat code; create a function 28 Create a function in Python 29 Recursion to the rescue! 31 Your Python Toolbox 32 & Terry Gilliam, 91 mins The Holy Grail, 1975, Terry Jones Graham Chapman iam, Eric Idle & Terry Jones Michael Palin, John Cleese, Terry Gill x table of contents 2 sharing your code Modules of functions. 
nester nester.py It’s too good not to share 34 Turn your function into a module 35 Modules are everywhere 36 Comment your code 37 Prepare your distribution 40 Build your distribution 41 A quick review of your distribution 42 Import a module to use it 43 Python’s modules implement namespaces 45 Register with the PyPI website 47 Upload your code to PyPI 48 Welcome to the PyPI community 49 Control behavior with an extra argument 52 Before your write new code, think BIF 53 Python tries its best to run your code 57 Trace your code 58 Work out what’s wrong 59 Update PyPI with your new code 60 You’ve changed your API 62 Use optional arguments 63 Your module supports both APIs 65 Your API is still not right 66 Your module’s reputation is restored 70 Your Python Toolbox 71 setup.py xi table of contents 3 files and exceptions Dealing with errors. Data is external to your program 74 It’s all lines of text 75 Take a closer look at the data 77 Know your data 79 Know your methods and ask for help 80 Know your data (better) 82 Two very different approaches 83 Add extra logic 84 Handle exceptions 88 Try first, then recover 89 Identify the code to protect 91 Take a pass on the error 93 What about other errors? 96 Add more error-checking code… 97 …Or add another level of exception handling split(beans) xii 98 So, which approach is best? 99 You’re done…except for one small thing 101 Be specific with your exceptions 102 Your Python Toolbox 103 table of contents 4 persistence Saving data to files!’] Programs produce data 106 Open your file in write mode 110 Files are left open after an exception! 114 Extend try with finally 115 Knowing the type of error is not enough 117 Use with to work with files 120 Default formats are unsuitable for files 124 Why not modify print_lol()? 126 Pickle your data 132 Save with dump and restore with load 133 Generic file I/O with pickle is the way to go! 137 Your Python Toolbox 138 [!'] xiii table of contents 5 comprehending data Work that data! 
Data comes in all shapes and sizes, formats and encodings. To work effectively with your data, you often have to manipulate and transform it into a common format to allow for efficient processing, sorting, and storage. In this chapter, you’ll explore Python goodies that help you work your data up into a sweat, allowing you to achieve data-munging greatness. This chapter’s guaranteed to give you a workout! xiv Coach Kelly needs your help 140 Sort in one of two ways 144 The trouble with time 148 Comprehending lists 155 Iterate to remove duplicates 161 Remove duplicates with sets 166 Your Python Toolbox 172 table of contents 6 custom data objects Bundling code with data. Coach Kelly is back (with a new file format) 174 Use a dictionary to associate data 178 Bundle your code and its data in a class 189 Define a class 190 Use class to define classes 191 The importance of self 192 Every method’s first argument is self 193 Inherit from Python’s built-in list 204 Coach Kelly is impressed 211 Your Python Toolbox 212 The Object Factory xv table of contents 7 web development Putting it all together. xvi It’s good to share 214 You can put your program on the Web 215 What does your webapp need to do? 218 Design your webapp with MVC 221 Model your data 222 View your interface 226 Control your code 234 CGI lets your web server run programs 235 Display the list of athletes 236 The dreaded 404 error! 242 Create another CGI script 244 Enable CGI tracking to help with errors 248 A small change can make all the difference 250 Your webapp’s a hit! 252 Your Python Toolbox 253 table of contents 8 mobile app development Small devices. 
The world is getting smaller 256 Coach Kelly is on Android 257 Don’t worry about Python 2 259 Set up your development environment 260 Configure the SDK and emulator 261 Install and configure Android Scripting 262 Add Python to your SL4A installation 263 Test Python on Android 264 Define your app’s requirements 266 The SL4A Android API 274 Select from a list on Android 278 The athlete’s data CGI script 281 The data appears to have changed type 284 JSON can’t handle your custom datatypes 285 Run your app on a real phone 288 Configure AndFTP 289 The coach is thrilled with his app 290 Your Python Toolbox 291 xvii table of contents 9 manage your data Handling input. xviii Your athlete times app has gone national 294 Use a form or dialog to accept input 295 Create an HTML form template 296 The data is delivered to your CGI script 300 Ask for input on your Android phone 304 It’s time to update your server data 308 Avoid race conditions 309 You need a better data storage mechanism 310 Use a database management system 312 Python includes SQLite 313 Exploit Python’s database API 314 The database API as Python code 315 A little database design goes a long way 316 Define your database schema 317 What does the data look like? 318 Transfer the data from your pickle to SQLite 321 What ID is assigned to which athlete? 322 Insert your timing data 323 SQLite data management tools 326 Integrate SQLite with your existing webapp 327 You still need the list of names 332 Get an athlete’s details based on ID 333 You need to amend your Android app, too 342 Update your SQLite-based athlete data 348 The NUAC is over the moon! 349 Your Python Toolbox 350 table of contents 10 scaling your webapp Getting real. 
There are whale sightings everywhere 352 The HFWWG needs to automate 353 Build your webapp with Google App Engine 354 Download and install App Engine 355 Make sure App Engine is working 356 App Engine uses the MVC pattern 359 Model your data with App Engine 360 What good is a model without a view? 363 Use templates in App Engine 364 Django’s form validation framework 368 Check your form 369 Controlling your App Engine webapp 370 Restrict input by providing options 376 Meet the “blank screen of death” 378 Process the POST within your webapp 379 Put your data in the datastore 380 Don’t break the “robustness principle” 384 Accept almost any date and time 385 It looks like you’re not quite done yet 388 Sometimes, the tiniest change can make all the difference… 389 Capture your user’s Google ID, too 390 Deploy your webapp to Google’s cloud 391 Your HFWWG webapp is deployed! 394 Your Python Toolbox 395 xix table of contents 11 dealing with complexity Data wrangling. xx What’s a good time goal for the next race? 398 So…what’s the problem? 400 Start with the data 401 Store each time as a dictionary 407 Dissect the prediction code 409 Get input from your user 413 Getting input raises an issue… 414 416 The trouble is with time 418 The time-to-seconds-to-time module 419 The trouble is still with time… 422 Port to Android 424 Your Android app is a bunch of dialogs 425 Put your app together… 429 Your app’s a wrap! 431 Your Python Toolbox 432 table of contents i leftovers The Top Ten Things (we didn’t cover). #1: Using a “professional” IDE 436 #2: Coping with scoping 437 #3: Testing 438 #4: Advanced language features 439 #5: Regular expressions 440 #6: More on web frameworks 441 #7: Object relational mappers and NoSQL 442 #8: Programming GUIs 443 #9: Stuff to avoid 444 #10: Other books 445 xxi how to use this book Intro I can’t believe they put that in a Python book. 
In this section, we answer the burning question: “So why DID they put that in a Python book?” xxiii how to use this book Who is this book for? If you can answer “yes” to all of these: 1 Do you already know how to program in another programming language? 2 Do you wish you had the know-how to program Python, add it to your list of tools, and make it do new things? 3 Do you prefer actually doing things and applying the stuff you learn over listening to someone in a lecture rattle on for hours on end? this book is for you. Who should probably back away from this book? If you can answer “yes” to any of these: 1 Do you already know most of what you need to know to program with Python? 2 Are you looking for a reference book to Python, one that covers all the details in excruciating detail? 3. [Note from marketing: this book is for anyone with a credit card… we’ll accept a check, too.] xxiv intro the intro We know what you’re thinking “How can this be a serious Python book?” “What’s with all the graphics?” “Can I actually learn it this way?” We know what your brain is thinking Your brain thinks THIS is important? Great. Only 450 more dull, dry, boring pages. Neurons fire. Emotions crank up. Chemicals surge. And that’s how your brain knows… This must be important! Don’t forget it! But imagine you’re at home, or in a library. It’s a safe, warm, tiger‑free zone. You’re studying. Getting ready for an exam. Or trying to learn some tough technical topic your boss thinks will take a week, ten days at the most. Your brain thinks THIS isn’t worth saving. you are here 4 xxv how to use this book We think of a “Head First” reader as a learner. So what does it take to learn something? First, you have to get it, and then make sure you don’t forget it. It’s not about pushing facts into your head. Based on the latest research in cognitive science, neurobiology, and educational psychology, learning takes a lot more than text on a page. We know what turns your brain on.
We know t tex n tha re mo lot takes a ciples: First lear ning prin Some of the Head much ne, and make learning morable than words alo me re mo far more s are ng s age dies). It also makes thi Make it visual. Im recall and transfer stu in ent than on vem pro her im rat 89% hics they relate to, more effective (up to or near the gr ap in th wi s ated to the rd rel wo ms e ble th as likely to solve pro understandable. Put rs will be up to twice rne lea and e, pag r the the bottom or on ano t. ten con performed up ent studies, students alized style. In rec on rs pe d an l first-person, a na to the reader, using Use a conver satio content spoke directly the if ts tes g casual language. nin Use . ear s instead of lec turing to 40% better on post-l rie sto l Tel e. ton l ma ner par ty her than tak ing a for ion to: a stimulating din conversational style rat uld you pay more attent wo ich Wh . sly iou ser Don’t take yourself too e? companion or a lec tur your neurons, , unless you actively flex eply. In other words de re mo inspired to ink and th s, d, engaged, curiou Get the learner to der has to be motivate rea A d. hea llenges, r cha you d in nee ns d for that, you nothing much happe ate new knowledge. An ner ge and and ns, in sio bra clu the con olve both sides of solve problems, draw s, and activities that inv ion est qu ing vok pro exercises, and thoughtmultiple senses. rn this but I can’t the “I really want to lea had all ’ve We . ion nt of the ordinary, he reader’s atte to things that are out Get—and keep—t r brain pays attention You e. enc eri esn’t have to be exp e” ic on gh, technical top do stay awake past page ed. Learning a new, tou ect exp un , ing tch -ca interesting, strange, eye ick ly if it’s not. l learn much more qu boring. Your brain wil is largely dependent remember something w that your ability to kno w no We s. you feel something. ion You remember when Touch their emot ut. abo e car you at wh t. You remember emotions like his dog. 
We’re talking on its emotional conten ries about a boy and sto ng chi ren solve a puzzle, rt‑w you hea that comes when No, we’re not talking the feeling of “I Rule!” and , ” …? the more technical hat “w “I’m , t w something tha surprise, curiosity, fun hard, or realize you kno is nks thi e els y od learn something everyb engineering doesn’t. than thou” Bob from xxvi intro the intro Metacognition: thinking about thinking. I wonder how I can trick my brain into remembering this stuff... But we assume that if you’re holding this book, you really want to learn how to design user-friendly websites.‑being. As important as a tiger. Otherwise, you’re in for a constant battle, with your brain doing its best to keep the new content from sticking. So just how DO you get your brain to treat programming like it… you are here 4 xxvii how to use this book Here’s what WE did: more than 80. xxviii intro the intro Here’s what YOU can do to bend your brain into submission So, we did our part. The rest is up to you. These tips are a starting point; listen to your brain and figure out what works for you and what doesn’t. Try new things. Cut this out and sti on your refrigerator.ck it 3 4 6 7. 8. 9 Drink water. Lots of it. Your brain works best in a nice bath of fluid. Dehydration (which can happen before you ever feel thirsty) decreases cognitive function.. Write a lot of code! There’s only one way to learn to program: writing a lot of code. And that’s what you’re going to do throughout this book. Coding is a skill, and. you are here 4 xxix how to use this book Read Me. Chapters 8 through.. xxx intro the intro The a lot of the code examples on the Web so you can copy and paste them as needed. You’ll find them at two locations:. you are here 4 xxxi the review team The technical review team David Griffiths Jeremy Jones Phil Hartley Technical Reviewers: David Griffiths is the author of Head First Rails and the coauthor of Head First Programming. He fellow Head First author— Dawn. 
xxxii intro. the intro Acknowledgments. Brian Sawyer. you are here 4 xxxiii safari books online. xxxiv intro 1 meet python Everyone loves lists Yes, yes...we have lots of Pythons in stock... I’ll just make a quick list.… this is a new chapter 1 python greatness What’s to like about Python? Lots. Rather than tell you, this book’s goal is to show you the greatness that is Python. Yeah... I need something that I can deploy on PCs, Macs, handhelds, phones,the Web, on big servers and small clients...and it has to let me build GUIs quickly and painlessly... OK, yes, yeah, I’m listening... What?!? You’re kidding! Python can do all that? Before diving head first into Python, let’s get a bit of housekeeping out of the way. To work with and execute the Python code in this book, you need a copy of the Python 3 interpreter on your computer. Like a lot of things to do with Python, it’s not difficult to install the interpreter. Assuming, of course, it’s not already there… 2 Chapter 1 meet python Install Python 3 Before you write and run Python code, you need to make sure the Python interpreter is on your computer. In this book, you’ll start out with Release 3 of Python, the very latest (and best) version of the language. A release of Python might already be on your computer. Mac OS X comes with Python 2 preinstalled, as do most versions of Linux (which can also ship with Release 3). Windows, in contrast, doesn’t include any release of Python. Let’s check your computer for Python 3. Open up a command-line prompt and, if you are using Mac OS X or Linux, type: python3 -V On Windows, use this command: That’s an UP “v”, by the wayP.ERCASE c:\Python31\python.exe -V Using the ” UPPERCASE “vPython he t results in ing on version appear screen. Without the UPPERCASE “v”, you are taken into the Python interpreter. Use the quit() command to exit the interpreter and return to your OS prompt. Do this! 
If Python 3 is missing from your computer, download a copy for your favorite OS from the website. File Edit Window Help WhichPython? $ python3 -V Python 3.1.2 $ $ python3 Python 3.1.2 (r312:79360M, Mar 24 2010, 01:33:18) [GCC 4.0.1 (Apple Inc. build 5493)] on darwin Type "help", "copyright", "credits" or "license" for more info. >>> >>> quit() $ When you install Python 3, you also get IDLE, Python’s simple—yet surprisingly useful— integrated development environment. IDLE includes a color syntax-highlighting editor, a debugger, the Python Shell, and a complete copy of Python 3’s online documentation set. Let’s take a quick look at IDLE. you are here 4 3 idle hands Use IDLE to help learn Python IDLE lets you write code in its full-featured code editor as well as experiment with code at the Python Shell. You’ll use the code editor later in this book but, when learning Python, IDLE’s shell really rocks, because it lets you try out new Python code as you go. When you first start IDLE, you are presented with the “triple chevron” prompt (>>>) at which you enter code. The shell takes your code statement and immediately executes it for you, displaying any results produced on screen. IDLE knows all about Python syntax and offers “completion hints” that pop up when you use a built-in function like print(). Python programmers generally refer to built-in functions as BIFs. The print() BIF displays messages to standard output (usually the screen). Enter your codome pt. at the >>> pr Unlike other C-based languages, which use { and } to delimit blocks, Python uses indentation instead. See results immediately. IDLE uses colored syntax to highlight your code. By default, built-in functions are purple, strings are green, and language keywords (like if) are orange. Any results produced are in blue. If you hate these color choices, don’t worry; you can easily change them by adjusting IDLE’s preferences. 
IDLE also knows all about Python’s indentation syntax, which requires code blocks be indented. When you start with Python, this can be hard to get used to, but IDLE keeps you straight by automatically indenting as needed. 4 Chapter 1 IDLE knows Python’s syntax and helps you conform to the Python indentation rules. meet python Work effectively with IDLE This is how IDLE looks on my computer. It might look little different on yours, buta not by much. (And, yes, it’s meant to look this ugly.) IDLE has lots of features, but you need to know about only a few of them to get going. TAB completion Start to type in some code, and then press the TAB key. IDLE will offer suggestions to help you complete your statement. Type “pr” and then TAB at the >>> prompt to see IDLE’s list of command completion suggestions. Recall code statements Press Alt-P to recall the previous code statement entered into IDLE or press Alt-N to move to the next code statement (assuming there is one). Both key combinations can be used to cycle rapidly through all of the code you’ve entered into IDLE, re-executing any code statements as needed. Edit recalled code Once you recall your code statement, you can edit it and move around the statement using the arrow keys. It’s possible to edit any statement that you’ve previously entered, even code statements that span multiple lines. Alt-P for Previous Alt-N for Next Unless you’re on a Mac, in which case it’s Ctrl-P and Ctrl-N. Adjust IDLE’s preferences IDLE’s preferences dialog lets you adjust its default Tweak IDLE behavior to your tastes. There are four tabs of settings to to your heart’s tweak. You can control font and tab behavior, the colors content. used to syntax highlight, the behavior of certain keycombinations, and IDLE’s start-up settings. So, if shocking pink strings is really your thing, IDLE gives you the power to change how your code looks on screen. 
you are here 4 5 dealing with data Deal with complex data Any program of any worth that you create has to work with data. Sometimes, the data is simple and straightforward—easy to work with. Other times, the data you have to work with is complex in its structure and meaning, forcing you to work hard to make sense of it all, let alone write code to process it. To tame complexity, you can often arrange your data as a list: there’s the list of customers, your friend’s list, the shopping list, and your to-do list (to name a few). Arranging data in lists is so common that Python makes it easy for you to create and process lists in code. Let’s look at some complex data before learning how to create and process list data with Python. I’ve been making lists of movie data for years and would love to be able to process it on my laptop... A highly organized movie buff There sure is a lot of data listed here. This data is complex, too. On first glance, this collection of data does indeed look quite complex. However, the data appears to conform to some sort of structure: there’s a line for a list of basic movie facts, then another line for the lead actor(s), followed by a third line listing the movie’s supporting actors. This looks like a structure you can work with… 6 Chapter 1 meet python Create simple Python lists Let’s start with the following simple list of movie titles and work up from there: The Holy Grail The Life of Brian The Meaning of Life A short list of some Monty Python movies Here’s the same list written in a way that Python understands: movies = ["The Holy Grail", "The Life of Brian", "The Meaning of Life"] To turn the human-friendly list into a Python-friendly one, follow this fourstep process: 1 Convert each of the names into strings by surrounding the data with quotes. 2 Separate each of the list items from the next with a comma. 3 Surround the list of items with opening and closing square brackets. 
4 Assign the list to an identifier (movies in the preceding code) using the assignment operator (=). It’s perfectly OK to put your list creation code all on one line, assuming, of course, that you have room: movies = ["The Holy Grail", "The Life of Brian", "The Meaning of Life"] This works, too. you are here 4 7 not my type Hang on a second! Aren’t you forgetting something? Don’t you need to declare type information for your list? No, because Python’s variable identifiers don’t have a type. Many other programming languages insist that every identifier used in code has type information declared for it. Not so with Python: identifiers are simply names that refer to a data object of some type. Think of Python’s list as a high-level collection. The type of the data items is not important to the list. It’s OK to state that your movies list is a “collection of strings,” but Python doesn’t need to be told this. All Python needs to know is that you need a list, you’ve given it a name, and the list has some data items in it. 8 Chapter 1 meet python Lists are like arrays When you create a list in Python, the interpreter creates an array-like data structure in memory to hold your data, with your data items stacked from the bottom up. Like array technology in other programming languages, the first slot in the stack is numbered 0, the second is numbered 1, the third is numbered 2, and so on: This is your “movies” list in code. Item #0 Item #2 Item #1 movies = ["The Holy Grail", "The Life of Brian", "The Meaning of Life"] This is your “movies” list in memory. "The Meaning of Life" "The Life of Brian" "The Holy Grail" Access list data using the square bracket notation 2 Each data item in the list has a numeric OFFSET associated with it. 1 0 Python starts counting from zero. As with arrays, you can access the data item in a list slot using the standard square bracket offset notation: print(movies[1]) a Use the “print()” BIF to display data item on screen. 
The Life of Brian No surprise here, really…the requested data appears on screen . Let’s use IDLE to learn a bit about how lists work. you are here 4 9 idle session Lists in Python might look like arrays, but they are much more than that: they are full-blown Python collection objects. This means that lists come with ready-to-use functionality in the form of list methods. Let’s get to know some of Python’s list methods. Open up IDLE and follow along with the code entered at the >>> prompt. You should see exactly the same output as shown here. Start by defining a list of names, which you then display on screen using the print() BIF. Then, use the len() BIF to work out how many data items are in the list, before accessing and displaying the value of the second data item: >>> cast = ["Cleese", 'Palin', 'Jones', "Idle"] >>> print(cast) ['Cleese', 'Palin', 'Jones', 'Idle'] >>> print(len(cast)) 4 >>> print(cast[1]) It’s OK to invoke a BIF on . the results of another BIF Palin With your list created, you can use list methods to add a single data item to the end of your list (using the append() method), remove data from the end of your list (with the pop() method), and add a collection of data items to the end of your list (thanks to the extend() method): >>> cast.append("Gilliam") >>> print(cast) Methods are invoked using the common “.” dot notation. ['Cleese', 'Palin', 'Jones', 'Idle', 'Gilliam'] >>> cast.pop() 'Gilliam' >>> print(cast) ['Cleese', 'Palin', 'Jones', 'Idle'] ted by commas, It’s another list: items separats. 
surrounded by square bracke >>> cast.extend(["Gilliam", "Chapman"]) >>> print(cast) ['Cleese', 'Palin', 'Jones', 'Idle', 'Gilliam', 'Chapman'] Finally, find and remove a specific data item from your list (with the remove() method) and then add a data item before a specific slot location (using the insert() method): >>> cast.remove("Chapman") >>> print(cast) ['Cleese', 'Palin', 'Jones', 'Idle', 'Gilliam'] >>> cast.insert(0, "Chapman") >>> print(cast) ['Chapman', 'Cleese', 'Palin', 'Jones', 'Idle', 'Gilliam'] 10 Chapter 1 After all that, we end up wit the cast of Monty Python’s h Flying Circus! meet python Add more data to your list With your list of movie names created, now you need to add more of the movie buff ’s complex data to it. You have a choice here: I think I’ll use the appropriate list methods to add the extra data I need. With something this small, I’m gonna simply re-create my list from scratch. Either strategy works. Which works best for you depends on what you are trying to do. Let’s recall what the movie buff ’s data looks like: A number representing the year is next. The next piece of data you need to add to your list is a number (which represents the year the movie was released), and it must be inserted after each movie name. Let’s do that and see what happens. you are here 4 11 mixed type What?!? There’s no way you can mix data of different types in lists, is there? Surely this is madness? No, not madness, just the way Python works. Python lists can contain data of mixed type. It’s perfectly OK to mix strings with numbers within the same Python list. In fact, you can mix more than just strings and numbers; you can store data of any type in a single list, if you like. Recall that a Python list is a high-level collection, designed from the get-go to store a collection of “related things.” What type those things have is of little interest to the list, because the list exists merely to provide the mechanism to store data in list form. 
So, if you really need to store data of mixed type in a list, Python won’t stop you. 12 Chapter 1 meet python Let’s take a bit of time to try to work out which strategy to use when adding data to your list in this case. Given the following list-creation code: movies = ["The Holy Grail", "The Life of Brian", "The Meaning of Life"] 1 Work out the Python code required to insert the numeric year data into the preceding list, changing the list so that it ends up looking like this: ["The Holy Grail", 1975, "The Life of Brian", 1979, "The Meaning of Life", 1983] Write your insertion code here. 2 Now write the Python code required to re-create the list with the data you need all in one go: Write your re-creation code here. In this case, which of these two methods do you think is best? (Circle your choice). 1 or 2 you are here 4 13 additional data § Let’s take a bit of time to try and work out which strategy to use when adding data to your list in this case. Given the following list-creation code: movies = ["The Holy Grail", "The Life of Brian", "The Meaning of Life"] 1 You were to work out the Python code required to insert the numeric year data into the preceding list: Insert the first year BEFORE the second list item. Insert the second year BEFORE the fourth list item. movies.insert(1, 1975) Did you get the math right? After the first insertion, the list grows, so you have to tak that into consideration when e working out where to do the second insert. movies.insert(3, 1979) movies.append(1983) Then append the last year to the end of the list. 2 You were also to write the Python code required to recreate the list with the data you need all in one go: movies = ["The Holy Grail", 1975, "The Life of Brian", 1979, “movies” Assign all your data to the usly there is vio pre s identifier. What wa replaced. "The Meaning of Life", 1983] In this case, which of these two methods do you think is best? (You were to circle your choice.) 
1 14 Chapter 1 or 2 ter Yes, method 2 seems the betsma option here…that is, for a ll list like this. Also, there’s no tricky counting to do. meet python Work with your list data You often need to iterate over your list and perform some action on each item as you go along. Of course, it is always possible to do something like this, which works but does not scale: Define a list items with theannad populate its mes of two movies. fav_movies = ["The Holy Grail", "The Life of Brian"] print(fav_movies[0]) e of Display the vallulis t item each individua. on the screen print(fav_movies[1]) This is the list-processing code. This code works as expected, making the data from the list appear on screen. However, if the code is later amended to add another favorite movie to the list, the list-processing code stops working as expected, because the list-processing code does not mention the third item. Big deal: all you need to do is add another print() statement, right? Yes, adding one extra print() statement works for one extra movie, but what if you need to add another hundred favorite movies? The scale of the problem defeats you, because adding all those extra print() statements becomes such a chore that you would rather find an excuse not to have to do. It’s time to iterate Processing every list item is such a common requirement that Python makes it especially convenient, with the built-in for loop. Consider this code, which is a rewrite of the previous code to use a for loop: ate Use “for” to itder aying pl over the list, isch the value of ea on individual itemgo. screen as you Define a list just as you didanbed populate it fore. fav_movies = ["The Holy Grail", "The Life of Brian"] for each_flick in fav_movies: print(each_flick) This is the list-processing code, using a for loop. Using a for loop scales and works with any size list. you are here 4 15 list processing For loops work with lists of any size Python’s for loop exists to process lists and other iterations in Python. 
Lists are the most common iterated data structure in Python, and when you need to iterate a list, it’s best to use for: The keyword “for” indicates the start of the loop and comes before the target identifier. The keyword “in” separates the target identifier from your list. for target identifer list in A colon “:” follows your list name and indicates the start of your listprocessing code. : list-processing code ng code The list-procesensited MUST be ind loop. under the for The list-processing code is referred to by Python programmers as the suite. The target identifier is like any other name in your code. As your list is iterated over, the target identifier is assigned each of the data values in your list, in turn. This means that each time the loop code executes, the target identifier refers to a different data value. The loop keeps iterating until it exhausts all of your list’s data, no matter how big or small your list is. An alternative to using for is to code the iteration with a while loop. Consider these two snippets of Python code, which perform the same action: When you use “while”, you have to worry about “state information,” which requires you to employ a counting identifier. count = 0 while count < len(movies): print(movies[count]) count = count+1 These while and for statements do the same thing. 16 Chapter 1 for each_item in movies: print(each_item) When you use “for”, the Python interpreter worries about the “state information” for you. meet python Q: So…when iterating over a list, I should always use for instead of while? A: Yes, unless you have a really good reason to use (or need the extra control of) a while loop. The for loop takes care of working from the start of your list and continuing to the end. It’s next to impossible to get stung by an off-by-one error when you use for. This is not the case with while. Q: So, lists aren’t really like arrays then, because they do so much more? 
A: Well…they are in that you can access individual data items in your list with the standard square bracket notation, but—as you’ve seen—Python’s lists can do so much more. At Head First Labs, we like to think of lists as “arrays on steroids.” Q: Seeing as Python’s lists shrink and grow as needed, they must not support bounds-checking, right? A: Well, lists are dynamic, in that they shrink and grow, but they are not magic, in that they cannot access a data item that does not exist. If you try to access a nonexistent data item, Python responds with an IndexError, which means “out of bounds.” Q: What’s with all the strange references to Monty Python? A: And they work this way only in Python 3, right? Ah, you spotted that, eh? It turns out that the creator of Python, Guido van Rossum, was reading the scripts of the Monty Python TV shows while designing his new programming language. When Guido needed a name for his new language, he chose “Python” as a bit of a joke (or so the legend goes). A: Do I need to know Monty Python in order to understand the examples? Q: No. There are certain enhancements to lists that were added in Python 3, but release 2 of Python has lists, too. All of what you’ve learned about lists so far will work with lists in Releases 2 and 3 of Python. Q: Why are we using Python 3? What’s wrong with Python 2, anyway? Lots of programmers seem to be using it. A: Lots of programmers are using Python 2, but the future of Python development lies with Release 3. Of course, moving the entire Python community to Python 3 won’t happen overnight, so there’s an awful lot of projects that will continue to run on Release 2 for the foreseeable future. Despite 2’s dominance at the moment, at Head First Labs we think the new bits in 3 are well worth the added investment in learning about them now. Don’t worry: if you know 2, Python 3 is easy. 
Q: A: No, but as they say in the official Python documentation: “it helps if you do.” But don’t worry: you’ll survive, even if you’ve never heard of Monty Python. Q: I notice that some of your strings are surrounded with double quotes and others with single quotes. What’s the difference? A: There isn’t any. Python lets you use either to create a string. The only rule is that if you start a string with one of the quotes, then you have to end it with the same quote; you can’t mix’n’match. As you may have seen, IDLE uses single quotes when displaying strings within the shell. Q: What if I need to embed a double quote in a string? A: You have two choices: either escape the double quote like this: \”, or surround your string with single quotes. Q: Can I use any characters to name my identifiers? A: No. Like most other programming languages, Python has some rules that must be adhered to when creating names. Names can start with a letter character or an underscore, then include any number of letter characters, numbers, and/or underscores in the rest of the name. Strange characters (such as %$£) are not allowed and you’ll obviously want to use names that have meaning within the context of your code. Names like members, the_ time , and people are much better than m, t, and p, aren’t they? Q: Yes, good naming practice is always important. But what about case sensitivity? A: Yes, Python is the “sensitive type,” in that Python code is case sensitive. This means that msg and MSG are two different names, so be careful. Python (and IDLE) will help with the problems that can occur as a result of this. For instance, you can use an identifier in your code only if it has been given a value; unassigned identifiers cause a runtime error. This means that if you type mgs when you meant msg, you’ll find out pretty quickly when Python complains about your code having a NameError. 
you are here 4 17 lists within lists Store lists within lists As you’ve seen, lists can hold data of mixed type. But it gets even better than that: lists can hold collections of anything, including other lists. Simply embed the inner list within the enclosing list as needed. Looking closely at the movie buff ’s data, it is possible to determine a structure which looks much like a list of lists: There’s a list of movie facts… …which itself contains a list of lead actors… …which itself contains a list of supporting actors. There’s only one lead actor listed here, but there could be more. The Holy Grail, 1975, Terry Jones & Terry Gilliam, 91 mins Graham Chapman Michael Palin, John Cleese, Terry Gilliam, Eric Idle & Terry Jones In Python, you can turn this real list of data into code with little or no effort. All you need to remember is that every list is a collection of items separated from each other with commas and surrounded with square brackets. And, of course, any list item can itself be another list: The start of the first, outer list movies = [ "The Holy Grail", 1975, "Terry Jones & Terry Gilliam", 91, The end of all the lists is here. ["Graham Chapman", The start of the second, inner list: “movies[4]” ["Michael Palin", "John Cleese", "Terry Gilliam", "Eric Idle", "Terry Jones"]]] The start of the third, inner inner list: “movies[4][1]” So, a list within a list is possible, as is a list within a list within a list (as this example code demonstrates). In fact, it’s possible to nest lists within lists to most any level with Python. And you can manipulate every list with its own list methods and access it with the square bracket notation: print(movies[4][1][3]) A list within a list within a list 18 Chapter 1 This looks a little weird…until you remember that there are thr opening square brackets, so theeere must also be three closing ones. Eric Idle Eric is this deeply nested, so he can’t possibly be idle. 
§ meet python Creating a list that contains another list is straightforward. But what happens when you try to process a list that contains another list (or lists) using the for loop from earlier in this chapter? Let’s use IDLE to work out what happens. Begin by creating the list of the movie data for “The Holy Grail” in memory, display it on screen, and then process the list with your for loop: >>> movies = ["The Holy Grail", 1975, "Terry Jones & Terry Gilliam", 91, ["Graham Chapman", ["Michael Palin", "John Cleese", "Terry Gilliam", "Eric Idle", "Terry Jones"]]] >>> print(movies) ['The Holy Grail', 1975, 'Terry Jones & Terry Gilliam', 91, ['Graham Chapman', ['Michael Palin', 'John Cleese', 'Terry Gilliam', 'Eric Idle', 'Terry Jones']]] The list within a list within a list has been created in memory. >>> for each_item in movies: print(each_item) The Holy Grail 1975 Terry Jones & Terry Gilliam The “for” loop prints each item of the outer loop ONLY. 91 ['Graham Chapman', ['Michael Palin', 'John Cleese', 'Terry Gilliam', 'Eric Idle', 'Terry Jones']] The inner list within the inner list is printed “as-is.” Your for loop is working OK. I think the trouble is that you haven’t told it what to do with any inner lists that it finds, so it just prints everything, right? Yes, that’s correct: the loop code isn’t complete. At the moment, the code within the loop simply prints each list item, and when it finds a list at a slot, it simply displays the entire list on screen. After all, the inner list is just another list item as far as the outer enclosing list is concerned. What we need here is some mechanism to spot that an item in a list is in fact another list and take the appropriate action. That sounds a little tricky. But can Python help? you are here 4 19 looking for lists Check a list for a list Each time you process an item in your list, you need to check to see if the item is another list.
If the item is a list, you need to process the nested list before processing the next item in your outer list. Deciding what to do when in Python follows the familiar if... else... pattern: The keyword “if” indicates the start of the decision code. if some condition holds es if the This code execsut(i.e., it’s TRUE). condition hold A colon (:) follows your condition test. : the "true" suite else: This code executes if the condit does NOT hold (i.e., it’s FALSE).ion Note: both suites are indented. Look! Another colon. the "false" suite No surprises here, as the if statement in Python works pretty much as expected. But what condition do you need to check? You need a way to determine if the item currently being processed is a list. Luckily, Python ships with a BIF that can help here: isinstance(). What’s cool about the isinstance() BIF is that it lets you check if a specific identifier holds data of a specific type: Create a short list and assign it to an identifier. Ask if “names” is a list (it is). Assign a number to an identifier. es” is a Ask if “num_nam list (it isn’t). 20 Chapter 1 Let’s use the IDLE shell to learn a little about how isinstance() works: >>> names = ['Michael', 'Terry'] >>> isinstance(names, list) True >>> num_names = len(names) >>> isinstance(num_names, list) False Refer to a Python type here. In this case, the type is “list”. meet python Here’s a copy of the current list-processing code. Your task is to rewrite this code using an if statement and the isinstance() BIF to process a list that displays another list. for each_item in movies: print(each_item) Write your new code here. Q: Are there many of these BIFs in Python? A: Q: Yes. At the last count, there were over 70 BIFs in Python 3. Over 70! How am I to remember that many, let alone find out what they all are? A: You don’t have to worry about remembering. Let Python do it for you. Q: A: dir(__builtins__) How? 
At the Python or IDLE shell, type to see a list of the built-in stuff that comes with Python (that’s two leading and trailing underscore characters, by the way). The shell spits out a big list. Try it. All those lowercase words are BIFs. To find out what any BIF does—like input(), for example—type help(input) at the shell for a description of the BIFs function. Q: A: Why so many BIFs? Why not? Because Python comes with lots of built-in functionality, it can mean less code for you to write. This Python philosophy is known as “batteries included”: there’s enough included with Python to let you do most things well, without having to rely on code from third parties to get going. As well as lots of BIFs, you’ll find that Python’s standard library is rich and packed with features waiting to be exploited by you. you are here 4 21 list the list Here’s a copy of the current list-processing code. Your task was to rewrite this code using an if statement and the isinstance() BIF to process a list that displays another list. for each_item in movies: Process the “movies” list as before. print(each_item) for each_item in movies: The inner loop needs a new target identifier. You need to check if the if isinstance(each_item, list): current item is a list. for nested_item in each_item: If it is a list, use another “for” loop to process the print(nested_item) nested list. If the current item else: of the enclosing list Did you manage to get your print(each_item) isn’t a list, display it indentation right? on screen. Let’s use IDLE to see if this code makes a difference to the output displayed on screen: >>> for each_item in movies: if isinstance(each_item, list): for nested_item in each_item: print(nested_item) else: print(each_item) The Holy Grail 1975 Terry Jones & Terry Gilliam by This is a little better, but notlist here ted nes r the much…there’s ano ly. 
that’s not being processed proper 91 Graham Chapman [‘Michael Palin’, ‘John Cleese’, ‘Terry Gilliam’, ‘Eric Idle’, ‘Terry Jones’] 22 Chapter 1 meet python Complex data is hard to process The movie buff ’s data is complex. Let’s take another look at a subset of the data and your Python code that processes it. The outer, enclosing list The data The Holy Grail, 1975, Terry Jones & Terry Gilliam, 91 mins Graham Chapman Michael Palin, John Cleese, Terry Gilliam, Eric Idle & Terry Jones Another inner (inner), nested list An inner, nested list Your code Process the outer, enclosing list. Process the in nested list. ner, for each_item in movies: if isinstance(each_item, list): for nested_item in each_item: Yeah.. that’s almost working...it’s just a pity about that list of supporting actors... print(nested_item) else: print(each_item) Can you spot the problem with your Python code as it is currently written? What do you think needs to happen to your code to allow it to process the movie buff’s data correctly? you are here 4 23 nested lists Handle many levels of nested lists: for each_item in movies: This code processes a nested list. if isinstance(each_item, list): for nested_item in each_item: print(nested_item) else: print(each_item) Here’s where the repeated code needs to go. The next iteration of your code looks like this. for each_item in movies: if isinstance(each_item, list): for nested_item in each_item: if isinstance(nested_item, list): in this for deeper_item in nested_item: code, each print(deeper_item) “if” needs an associated else: “else”. print(nested_item) Note: else: 24 Chapter 1 print(each_item) The repeated co replaces the “pride statement and innt()” another target troduces called “deeper_itidentifier em”. 
meet python Let’s use IDLE once more to test this latest iteration of your code: >>> for each_item in movies: if isinstance(each_item, list): for nested_item in each_item: if isinstance(nested_item, list): for deeper_item in nested_item: print(deeper_item) else: print(nested_item) eply nested e d a s s e c o Pr nested list list inside eanclosing list. inside an else: print(each_item) The Holy Grail 1975 Terry Jones & Terry Gilliam 91 Graham Chapman It works! This time, you see all of your list data on screen. Michael Palin John Cleese Terry Gilliam Eric Idle Terry Jones I just love that...in fact, I love it so much I’ve decided to add another list to my data. I want to include the other movies each supporting actor has starred in. If I add the data, can you change your code to print this data, too?. That doesn’t sound like too much trouble, does it? you are here 4 25 avoid complexity I think I’d rather have a root canal than change that code again. Adding another nested loop is a huge pain. Your data is getting more complex (that mind-bending list of lists of lists of lists) and, as a consequence, your code is getting overly complex, too (that brain-exploding for loop inside a for loop inside a for loop). And overly complex code is rarely a good thing… 26 Chapter 1 meet python Wouldn’t it be dreamy if there were an efficient way to process lists, preferably using a technique that resulted in less code, not more? But I know it’s just a fantasy... you are here 4 27 reduce, reuse, recycle Don’t repeat code; create a function Take a look at the code that you’ve created so far, which (in an effort to save you from having your brain explode) has already been amended to process yet another nested list. Notice anything? 
This code is essentially the same as this code… for each_item in movies: …which is essentially the same as this code… if isinstance(each_item, list): for nested_item in each_item: if isinstance(nested_item, list): for deeper_item in nested_item: if isinstance(deeper_item, list): for deepest_item in deeper_item: else: print(deeper_item) else: print(deepest_item) print(nested_item) else: print(each_item) h There’s not mucong these difference am ts, either! four statemen This code is beginning to get a little scary… Your code now contains a lot of repeated code. It’s also a mess to look at, even though it works with the movie buff ’s amended data. All that nesting of for loops is hard to read, and it’s even harder to ensure that the else suites are associated with the correct if statement. There has to be a better way…but what to do? When code repeats in this way, most programmers look for a way to take the general pattern of the code and turn it into a reusable function. And Python programmers think this way, too. Creating a reusable function lets you invoke the function as needed, as opposed to cutting and pasting existing code. So, let’s turn the repeating code into a function. 28 Chapter 1 …which is not that much different than this code. meet python Create a function in Python A function in Python is a named suite of code, which can also take an optional list of arguments if required. You define a Python function using the def statement, providing a name for your function and specifying either an empty or populated argument list within parentheses. The standard form looks something like this: The keyword “def” introduces the name of the function. A colon (:) follows the closing parenthesis and indicates the start of your functions code suite. Argument lists are optional, but the parentheses are NOT. def function name de The function’sencoted under MUST be ind ent. the def statem ( argument(s) ): function code suite What does your function need to do? 
Your function needs to take a list and process each item in the list. If it finds a nested list within the first list, the function needs to repeat. It can do this by invoking itself on the nested list. In other words, the function needs to recur— that is, invoke itself from within the function code suite. Let’s call the function that you’ll create print_lol(). It takes one argument: a list to display on screen. Grab your pencil and complete the code below to provide the required functionality: def print_lol(the_list): for if else: you are here 4 29 recursive function You were to call the function that you’ll create print_lol(). It takes one argument: a list to display on screen. You were to grab your pencil and complete the code to provide the required functionality: def print_lol(the_list): for each_item in the_list: Process the provided list with a “for” loop. isinstance(each_item, list): if else: print_lol(each_item) print(each_item) If the item being processed is itself a list, invoke the function. If the item being processed ISN’T a list, display the item on screen. Let’s use IDLE one final time to test your new function. Will it work as well as your earlier code? >>> def print_lol(the_list): for each_item in the_list: if isinstance(each_item, list): Define the function. print_lol(each_item) else: print(each_item) >>> print_lol(movies) Invoke the function. The Holy Grail 1975 Terry Jones & Terry Gilliam 91 Graham Chapman Michael Palin John Cleese Terry Gilliam Eric Idle Terry Jones 30 Chapter 1 It works, too! The recursive function produces EXACTLY the same results as the earlier code. meet python. Python 3 defaults its recursion limit to 1,000, which is a lot of lists of lists of lists of lists…and this limit can be changed should you ever need even more depth than that. Ah, yes, that’s terrific! I can now relax, knowing that your code can process my movie data. I really should’ve done this years ago... What a great start!
By taking advantage of functions and recursion, you’ve solved the code complexity problems that had crept into your earlier list-processing code. By creating print_lol(), you’ve produced a reusable chunk of code that can be put to use in many places in your (and others’) programs. You’re well on your way to putting Python to work! you are here 4 31 python toolbox You’ve got Chapter 1 under your belt and you’ve added some key Python goodies to your toolbox. Run Python 3 from the command line or from within IDLE. Identifiers are names that refer to data objects. The identifiers have no “type,” but the data objects that they refer to do. Python Lingo • “BIF” - a built-in function. • “Suite” - a block of Python code, which is indented to indicate grouping. • “Batteries included” - a way of referring to the fact that Python comes with most everything you’ll need to get going quickly and productively. IDLE Notes • The IDLE shell lets you experiment with your code as you write it. • Adjust IDLE’s preferences to suit the way you work. • Remember: when working with the shell, use Alt-P for Previous and Alt-N for Next (but use Ctrl if you’re on a Mac). print() BIF displays a message on screen. A list is a collection of data, separated by commas and surrounded by square brackets. Lists are like arrays on steroids. Lists can be used with BIFs, but also support a bunch of list methods. Lists can hold any data, and the data can be of mixed type. Lists can also hold other lists. Lists shrink and grow as needed. All of the memory used by your data is managed by Python for you. Python uses indentation to group statements together. len() BIF provides a length of some data object or counts the number of items in a collection, such as a list. The for loop lets you iterate a list and is often more convenient to use than an equivalent while loop. The if... else... statement lets you make decisions in your code.
isinstance() BIF checks whether an identifier refers to a data object of some specified type. Use def to define a custom function. 32 Chapter 1 CHAPTER 1 CHAPTER 1 Your Python Toolbox 2 sharing your code Modules of functions I’d love to share...but how am I supposed to function without a module?. this is a new chapter 33 let’s share It’s too good not to share You’ve been showing your function to other programmers, and they like what they see. You should make your function shareable, so that everyone can use it. Yes, a function this good should be shared with the world. Python provides a set of technologies that make this easy for you, which includes modules and the distribution utilities: Modules let you organize your code for optimal sharing. The distribution utilities let you share your modules with the world. Let’s turn your function into a module, then use the distribution utilities to share your module with the wider Python programming community. 34 Chapter 2 sharing your code Turn your function into a module A module is simply a text file that contains Python code. The main requirement is that the name of the file needs to end in .py: the Python extension. To turn your function into a module, save your code into an appropriately named file: Your code from Chapter 1 def print_lol(the_list): for each_item in the_list: if isinstance(each_item, list): print_lol(each_item) else: print(each_item) Let’s call this file “nester.py”. Q: A: What’s the best Python editor? The answer to that question really depends on who you ask. However, you can, of course, use any text editor to create and save your function’s code in a text file. Something as simple as NotePad on Windows works fine for this, as does a full-featured editor such as TextMate on Mac OS X. And there’s also full-fledged IDEs such as Eclipse on Linux, as well as the classic vi and emacs editors. And, as you already know, Python comes with IDLE, which also includes a built-in code editor. 
It might not be as capable as those other “real” editors, but IDLE is installed with Python and is essentially guaranteed to be available. For lots of jobs, IDLE’s edit window is all the editor you’ll ever need when working with your Python code. Of course, there are other IDEs for Python, too. Check out WingIDE for one that specifically targets Python developers. Do this! Go ahead and create a text file called nester.py that contains your function code from the end of Chapter 1. you are here 4 35 modules repository Modules are everywhere As might be expected, you’ll find Python modules in lots of places. I’m preloaded with lots of modules in the Python Standard Library... and they are already on your computer. If the Standard Library doesn’t do it for you, why not try the Web? I hear PyPI is where third-party Python modules hang out. PyPI is pronounced “pie-pie.” The Python Package Index (or PyPI for short) provides a centralized repository for third-party Python modules on the Internet. When you are ready, you’ll use PyPI to publish your module and make your code available for use by others. And your module is ready, but for one important addition. What do you think is missing from your module? 36 Chapter 2 Geek Bits If you are already familiar with Perl’s CPAN repository, you can think of PyPI as the Python equivalent. sharing your code Comment your code It’s always a good idea to include comments with your code. As you plan to share your module with the world, well-written comments help to document your work. In Python, a common commenting technique is to use a triple quote for multiple-line comments. When you use a triple quote without assigning it to a variable, everything between the triple quotes is considered a comment: Start with a triple quote… Hello! I’m a big string who just happens to be a Python comment, too. Nice, eh? """This is the standard way to include a multiple-line comment in your code.""" …and end with a triple quote. 
Here is your module code (which is saved in the file nester.py). In the spaces provided, use your pencil to compose two comments: the first to describe the module and the second to describe the function. Put your module comment here. def print_lol(the_list): Add a comment for your function here. for each_item in the_list: if isinstance(each_item, list): print_lol(each_item) else: print(each_item) you are here 4 37 request for comments Here is your module code (which is saved in the file nester.py). In the spaces provided, you were asked to use your pencil to compose two comments: the first to describe the module and the second to describe the function. “““This is the “nester.py" module, and it provides one function called print_lol() which prints lists that may or may not include nested lists.””” Did you remember to include the triple quotes? def print_lol(the_list): “““This function takes a positional argument called “the_list", which is any Python list (of, possibly, nested lists). Each data item in the provided list is (recursively) printed to the screen on its own line.””” for each_item in the_list: if isinstance(each_item, list): print_lol(each_item) else: Q: How do I know where the Python modules are on my computer? A: Ask IDLE. Type import sys; sys.path (all on one line) into the IDLE prompt to see the list of locations that your Python interpreter searches for modules. Q: Hang on a second. I can use “;” to put more than one line of code on the same line in my Python programs? A: Yes, you can. However, I don’t recommend that you do so. Better to give each Python statement its own line; it makes your code much easier for you (and others) to read. 38 Chapter 2 print(each_item) Q: Does it matter where I put my nester.py module? A: For now, no. Just be sure to put it somewhere where you can find it later. 
In a while, you’ll install your module into your local copy of Python, so that the interpreter can find it without you having to remember where you actually put it. Q: So comments are like a funny-looking string surrounded by quotes? A: Yes. When a triple-quoted string is not assigned to a variable, it’s treated like a comment. The comments in your code are surrounded by three double quotes, but you could have used single quotes, too. There are no changes to the actual code here; you’re just adding some comments. Q: Is there any other way to add a comment to Python code? A: Yes. If you put a “#” symbol anywhere on a line, everything from that point to the end of the current line is a comment (unless the “#” appears within a triple quote, in which case it’s part of that comment). A lot of Python programmers use the “#” symbol to quickly switch on and off a single line of code when testing new functionality. sharing your code Now that you’ve added your comments and created a module, let’s test that your code is still working properly. Rather than typing your function’s code into IDLE’s prompt, bring the nester.py file into IDLE’s edit window, and then press F5 to run the module’s code: Note that the comments are color coded. Nothing appears to happen, other than the Python shell “restarting” and an empty prompt appearing: >>> ================================ RESTART ================================ >>> >>> What’s happened is that the Python interpreter has reset and the code in your module has executed. The code defines the function but, other than that, does little else. The interpreter is patiently waiting for you to do something with your newly defined function, so let’s create a list of lists and invoke the function on it: >>> movies = [ "The Holy Grail", 1975, "Terry Jones & Terry Gilliam", 91, Define the list of movie facts from Chapter 1. 
["Graham Chapman", ["Michael Palin", "John Cleese", "Terry Gilliam", "Eric Idle", "Terry Jones"]]] >>> print_lol(movies) The Holy Grail Invoke the function on the list. 1975 Terry Jones & Terry Gilliam 91 Graham Chapman Michael Palin John Cleese Cool. Your code continues todata function as expected. The in the list of lists is displayed on screen. Terry Gilliam Eric Idle Terry Jones you are here 4 39 distribution plan Prepare your distribution In order to share your newly created module, you need to prepare a distribution. This is the Python name given to the collection of files that together allow you to build, package, and distribute your module. Do this! Once a distribution exists, you can install your module into your local copy of Python, as well as upload your module to PyPI to share with the world. Follow along with the process described on these two pages to create a distribution for your module. 1 Begin by creating a folder for your module. With the folder created, copy your nester.py module file into the folder. To keep things simple, let’s call the folder nester: The “nester.py” module file. Follow along with each of the steps described on these pages. By the time you reach the end, your module will have transformed into a Python distribution. nester 2 The newly created “nester” folder (or directory). Create a file called “setup.py” in your new folder. This file contains metadata about your distribution. Edit this file by adding the following code: Import the “setup” function from Python’s distribution utilities. from distutils.core import setup setup( These are the setup function’s argument names. name = 'nester', version = '1.0.0', py_modules = ['nester'], author = 'hfpython', Associate your module’s metadata with the setup function’s arguments. 
author_email = 'hfpython@headfirstlabs.com', ) 40 Chapter 2 url = '', description = 'A simple printer of nested lists', These are the values Head First Labs use with their modules; your metadata will be different. sharing your code Build your distribution You now have a folder with two files in it: your module’s code in nester.py and metadata about your module in setup.py. Now, it’s time to build your distribution. 3 Note: if you ar replace “python3e using Windows, with “c:\Python3” in these commands 1\python.exe”. Build a distribution file. The distribution utilities include all of the smarts required to build a distribution. Open a terminal window within your nester folder and type a single command: python3 setup.py sdist. Enter the command at the prompt. File Edit Window Help Build $ python3 setup.py sdist running sdist running check warning: sdist: manifest template 'MANIFEST.in' does not exist warning: sdist: standard file not found: should have README A collection of status messages appears on screen, confirming the creation of your distribution. 4 writing manifest file 'MANIFEST' creating nester-1.0.0 making hard links in nester-1.0.0... hard linking nester.py -> nester-1.0.0 hard linking setup.py -> nester-1.0.0 creating dist Creating tar archive removing 'nester-1.0.0' (and everything under it) $ Install your distribution into your local copy of Python. Staying in the terminal, type this command: sudo python3 setup.py install. File Edit Window Help Install Another bunch of status messages appear on screen, confirming the installation of your distribution. $ python3 setup.py install running install running build running build_py creating build creating build/lib copying nester.py -> build/lib running install_lib copying build/lib/nester.py -> /Library/Frameworks/Python. 
framework/Versions/3.1/lib/python3.1/site-packages byte-compiling /Library/Frameworks/Python.framework/Versions/3.1/ lib/python3.1/site-packages/nester.py to nester.pyc running install_egg_info Writing /Library/Frameworks/Python.framework/Versions/3.1/lib/ python3.1/site-packages/nester-1.0.0-py3.1.egg-info Your distribution is ready. you are here 4 41 ready for distribution A quick review of your distribution Thanks to Python’s distribution utilities, your module has been transformed into a distribution and installed into your local copy of Python. You started with a single function, which you entered into a file called nester.py, creating a module. You then created a folder called nester to house your module. The addition of a file called setup.py to your folder allowed you to build and install your distribution, which has resulted in a number of additional files and two new folders appearing within your nester folder. These files and folders are all created for you by the distribution utilities. Before Setup nester After Setup Your code is in this file. nester.py nester A list of files in your distribution is in this file. MANIFEST Here are your new folders. setup.py Your metadata is in this file. build lib Your code is in this file. nester.py dist Your code is in this file. This is your distribution package. nester-1.0.0.tar.gz nester.py nester.pyc Your metadata is in this file. 42 Chapter 2 setup.py A “compiled” version of your code is in this file. sharing your code Import a module to use it Now that your module is built, packaged as a distribution, and installed, let’s see what’s involved in using it. To use a module, simply import it into your programs or import it into the IDLE shell: Use the Python keyword “import”… import nester …and provide the name of your module. Note: you don’t need to include the “.py” extension when naming your module. The import statement tells Python to include the nester.py module in your program. 
From that point on, you can use the module’s functions as if they were entered directly into your program, right? Well…that’s what you might expect. Let’s check out the validity of your assumption. Write a small program that imports your newly created module, defines a small list called “cast,” and then uses the function provided by your module to display the contents of the list on screen. Use the following list data (all strings): Palin, Cleese, Idle, Jones, Gilliam, and Chapman. Open your program in IDLE’s edit window, and then press F5 to execute your code. Describe what happens in the space below: you are here 4 43 idle error You were to write a small program that imports your newly created module, defines a small list called “cast,” and then uses the function provided by your module to display the contents of the list on screen. You were to use the following list data (all strings): Palin, Cleese, Idle, Jones, Gilliam, and Chapman. import nester It’s a simple three-line program. There’s nothing too difficult here. cast = ['Palin', 'Cleese', 'Idle', 'Jones', 'Gilliam', 'Chapman'] print_lol(cast) Open your program in IDLE’s edit window, and then press F5 to execute your code. Describe what happens in the space below: But it didn’t work! IDLE gives an error, and the program does not run! With your program in the IDLE edit window, pressing F5 (or choosing Run Module from the Run menu) does indeed cause problems: Your program does not appear to have executed and an error message is reported: >>> ================================ RESTART ================================ >>> Traceback (most recent call last): File "/Users/barryp/HeadFirstPython/chapter2/try_nester.py", line 4, in <module> print_lol(cast) NameError: name 'print_lol' is not defined >>> 44 Chapter 2 With your program in IDLE, pressing F5 causes a NameError…it looks like your function can’t be found!!! 
sharing your code Python’s modules implement namespaces All code in Python is associated with a namespace. That’s a double underscore in front of the word “main” and after it. Code in your main Python program (and within IDLE’s shell) is associated with a namespace called __main__. When you put your code into its own module, Python automatically creates a namespace with the same name as your module. So, the code in your module is associated with a namespace called nester. I guess namespaces are like family names? If someone is looking for Chris, we need to know if it’s Chris Murray or Chris Larkin, right? The family name helps to qualify what we mean, as do namespace names in Python. Yes, namespace names are like family names. When you want to refer to some function from a module namespace other than the one you are currently in, you need to qualify the invocation of the function with the module’s namespace name. So, instead of invoking the function as print_lol(cast) you need to qualify the name as nester.print_lol(cast). That way, the Python interpreter knows where to look. The format for namespace qualification is: the module’s name, followed by a period, and then the function name. The module name, which identifies the namespace. nester.print_lol(cast) The function is then invoked as normal, with “cast” provided as the list to process. espace A period separates the module.e nam name from the function nam you are here 4 45 ready for pypi Let’s test this. Staying at the IDLE shell, import your module, create the list, and then try to invoke the function without a qualifying name. You’re expecting to see an error message: >>> import nester >>> cast = ['Palin', 'Cleese', 'Idle', 'Jones', 'Gilliam', 'Chapman'] >>> print_lol(cast) Traceback (most recent call last): File "<pyshell#4>", line 1, in <module> print_lol(cast) NameError: name 'print_lol' is not defined sed a As expected, your code has cau qualify n’t did you e NameError, becaus the name. 
When you qualify the name of the function with the namespace, things improve dramatically: >>> nester.print_lol(cast) Palin Cleese Idle Jones Gilliam This time, things work as expect the list are displayed on screen. ed…the data items in Chapman Geek Bits When you use a plain import statement, such as import nester, the Python interpreter is instructed to allow you to access nester’s functions using namespace qualification. However, it is possible to be more specific. If you use from nester import print_lol, the specified function (print_lol in this case) is added to the current namespace, effectively removing the requirement for you to use namespace qualification. But you need to be careful. If you already have a function called print_lol defined in your current namespace, the specific import statement overwrites your function with the imported one, which might not be the behavior you want. Your module is now ready for upload to PyPI. 46 Chapter 2 sharing your code Register with the PyPI website In order to upload your distribution to PyPI, you need to register with the PyPI website. This is a relatively straightforward process. Begin by surfing over to the PyPI website at and requesting a PyPI ID: Provide the Username you’d like to use. Enter your chosen password twice for confirmation purposes. Don’t try to use “hfpython that Username is already tak,”en.because Provide a valid email address. t Don’t worry abPoukey providing a PGually have (unless you act one). Don’t forget to click the “I agree” checkbox before clicking on the Register button. If all of your registration details are in order, a confirmation message is sent to the email address submitted on the registration form. The email message contains a link you can click to confirm your PyPI registration: You are now registered with PyPI. plete Click the confirmation link to com . ion your PyPI registrat you are here 4 47 register and upload Upload your code to PyPI You’re ready to rock! 
The code in your function has been placed in a module, used to create a distribution and installed into your local copy of Python. To upload your distribution to PyPI, complete these two steps: command-line registration with PyPI and command-line uploading. It might seem strange to have to register with PyPI again, seeing as you just did this with their website. However, the command-line uploading tool needs to be made aware of your PyPI Username and Password, and that’s what this registration does. Don’t worry: you have to do this only once. Instruct setup to register your details. Confirm that you want to use your just-created PyPI credentials. Use your PyPI settings and save them for future use. File Edit Window Help Register $ python3 setup.py register running register running check We need to know who you are, so please choose either: 1. use your existing login, 2. register as a new user, 3. have the server generate a new password for you (and email it to you), or 4. quit Your selection [default 1]: 1 Username: hfpython Password: Registering nester to Server response (200): OK I can store your PyPI login so future submissions will be faster. (the login will be stored in /Users/barryp/.pypirc) Save your login (y/N)?y With your registration details entered and saved, you are now ready to upload your distribution to PyPI. Another command line does the trick: Instruct setup ftotware upload your so PyPI. distribution to Setup confirms that the upload is successful. Your distribution is now part of PyPI. 48 Chapter 2 Note: If you try to upload dule called “nester”, you’ll get an error aasmo tha t name’s already taken. § File Edit Window Help Upload $ python3 setup.py sdist upload running sdist running check reading manifest file ‘MANIFEST’ creating nester-1.0.0 making hard links in nester-1.0.0... 
hard linking nester.py -> nester-1.0.0 hard linking setup.py -> nester-1.0.0 Creating tar archive removing ‘nester-1.0.0’ (and everything under it) running upload Submitting dist/nester-1.0.0.tar.gz to Server response (200): OK $ sharing your code Welcome to the PyPI community Congratulations! You are now a full-fledged, card-carrying member of the PyPI community. Your distribution has joined the over 10,000 other uploads on PyPI. Feel free to surf on over to the PyPI website to confirm the upload. Programmers from all over the globe are now able to download, unpack, and install your module into their local copy of Python, which is pretty cool when you think about it. You’ve now written and published your code… how cool is that? Sit back, put your feet up, and wait for the plaudits to begin… Q: Which is best: plain imports or specific imports? A: Neither, really. Most programmers mix and match based on their own personal preference and taste (although there are plenty of programmers willing to argue that their preferred way is the “one true way”). Note that the from module import function form pollutes your current namespace: names already defined in your current namespace are overwritten by the imported names. Q: And when I press F5 in IDLE’s edit window, it’s as if the module’s code is imported with an import statement, right? A: Yes, that is essentially what happens. The code in your edit window is compiled and executed by Python, and any names in the edit window are imported into the namespace being used by IDLE’s shell. This is handy, because it makes it easy to test functionality with IDLE. But bear in mind that outside of IDLE, you still need to import your module before you can use its functionality. Q: Is it really necessary for me to install my modules into my local copy of Python? Can’t I just put them in any old folder and import them from there? A: Yes, it is possible. 
Just bear in mind that Python looks for modules in a very specific list of places (recall the import sys; sys.path trick from earlier in this chapter). If you put your modules in a folder not listed in Python’s path list, chances are the interpreter won’t find them, resulting in ImportErrors. Using the distribution utilities to build and install your module into your local copy of Python avoids these types of errors. Q: I noticed the distribution utiliites created a file called nester.pyc. What’s up with that? A: That’s a very good question. When the interpreter executes your module code for the first time, it reads in the code and translates it into an internal bytecode format which is ultimately executed. (This idea is very similar to the way the Java JVM works: your Java code is turned into a class file as a result of your Java technologies compiling your code.) The Python interpreter is smart enough to skip the translation phase the next time your module is used, because it can determine when you’ve made changes to the original module code file. If your module code hasn’t changed, no translation occurs and the “compiled” code is executed. If your code has changed, the translation occurs (creating a new pyc file) as needed. The upshot of all this is that when Python sees a pyc file, it tries to use it because doing so makes everything go much faster. Q: Cool. So I can just provide my users with the pyc file? A:pyc No, don’t do that, because the use of the file (if found) is primarily a runtime optimization performed by the interpreter. Q: So, can I delete the pyc file if I don’t need it? A: Sure, if you really want to. Just be aware that you lose any potential runtime optimization. you are here 4 49 conflicting requests With success comes responsibility Lots of programmers from many different locations are using your module. And some of these programmers are looking for more features. 
We really love your code, but is there any chance this thing could print the data to screen and indent each nested list whenever one is found? Likes what you’ve done, but could be happier. Requests for change are inevitable You need to keep your current users happy by maintaining the existing functionality, while at the same time providing enhanced functionality to those users that require it. This could be tricky. What are your options here? 50 Chapter 2 Hang on a second. I kinda like the way it works right now. I vote NOT to change it. Any changes to the way you function works are likely to r annoy this guy. sharing your code Life’s full of choices When it comes to deciding what to do here, there’s no shortage of suggestions. That’s soooo easy. Simply create another function called “print_lol2”, right? You could then import the function you want using the specific form of the import statement. It’s not that hard, really... Yeah, that might just work. You could edit your module’s code and define a new function called print_lol2, then code up the function to perform the nested printing When you want to use the original function, use this specific form of the import statement: from nester import print_lol. When you want to use the new, improved version of the function, use this import statement: from nester import print_lol2. Which would work, but… But that suggestion is twice the work...which might be OK sometimes...but the creation of a second, almost identical, function seems wasteful to me. Right. A second function is wasteful. Not only are you introducing an almost identical function to your module, which might create a potential maintenance nightmare, but you’re also making things much more difficult for the users of your module, who must decide ahead of time which version of the function they need. Adding a second function makes your module’s application programming interface (API) more complex than it needs to be. 
There has to be a better strategy, doesn’t there? you are here 4 51 add an argument Control behavior with an extra argument If you add an extra argment to your function, you can handle indentation within your current code without too much trouble. Yikes! I should’ve thought about that myself... I probably need to go easy on the coffee. Of course, it’s clear to me now: adding another argument to your function gives you options. Take your function to the next level At the moment, your function has a single argument: the_list. If you add a second argument called level, you can use it to control indentation. A positive value for level indicates the number of tab-stops to include when displaying a line of data on screen. If level is 0, no indentation is used; if it’s 1, use a single tab-stop; if it’s 2, use two tab-stops; and so on. It’s clear you are looking at some sort of looping mechanism here, right? You already know how to iterate over a variably sized list, but how do you iterate a fixed number of times in Python? Does Python provide any functionality that can help? 52 Chapter 2 sharing your code Before your write new code, think BIF When you come across a need that you think is generic, ask yourself if there’s a built-in function (BIF) that can help. After all, iterating a fixed number of times is something you’ll need to do all the time. And remember: Python 3 includes over 70 BIFs, so there’s a lot of functionality waiting to be discovered. Use your pencil to draw a line matching each BIF to the correct description. The first one is done for you. Once you have all your lines drawn, circle the BIF you think you need to use in the next version of your function. BIF What the BIF does list() Creates a numbered list of paired-data, starting from 0. range() Returns the unique identification for a Python data object.). you are here 4 53 who did what SOLUTION You were to use your pencil to draw a line matching each BIF to the correct description. 
Once you had all your lines drawn, you were to circle the BIF you think you need to use in the next version of your function. BIF What the BIF does list() Creates a numbered list of paired-data, starting from 0. range() Returns the unique identification for a Python data object. This BIF looks interesting.). The range() BIF iterates a fixed number of times The range() BIF gives you the control you need to iterate a specific number of times and can be used to generate a list of numbers from zero upto-but-not-including some number. Here’s how to use it: “num” is the target identifier and is assigned each of the ()” numbers generated by “range in turn. 54 Chapter 2 Generate numbers up-tobut-not-including 4. for num in range(4): print(num) The numbers 0, 1, 2, and 3 will appear on screen. sharing your code Q: Don’t I need to import the BIFs in order to use them in my program? A: No. For all intents and purposes, the BIFs are specifically imported into every Python program as well as IDLE. Q: So the BIFs must belong to the __main__ namespace, right? A: No. They are automatically imported into the __main__ namespace, but the BIFs have their very own namespace called (wait for it) __builtins__. Q: I get how range() works, but surely I could just as easily use a while loop to do the same thing? A: Yes, you can, but it’s not as elegant as using range(). Seriously, though, the while equivalent not only requires you to write more code, but it also makes it your responsibility to worry about loop state, whereas range() worries about this for you. As a general rule, Python programmers look for ways to reduce the amount of code they need to write and worry about, which leads to better code robustness, fewer errors, and a good night’s sleep. Q: A: So BIFs are actually good for me? BIFs exist to make your programming experience as straightforward as possible by providing a collection of functions that provide common solutions to common problems. 
Since they are included with Python, you are pretty much assured that they have been tested to destruction and do “exactly what it says on the tin.” You can depend on the BIFs. Using them gives your program a leg up and makes you look good. So, yes, the BIFs are good for you! Now that you know a bit about the range() B.""" def print_lol(the_list, ): Include the name of the extra arg ument. """This function takes a positional argument called "the_list", which is any Python list (of - possibly - nested lists). Each data item in the provided list is (recursively) printed to the screen on it's own line.""" for each_item in the_list: if isinstance(each_item, list): print_lol(each_item) else: Don’t forget to edit the comment. ed Add code here to take the requir number of tab-stops. print(each_item) you are here 4 55 using range Now that you know a bit about the range() BIF, you were.""" level def print_lol(the_list, ): """This function takes a positional argument called "the_list", which is any Python list (of - possibly - nested lists). Each data item in the provided list is (recursively) printed to the screen on it's own line.""" A second argument called “level" is used to insert tab-stops when a nested list is encountered.""" for each_item in the_list: if isinstance(each_item, list): print_lol(each_item) else: for tab_stop in range(level): print("\t", end='') print(each_item) trol Use the value of “level” to con d. use are how many tab-stops Display a TAB character for eac h level of indentation. It’s time to test the new version of your function. Load your module file into IDLE, press F5 to import the function into IDLE’s namespace, and then invoke the function on your movies list with a second argument: >>> print_lol(movies, 0) Invoke your function, being sure to provide a second argument. The Holy Grail 1975 Terry Jones & Terry Gilliam The data in “movies” starts to appear on screen… 91 Traceback (most recent call last): …then all hell breaks loose! 
Something is not righ File "<pyshell#2>", line 1, in <module> t here. print_lol(movies,0) File "/Users/barryp/HeadFirstPython/chapter2/nester/nester.py", line 14, in print_lol print_lol(each_item) TypeError: print_lol() takes exactly 2 positional arguments (1 given) 56 Chapter 2 Your code has a TypeError, which caused it to crash. Here’s your clue as to what’s gone wrong. sharing your code Python tries its best to run your code Unlike compiled languages (such as C, Java, C#, and others), Python doesn’t completely check the validity of your code until it runs. This allows Python to do some rather cool things that just aren’t possible in those other languages, such as dynamically defining functions at runtime. This, of course, can be very flexible and powerful. The cost to you, however, is that you need to be very careful when writing your code, because something that typically would be caught and flagged as an “error” by a traditional, statically typed, compiled language often goes unnoticed in Python. ...OK, C++ syntax fine...continuing to parse...whoops! You’re trying to use a function before it’s declared?!? That’s NOT allowed around here... I’m outta here. Please wait. Compiling your C++ code… Ah ha! The old “calling a function before you’ve defined it yet” trick, eh? I’ll just make a note in case you define it later at runtime. You are planning to do that, right? Please don’t disappoint me, or I’ll give you an error... Running your Python code right now… Take another look at the error on the opposite page. Why do you think Python is giving you this particular error? What do you think is wrong? you are here 4 57 line check Trace your code When you’re trying to work out what went wrong with a program that looks like it should be OK, a useful technique is to trace what happens as each line of code executes. Here’s the code that you are currently working with. 
At only three lines long (remember: the creation of the list is one line of code), it doesn’t look like it should cause any trouble: Thes two lines look OK. import nester movies = [ "The Holy Grail", 1975, "Terry Jones & Terry Gilliam", 91,["Graham Chapman", ["Michael Palin", "John Cleese", "Terry Gilliam", "Eric Idle", "Terry Jones"]]] nester.print_lol(movies, 0) With the data assigned to the function’s arguments, the function’s code starts to execute on each data item contained within the passed-in list: To save space, the entire comment is not shown. Process each item in the list… …then decide what to do next based on whether or not the data item is a list. The “movies” list is assigned “the_list”, and the value 0 isto assigned to “level”. def print_lol(the_list, level): """This function ... """ for each_item in the_list: if isinstance(each_item, list): print_lol(each_item) else: for tab_stop in range(level): print("\t", end='') print(each_item) 58 Chapter 2 uments, h two arg You are invoking the function wit so that’s OK, too. If the data item is a list, recursively invoke the function…hang on a second, that doesn’t look right!? sharing your code Work out what’s wrong There’s your problem: the recursive invocation of your function is using the old function signature that required only one argument. The new version of your function requires two arguments. The fix is easy: provide the correct number of arguments when calling the new version of your function. So, this snippet of code from your function: if isinstance(each_item, list): print_lol(each_item) needs to be rewritten to specify the correct number of arguments: if isinstance(each_item, list): print_lol(each_item, level) Not so fast. Surely the nested list needs to be printed after a specific number of tab-stops? At the moment, your code sets “level” to 0 but never changes the value, so “level” never has any impact on your displayed output... Right. Your use of “level” needs one final tweak. 
The whole point of having level as an argument is to allow you to control the nested output. Each time you process a nested list, you need to increase the value of level by 1. Your code snippet needs to look like this: if isinstance(each_item, list): print_lol(each_item, level+1) It’s time to perform that update. of level by 1 each Simply increment the value you function. time you recursively invoke r you are here 4 59 fresh pypi Update PyPI with your new code Go ahead and edit your nester.py module (in the nester folder) to invoke your function properly. Now that you have a new version of your module, it’s a good idea to update the distribution that you uploaded to PyPI. With your code amended, there’s also a small change needed to your distribution’s setup.py program. You’ve changed your API, so adjust the value associated with version in setup.py. Let’s move from version 1.0.0 to 1.1.0: from distutils.core import setup setup( name = 'nester', version = '1.1.0', py_modules = ['nester'], author = 'hfpython', Change the va associated withlue to indicate to P “version” this is indeed a yPI that new version. author_email = 'hfpython@headfirstlabs.com', url = '', description = 'A simple printer of nested lists', ) Just as you did when you created and uploaded your distribution, invoke the setup.py program within your distribution folder to perform the upload: File Edit Window Help UploadAgain Don’t you just love those “200 OK” messages? $ python3 setup.py sdist upload running sdist running check reading manifest file 'MANIFEST' creating nester-1.1.0 making hard links in nester-1.1.0... hard linking nester.py -> nester-1.1.0 hard linking setup.py -> nester-1.1.0 Creating tar archive removing 'nester-1.1.0' (and everything under it) running upload Submitting dist/nester-1.1.0.tar.gz to Server response (200): OK $ Your new distribution is now available on PyPI. 60 Chapter 2 sharing your code Cool.There’s a new version of “nester” on PyPI. Take a look. 
Mark: Take a look at this, guys…the nester module has been updated on PyPI. Bob: Version 1.1.0… Laura: I wonder what’s changed? Mark: It still works with nested lists of lists, but now you can see the nested structure on screen, which I think is pretty cool. Laura: And useful. I’ve been waiting for that feature. Bob: Eh…OK…but how do I upgrade my existing local copy? Mark: Just follow the same steps as when you downloaded and installed nester from PyPI the first time. Bob: So I download the package file, unpack it, and ask setup.py to install it into my Python for me? Mark: Yes. It couldn’t be any easier. Laura: And what about my existing version of nester; what happens to that “old” version? Bob: Yeah…do I have two nester modules now? Bob Mark Laura Mark: No. When you use setup.py to install the latest version it becomes the current version and effectively replaces the previous module, which was the 1.0.0 release. Bob: And PyPI knows to give you the latest version of the module, too, right? Mark: Yes, when you surf the PyPI website and search for nester, you are always provided with the latest version of the module. Laura: Well, I use this module all the time and I’ve been waiting for this feature. I think I’ll update right away. Mark: I’ve already upgraded mine, and it works a treat. Bob: Yeah, I use it a lot, too, so I guess I’ll keep my system up to date and install the latest version. It’s probably not a good idea to rely on out-of-date software, right? Mark: I’d say. And, there’s nothing quite like progress. Laura: Catch you later, guys, I’ve got work to do. Bob: Me, too. I’m off to PyPI to grab the latest nester and install it into my local copy of Python. I’ll give it a quick test to confirm all is OK. Mark: Later, dudes… you are here 4 61 unhappy user You’ve changed your API Your new version of nester is indeed better, but not for all your users. Ah, phooey! I can’t believe it... 
I installed the latest version of “nester” from PyPI, and now all of my code that uses your function is not working. What did you do?!? Get with the program, Bob. You have TypeError’s everywhere... In your rush to release the lates and greatest version of your module, you forgot about some of your existing users. Recall that not all of your users want the new nested printing feature. However, by adding the second argument to print_lol(), you’ve changed your function’s signature, which means your module has a different API. Anyone using the old API is going to have problems. The ideal solution would be to provide both APIs, one which switches on the new feature and another that doesn’t. Maybe the feature could be optional? But how would that work? 62 Chapter 2 sharing your code Use optional arguments To turn a required argument to a function into an optional argument, provide the argument with a default value. When no argument value is provided, the default value is used. When an argument value is provided, it is used instead of the default. The key point is, of course, that the default value for the argument effectively makes the argument optional. To provide a default value for any function argument, specify the default value after the argument name: Both argumen are REQUIREDts. def print_lol(the_list, level): def print_lol(the_list, level=0): The addition of a default value has turned “level” into an OPTIONAL argument. With the default value for the argument defined, you can now invoke the function in a number of different ways: Invoke the function and provide both arguments. nester.print_lol(movies, 0) nester.print_lol(movies) Invoke the function with both arguments, but provide an alternative starting value for the second argument. Invoke the function with one argument and use the default value for the second. nester.print_lol(movies, 2) Your function now supports different signatures, but the functonality remains as it was. 
you are here 4 63 idle session Amend your code to give the level argument a default value of 0 and then load your code into the IDLE editor. Press F5 to load the code into the shell and then follow along to confirm that the latest version of your function works as expected. Start by defining a short list of lists and use the function to display the list on screen: >>> names = ['John', 'Eric', ['Cleese', 'Idle'], 'Michael', ['Palin']] >>> print_lol(names, 0) John The standard behavior works as expected, with nested lists indented. Eric Cleese Idle Michael Palin Now try to do the same thing without specifying the second argument. Let’s rely on the default value kicking in: >>> print_lol(names) John Eric Cleese Idle Without specifying the second argument, the default is used and works, too. Michael Palin Now specify a value for the second argument and note the change in the function’s behavior: >>> print_lol(names, 2) John Eric Cleese Idle Specify an alternative value for the second argument and the indenting starts from that level. Michael Palin One final example provides what looks like a silly value for the second argument. Look what happens: >>> print_lol(names, -9) John Eric Cleese Idle Michael Palin 64 Chapter 2 Using a negative value effectively switches OFF the indenting, as the count for “level” is unlikely to become a positive integer. This looks exactly like the original output from version 1.0.0, right? sharing your code Your module supports both APIs Well done! It looks like your module is working well, as both APIs, the original 1.0.0 API and the newer 1.1.0 API, can now be used. Let’s take a moment to create and upload a new distribution for PyPI. As before, let’s amend the version setting in the setup.py program: name = 'nester', version = '1.2.0', py_modules = ['nester'], Once again, be sure to change the value associated with “version” in “setup.py”. 
And with the code changes applied, upload this new version of your distribution to PyPI: File Edit Window Help UploadThree This all looks fine and dandy. $ python3 setup.py sdist upload running sdist running check reading manifest file 'MANIFEST' creating nester-1.2.0 making hard links in nester-1.2.0... hard linking nester.py -> nester-1.2.0 hard linking setup.py -> nester-1.2.0 Creating tar archive removing 'nester-1.2.0' (and everything under it) running upload Submitting dist/nester-1.2.0.tar.gz to Server response (200): OK $ Success! The messages from setup.py confirm that the your latest version of nester is up on PyPI. Let’s hope this one satisfies all of your users. Consider your code carefully. How might some of your users still have a problem with this version of your code? you are here 4 65 faulty default Your API is still not right Although the API lets your users invoke the function in its original form, the nesting is switched on by default. This behavior is not required by everyone and some people aren’t at all happy. Another version of “nester” has been released...but its default behavior might not be what you want. Funny...it works fine for me. Of course, if you have some functionality that really ought to be optional (that is, not the default), you should adjust your code to make it so. But how? One solution is to add a third argument which is set to True when the indenting is required and False otherwise. If you ensure that this argument is False by default, the original functonality becomes the default behavior and users of your code have to request the new indenting feature explicitly. Let’s look at adding this final revision. 66 Chapter 2 I can’t believe it! My programs were back to running fine, but now everything is indented. Has this thing changed again?!? sharing your code 1 Amend your module one last time to add a third argument to your function. 
Call your argument indent and set it initially to the value False—that is, do not switch on indentation by default. In the body of your function, use the value of indent to control your indentation code. Note: to save a bit of space, the comments from the module are not shown here. Of course, you need to make the necessary adjustments to your comments to keep them in sync with your code. Put the extra argument here. def print_lol(the_list, , level=0): for each_item in the_list: if isinstance(each_item, list): print_lol(each_item, What needs to go in here? , level+1) else: de Add a line of con he w to control rs. indenting occu for tab_stop in range(level): print("\t", end='') print(each_item) 2 With your new code additions in place, provide the edit you would recommend making to the setup.py program prior to uploading this latest version of your module to PyPI: 3 Provide the command you would use to upload your new distribution to PyPI: you are here 4   67 adding an argument 1 You were to amend your module one last time to add a third argument to your function. You were to call your argument indent and set it initially to the value False—that is, do not switch on indentation by default. In the body of your function, you were to use the value of indent to control your indentation code. Did you include the default value? def print_lol(the_list, indent=False , level=0): for each_item in the_list: if isinstance(each_item, list): print_lol(each_item, else: A simple “if” s statement doe the trick. 2 if indent : indent s changed, Your signatureuphadate this so be sure to invocation. , level+1) Don’t forget the colon at the end for tab_stop in range(level): of the “if” line. print("\t", end='') print(each_item) A sweet alternative to this “for” is this code: print("\t" * level, loop end=''). 
With your new code additions in place, you were to provide the edit you would recommend making to the setup.py program prior to uploading this latest version of your module to PyPI: Edit “setup.py” so that it reads: version = ‘1.3.0’, It’s a new version of your module, so be sure to change the value associated with “version” in your “setup.py” file. 3 You were to provide the command you would use to upload your new distribution to PyPI: python3 setup.py sdist upload Remember: if you are on Windows, use “C:\Python31\python.exe” instead of “python3”. 68 Chapter 2 sharing your code A final test of the functionality should convince you that your module is now working exactly the way you and your users want it to. Let’s start with the original, default behavior: >>> names = ['John', 'Eric', ['Cleese', 'Idle'], 'Michael', ['Palin']] >>> print_lol(names) John The original, default functionality is restored (that should please Bob). Eric Cleese Idle Michael Palin Next, turn on indentation by providing True as the second argument: >>> names = ['John', 'Eric', ['Cleese', 'Idle'], 'Michael', ['Palin']] >>> print_lol(names, True) John Eric Cleese Idle Michael By providing a second argument, it’s possible to switch on indented output (keeping Laura happy). Palin And, finally, control where indentation begins by providing a third argument value: >>> names = ['John', 'Eric', ['Cleese', 'Idle'], 'Michael', ['Palin']] >>> print_lol(names, True, 4) John Eric Cleese Idle Indenting from a specific tab-stop is also possible. Michael Palin Do this! Go ahead and edit your setup.py file; then upload your distribution to PyPI. you are here 4 69 one module for all Your module’s reputation is restored Congratulations! Word of your new and improved module is spreading fast. Lots of PyPI hits already. I told you this was good. Great work! I love that I can switch indentation on and off. My programs are back to working the way I want them to, so I’m a happy guy. Thanks! 
This is as close as Bob gets to a smile. But trust us, he’s happy. § Your Python skills are starting to build You’ve created a useful module, made it shareable, and uploaded it to the PyPI website. Programmers all over the world are downloading and using your code in their projects. Keep up the good work. 70 Chapter 2 sharing your code Your Python Toolbox Python Lingo e a “triple-quoted string” to includ e. cod a multiple-line comment in your on Package Index and • “PyPI” is the Pyth is well worth a visit. place in Python’s • A “namespace” is a memory where names exist. ace is known as • Python’s main namesp __main__. • Use IDLE Notes F5 to “run” the code in the IDLE edit window. • When you press F5 to “load” a code into the IDLE shell, the momodule’s names are specifically imported intdule’s IDLE’s namespace. This is a conveno when using IDLE. Within your cod ience need to use the import statemente, you explicitly. A module is a text file that contains Python code. The distribution utilities let you turn your module into a shareable package. The setup.py program provides metadata about your module and is used to build, install, and upload your packaged distribution. Import your module into other programs using the import statement. Each module in Python provides its own namespace, and the namespace name is used to qualify the module’s functions when invoking them using the module. function() form. Specifically import a function from a module into the current namespace using the from module import function form of the import statement. Use # to comment-out a line of code or to add a short, one-line comment to your program. The built-in functions (BIFs) have their own namespace called __builtins__, which is automatically included in every Python program. The range() BIF can be used with for to iterate a fixed number of times. Including end=’’ as a argument to the print() BIF switches off its automatic inclusion of a new-line on output. 
Arguments to your functions are optional if you provide them with a default value. you are here 4 71 CHAPTER 2 You’ve got Chapter 2 under your belt and you’ve added some key Python goodies to your toolbox. 3 files and exceptions Dealing with errors I always thought he was exceptional...especially when it comes to processing my files.. this is a new chapter 73 getting data in Data is external to your program Most of your programs conform to the input-process-output model: data comes in, gets manipulated, and then is stored, displayed, printed, or transferred. Data comes fr. om lots of places DBMS So far, you’ve learned how to process data as well as display it on screen. But what’s involved in getting data into your programs? Specifically, what’s involved in reading data from a file? How does Python read data from a file? 74 Chapter 3 I’m ready for your data...just give it to me, baby! files and exceptions It’s all lines of text The basic input mechanism in Python is line based: when read into your program from a text file, data arrives one line at a time. Python’s open() BIF lives to interact with files. When combined with a for statement, reading files is straightforward. Your data in a text file called “sketch.txt”. open() Your data as individual lines. When you use the open() BIF to access your data in a file, an iterator is created to feed the lines of data from your file to your code one line at a time. But let’s not get ahead of ourselves. For now, consider the standard openprocess-close code in Python: Do this! Open… …Process… the_file = open('sketch.txt') # Do something with the data # in "the_file". the_file.close() Create a folder called HeadFirstPython and a subfolder called chapter3. With the folders ready, download sketch.txt from the Head First Python support website and save it to the chapter3 folder. Let’s use IDLE to get a feel for Python’s file-input mechanisms. 
you are here 4 75 idle session Start a new IDLE sesson and import the os module to change the current working directory to the folder that contains your just-downloaded data file: Import “os” from the Standard Library. What’s the current working directory? >>> os.getcwd() '/Users/barryp/Documents' Change to the folder that contains your data file. >>> import os >>> os.chdir('../HeadFirstPython/chapter3') >>> os.getcwd() Confirm you are now in the right place. '/Users/barryp/HeadFirstPython/chapter3' Now, open your data file and read the first two lines from the file, displaying them on screen: >>> data = open('sketch.txt') >>> print(data.readline(), end='') Open a named file and assign the file to a file object called “data”. Man: Is this the right room for an argument? >>> print(data.readline(), end='') Other Man: I've told you once. Use the “readline()” method to grab a line from the file, then use the “print()” BIF to display it on screen. Let’s “rewind” the file back to the start, then use a for statement to process every line in the file: >>> data.seek(0) 0 Use the “seek()” method to return to the start of the file. And yes, you can use “tell()” with Python’s files, too. >>> for each_line in data: print(each_line, end='') This code should look familiar: it’s iteration using the file’s data as a standard input. Man: Is this the right room for an argument? Other Man: I've told you once. Man: No you haven't! Every line of the data is displayed on screen (although for space reasons, it is abridged here). Other Man: Yes I have. Man: When? Other Man: Just now. Man: No you didn't! ... Man: (exasperated) Oh, this is futile!! (pause) Other Man: No it isn't! Man: Yes it is! >>> data.close() 76 Chapter 3 Since you are now done with the file, be sure to close it. files and exceptions Take a closer look at the data Look closely at the data. 
It appears to conform to a specific format: The cast member’s role A colon, followed by a space character The line spoken by the cast member Man: Is this the right room for an argument? Other Man: I’ve told you once. Man: No you haven’t! Other Man: Yes I have. Man: When? Other Man: Just now. Man: No you didn’t! With this format in mind, you can process each line to extract parts of the line as required. The split() method can help here: Man: Is this the right room for an argument? ()” method Invoke the “splhit the “each_line” associated wit k the string string and breais found. whenever a “:” each_line.split(":") Man This tells “split()” what to split on. Is this the right room for an argument? The split() method returns a list of strings, which are assigned to a list of target identifiers. This is known as multiple assignment: A list of target identifiers on the left… …are assigned the strings return ed by “split()”. (role, line_spoken) = each_line.split(":") Using the example data from abo ve, “role” is assigned the string “Man”, wh ereas… string “Is this …“line_spoken”: is assigned the ?” ent the right room for an argum Well? Is it? § you are here 4 77 idle session Let’s confirm that you can still process your file while splitting each line. Type the following code into IDLE’s shell: >>> data = open('sketch.txt') Open the data file. >>> for each_line in data: (role, line_spoken) = each_line.split(':') print(role, end='') print(' said: ', end='') print(line_spoken, end='') Man said: Is this the right room for an argument? Other Man said: Man said: Yes I did! I'm telling you, I did! You did not! Other Man said: Man said: This all looks OK. You didn't! Other Man said: Man said: Just now. No you didn't! Other Man said: Man said: Yes I have. When? Other Man said: Man said: I've told you once. No you haven't! Other Man said: Man said: each part from Process the data, extractinhg par t on screen. 
each line and displaying eac Oh I'm sorry, is this a five minute argument, or the full half hour? Ah! (taking out his wallet and paying) Just the five minutes. Other Man said: Just the five minutes. Thank you. Other Man said: Anyway, I did. Man said: You most certainly did not! Traceback (most recent call last): File "<pyshell#10>", line 2, in <module> (role, line_spoken) = each_line.split(':') sly Whoops! There’s something seriou wrong here. ValueError: too many values to unpack It’s a ValueError, so that must mean there’s something wrong with the data in your file, right? 78 Chapter 3 files and exceptions Know your data Your code worked fine for a while, then crashed with a runtime error. The problem occurred right after the line of data that had the Man saying, “You most certainly did not!” Let’s look at the data file and see what comes after this successfully processed line: M a n : You didn't! Other Man: I'm telling you, I did! Man: You did not! Other Man: Oh I'm sorry, is this a five minute argument, or the full half hour? Man: Ah! (taking out his wallet and paying) Just the five minutes. Other Man: Just the five minutes. Thank you. Other Man: Anyway, I did. Man: You most certainly did not! The error occurs AFTER this line of data. Other Man: Now let's get one thing quite clear: I most definitely told you! Man: Oh no you didn't! Other Man: Oh yes I did! Notice anything? Notice anything about the next line of data? The next line of data has two colons, not one. This is enough extra data to upset the split() method due to the fact that, as your code currently stands, split()expects to break the line into two parts, assigning each to role and line_spoken, respectively. When an extra colon appears in the data, the split() method breaks the line into three parts. Your code hasn’t told split() what to do with the third part, so the Python interpreter raises a ValueError, complains that you have “too many values,” and terminates. A runtime error has occurred. 
What approach might you take to solve this dataprocessing problem? Do this! To help diagnose this problem, let’s put your code into its own file called sketch.py. You can copy and paste your code from the IDLE shell into a new IDLE edit window. you are here 4 79 ask for help Know your methods and ask for help It might be useful to see if the split() method includes any functionality that might help here. You can ask the IDLE shell to tell you more about the split() method by using the help() BIF. >>> help(each_line.split) Help on built-in function split: Looks like “split()” takes an optional argument. split(beans). The optional argument to split() controls how many breaks occur within your line of data. By default, the data is broken into as many parts as is possible. But you need only two parts: the name of the character and the line he spoke. If you set this optional argument to 1, your line of data is only ever broken into two pieces, effectively negating the effect of any extra colon on any line. Let’s try this and see what happens. Geek Bits IDLE gives you searchable access to the entire Python documentation set via its Help ➝ Python Docs menu option (which will open the docs in your web browser). If all you need to see is the documentation associated with a single method or function, use the help() BIF within IDLE’s shell. 80 Chapter 3 split(beans, 1) files and exceptions Here’s the code in the IDLE edit window. Note the extra argument to the split() method. The extra argument controls how “split()” splits. With the edit applied and saved, press F5 (or select Run Module from IDLE’s Run menu) to try out this version of your code: >>> ================================ RESTART ================================ >>> Man said: Is this the right room for an argument? Other Man said: Man said: No you haven't! Other Man said: Man said: I've told you once. Yes I have. When? Other Man said: Just now. ... Other Man said: Man said: Oh yes I did! Oh no you didn't! 
Other Man said: Man said: Now let's get one thing quite clear: I most definitely told you! Oh no you didn't! Other Man said: Man said: Anyway, I did. You most certainly did not! Other Man said: Man said: The displayed output is abridged to allow the important stuff to fit on this page. Cool. You made it past the line with two colons… Oh yes I did! Oh look, this isn't an argument! ere’s …but your joy is short lived. Th ANOTHER ValueError!! Traceback (most recent call last): File "/Users/barryp/HeadFirstPython/chapter4/sketch.py", line 5, in <module> (role, line_spoken) = each_line.split(':', 1) ValueError: need more than 1 value to unpack That’s enough to ruin your day. What could be wrong now? you are here 4 81 missing colon Know your data (better) Your code has raised another ValueError, but this time, instead of complaining that there are “too many values,” the Python interpreter is complaining that it doesn’t have enough data to work with: “need more than 1 value to unpack.” Hopefully, another quick look at the data will clear up the mystery of the missing data. Other Man: Now let's get one thing quite clear: I most definitely told you! Man: Oh no you didn't! Other Man: Oh yes I did! Man: Oh no you didn't! Other Man: Oh yes I did! Man: Oh look, this isn't an argument! (pause) What’s this?!? Some of the a doesn’t conform to the expected format…which dat can’t be good. Other Man: Yes it is! Man: No it isn’t! (pause) Man: It's just contradiction! Other Man: No it isn't! The case of the missing colon Some of the lines of data contain no colon, which causes a problem when the split() method goes looking for it. The lack of a colon prevents split() from doing its job, causes the runtime error, which then results in the complaint that the interpreter needs “more than 1 value.” It looks like you still have problems with the data in your file. What a shame it’s not in a standard format. 
82 Chapter 3 files and exceptions Two very different approaches When you have to deal with a bunch of exceptional situations, the best approach is to add extra logic. If there’s more stuff to worry about, you need more code. Jill Or you could decide to let the errors occur, then simply handle each error if and when it happens. That would be exceptional. Joe Jill’s suggested approach certainly works: add the extra logic required to work out whether it’s worth invoking split() on the line of data. All you need to do is work out how to check the line of data. Joe’s approach works, too: let the error occur, spot that it has happened, and then recover from the runtime error…somehow. Which approach works best here? you are here 4 83 find the substring Add extra logic Let’s try each approach, then decide which works best here. In addition to the split() method, every Python string has the find() method, too. You can ask find() to try and locate a substring in another string, and if it can’t be found, the find() method returns the value -1. If the method locates the substring, find() returns the index position of the substring in the string. Assign a string to the each_line variable that does not contain a colon, and then use the find() method to try and locate a colon: >>>>> each_line.find(':') -1 The string does NOT contain a colon, so “find()” returns -1 for NOT FOUND. Press Alt-P twice to recall the line of code that assigns the string to the variable, but this time edit the string to include a colon, then use the find() method to try to locate the colon: >>>>> each_line.find(':') 10 The string DOES contain a colon, so “find()” returns a positive index value. And you thought this approach wouldn’t work? Based on this IDLE session, I think this could do the trick. 84 Chapter 3 files and exceptions Adjust your code to use the extra logic technique demonstrated on the previous page to deal with lines that don’t contain a colon character. 
data = open('sketch.txt') What condition needs to go here? for each_line in data: if (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') data.close() Can you think of any potential problems with this technique? Grab your pencil and write down any issues you might have with this approach in the space provided below: you are here 4   85 substring found You were to adjust your code to use the extra logic technique to deal with lines that don’t contain a colon character: data = open('sketch.txt') for each_line in data: the Note the use of ch hi w , d or “not” keyw lue of va he negates t the condition. It takes a few seconds to get head around this condition, butyour it does work. if not each_line.find(':') == -1: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') data.close() issues It’s OK if your. Just so are different e similar long as they ar to these. You were to think of any potential problems with this technique, grabbing your pencil to write down any issues you might have with this approach. There might be a problem with this code if the format of the data file changes, which will require changes to the condition. The condition used by the if statement is somewhat hard to read and understand. This code is a little “fragile”…it will break if another exceptional situation arises. 86 Chapter 3 files and exceptions Test Drive Amend your code within IDLE’s edit window, and press F5 to see if it works. No errors this time. Your program works…although it is fragile. If the format of the file changes, your code will need to change, too, and more code generally means more complexity. Adding extra logic to handle exceptional situations works, but it might cost you in the long run. Maybe it’s time for a different approach? One that doesn’t require extra logic, eh? 
you are here 4 87 exceptional catch Handle exceptions Have you noticed that when something goes wrong with your code, the Python interpreter displays a traceback followed by an error message? The traceback is Python’s way of telling you that something unexpected has occurred during runtime. In the Python world, runtime errors are called exceptions. Whoooah! I don’t know what to do with this error, so I’m gonna raise an exception...this really is someone else’s problem. >>> if not each_ Traceback (most r File “<pyshell (role, line_ ValueError: too m Oooh, yuck! It looks like there’s a bug. Of course, if you decide to ignore an exception when it occurs, your program crashes and burns. But here’s the skinny: Python lets you catch exceptions as they occur, which gives you a chance to possibly recover from the error and, critically, not crash. By controlling the runtime behavior of your program, you can ensure (as much as possible) that your Python programs are robust in the face of most runtime errors. Try the code first. Then deal with errors as they happen. 88 Chapter 3 files and exceptions Try first, then recover Rather than adding extra code and logic to guard against bad things happening, Python’s exception handling mechanism lets the error occur, spots that it has happened, and then gives you an opportunity to recover. During the normal flow of control, Python tries your code and, if nothing goes wrong, your code continues as normal. During the exceptional flow of control, Python tries your code only to have something go wrong, your recovery code executes, and then your code continues as normal. Exceptional flow Normal flow Python tries your code. Your recovery code executes. It’s all OK, so keep going… The try/except mechanism Python includes the try statement, which exists to provide you with a way to systematically handle exceptions and errors at runtime. The general form of the try statement looks like this: Python tries your code, but fails! Crash!
Your exception is handled. Then you keep going… try: Both “try” and “except” are Python keywords. your code (which might cause a runtime error ) except: your error-recovery code you are here 4 89 allow errors Hang on, here! Are you actually letting errors occur on purpose? ARE YOU MAD?!? No. Not mad. And, yes. Letting errors occur. If you try to code for every possible error, you’ll be at it for a long time, because all that extra logic takes a while to work out. Paradoxically, when you worry less about covering every possible error condition, your coding task actually gets easier. 90 Chapter 3 files and exceptions Identify the code to protect In order to plug into the Python exception handling mechanism, take a moment to identify the code that you need to protect. Study your program and circle the line or lines of code that you think you need to protect. Then, in the space provided, state why. data = open('sketch.txt') for each_line in data: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') data.close() State your reason why here. Q: Something has been bugging me for a while. When the split() method executes, it passes back a list, but the target identifiers are enclosed in regular brackets, not square brackets, so how is this a list? A: Well spotted. It turns out that there are two types of list in Python: those that can change (enclosed in square brackets) and those that cannot be changed once they have been created (enclosed in regular brackets). The latter is an immutable list, more commonly referred to as a tuple. Think of tuples as the same as a list, except for one thing: once created, the data they hold cannot be changed under any circumstances. Another way to think about tuples is to consider them to be a constant list. 
At Head First, we pronounce “tuple” to rhyme with “couple.” Others pronounce “tuple” to rhyme with “rupal.” There is no clear concensus as to which is correct, so pick one and stick to it. you are here 4 91 code to protect You were to study your program and circle the line or lines of code that you think you need to protect. Then, in the space provided, you were to state why. data = open('sketch.txt') for each_line in data: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') These four lines of code all need to be protected. data.close() If the call to “split()” fails, you don’t want the three “print()” statements executing, so it’s best to protect all four lines of the “if” suite, not just the line of code that calls “split()”. OK. I get that the code can be protected from an error. But what do I do when an error actually occurs? 92 Chapter 3 Yeah...good point. It’s probably best to ignore it, right? I wonder how... files and exceptions Take a pass on the error With this data (and this program), it is best if you ignore lines that don’t conform to the expected format. If the call to the split() method causes an exception, let’s simply pass on reporting it as an error. When you have a situation where you might be expected to provide code, but don’t need to, use Python’s pass statement (which you can think of as the empty or null statement.) Here’s the pass statement combined with try: data = open('sketch.txt') for each_line in data: try: This code is protected from runtime errors. (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') except: pass If a runtime error occurs, this code is executed. data.close() Now, no matter what happens when the split() method is invoked, the try statement catches any and all exceptions and handles them by ignoring the error with pass. Let’s see this code in action. Do this! 
Make the required changes to your code in the IDLE edit window. you are here 4 93 idle session With your code in the IDLE edit window, press F5 to run it. >>> ================================ RESTART ================================ >>> Man said: Is this the right room for an argument? Other Man said: Man said: No you haven’t! Other Man said: Man said: I’ve told you once. Yes I have. When? ... OOther Man said: Man said: Nonsense! (exasperated) Oh, this is futile!! Other Man said: Man said: This code works, and there are no runtime errors, either. No it isn’t! Yes it is! So...both approaches work. But which is better? 94 Chapter 3 files and exceptions Tonight’s talk: Approaching runtime errors with extra code and exception handlers Extra Code: Exception Handler: By making sure runtime errors never happen, I keep my code safe from tracebacks. At the cost of added complexity…. Complexity never hurt anyone. I’ll be sure to remind you of that the next time you’re debugging a complex piece of code at 4 o’clock in the morning. I just don’t get it. You’re more than happy for your code to explode in your face…then you decide it’s probably a good idea to put out the fire?!? Yes. I concentrate on getting my work done first and foremost. If bad things happen, I’m ready for them. But the bad things still happen to you. They never happen with me, because I don’t let them. Until something else happens that you weren’t expecting. Then you’re toast. Well…that depends. If you’re smart enough—and, believe me, I am—you can think up all the possible runtime problems and code around them. Sounds like a whole heap of extra work to me. Hard work never hurt anyone. You did hear me earlier about debugging at 4 AM, right? Sometimes I think you actually enjoy writing code that you don’t need… Of course all my code is needed! How else can you code around all the runtime errors that are going to happen? Yeah…how many? Um, uh…most of them, I guess. You don’t know, do you? 
You’ve no idea what will happen when an unknown or unexpected runtime error occurs, do you? Look: just cut it out. OK? you are here 4 95 more errors What about other errors? It is true that both approaches work, but let’s consider what happens when other errors surface. OK, guys. How does you code react when the data file is deleted? Ummm...I’m not totally sure, to be honest... I guess that’ll produce another exception, but what happens in my code is anyone’s guess. Handling missing files Frank’s posed an interesting question and, sure enough, the problem caused by the removal of the data file makes life more complex for Jill and Joe. When the data file is missing, both versions of the program crash with an IOError. Do this! 96 Chapter 3 Rename the data file, then run both versions of your program again to confirm that they do indeed raise an IOError and generate a traceback. files and exceptions Add more error-checking code… If you’re a fan of the “let’s not let errors happen” school of thinking, your first reaction will be to add extra code to check to see if the data file exists before you try to open it, right? Let’s implement this strategy. Python’ s os module has some facilities that can help determine whether a data file exists, so we need to import it from the Standard Library, then add the required check to the code: import os Check whether the file exists. if os.path.exists('sketch.txt'): data = open('sketch.txt') for each_line in data: All of this code remains unchanged. Inform the user of the bad news. if not each_line.find(':') == -1: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') data.close() else: print('The data file is missing!') A quick test of the code confirms that this new problem is dealt with properly. With this new version of your code in IDLE’s edit window, press F5 to confirm all is OK. 
>>> ================================ RESTART ================================ >>> The data file is missing! Exactly what was expected. Cool. >>> you are here 4 97 take it to another level …Or add another level of exception handling If you are a fan of the “handle exceptions as they occur” school of thinking, you’ll simply wrap your code within another try statement. Add another “try” statement. try: data = open('sketch.txt') for each_line in data: As with the other program, all of this code remains unchanged. try: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') except: Give the user the bad news. pass data.close() except: print('The data file is missing!') Another quick test is required, this time with the version of your program that uses exception handling. Press F5 to give it a spin. >>> ================================ RESTART ================================ >>> The data file is missing! >>> 98 Chapter 3 As expected, this version of the , too. program handles the missing file files and exceptions So, which approach is best? Well…it depends on who you ask! Here are both versions of your code: This version uses extra logic to handle File I/O errors. This version uses another “try” statement to handle File I/O err ors . Let’s ask a simple question about these two versions of your program: What do each of these programs do? Grab your pencil. In box 1, write down what you think the program on the left does. In box 2, write down what you think the program on the right does. 1 2 you are here 4 99 keep it simple actually need There’s a lot to write, so you than was on more space for your descriptie. provided on the previous pag 1 You were to grab your pencil, then in box 1, write down what you thought the program on the left does. In box 2, write down what you thought the program on the right does. 
The code on the left starts by importing the “os” library, and then it uses “path.exists” to make sure the data file exists, before it attempts to open the data file. Each line from the file is then processed, but only after it has determined that the line conforms to the required format by checking first for a single “:” character in the line. If the “:” is found, the line is processed; otherwise, it’s ignored. When we’re all done, the data file is closed. And you get a friendly message at the end if the file is not found. Now…that’s more like it. 2 The code on the right opens a data file, processes each line in that file, extracts the data of interest and displays it on screen. The file is closed when done. If any exceptions occur, this code handles them. Complexity is rarely a good thing Do you see what’s happening here? As the list of errors that you have to worry about grows, the complexity of the “add extra code and logic” solution increases to the point where it starts to obscure the actual purpose of the program. This is not the case with the exception-handling solution, in which it’s obvious what the main purpose of the program is. By using Python’s exception-handling mechanism, you get to concentrate on what your code needs to do, as opposed to worrying about what can go wrong and writing extra code to avoid runtime errors. Prudent use of the try statement leads to code that is easier to read, easier to write, and—perhaps most important—easier to fix when something goes wrong. Concentrate on what your code needs to do. 100 Chapter 3 files and exceptions You’re done…except for one small thing Your exception-handling code is good. In fact, your code might be too good in that it is too general. At the moment, no matter what error occurs at runtime, it is handled by your code because it’s ignored or an error message is displayed.
But you really need to worry only about IOErrors and ValueErrors, because those are the types of exceptions that occurred earlier when you were developing your program. Although it is great to be able to handle all runtime errors, it’s probably unwise to be too generic…you will want to know if something other than an IOError or ValueError occurs as a result of your code executing at runtime. If something else does happen, your code might be handling it in an inappropriate way. try: data = open('sketch.txt') for each_line in data: try: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') except: pass data.close() except: print('The data file is missing!') This code runs when ANY runtime error occurs within the code that is being tried. As your code is currently written, it is too generic. Any runtime error that occurs is handled by one of the except suites. This is unlikely to be what you want, because this code has the potential to silently ignore runtime errors. You need to somehow use except in a less generic way. you are here 4 101 specify exceptions Be specific with your exceptions If your exception-handling code is designed to deal with a specific type of error, be sure to specify the error type on the except line. In doing so, you’ll take your exception handling code from generic to specific. try: data = open('sketch.txt') for each_line in data: try: (role, line_spoken) = each_line.split(':', 1) print(role, end='') print(' said: ', end='') print(line_spoken, end='') except ValueError: pass data.close() except IOError: Specify the type of runtime error you are handling. print('The data file is missing!') Of course, if a different type of runtime error occurs, it is no longer handled by your code, but at least now you’ll get to hear about it. When you are specific about the runtime errors your code handles, your programs no longer silently ignore some runtime errors.
Using “try/except” lets you concentrate on what your code needs to do... 102 Chapter 3 ...and it lets you avoid adding unnecessary code and logic to your programs. That works for me! files and exceptions Your Python Toolbox Python Lingo • An “exception” occurs as a result of a runtime error, producing a traceback. • A “traceback” is a detailed description of the runtime error that has occurred. IDLE Notes • Access Python’s documentation by choosing Python Docs from IDLE’s Help menu. The Python 3 documentation set should open in your favorite web browser. Use the open() BIF to open a disk file, creating an iterator that reads data from the file one line at a time. The readline() method reads a single line from an opened file. The seek() method can be used to “rewind” a file to the beginning. The close() method closes a previously opened file. The split() method can break a string into a list of parts. An unchangeable, constant list in Python is called a tuple. Once list data is assigned to a tuple, it cannot be changed. Tuples are immutable. A ValueError occurs when your data does not conform to an expected format. An IOError occurs when your data cannot be accessed properly (e.g., perhaps your data file has been moved or renamed). The help() BIF provides access to Python’s documentation within the IDLE shell. The find() method locates a specific substring within another string. The not keyword negates a condition. The try/except statement provides an exception-handling mechanism, allowing you to protect lines of code that might result in a runtime error. The pass statement is Python’s empty or null statement; it does nothing. you are here 4 103 CHAPTER 3 You’ve got Chapter 3 under your belt and you’ve added some key Python techniques to your toolbox. 4 persistence Saving data to files I’m in a bit of a pickle...my data is not as persistent as it could be.. So…flip the page and let’s get started learning them.
this is a new chapter 105 save your work Programs produce data It’s a rare program that reads data from a disk file, processes the data, and then throws away the processed data. Typically, programs save the data they process, display their output on screen, or transfer data over a network. You have lots of choices about which type of disk file to use. Your data is ready. Where should I put it for you? DBMS Hey, that’s nice output...exactly what I want. Before you learn what’s involved in writing data to disk, let’s process the data from the previous chapter to work out who said what to whom. When that’s done, you’ll have something worth saving. 106 Chapter 4 persistence Code Magnets Add the code magnets at the bottom of this page. try: data = open('sketch.txt') for each_line in data: try: (role, line_spoken) = each_line.split(':', 1) except ValueError: pass data.close() Here are your magnets. except IOError: print('The datafile is missing!') elif role == 'Other Man': man = [] if role == 'Man': line_spoken = line_spoken.strip() print(other) other = [] man.appen d(line_sp oken) other.app end(line_ spoken) print(man) you are here 4   107 process and print Code Magnets Solution Your were to add the code magnets. Assign an empty list to “man” and “other”. man = [] other = [] try: data = open('sketch.txt') for each_line in data: try: Assign the stripped string back onto itself. (role, line_spoken) = each_line.split(':', 1) line_spoken = line_spoken.strip() if role == 'Man': The “strip()” method removes unwanted whitespace from a string. man.append(line_spoken) elif role == 'Other Man': “elif” means “else if.” other.append(line_spoken) except ValueError: pass data.close() except IOError: print('The datafile is missing!') print(man) print(other) 108 Chapter 4 Conclude by displaying the processed data on screen. Update one of the lists based on who said what. persistence Test Drive Load your code into IDLE’s edit window and take it for a spin by pressing F5. 
Be sure to save your program into the same folder that contains sketch.txt. The code in IDLE’s edit window And here’s what appears on screen: the contents of the two lists. It worked, as expected. Surely Python’s open() BIF can open files for writing as well as reading, eh? Yes, it can. When you need to save data to a file, the open() BIF is all you need. you are here 4 109 open and close Open your file in write mode When you use the open() BIF to work with a disk file, you can specify an access mode to use. By default, open() uses mode r for reading, so you don’t need to specify it. To open a file for writing, use mode w: The data file object The access model to use out = open("data.out", "w") The name of the file to write to By default, the print() BIF uses standard output (usually the screen) when displaying data. To write data to a file instead, use the file argument to specify the data file object to use: print("Norwegian Blues stun easily.", file=out) What gets written to the file When you’re done, be sure to close the file to ensure all of your data is written to disk. This is known as flushing and is very important: out.close() The name of the data file object to write to This is VERY important when writing to files. Geek Bits. 110 Chapter 4 persistence At the bottom of your program, two calls to the print() BIF display your processed data on screen. Let’s amend this code to save the data to two disk files instead. Call your disk files man_data.txt (for what the man said) and other_data.txt (for what the other man said). Be sure to both open and close your data files, as well as protect your code against an IOError using try/except. 
man = [] other = [] try: data = open('sketch.txt') for each_line in data: try: (role, line_spoken) = each_line.split(':', 1) line_spoken = line_spoken.strip() if role == 'Man': man.append(line_spoken) elif role == 'Other Man': other.append(line_spoken) except ValueError: pass data.close() except IOError: print('The datafile is missing!') Go on, try. Open your two data files here. print(man, Be sure to close your files. print(other, ) ) Specify the files to write to when you invoke “print()”. Handle any exceptions here. you are here 4 111 save to disk At the bottom of your program, two calls to the print() BIF display your processed data on screen. You were to amend this code to save the data to two disk files instead. You were to call your disk files man_data.txt (for what the man said) and other_data.txt (for what the other man said). You were to make sure to both open and close your data files, as well as protect your code against an IOError using try/except. man = [] other = [] try: data = open('sketch.txt') for each_line in data: try: (role, line_spoken) = each_line.split(':', 1) line_spoken = line_spoken.strip() All of this code is unchanged. if role == 'Man': man.append(line_spoken) elif role == 'Other Man': other.append(line_spoken) except ValueError: pass data.close() except IOError: print('The datafile is missing!') try: man_file = open(‘man_data.txt’, ‘w’) other_file = open(‘other_data.txt’, ‘w’) print(man, file=man_file ) file=other_file print(other, man_file.close() other_file.close() except IOError: print('File error.’) 112 Chapter 4 ) Did you remember to open your files in WRITE mode? Open your two files, and assign each to file objects. Use the “print()” BIF to sav named lists to named disk filees.the Don’t forget to close BOTH files. Handle an I/O exception, should one occur. persistence Test Drive Perform the edits to your code to replace your two print() calls with your new file I/O code. 
Then, run your program to confirm that the data files are created: Before your program runs, there are no data files in your folder, just your code. When you run your code, this is all you see, what looks like an “empty” IDLE shell. After your program runs, two new files are CREATED in your folder. That code worked, too. You’ve created two data files, each holding the data from each of your lists. Go ahead and open these files in your favorite editor to confirm that they contain the data you expect. Consider the following carefully: what happens to your data files if the second call to print() in your code causes an IOError? you are here 4 113 close files Files are left open after an exception!. try: Ok. man_file = open('man_data.txt', 'w') other_file = open('other_data.txt', 'w') print(man, file=man_file) print(other, file=other_file) OK Not OK!! Crash! man_file.close() other_file.close() except IOError: OK print('File error.') These two lines of code DON’T get to run. OK. 114 Chapter 4 OK OK persistence Extend try with finally When you have a situation where code must always run no matter what errors occur, add that code to your try statement’s finally suite: try: man_file = open('man_data.txt', 'w') other_file = open('other_data.txt', 'w') No changes here, except that… print(man, file=man_file) print(other, file=other_file) except IOError: print('File error.') …the calls to “close()” are moved to here. finally: man_file.close() other_file.close()? you are here 4 115 no dumb questiohns Q: I’m intrigued. When you stripped the line_spoken data of unwanted whitespace, you assigned the result back to the line_spoken variable. Surely invoking the strip() method on line_spoken changed the string it refers to? A: Q: No, that’s not what happens. Strings in Python are immutable, which means that once a string is created, it cannot be changed. But you did change the line_spoken string by removing any unwanted whitespace, right? A: strip() Yes and no. 
What actually happens is that invoking the method on the line_spoken string creates a new string with leading and trailing whitespace removed. The new string is then assigned to line_spoken, replacing the data that was referred to before. In effect, it is as if you changed line_ spoken, when you’ve actually completely replaced the data it refers to. Q: A: So what happens to the replaced data? Python’s built-in memory management technology reclaims the RAM it was using and makes it available to your program. That is, unless some other Python data object is also referring to the string. Q: A: What? I don’t get it. It is conceivable that another data object is referring to the string referred to by line_spoken. For example, let’s assume you have some code that contains two variables that refer to the same string, namely “Flying Circus.” You then decide that one of the variables needs to be in all UPPERCASE, so you invoke the upper() method on it. The Python interperter takes a copy of the string, converts it to uppercase, and returns it to you. You can then assign the uppercase data back to the variable that used to refer to the original data. Q: But surely Python can work out how many variables are referring to any one particular string? A: It does, but only for the purposes of garbage collection. If you have a line of code like print('Flying Circus'), the string is not referred to by a variable (so any variable reference counting that’s going on isn’t going to count it) but is still a valid string object (which might be referred to by a variable) and it cannot have its data changed under any circumstances. Q: So Python variables don’t actually contain the data assigned to them? A: That’s correct. Python variables contain a reference to a data object.The data object contains the data and, because you can conceivably have a string object used in many different places throughout your code, it is safest to make all strings immutable so that no nasty side effects occur. 
Q: Isn’t it a huge pain not being able to adjust strings “in place”? A: No, not really. Once you get used to how strings work, it becomes less of an issue. In practice, you’ll find that this issue rarely trips you up. Q: A: Q: Yes, a few. There’s the tuple, which is an immutable list. Also, all of the number types are immutable. Other than learning which is which, how will I know when something is immutable? Q: A: Q: A: A: And the original data cannot change, because there’s another variable referring to it? Precisely. That’s why strings are immutable, because you never know what other variables are referring to any particular string. 116 Chapter 4 Are any other Python data types immutable? Don’t worry: you’ll know. If you try to change an immutable value, Python raises a TypeError exception. Of course: an exception occurs. They’re everywhere in Python, aren’t they? Yes. Exceptions make the world go ’round. persistence Knowing the type of error is not enough When a file I/O error occurs, your code displays a generic “File Error” message. This is too generic. How do you know what actually happened? Maybe the problem is that you can’t open the file? It could be that the file can be opened but not written to? Yeah, or it could be a permission error, or maybe your disk is full?. you are here 4 117 idle session Let’s see what happens when you try to open a file that doesn’t exist, such as a disk file called missing.txt. Enter the following code at IDLE’s shell: >>> try: data = open('missing.txt') print(data.readline(), end='') except IOError: print('File error') finally: data.close() There’s your error message, but… File error Traceback (most recent call last): File "<pyshell#8>", line 7, in <module> data.close() NameError: name 'data' is not defined …what’s this?!? Another except raised and it killed your code. 
As the file doesn’t exist, the data file object wasn’t created, which subsequently makes it impossible to call the close() method on it, so you end up with a NameError. A quick fix is to add a small test to the finally suite to see if the data name exists before you try to call close(). The locals() BIF returns a collection of names defined in the current scope. Let’s exploit this BIF to only invoke close() when it is safe to do so: finally: if 'data' in locals(): The “in” operator tests for membership. data.close() This is just the bit of code that needs to change. Press Alt-P to edit your code at IDLE’s shell. File error No extra exceptions this time. Just your error message. Here you’re searching the collection returned by the locals() BIF for the string data. If you find it, you can assume the file was opened successfully and safely call the close() method. If some other error occurs (perhaps something awful happens when your code calls the print() BIF), your exception-handling code catches the error, displays your “File error” message and, finally, closes any opened file. But you still are none the wiser as to what actually caused the error. 118 Chapter 4 persistence When an exception is raised and handled by your except suite, the Python interpreter passes an exception object into the suite. A small change makes this exception object available to your code as an identifier: except IOError as err: print('File error: ' + err) Give your exception object a name… …then use it as part of your error message. But when you try to run your code with this change made, another exception is raised: Traceback (most recent call last): File "<pyshell#18>", line 5, in <module> print('File error:' + err) TypeError: Can't convert 'IOError' object to str implicitly Whoops! Yet another exception; this time it’s a “TypeError”. This time your error message didn’t appear at all.
It turns out exception objects and strings are not compatible types, so trying to concatenate one with the other leads to problems. You can convert (or cast) one to the other using the str() BIF: except IOError as err: print('File error: ' + str(err)) Use the “str()” BIF to force the exception object to behave like a string. Now, with this final change, your code is behaving exactly as expected: File error: [Errno 2] No such file or directory: 'missing.txt' And you now get a specific error message that tells you exactly what went wrong. Of course, all this extra logic is starting to obscure the real meaning of your code. Wouldn’t it be dreamy if there were a way to take advantage of these mechanisms without the code bloat? I guess it’s just a fantasy... you are here 4 119 try with Use with to work with files: This is the usual “try/ except/finally” pattern. try: data = open('its.txt', "w") print("It's...", file=data) except IOError as err: print('File error: ' + str(err)) finally: if 'data' in locals(): data.close() try: with open('its.txt', "w") as data: print("It's...", file=data) The use of “with” negates the need for the “finally” suite. except IOError as err: print('File error: ' + str(err)). Geek Bits The with statement takes advantage of a Python technology called the context management protocol. 120 Chapter 4 persistence() Write your “with” code here. you are here 4 121 no finally You were to() try: with open(‘man_data.txt', ‘w') as man_file: Using two “with” statements to rewrite the code without the “finally” suite. print(man, file=man_file) with open(‘other_data.txt', ‘w') as other_file: print(other, file=other_file) except IOError as err: print(‘File error: ' + str(err)) Or combine the two “open()” calls into one “with” statement. Note the use of the comma.
with open('man_data.txt', 'w') as man_file, open('other_data.txt', 'w') as other_file: print(man, file=man_file) print(other, file=other_file) 122 Chapter 4 persistence Test Drive Add your with code to your program, and let’s confirm that it continues to function as expected. Delete the two data files you created with the previous version of your program and then load your newest code into IDLE and give it a spin. IDLE No errors appear in the shell, which indicates that the program ran successfully. If you check your folder, your two data files should’ve reappeared. Let’s take a closer look at the data files’ contents by opening them in your favorite text editor (or use IDLE). Here’s what the man said. Here’s what the other man said. You’ve saved the lists in two files containing what the Man said and what the Other man said. Your code is smart enough to handle any exceptions that Python or your operating system might throw at it. Well done. This is really coming along. you are here 4 123 unsuitable format Default formats are unsuitable for files Although your data is now stored in a file, it’s not really in a useful format. Let’s experiment in the IDLE shell to see what impact this can have. Use a with statement to open your data file and display a single line from it: >>> with open('man_data.txt') as mdf: print(mdf.readline()) Note: no need to close your file, because “with” does that for you. Geek Bits. 124 Chapter 4 persistence I guess I could write some custom parsing code to process the “internal format” used by “print()”. It shouldn’t take me all that long... It might be worth looking at using something other than a plain “print()” to format the data prior to saving it to the data file. I’d certainly look into it. Can you think of a function you created from earlier in this book that might help here? you are here 4 125 nonstandard output Why not modify print_lol()? This code currently displays your data on the screen. 
126 Chapter 4 persistence 1 Let’s add a fourth argument to your print_lol() function to identify a place to write your data to. Be sure to give your argument a default value of sys.stdout, so that it continues to write to the screen if no file object is specified when the function is invoked. Fill in the blanks with the details of your new argument. (Note: to save on space, the comments have been removed from this cod, but be sure to update your comments in your nester.py module after you’ve amended your code.) def print_lol(the_list, indent=False, level=0, ): for each_item in the_list: if isinstance(each_item, list): print_lol(each_item, indent, level+1, ) else: if indent: for tab_stop in range(level): print("\t", end='', print(each_item, ) ) 2 What needs to happen to the code in your with statement now that your amended print_lol() function is available to you? 3 List the name of the module(s) that you now need to import into your program in order to support your amendments to print_lol(). you are here 4 127 extend your function 1 You were to add a fourth argument to your print_lol() function to identify a place to write your data to, being sure to give your argument a default value of sys.stdout so that it continues to write to the screen if no file object is specified when the function is invoked. You were to fill in the blanks with the details of your new argument. (Note: to save on space, the comments have been removed from this code, but be sure to update those in your nester.py module after you’ve amended your code). e it a Add the fourth argument and giv default value. fh=sys.stdout def print_lol(the_list, indent=False, level=0, for each_item in the_list: if isinstance(each_item, list): print_lol(each_item, indent, level+1, fh ) else: if indent: for tab_stop in range(level): print("\t", end='', print(each_item, 2 file=fh file=fh ) ) ): Note: the signature has changed. Adjust the two calls to “print()” to use the new argument. 
What needs to happen to the code in your with statement now that your amended print_lol() function is available to you? The code needs to be adjusted so that instead of using the “print()” BIF, the code needs to invoke “print_lol()” instead. 3 List the name of the module(s) that you now need to import into your program in order to support your amendments to print_lol(). The program needs to import the amended “nester” module. 128 Chapter 4 persistence Test Drive Before taking your code for a test drive, you need to do the following: 1. Make the necessary changes to nester and install the amended module into your Python environment (see Chapter 2 for a refresher on this). You might want to upload to PyPI, too. 2. Amend your program so that it imports nester and uses print_lol() instead of print() within your with statement. Note: your print_lol() invocation should look something like this: print_lol(man, fh=man_file). When you are ready, take your latest program for a test drive and let’s see what happens: As before, there’s no output on screen. Let’s check the contents of the files to see what they look like now. What the man said is now legible. And here’s what the other man said. This is looking good. By amending your nester module, you’ve provided a facility to save your list data in a legible format. It’s now way easier on the eye. But does this make it any easier to read the data back in? you are here 4 129 brittle code Hang on a second...haven’t you been here before? You’ve already written code to read in lines from a data file and put ‘em into lists...do you like going around in circles?!?? 130 Chapter 4 persistence Custom Code Exposed This week’s interview: When is custom code appropriate? Head First: Hello, CC, how are you today? Custom Code: Hi, I’m great! And when I’m not great, there’s always something I can do to fix things. Nothing’s too much trouble for me. Here: have a seat. Head First: Why, thanks. Custom Code: Let me get that for you. 
It’s my new custom SlideBack&Groove™, the 2011 model, with added cushions and lumbar support…and it automatically adjusts to your body shape, too. How does that feel? Head First: Actually [relaxes], that feels kinda groovy. Custom Code: See? Nothing’s too much trouble for me. I’m your “go-to guy.” Just ask; absolutely anything’s possible when it’s a custom job. Head First: Which brings me to why I’m here. I have a “delicate” question to ask you. Custom Code: What?!? That’s where I excel: creating beautifully crafted custom solutions for folks with complex computing problems. Head First: But if something’s been done before, why reinvent the wheel? Custom Code: But everything I do is custommade; that’s why people come to me… Head First: Yes, but if you take advantage of other coders’ work, you can build your own stuff in half the time with less code. You can’t beat that, can you? Custom Code: “Take advantage”…isn’t that like exploitation? Head First: More like collaboration, sharing, participation, and working together. Custom Code: [shocked] You want me to give my code…away? Custom Code: Go ahead, shoot. I can take it. Head First: Well…more like share and share alike. I’ll scratch your back if you scratch mine. How does that sound? Head First: When is custom code appropriate? Custom Code: That sounds disgusting. Custom Code: Isn’t it obvious? It’s always appropriate. Head First: Very droll [laughs]. All I’m saying is that it is not always a good idea to create everything from scratch with custom code when a good enough solution to the problem might already exist. Head First: Even when it leads to problems down the road? Custom Code: Problems?!? But I’ve already told you: nothing’s too much trouble for me. I live to customize. If it’s broken, I fix it. Head First: Even when a readymade solution might be a better fit? Custom Code: Readymade? You mean (I hate to say it): off the shelf? Head First: Yes. Especially when it comes to writing complex programs, right? 
Custom Code: I guess so…although it won’t be as perfect a fit as that chair. Head First: But I will be able to sit on it! Custom Code: [laughs] You should talk to my buddy Pickle…he’s forever going on about stuff like this. And to make matters worse, he lives in a library. Head First: I think I’ll give him a shout. Thanks! Custom Code: Just remember: you know where to find me if you need any custom work done. you are here 4 131 in a pickle Pickle your data Python ships with a standard library called pickle, which can save and load almost any Python data object, including lists. Once you pickle your data to a file, it is persistent and ready to be read into another program at some later date/time: Your data as it appears in Python’s memory The pickle engine [ Python data to pickle. Your pickled data Out comes the pickled version of your data.: The same pickle engine Your pickled pickled data to pickle. 132 Chapter 4 Out comes the Python version of your pickled data. Your data is recreated in Python’s memory, exactly as before. [!'] persistence Save with dump and restore with load Using pickle is straightforward: import the required module, then use dump() to save your data and, some time later, load() to restore it. The only requirement when working with pickled files is that they have to be opened in binary access mode: Always remember to import the “pickle” module. To save your data, use “dump()”. Assign youdr ata restored ntifier. to an ide import pickle ... with open('mydata.pickle', 'wb') as mysavedata: pickle.dump([1, 2, 'three'], mysavedata) ... with open('mydata.pickle', 'rb') as myrestoredata: a_list = pickle.load(myrestoredata) The “b” tells Python to open your data files in BINARY mode. Restore your data from your file using “load()”. print(a_list) r program, you can Once your data is back in you ect. treat it like any other data obj What if something goes wrong? 
If something goes wrong when pickling or unpickling your data, the pickle module raises an exception of type PickleError. w Here’s a snippet of your code as it currently stands. Grab your pencil and strike out the code you no longer need, and then replace it with code that uses the facilities of pickle instead. Add any additional code that you think you might need, too. try: with open('man_data.txt', 'w') as man_file, open('other_data.txt', 'w') as other_file: nester.print_lol(man, fh=man_file) nester.print_lol(other, fh=other_file) except IOError as err: print('File error: ' + str(err)) you are here 4 133 significance of the pickle print(‘Pickling error: ‘ + str(perr)) import pickle Here’s a snippet of your code as it currently stands. You were to grab your pencil and strike out the code you no longer need, and then replace it with code that uses the facilities pickle instead. You were also to add any additional code that you think you might need. Import “pickle” the top of yournear program. Change the access mode to be “writeable, binary”. ‘wb' try: 'wb' with open('man_data.txt', 'w') as man_file, open('other_data.txt', 'w') as other_file: nester.print_lol(man, fh=man_file) nester.print_lol(other, fh=other_file) except IOError as err: print('File error: ' + str(err)) except pickle.PickleError as perr: print('Pickling error: ' + str(perr)) pickle.dump(man, man_file) pickle.dump(other, other_file) rint_lol()” Replace the two calls to “nester.p with calls to “pickle.dump()”. Don’t forget to handle any except ions that can occur. Q: When you invoked print_lol() earlier, you provided only two arguments, even though the function signature requires you to provide four. How is this possible? A: When you invoke a Python function in your code, you have options, especially when the function provides default values for some arguments. If you use positional arguments, the position of the argument in your function invocation dictates what data is assigned to which argument. 
When the function has arguments that also provide default values, you do not need to always worry about positional arguments being assigned values. Q: A: OK, you’ve completely lost me. Can you explain? Consider print(), which has this signature: print(value, sep=' ', end='\n', file=sys.stdout). By default, this BIF displays to standard output (the screen), because it has an argument called file with a default value of sys.stdout. The file argument is the fourth positional argument. However, when you want to send data to something other than the screen, you do not need to (nor want to have to) include values for the second and third positional arguments. They have default values anyway, so you need to provide values for them only if the defaults are not what you want. If all you want to do is to send data to a file, you invoke the print() BIF like this: print("Dead Parrot Sketch", file='myfavmonty.txt') and the fourth positional argument uses the value you specify, while the other positional arguments use their defaults. In Python, not only do the BIFs work this way, but your custom functions support this mechamism, too. 134 Chapter 4 persistence Test Drive Let’s see what happens now that your code has been amended to use the standard pickle module instead of your custom nester module. Load your amended code into IDLE and press F5 to run it. Once again, you get no visual clue that something has happened. So, once again, let’s check the contents of the files to see what they look like now: The is the man’s pickled data. The is the other man’s pickled data. It appears to have worked…but these files look like gobbledygook! What gives? Recall that Python, not you, is pickling your data. To do so efficiently, Python’s pickle module uses a custom binary format (known as its protocol). As you can see, viewing this format in your editor looks decidedly weird. Don’t worry: it is supposed to look like this. 
you are here 4 135 idle session pickle really shines when you load some previously pickled data into another program. And, of course, there’s nothing to stop you from using pickle with nester. After all, each module is designed to serve different purposes. Let’s demonstrate with a handful of lines of code within IDLE’s shell. Start by importing any required modules: >>> import pickle >>> import nester No surprises there, eh? Next up: create a new identifier to hold the data that you plan to unpickle.Create an empty list called new_man: >>> new_man = [] Yes, almost too exciting for words, isn’t it? With your list created. let’s load your pickled data into it. As you are working with external data files, it’s best if you enclose your code with try/except: >>> try: with open('man_data.txt', 'rb') as man_file: new_man = pickle.load(man_file) except IOError as err: print('File error: ' + str(err)) except pickle.PickleError as perr: print('Pickling error: ' + str(perr)) This code is not news to you either. However, at this point, your data has been unpickled and assigned to the new_man list. It’s time for nester to do its stuff: >>> nester.print_lol(new_man) Is this the right room for an argument? No you haven’t! Not all the data is shown here, but trust us: it’s all there. When? No you didn’t! ... You did just then! (exasperated) Oh, this is futile!! Yes it is! And to finish off, let’s display the first line spoken as well as the last: >>> print(new_man[0]) Is this the right room for an argument? >>> print(new_man[-1]) Yes it is! 136 Chapter 4 See: after all that, it is the rig ht room! § persistence Generic file I/O with pickle is the way to go! Now, no matter what data you create and process in your Python programs, you have a simple, tested, triedand-true mechanism for saving and restoring your data. How cool is that? are here 4 137 python toolbox You’ve got Chapter 4 under your belt and you’ve added some key Python techiques to your toolbox. 
The strip() method removes unwanted whitespace from strings. The file argument to the print() BIF controls where data is sent/saved. The finally suite is always executed no matter what exceptions occur within a try/except statement. go Python Ltypiens” - data types table d • “Immu once assignealue , t a h t n o h v in Pyt t have that o n n a c , e lu a av changed. of he process rsistence t ” g n li k pe • “Pic a object to t a d a g in v sa storage. rocess of p e h t ” g klin • “Unpic data object d e v a s a g restorin tence storage. from persis An exception object is passed into the except suite and can be assigned to an identifier using the as keyword. The str() BIF can be used to access the stringed representation of any data object that supports the conversion. The locals() BIF returns a collection of variables within the current scope. The in operator tests for membership. The “+” operator concatenates two strings when used with strings but adds two numbers together when used with numbers. The with statement automatically arranges to close all opened files, even when exceptions occur. The with statement uses the as keyword, too. sys.stdout is what Python calls “standard output” and is available from the standard library’s sys module. The standard library’s pickle module lets you easily and efficiently save and restore Python data objects to disk. The pickle.dump() function saves data to disk. The pickle.load() function restores data from disk. 138 Chapter 4 CHAPTER 4 CHAPTER 4 Your Python Toolbox 5 comprehending data Work that data! Life could be so much easier if only she’d let me help her extract, sort, and comprehend her data... Data comes in all shapes and sizes, formats and encodings. To work effectively with your data, you often have to manipulate and transform it into a common format to allow for efficient processing, sorting, and storage. 
In this chapter, you’ll explore Python goodies that help you work your data up into a sweat, allowing you to achieve data-munging greatness. So, flip the page, and let’s not keep the coach waiting… this is a new chapter 139 coaching crisis Coach Kelly needs your help I’m too busy on the track to waste time fiddling with my computer. Can you help me process my athlete data? The coach is an old friend, and you’d love to help. His crack squad of U10 athletes has been training hard. With each 600m run they do, Coach Kelly has recorded their time in a text file on his computer. There are four files in all, one each for James, Sarah, Julie, and Mikey. julie.txt james.txt 2-34,3:21,2.34,2.45,3.01,2:01,2:01,3:10,2-22 2.59,2.11,2:11,2:23,3-10,2-23,3:10,3.21,3-21 2:22,3.01,3:01,3.02,3:02,3.02,3:22,2.49,2:38 2:58,2.58,2:39,2-25,2-55,2:54,2.18,2:55,2:55 sarah.txt Initially, the coach needs a quick way to know the top three fastest times for each athlete. Can you help? 140 Chapter 5 mikey.txt comprehending data Do this! Before proceeding with this chapter, take a few moments to download the four data files from the Head First Python support website. Let’s begin by reading the data from each of the files into its own list. Write a short program to process each file, creating a list for each athlete’s data, and display the lists on screen. Hint: Try splitting the data on the commas, and don’t forget to strip any unwanted whitespace. Write your code here. you are here 4 141 let’s split Let’s begin by reading the data from each of the files into its own list. You were to write a short program to process each file, creating a list for each athlete’s data. You were then to display the lists on screen. Open the file. with open(‘james.txt’) as jaf: Read the line of data. data = jaf.readline() james = data.strip().split(‘,’) Convert the data to a list. 
with open(‘julie.txt’) as juf: Open each of the data files in turn, read the line of data from the file, and create a list from the line of data. data = juf.readline() julie = data.strip().split(‘,’) with open(‘mikey.txt’) as mif: data = mif.readline() mikey = data.strip().split(‘,’) with open(‘sarah.txt’) as saf: data = saf.readline() sarah = data.strip().split(‘,’) print(james) print(julie) print(mikey) Display the four lists on screen. print(sarah) Q: A: That data.strip().split(',') line looks a little weird. Can you explain what’s going on? That’s called method chaining. The first method, strip(), is applied to the line in data, which removes any unwanted whitespace from the string. Then, the results of the stripping are processed by the second method, split(','), creating a list. The resulting list is then applied to the target identifier in the previous code. In this way, the methods are chained together to produce the required result. It helps if you read method chains from left to right. 142 Chapter 5 comprehending data Test Drive Load your code into IDLE and run it to confirm that it’s all OK for now: Here’s your program as displayed in IDLE. And here’s the output produced by running your code. So far, so good. Coach Kelly’s data is now represented by four lists in Python’s memory. Other than the use of method chaining, there’s nothing much new here, because you’ve pretty much mastered reading data from files and using it to populate lists. There’s nothing to show the coach yet, so no point in disturbing him until his data is arranged in ascending order, which requires you to sort it. Let’s look at your sorting options in Python. you are here 4 143 in-place or copied sorting Sort in one of two ways When it comes to sorting your data using Python, you have two options. In-place sorting takes your data, arranges it in the order you specify, and then replaces your original data with the sorted version. The original ordering is lost. 
With lists, the sort() method provides in-place sorting: The original, a unordered dat ] , 5 6 , , 2 , 4 3 , [ 1 The Python “InEngine transformplace Sorting” s, then replaces. ] , 4, 5, 6 [ 1, 2, 3 The original data has now been ordered (and replaced). Copied sorting takes your data, arranges it in the order you specify, and then returns a sorted copy of your original data. Your original data’s ordering is maintained and only the copy is sorted. In Python, the sorted() BIF supports copied sorting. The Python “Cop Engine transformied Sorting” s and returns. The original, a unordered dat ] , 5 6 , , 2 , 4 3 , [ 1 [ 1, 3 , 4, 2 , 6, 5 ] The original, unordered data remains UNTOUCHED. ] 5, 6 , 4 2, 3, [ 1, en w be The data has no pied). ordered (and co 144 Chapter 5 comprehending data Let’s see what happens to your data when each of Python’s sorting options is used. Start by creating an unordered list at the IDLE shell: >>> data = [6, 3, 1, 2, 4, 5] >>> data [6, 3, 1, 2, 4, 5] Create a list of unordered data and assign to a variable. Perform an in-place sort using the sort() method that is built in as standard to every Python list: >>> data.sort() >>> data [1, 2, 3, 4, 5, 6] Perform IN-PLACE sorting on the data. The data’s ordering has changed. Reset data to its original unordered state, and then perform a copied sort using the sorted() BIF: >>> data = [6, 3, 1, 2, 4, 5] >>> data [6, 3, 1, 2, 4, 5] >>> data2 = sorted(data) >>> data [6, 3, 1, 2, 4, 5] >>> data2 [1, 2, 3, 4, 5, 6] et. The data’s ordering has been res Perform COPIED sorting on the data. Same as it ever was. The copied data is ordered from lowest to highest. Either sorting option works with the coach’s data, but let’s use a copied sort for now to arrange to sort the data on output. In the space below, provide four amended print() statements to replace those at the bottom of your program. 
you are here 4 145 all sorted Either sorting option works with the coach’s data, but let’s use a copied sort for now to arrange to sort the data on output. You were to provide four amended print() statements to replace those at the bottom of your program. Simply call “sorted()” on the data BEFORE you display it on screen. Q: A: print(sorted(james)) print(sorted(julie)) print(sorted(mikey)) print(sorted(sarah)) What happens to the unsorted data when I use sort()? For all intents and purposes, it disappears. Python takes a copy, sorts it, and then replaces your original data with the sorted version. Q: A:sorted() And there’s no way to get the original data back? the No. If the ordering of the original data is important to you, use BIF to transform your data into a sorted copy. Geek Bits You’ve already seen method chaining, and now it’s time to say “hello” to function chaining. Function chaining allows you to apply a series of functions to your data. Each function takes your data, performs some operation on it, and then passes the transformed data on to the next function. Unlike method chains, which read from left to right, function chains read from right to left (just to keep things interesting). 146 Chapter 5 comprehending data Test Drive Let’s see if this improves your output in any way. Make the necessary amendments to your code and run it. s update Here’s thede. to the co Look at this: 2-55 is coming BEFORE 2.18…now that is weird. But look at T The data is notHIS! sorted…which is, at all like, weird. Hey, it looks like your data values are not uniform. Is the problem with all those periods, dashes, and colons? Yes. The minute and seconds separators are confusing Python’s sorting technology. When recording his athletes’ times in each of their files, Coach Kelly sometimes used a different character to separate minutes from seconds. It looks like you need to fix your data. 
you are here 4 147 time trials The trouble with time Well…there’s never enough of it, is there? Let’s look closely at the coach’s data to see what the problem is. Here’s Sarah raw data again: 2:58,2.58,2:39,2-25,2-55,2:54,2.18,2:55,2:55 Oh, look: what a lovely bunch of strings... sarah.txt Recall that data read from a file comes into your program as text, so Sarah’s data looks like this once you turn it into a list of “times”: ['2:58', '2.58', '2:39’, '2-25', '2-55', '2:54’, '2.18', '2:55', '2:55'] the coach These are all strings, even though thinks they’re times. And when you sort Sarah’s data, it ends up in this order (which isn’t quite what you were expecting): ['2-25', '2-55', '2.18', '2.58', '2:39', '2:54', '2:55', '2:55', '2:58'] Whoops! That’s not right. How can 2.18 come after 2-55? Whoops again. 2:39 can’t come between 2.58 and 2:54, can it? Python sorts the strings, and when it comes to strings, a dash comes before a period, which itself comes before a colon. As all the strings start with 2, the next character in each string acts like a grouping mechanism, with the dashed times grouped and sorted, then the period times, and finally the colon times. Nonuniformity in the coach’s data is causing the sort to fail. 148 Chapter 5 I don’t get what the problem is...they’re all times to me. comprehending data Code Magnets Let. Rearrange the code magnets at the bottom of the page to provide the required functionality. def sanitize(time_string): return(mins + '.' + secs) Return the sanitized time the caller of this function. string to elif ':' in time_strin g: Your magnets are waiting. time_string.split(splitter) else: if '-' in time_strin g: splitter = ':' '-' ter = split return(time_stri ng) (mins, secs) = you are here 4 149 sanitzing function Code Magnets Solution You were. You were to rearrange the code magnets at the bottom of the previous page to provide the required functionality. 
def sanitize(time_string): if '-' in time_string: splitter = '-' Use the “in” operator to check if the string contains a dash or a colon. elif ':' in time_string: splitter = ':' else: return(time_string) (mins, secs) = time_string.split(splitter) return(mins + '.' + secs) Of course, on its own, the sanitize() function is not enough. You need to iterate over each of your lists of data and use your new function to convert each of the athlete’s times into the correct format. Let’s put your new function to work right away. 150 Chapter 5 Do nothing if the string doe NOT need to be sanitized. s Split the string to extract the minutes and seconds parts. comprehending data Let’s write the code to convert your existing data into a sanitized version of itself. Create four new lists to hold the sanitized data. Iterate over each athlete’s list data and append each sanitized string from each list to the appropriate new list. Conclude your program by printing a sorted copy of each new list to the screen. The code that reads the data from the data files remains unchanged (and has been compressed to fit on this(',') Add your new code here. What happens to the four “print()” statements? print( ) print( ) print( ) print( ) you are here 4 151 sanitized for your protection Let’s write the code to convert your existing data into a sanitized version of itself. You were to create four new lists to hold the sanitized data. You were then to iterate over each athlete’s data and append each sanitized string from each list to an appropriate new list. You were to conclude your program by printing a sorted copy of each new list to the screen. four new, initially empty lists. 
clean_james = [] clean_julie = [] clean_mikey = [] clean_sarah = [] for each_t in james: clean_james.append(sanitize(each_t)) for each_t in julie: clean_julie.append(sanitize(each_t)) for each_t in mikey: clean_mikey.append(sanitize(each_t)) for each_t in sarah: clean_sarah.append(sanitize(each_t)) The four “print()” statements now display the new lists, which are sorted, too. print( print( print( print( 152 Chapter 5 sorted(clean_james) sorted(clean_julie) sorted(clean_mikey) sorted(clean_sarah) Take each of the data items in the original lists, sanitize the and then append the sanitizedm, data to the appropriate new list. ) ) ) ) comprehending data Test Drive Combine your sanitize() function with your amended code from the previous page, and then press F5 in IDLE to confirm the sorting is now working as expected. The sanitized data only contains ‘.’ as a separator. The ordering works, because all the times are now comparable. Four sorted lists This output looks much better. It’s taken a bit of work, but now the data from each of the four files is both sorted and uniformly formatted. By preprocessing your data before you sort it, you’ve helped ensure Python’s sorting technology performs correctly. Geek Bits By default, both the sort() method and the sorted() BIF order your data in ascending order. To order your data in descending order, pass the reverse=True argument to either sort() or sorted() and Python will take care of things for you. you are here 4 153 duplicated code Hang on a sec! Something doesn’t feel quite right...look at all that duplicated code, as well as all those duplicated lists. This duplication is bad, right? Is this really the best you can do? That’s right. Duplicated code is a problem. As things stand, your code creates four lists to hold the data as read from the data files. Then your code creates another four lists to hold the sanitized data. 
And, of course, you’re iterating all over the place… There has to be a better way to write code like this. Transforming lists is such a common requirement that Python provides a tool to make the transformation as painless as possible. This tool goes by the rather unwieldy name of list comprehension. And list comprehensions are designed to reduce the amount of code you need to write when transforming one list into another. 154 Chapter 5 comprehending data Comprehending lists Consider what you need to do when you transform one list into another. Four things have to happen. You need to: 1 Create a new list to hold the transformed data. 2 Iterate over each data item in the original list. 3 With each iteration, perform the transformation. 4 Append the transformed data to the new list. 1. Create. clean_mikey = [] 2. Iterate. for each_t in mikey: clean_mikey.append(sanitize(each_t)) 3. Transform. 4. Append. Here’s the same functionality as a list comprehension, which involves creating a new list by specifying the transformation that is to be applied to each of the data items within an existing list. You get to pick the target identifier to use (just like with regular iterations). clean_mikey = [sanitize(each_t) for each_t in mikey] The new list is created… …by applying a transformation… …to each data item… …within an existing list. What’s interesting is that the transformation has been reduced to a single line of code. Additionally, there’s no need to specify the use of the append() method as this action is implied within the list comprehension. Neat, eh? you are here 4 155 idle session Let’s see some other list comprehension examples. Open up your IDLE shell and follow along with these one-liner transformations. Start by transforming a list of minutes into a list of seconds: >>> mins = [1, 2, 3] >>> secs = [m * 60 for m in mins] >>> secs [60, 120, 180] Simply multiply the minute values by 60. How about meters into feet? 
>>> meters = [1, 10, 3] Yes, there are 3.281 feet in a meter. >>> feet = [m * 3.281 for m in meters] >>> feet [3.281, 32.81, 9.843] Given a list of strings in mixed and lowercase, it’s a breeze to transform the strings to UPPERCASE: >>> lower = ["I", "don't", "like", "spam"] >>> upper = [s.upper() for s in lower] Every string comes with the “upper()” method. >>> upper ['I', "DON'T", 'LIKE', 'SPAM'] Let’s use your sanitize() function to transform some list data into correctly formatted times: >>> dirty = ['2-22', '2:22', '2.22'] >>> clean = [sanitize(t) for t in dirty] >>> clean ['2.22', '2.22', '2.22'] n something It’s never been so easy to tur § dirty into something clean. It’s also possible to assign the results of the list transformation back onto the original target identifier. This example transforms a list of strings into floating point numbers, and then replaces the original list data: >>> clean = [float(s) for s in clean] >>> clean [2.22, 2.22, 2.22] The “float()” BIF converts to flo ating point. And, of course, the transformation can be a function chain, if that’s what you need: >>> clean = [float(sanitize(t)) for t in ['2-22', '3:33', '4.44']] >>> clean [2.22, 3.33, 4.44] 156 Chapter 5 data Combining transformations on the items is supported, too! comprehending data Now that you know about list comprehensions, let’s write four of them to process the coach’s four lists of timing values. Transform each of your lists into sorted, sanitized version of themselves. Grab your pencil and in the space provided, scribble the list comprehensions you plan to use. Q: A: So…let me get this straight: list comprehensions are good and list iterations are bad, right? No, that’s not the best way to look at it. If you have to perform a transformation on every item in a list, using a list comprehension is the way to go, especially when the transformation is easily specified on one line (or as a function chain). 
List iterations can do everything that list comprehensions can, they just take more code, but iterations do provide more flexibility should you need it. Geek Bits Python’s list comprehension is an example of the language’s support for functional programming concepts. There’s plenty of debate about the best way to develop program code: either procedurally, using functional programming techniques, or using object orientation. At Head First Labs, we try not to get involved in this debate, other than to rejoice in the fact that Python supports, in one way or another, all three of these programming practices. you are here 4 157 list comprehensions Now that you know about list comprehensions, you were to write four of them to process the coach’s four lists of timing values. You were to transform each of your lists into sorted, sanitized version of themselves. You were to grab your pencil and in the space provided, scribble the list comprehensions you plan to use. The list comprehension performs the transformation, and the new list is then ordered by the “sorted()” BIF. sorted([sanitize(t) for t in james]) sorted([sanitize(t) for t in julie]) sorted([sanitize(t) for t in mikey]) sorted([sanitize(t) for t in sarah]) Be careful about where you use the sorted() BIF when defining your list comprehensions. You may have been tempted to use the function chain sorted(sanitize(t)) within your list comprehension. Don’t be. Recall that the transformation works on one list item at a time, not the entire list. In this example, the sorted() BIF expects to sort a list, not an individual data item. The beauty of list comprehensions The use of list comprehensions with the coach’s athlete data has resulted in a lot less code for you to maintain. Additionally, as you get used to list comprehension syntax and usage, you’ll find that their use is natural and matches the way your brain thinks about your data and the transformations that you might want to apply. 
Let’s confirm that your new code is working as expected. 158 Chapter 5 Rinse and repeat for the other lists. comprehending data Test Drive Replace your list iteration code from earlier with your four new (beautiful) list comprehensions. Run your program to confirm that the results have not changed. Your new list comprehensions produce EXACTLY the same output as your earlier list iterations. As expected, the output matches that from earlier. You’ve written a program that reads Coach Kelly’s data from his data files, stores his raw data in lists, sanitizes the data to a uniform format, and then sorts and displays the coach’s data on screen. And all in 25 lines of code. It’s probably safe to let the coach take a look at your output now. What will the coach think? you are here 4 159 list slicing Have you not been drinking enough water? I wanted the three fastest times for each athlete...but you’ve given me everything and it contains duplicates! In your haste to sanitize and sort your data, you forgot to worry about what you were actually supposed to be doing: producing the three fastest times for each athlete. And, of course, there’s no place for any duplicated times in your output. Accessing the first three data items from any list is easy. Either specify each list item individually using the standard notation or use a list slice: Access each data item you need individually. james[0] james[1] james[2] james[0:3] But…what about removing duplicates from your list? 160 Chapter 5 Use a list slice to access from list item 0 up-to-but-not-including list item 3. comprehending data Iterate to remove duplicates Processing a list to remove duplicates is one area where a list comprehension can’t help you, because duplicate removal is not a transformation; it’s more of a filter. And a duplicate removal filter needs to examine the list being created as it is being created, which is not possible with a list comprehension. 
To meet this new requirement, you’ll need to revert to regular list iteration code.uniform data with the sorted, sanitized copy. Your next task is to write some code to remove any duplicates from the james list produced by the preceding line of code. Start by creating a new list called unique_james, and then populate it with the unique data items found in james. Additionally, provide code to display only the top three fastest times for James. Hint: you might want to consider using the not in operator. you are here 4 161 top three-uniform data with the sorted, sanitized copy. Your next task was to write some code to remove any duplicates from the james list produced by the preceding line of code. You were to start by creating a new list called unique_james and then populate it with the unique data items found in james. Additionally, you were to provide code to only display the top three fastest times for James. Create the empty list to hold the unique data items. unique_james = [] Iterate over the existing data… …and if the data item ISN’T for each_t in james: already in the new list… if each_t not in unique_james: m to unique_james.append(each_t) …append the unique data ite the new list. Slice the first three data items from the list and display them on screen. print(unique_james[0:3]) Do this! Repeat the code on this page for the rest of the coach’s lists: julie, mikey & sarah. Add all of your new code to your existing program. 162 Chapter 5 comprehending data Test Drive Take all of the recent amendments and apply them to your program. Run this latest code within IDLE when you are ready. Sort and sanitize each list. Remove the duplicates. Looking good! It worked! You are now displaying only the top three times for each athlete, and the duplicates have been successfully removed. The list iteration code is what you need in this instance. There’s a little bit of duplication in your code, but it’s not too bad, is it? 
you are here 4 163 duplicated code to remove duplication? “Not too bad”...you’re kidding, right?!? Surely there’s something that can be done with all that duplicated duplicate code? The irony is hard to avoid, isn’t it? The code that removes duplicates from your lists is itself duplicated. Sometimes such a situation is unavoidable, and sometimes creating a small function to factor out the duplicated code can help. But something still doesn’t feel quite right here… 164 Chapter 5 comprehending data Wouldn't it be dreamy if there were a way to quickly and easily remove duplicates from an existing list? But I know it's just a fantasy... you are here 4   165 factory functions Remove duplicates with sets In addition to lists, Python also comes with the set data structure, which behaves like the sets you learned all about in math class. The overriding characteristics of sets in Python are that the data items in a set are unordered and duplicates are not allowed. If you try to add a data item to a set that already contains the data item, Python simply ignores it. Create an empty set using the set() BIF, which is an example of a factory function: Create a new, empty set, and assign it to a variable. distances = set() It is also possible to create and populate a set in one step. You can provide a list of data values between curly braces or specify an existing list as an argument to the set() BIF, which is the factory function: distances = {10.6, 11, 8, 10.6, "two", 7} Any duplicates in the “james” list are ignored. Cool. Any duplicates in the supplied list of data values are ignored. distances = set(james) Factory Function: A factory function is used to make new data items of a particular type. For instance, “set()” is a factory function because it makes a new set. In the real world, factories make things, hence the name. 166 Chapter 5 comprehending data Tonight’s talk: Does list suffer from set envy? List: Set: [sings] “Anything you can do, I can do better. 
I can do anything better than you.” Can you spell “d-a-t-a l-o-s-s”? Getting rid of data automatically sounds kinda dangerous to me. I’m resisting the urge to say, “No, you can’t.” Instead, let me ask you: what about handling duplicates? When I see them, I throw them away automatically. But that’s what I’m supposed to do. Sets aren’t allowed duplicate values. Seriously? Yes. That’s why I exist…to store sets of values. Which, when it’s needed, is a real lifesaver. And that’s all you do? That’s all I need to do. And they pay you for that?!? Very funny. You’re just being smug in an effort to hide from the fact that you can’t get rid of duplicates on your own. Have you ever considered that I like my duplicate values. I’m very fond of them, you know. Yeah, right. Except when you don’t need them. Which isn’t very often. And, anyway, I can always rely on the kindness of others to help me out with any duplicates that I don’t need. Do this! I think you meant to say, “the kindness of set()”, didn’t you? To extract the data you need, replace all of that list iteration code in your current program with four calls to sorted(set(...))[0:3]. you are here 4 167 code review Head First Code Review The Head First Code Review Team has taken your code and annotated it in the only way they know how: they’ve scribbled all over it. Some of their comments are confirmations of what you might already know. Others are suggestions that might make your code better. Like all code reviews, these comments are an attempt to improve the quality of your code. def sanitize(time_string): if '-' in time_string: splitter = '-' elif ':' in time_string: I think we can make a few improvements here. A comment would be nice to have here. splitter = ':' else: return(time_string) (mins, secs) = time_string.split(splitter) return(mins + '.' + secs) with open('james.txt') as jaf: What happens data = jaf.readline() if one of these james = data.strip().split(',') files is missing?!? 
with open('julie.txt') as juf: Where’s your g lin nd ha n data = juf.readline() exceptio julie = data.strip().split(',') code? with open('mikey.txt') as mif: data = mif.readline() mikey = data.strip().split(',') Meet the Head First Code Review Team. You There’s a bit of duplication here. small could factor out the code into a is call function; then, all you need to do lete the function for each of your ath an data files, assigning the result to athlete list. with open('sarah.txt') as saf: data = saf.readline() There’s a lot sarah = data.strip().split(',') going on here, but we find it’s print(sorted(set([sanitize(t) for not too hard to print(sorted(set([sanitize(t) for u understand if yo print(sorted(set([sanitize(t) for read it from the print(sorted(set([sanitize(t) for inside out. 168 Chapter 5 t in james]))[0:3]) t in julie]))[0:3]) t in mikey]))[0:3]) t in sarah]))[0:3]) Ah, OK. We get it. The slice is applied to the list produced by “sorted()”, right? comprehending data Let’s take a few moments to implement the review team’s suggestion to turn those four with statements into a function. Here’s the code again. In the space provided, create a function to abstract the required functionality, and(',') Write your new function here. Provide one example call. you are here 4 169 statement to function You were to take a few moments to implement the review team’s suggestion to turn those four with statements into a function. In the space provided, your were to create a function to abstract the required functionality, a new function. def get_coach_data(filename): try: with open(filename) as f: Add the suggested exception-handling code. Accept a filename as the sole argument. data = f.readline() return(data.strip().split(‘,')) except IOError as ioerr: print(‘File error: ' + str(ioerr)) return(None) Calling the function sarah = get_coach_data(‘sarah.txt') is straightforward. 170 Chapter 5 Open the file, and read the data. 
Perform the split/strip trick on the data prior to returning it to the calling code. Tell your user about the error (if it occurs) and return “None” to indicate failure. Provide the name of the file to process. comprehending data Test Drive It’s time for one last run of your program to confirm that your use of sets produces the same results as your list-iteration code. Take your code for a spin in IDLE and see what happens. As expected, your latest code does the business. Looking good! Excellent! You’ve processed the coach’s data perfectly, while taking advantage of the sorted() BIF, sets, and list comprehensions. As you can imagine, you can apply these techniques to many different situations. You’re well on your way to becoming a Python data-munging master! That’s great work, and just what I need. Thanks! I’m looking forward to seeing you on the track soon... you are here 4 171 python toolbox CHAPTER 5 Your Python Toolbox You’ve got Chapter 5 under your belt and you’ve added some more Python techniques to your toolbox. Python Lingo • “In-place” sorting - transforms and then replaces. • “Copied” sorting - transforms and then returns. • “Method Chaining” - reading from left to right, applies a collection of methods to data. • “Function Chaining” - reading from right to left, applies a collection of functions to data. The sort() method changes the ordering of lists in-place. The sorted() BIF sorts most any data structure by providing copied sorting. More Python Lingo Pass reverse=True to either sort() or sorted() to arrange your data in descending order. • “List Comprehension” - specify a transformation on one line (as opposed to using an iteration). • A “slice” - access more than one item from a list. • A “set” - a collection of unordered data items that contains no duplicates. 172 Chapter 5 When you have code like this: new_l = [] for t in old_l: new_l. 
append(len(t)) rewrite it to use a list comprehension, like this: new_l = [len(t) for t in old_l] To access more than one data item from a list, use a slice. For example: my_list[3:6] accesses the items from index location 3 up-to-but-not-including index location 6. Create a set using the set() factory function. 6 custom data objects Bundling code with data The object of my desire [sigh] is in a class of her own.. this is a new chapter 173 additional data Coach Kelly is back (with a new file format) I love what you’ve done, but I can’t tell which line of data belongs to which athlete, so I’ve added some information to my data files to make it easy for you to figure it out. I hope this doesn’t mess things up much.: This is “sarah2.txt”, with extra data added. Sarah Sweeney,2002-6-17,2:58,2.58,2:39,2-25,2-55,2:54,2.18,2:55,2:55,2:22,2-21,2.22 Sarah’s full name Sarah’s date of birth Sarah’s timing data. 174 Chapter 6 Do this! Grab the updated files from the Head First Python website. custom data objects Code Magnets Let’s look at the code to implement the strategy outlined at the bottom of the previous page. For now, let’s concentrate on Sarah’s data. Rearrange the code magnets at the bottom of this page to implement the list processing required to extract and process Sarah’s three fastest times from Coach Kelly’s raw data. Hint: the pop() method removes and returns a data item from the specified list location. The “sanitize()” function is as it was in Chapter 5. def sanitize(time_string): if '-' in time_string: splitter = '-' elif ':' in time_string: splitter = ':' else: return(time_string) (mins, secs) = time_string.split(splitter) return(mins + '.' + secs) on is The “get_coach_data()” functi r. pte also from the last cha def get_coach_data(filename): try: with open(filename) as f: data = f.readline() return(data.strip().split(',')) except IOError as ioerr: print('File error: ' + str(ioerr)) return(None) Rearrange the magnets here. 
sarah (sarah_name, sarah_dob) "'s fastes t times ar e: " + print(sarah_name + get_coach_data('sarah2.txt') = = sarah.pop(0), sarah.pop (0) str(sorted(set([sanitize(t) for t in sarah]))[0:3])) you are here 4 175 sarah’s times Code Magnets Solution Let’s look at the code to implement the strategy outlined earlier. For now, let’s concentrate on Sarah’s data. You were to rearrange the code magnets at the bottom of the previous page to implement the list processing required to extract and process Sarah’s three fastest times from Coach Kelly’s raw(',')) Use the function to turn Sarah’s data file into a list, and then assign it to the “sarah” variable. except IOError as ioerr: print('File error: ' + str(ioerr)) return(None) sarah = get_coach_data('sarah2.txt') (sarah_name, sarah_dob) The “pop(0)” call returns and removes data from the front of a list.. Two calls to “pop(0)” remove the first two data values and assigns them to the named variables. 176 Chapter 6 print(sarah_name + = sarah.pop(0), sarah.pop(0) "'s fastest times are: " + str(sorted(set([sanitize(t) for t in sarah]))[0:3])) A custom message within the call to “print()” is used to display the results you’re after. custom data objects Test Drive Let’s run this code in IDLE and see what happens. Your latest code This output is much more understandable. This program works as expected, and is fine…except that you have to name and create Sarah’s three variables in such as way that it’s possible to identify which name, date of birth, and timing data relate to Sarah. And if you add code to process the data for James, Julie, and Mikey, you’ll be up to 12 variables that need juggling. This just about works for now with four athletes. But what if there are 40, 400, or 4,000 athletes to process? Although the data is related in “real life,” within your code things are disjointed, because the three related pieces of data representing Sarah are stored in three separate variables. 
you are here 4 177 keys and values Use a dictionary to associate data Lists are great, but they are not always the best data structure for every situation. Let’s take another look at Sarah’s data: Sarah’s full name Sarah’s date of birth Sarah’s timing data Sarah Sweeney,2002-6-17,2:58,2.58,2:39,2-25,2-55,2:54,2.18,2:55,2:55,2:22,2-21,2.22 There’s a definite structure here: the athlete’s name, the date of birth, and then the list of times. Let’s continue to use a list for the timing data, because that still makes sense. But let’s make the timing data part of another data structure, which associates all the data for an athlete with a single variable. We’ll use a Python dictionary, which associates data values with keys: The “keys” Name "Sarah Sweeney" DOB "2002-6-17" Times [2:58,2.58,2:39,2-25,2-55,2:54,2.18,2:55,2:55,2:22,2-21,2.22] The associated data, also known as the “values” Dictionary A built-in data structure (included with Python) that allows you to associate data with keys, as opposed to numbers. This lets your in-memory data closely match the structure of your actual data. 178 Chapter 6 custom data objects Tonight’s talk: To use a list or not to use a list? Dictionary: List: Hi there, List. I hear you’re great, but not always the best option for complex data. That’s where I come in. True. But when you do, you lose any structure associated with the data you are processing. What?!? Haven’t you heard? You can put anything into a list, anything at all. Well…assuming, of course, that structure is important to you. Isn’t it always? Ummm, uh…I guess so. You guess so? When it comes to modeling your data in code, it’s best not to guess. Be firm. Be strong. Be assertive. Use a dictionary. [laughs] Oh, I do love your humor, List, even when you know you’re on thin ice. Look, the rule is simple: if your data has structure, use a dictionary, not a list. How hard is that? That sounds like a slogan from one of those awful self-help conferences. Is that where you heard it? 
Not that hard, really. Unless, of course, you are a list, and you miss being used for every piece of data in a program… Which rarely makes sense. Knowing when to use a list and when to use a dictionary is what separates the good programmers from the great ones, right? I guess so. Man, I do hate it when you’re right! Geek Bits The Python dictionary is known by different names in other programming languages. If you hear other programmers talking about a “mapping,” a “hash,” or an “associative array,” they are talking about a “dictionary.” you are here 4 179 idle session Let’s see the Python dictionary in action. Follow along with this IDLE session on your computer, ensuring that you get the same results as shown. Start by creating two empty dictionaries, one using curly braces and the other using a factory function: >>> cleese = {} >>> palin = dict() >>> type(cleese) <class 'dict'> >>> type(palin) Both techniques create an empty dictionary, as confirmed. <class 'dict'> Add some data to both of these dictionaries by associating values with keys. Note the actual structure of the data is presenting itself here, as each dictionary has a Name and a list of Occupations. Note also that the palin dictionary is being created at the same time: >>> cleese['Name'] = 'John Cleese' >>> cleese['Occupations'] = ['actor', 'comedian', 'writer', 'film producer'] >>> palin = {'Name': 'Michael Palin', 'Occupations': ['comedian', 'actor', 'writer', 'tv']} With your data associated with keys (which are strings, in this case), it is possible to access an individual data item using a notation similar to that used with lists: Use square brackets to index into the dictionary to access data items, but instead of numbers, index with keys. 'Michael Palin' Use numbers to access a list item stored at a particular dictionary key. >>> cleese['Occupations'][-1] Think of this as “index-chaining” and read from right to left: “…the last 'film producer' item of the list associated with Occupations…”. 
>>> palin['Name'] As with lists, a Python dictionary can grow dynamically to store additional key/value pairings. Let’s add some data about birthplace to each dictionary: >>> palin['Birthplace'] = "Broomhill, Sheffield, England" Provide the data associated with the new key. >>> cleese['Birthplace'] = "Weston-super-Mare, North Somerset, England" Unlike lists, a Python dictionary does not maintain insertion order, which can result in some unexpected behavior. The key point is that the dictionary maintains the associations, not the ordering: >>> palin {'Birthplace': 'Broomhill, Sheffield, England', 'Name': 'Michael Palin', 'Occupations': ['comedian', 'actor', 'writer', 'tv']} >>> cleese {'Birthplace': 'Weston-super-Mare, North Somerset, England', 'Name': 'John Cleese', 'Occupations': ['actor', 'comedian', 'writer', 'film producer']} The ordering maintained by Python is different from how the data was inserted. Don’t worry about it; this is OK. 180 Chapter 6 custom data objects It’s time to apply what you now know about Python’s dictionary to your code. Let’s continue to concentrate on Sarah’s data for now. Strike out the code that you no longer need: Strike out the code you no longer need. print('File error: ' + str(ioerr)) return(None) sarah = get_coach_data('sarah2.txt') (sarah_name, sarah_dob) = sarah.pop(0), sarah.pop(0) print(sarah_name + "'s fastest times are: " + Add your dictionary using and processing code here. str(sorted(set([sanitize(t) for t in sarah]))[0:3])) you are here 4 181 dictionary data It’s time to apply what you now know about Python’s dictionary to your code. Let’s continue to concentrate on Sarah’s data for now. You were to strike out the code that you no longer needed) You don’t need this code anymore. sarah = get_coach_data('sarah2.txt') (sarah_name, sarah_dob) = sarah.pop(0), sarah.pop(0) print(sarah_name + "'s fastest times are: " + str(sorted(set([sanitize(t) for t in sarah]))[0:3])) Create an empty dictionary. 
sarah_data = {} Populate the dictionary with the sarah_data[‘Name’] = sarah.pop(0) data by associating the data from the file sarah_data[‘DOB’] = sarah.pop(0) with the dictionary keys.. sarah_data[‘Times’] = sarah print(sarah_data[‘Name’] + “’s fastest times are: “ + str(sorted(set([sanitize(t) for t in sarah_data[‘Times’]]))[0:3])) Refer to the dictionary when processing the data. 182 Chapter 6 custom data objects Test Drive Let’s confirm that this new version of your code works exactly as before by testing your code within the IDLE environment. Your dictionary code produces the same results as earlier. Which, again, works as expected…the difference being that you can now more easily determine and control which identification data associates with which timing data, because they are stored in a single dictionary. Although, to be honest, it does take more code, which is a bit of a bummer. Sometimes the extra code is worth it, and sometimes it isn’t. In this case, it most likely is. Let’s review your code to see if we can improve anything. you are here 4 183 code review Head First Code Review The Head First Code Review Team has been at it again: they’ve scribbled all over your code. Some of their comments are confirmations; others are suggestions. Like all code reviews, these comments are an attempt to improve the quality of your code. It’s great to see you taking some of our suggestions on board. Here are a few more...) sarah = get_coach_data('sarah2.txt') sarah_data = {} sarah_data['Name'] = sarah.pop(0) sarah_data['DOB'] = sarah.pop(0) sarah_data['Times'] = sarah y as you go Rather than building the dictionar In fact, in along, why not do it all in one go? sense to do this situation, it might even make oach_data() this processing within the get_c return a function and have the function to a list. populated dictionary as opposed the Then, all you need to do is create ng an dictionary from the data file usi appropriate function call, right? 
print(sarah_data['Name'] + "'s fastest times are: " + str(sorted(set([sanitize(t) for t in sarah_data['Times']]))[0:3])) You might want to consider moving this code into the get_coach_data() function, too, because doing so would rather nicely abstract away these processing details. But whether you do or not is up to you. It’s your code, after all! 184 Chapter 6 custom data objects Actually, those review comments are really useful. Let’s take the time to apply them to your code. There are four suggestions that you need it’s new mode of operation. Grab your pencil and write your new get_coach_data() function in the space provided below. Provide the four calls that you’d make to process the data for each of the athletes and provide four amended print() statements: you are here 4 185 reviews are in You were to take the time to apply the code review comments to your code. There were four suggestions that you needed its new mode of operation. You were to grab your pencil and write your new get_coach_data() function in the space provided below, as well as provide the four calls that you’d make to process the data for each of the athletes and provide four amended print() statements: def get_coach_data(filename): try: with open(filename) as f: data = f.readline() 1. Create a temporary list to hold the data BEFORE creating the dictionary all in one go. templ = data.strip().split(‘,’) return({‘Name’ : templ.pop(0), ‘DOB’ : templ.pop(0), 2. The dictionary creation code is now part of the function. ‘Times’: str(sorted(set([sanitize(t) for t in templ]))[0:3])}) except IOError as ioerr: print(‘File error: ‘ + str(ioerr)) return(None) 4. Call the function james = get_coach_data(‘james2.txt’) for an athlete and adjust the “print()” statement as needed. the 3. The code that determinesthe of t top three scores is par function, too. We are showing only these tw line s of code for one athlete (becauseo rep eat ing it for the other three is a trivial exercise). 
print(james[‘Name’] + “’s fastest times are: “ + james[‘Times’]) 186 Chapter 6 custom data objects Test Drive Let’s confirm that all of the re-factoring suggestions from the Head First Code Review Team are working as expected. Load your code into IDLE and take it for a spin. All of the data processing is moved into the function. tidied up and This code has been considerablyathlete associated now displays the name of the with their times. Looking good! To process additional athletes, all you need is two lines of code: the first invokes the get_coach_data() function and the second invokes print(). And if you require additional functionality, it’s no big deal to write more functions to provide the required functionality, is it? you are here 4 187 associate custom code with custom data Wait a minute...you’re using a dictionary to keep your data all in one place, but now you’re proposing to write a bunch of custom functions that work on your data but aren’t associated with it. Does that really make sense? Keeping your code and its data together is good. It does indeed make sense to try and associate the functions with the data they are meant to work on, doesn’t it? After all, the functions are only going to make sense when related to the data—that is, the functions will be specific to the data, not general purpose. Because this is the case, it’s a great idea to try and bundle the code with its data. But how? Is there an easy way to associate custom code, in the form of functions, with your custom data? 188 Chapter 6 custom data objects Bundle your code and its data in a class Like the majority of other modern programming languages, Python lets you create and define an object-oriented class that can be used to associate code with the data that it operates on. Why would anyone want to do this? Using a class helps reduce complexity. By associating your code with the data it works on, you reduce complexity as your code base grows. So what’s the big deal with that? 
Reduced complexity means fewer bugs. Reducing complexity results in fewer bugs in your code. However, it’s a fact of life that your programs will have functionality added over time, which will result in additional complexity. Using classes to manage this complexity is a very good thing. Yeah? But...who really cares? Fewer bugs means more maintainable code. Using classes lets you keep your code and your data together in one place, and as your code base grows, this really can make quite a difference. Especially when it’s 4 AM and you’re under a deadline… you are here 4 189 get some class Define a class Python follows the standard object-oriented programming model of providing a means for you to define the code and the data it works on as a class. Once this definition is in place, you can use it to create (or instantiate) data objects, which inherit their characteristics from your class. Within the object-oriented world, your code is often referred to as the class’s methods, and your data is often referred to as its attributes. Instantiated data objects are often referred to as instances. The “raw” data "Sarah Sweeney","2002-6-17",[2:58,2.58,2:39,2-25,2-55,2:54,2.18,2:55,2:55,2:22,2-21,2.22] The Object Factory The factory has been primed with your class. s, Here are your instantiated objn ect r you which are packaged to contaia. code and its associated dat Mikey’s object instance Each object is created from the class and shares a similar set of characteristics. The methods (your code) are the same in each instance, but each object’s attributes (your data) differ because they were created from your raw data. Let’s look at how classes are defined in Python. 190 Chapter 6 Julie’s object instance James’s object instance custom data objects Use class to define classes Python uses class to create objects. Every defined class has a special method called __init__(), which allows you to control how objects are initialized. 
Methods within your class are defined in much the same way as functions, that is, using def. Here’s the basic form: Give your class a nice, descriptive name. The keyword starts the definition. class Athlete: def __init__(self): Don’t forget the colon! # The code to initialize a "Athlete" object. ... That’s a double underscore before and after the word “init”. The code that initializes each object goes in here. Creating object instances With the class in place, it’s easy to create object instances. Simply assign a call to the class name to each of your variables. In this way, the class (together with the __init__() method) provides a mechanism that lets you create a custom factory function that you can use to create as many object instances as you require: a = Athlete() All of these variables are unique and are “of type” Athlete. b = Athlete() The brackets tell Python to create a new “Athlete” obj which is then assigned to a ect, variable. c = Athlete() d = Athlete() Unlike in C++-inspired languages, Python has no notion of defining a constructor called “new.” Python does object contruction for you, and then lets you customize your object’s initial state using the __init__() method. you are here 4 191 note to self The importance of self To confirm: when you define a class you are, in effect, defining a custom factory function that you can then use in your code to create instances: The target identifier thateholds a reference to your instanc a = Athlete() Invoke the class’s custom factory function. 
When Python processes this line of code, it turns the factory function call into the following call, which identifies the class, the method (which is automatically set to __init__()), and the object instance being operated on: The name of the class Athlete().__init__(a) The target identifier of the object instance The name of the method Now take another look at how the __init__() method was defined in the class: def __init__(self): # The code to initialize an "Athlete" object. ... Check out what Python turns your object creation invocation into. Notice anything? The target identifer is assigned to the self argument. This is a very important argument assignment. Without it, the Python interpreter can’t work out which object instance to apply the method invocation to. Note that the class code is designed to be shared among all of the object instances: the methods are shared, the attributes are not. The self argument helps identify which object instance’s data to work on. 192 Chapter 6 custom data objects Every method’s first argument is self In fact, not only does the __init__() method require self as its first argument, but so does every other method defined within your class. Python arranges for the first argument of every method to be the invoking (or calling) object instance. Let’s extend the sample class to store a value in a object attribute called thing with the value set during initialization. Another method, called how_big(), returns the length of thing due to the use of the len() BIF: class Athlete: The “init” code now assigns a supplied value to a class attribute called “self.thing”. def __init__(self, value=0): Note the use of “self” to identify the calling object instance. self.thing = value def how_big(self): return(len(self.thing)) The “how_big()” method return s the length of “self.thing”. 
When you invoke a class method on an object instance, Python arranges for the first argument to be the invoking object instance, which is always assigned to each method’s self argument. This fact alone explains why self is so important and also why self needs to be the first argument to every object method you write: What you write: d = Athlete("Holy Grail") What Python executes: Athlete.__init__(d, "Holy Grail") The class d.how_big() The target indentifer (or instance) The method Athlete.how_big(d) you are here 4 193 idle session Let’s use IDLE to create some object instances from a new class that you’ll define. Start by creating a small class called Athlete: No >>> class Athlete: def __init__(self, a_name, a_dob=None, a_times=[]): self.name = a_name self.dob = a_dob self.times = a_times te the default values for two of the arguments. and assigned to three class Three attributes are initialidzed ument data. attributes using the supplie arg With the class defined, create two unique object instances which derive their characteristcs from the Athlete class: >>> sarah = Athlete('Sarah Sweeney', '2002-6-17', ['2:58', '2.58', '1.56']) >>> james = Athlete('James Jones') >>> type(sarah) <class '__main__.Athlete'> >>> type(james) <class '__main__.Athlete'> Confirm that both “sarah” and “james” are athletes. Create two unique athletes th “james” using the default arg(wi ument values). Even though sarah and james are both athletes and were created by the Athlete class’s factory function, they are stored at different memory addreses: >>> sarah <__main__.Athlete object at 0x14d23f0> >>> james <__main__.Athlete object at 0x14cb7d0> on our computer, which will These are the memory addresses ed on yours. The key point is differ from the values report and “james” differ. 
the memory address for “sarah” Now that sarah and james exist as object instances, you can use the familiar dot notation to access the attributes associated with each: >>> sarah.name 'Sarah Sweeney' >>> james.name 'James Jones' >>> sarah.dob '2002-6-17' >>> james.dob >>> sarah.times ['2:58', '2.58', '1.56'] >>> james.times [] 194 Chapter 6 The “james” object instanc has no value for “dob”, so nothing appears one scr een. custom data objects Here’s your code (except for the santize() function, which doesn’t need to change). With your pencil, write code to define the Athlete class. In addition to the __init__() method, define a new method called top3() that, when invoked, returns the top three times. Be sure to adjust the get_coach_data() function to return an Athlete object as opposed to a dictionary, and don’t forget to amend your print() statements, too. Write your Athlete class code here. def get_coach_data(filename): try: with open(filename) as f: data = f.readline() templ = data.strip().split(',') return({'Name' : templ.pop(0), 'DOB' to ensure What needs to change here hle te object At an s this function return y? nar as opposed to a dictio : templ.pop(0), 'Times': str(sorted(set([sanitize(t) for t in templ]))[0:3])}) except IOError as ioerr: print('File error: ' + str(ioerr)) return(None) james = get_coach_data('james2.txt') This line of code needs to change, too. print(james['Name'] + "'s fastest times are: " + james['Times']) you are here 4 195 class athlete Here’s your code (except for the santize() function, which doesn’t need to change). With your pencil, you were to write code to define the Athlete class. In addition to the __init__() method, you were to define a new method called top3() that, when invoked, returns the top three times. You were to be sure to adjust the get_coach_data() function to return an Athlete object as opposed to a dictionary, and you weren’t to forget to amend print(), too. 
class Athlete: def __init__(self, a_name, a_dob=None, a_times=[]): There’s nothing new here as this code is taken straight from most recent IDLE session. the self.name = a_name self.dob = a_dob self.times = a_times def top3(self): Did you remember to use “self”? return(sorted(set([sanitize(t) for t in self.times]))[0:3]) Remove the dictionary creati code and replace it with Athleonte object creation code instead. def get_coach_data(filename): try: with open(filename) as f: data = f.readline() templ = data.strip().split(',') return({'Name' : templ.pop(0), 'DOB' : templ.pop(0), Athlete(templ.pop(0), templ.pop(0), templ) 'Times': str(sorted(set([sanitize(t) for t in templ]))[0:3])}) except IOError as ioerr: print('File error: ' + str(ioerr)) return(None) Use the dot notation to get at your data. james = get_coach_data('james2.txt') james.name str(james.top3()) print(james['Name'] + "'s fastest times are: " + james['Times']) 196 Chapter 6 Invoke the “top3()” method and convert its results to a string prior to its display on screen. custom data objects Test Drive With these changes applied to your program, let’s ensure you continue to get the same results as earlier. Load your code into IDLE and run it. The code to the “sanitize()” fun not shown here, but it is still parction is t of this program. Cool! There’s no change here. And to make objects do more, I just add more methods, right? Yes, that’s correct: more functionality = more methods. Simply add methods to encapsulate the new functionality you need within your class. There’s no limit to how many methods a class can have, so feel free to knock yourself out! you are here 4 197 no dumb questions Q: I’m not sure I see why the top3() method is coded to return a three-item list, as opposed to a string? Surely a string would make the print() statement in the main program easier to write? A: It would, but it wouldn’t be as flexible. 
By returning a list (albeit a small one), the top3() method lets the calling code decide what happens next, as opposed to forcing the caller to work with a string. Granted, the current program needs to treat the list like a string, but not all programs will want or need to. Q: Why does the class even need the top3() method? Why not store the top three times as an attribute within the class and create it as part of the object’s creation? A: Again, better not to, because doing so is less flexible. If you compute and store the top three times at object creation, you make it harder to extend the list of timing data associated with the object. For instance, if you add more timing data after the object is created, you’ll need to arrange to recompute the top three (because the new times might be fast) and update the attribute. However, when you compute the top three times “on the fly” using a call to the top3() method, you always ensure you’re using the most up-to-date data. Q: OK, I think I’m convinced. But tell me: how do I go about adding more times to my existing Athlete objects? A: To do more, add more methods. With your Athlete class created, it’s a breeze to extend it to do more work for you: simply add more methods. So, if you want to add a single new timing value to your times attribute, define a method called add_time() to do it for you. Additionally, you can add a list of times by defining a method called add_times().Then all you need to do in your code is say something like this: sarah.add_time('1.31') to add a single time to Sarah’s timing data, or say this: james.add_times(['1.21','2.22']) to add a bunch of times to James’s data. Q: But surely, knowing that times is a list, I could write code like this to do the same thing? Q: A: Q: A: A: OK, I get that. But, with a little extra work, I could do it during object creation, right? Well, yes…but we really don’t advise that. 
By preserving the original data in each object’s attributes, you are supporting the extension of the class to support additional requirements in the future (whatever they might be). If you process the data as part of the object initialization code, the assumptions you make about how programmers will use your class might just come back to bite you. Q: But what if I’m the only programmer that’ll ever use a custom class that I write? A: Trust us: you’ll thank yourself for coding your class to be as flexible as possible when you come to use it for some other purpose in a future project. When you are creating a class, you have no idea how it will be used by other programmers in their projects. And, if you think about, you have no idea how you might use it in the future, too. 198 Chapter 6 sarah.times.append('1.31') james.times.append(['1.21','2.22']) You could, but that would be a disaster. What?!? Why do you say that? There’s nothing wrong with my suggestion, is there? Well…it does indeed work. However, the problem with writing code like that is that it exposes (and exploits) that fact that the timing data is stored in a list within the Athlete class. If you later change your class implementation to use (for instance) a string instead of a list, you may well break all of the existing code that uses your class and that exploits the fact that the timing data is a list. By defining your own API with add_time() and add_ times(), you leave open the possibility that the way the data is stored within your class can change in the future (obviously, only if such a change makes sense). It is worth noting that one of the reasons for using object orientation is to hide away the details of a class’s implementation from the users of that class. Defining your own API directly supports this design ideal. Exposing the internals of your class’s implementation and expecting programmers to exploit it breaks this fundamental ideal in a very big way. 
custom data objects Let’s add two methods to your class. The first, called add_time(), appends a single additional timing value to an athlete’s timing data. The second, add_times(), extends an athlete’s timing data with one or more timing values supplied as a list. Here’s your current class:]) Add your new methods here. Don’t put down the pencil just yet! Provide a few lines of code to test your new functionality: you are here 4 199 more methods Let’s add two methods to your class. The first, called add_time(), appends a single additional timing value to an athlete’s timing data. The second, add_times(), extends an athlete’s timing data with one of more timing values supplied as a list. Here’s your current class: you were to): Take the supplied argument and append it to the existing list of timing values. self.times.append(time_value) Don’t forget to use “self”!!! def add_times(self, list_of_times): self.times.extend(list_of_times) Create a new object instance for Vera. ents Take the list of supplied argum timing of and extend the existing list values with them. While still holding on firmly to your pencil, you were to provide a few lines of code to test your new functionality: vera = Athlete(‘Vera Vi’) vera.add_time(‘1.31’) Add a single timing value. This will display a list with only one value in it: 1.31. Add three more timing values. vera.add_times([‘2.22’, “1-21”, ‘2:22’]) print(vera.top3()) print(vera.top3()) 200 Chapter 6 The top 3 timing scores are now: 1.21, 1.31 and 2.22. custom data objects Do this! Amend your code with the updated version of your Athlete class before proceeding with this Test Drive. Test Drive After running your existing program, try out your test code in the IDLE shell to confirm that everything is working as expected. As expected. Create a new athlete. Add one timing value. Display the top three times (there’s only one, so that’s all you see). Add three more timing values. 
Display the top three times (which, now, makes a little more sense). Great: it worked. You’ve packaged your code with your data and created a custom class from which you can create objects that share behaviors. And when extra functionality is required, add more methods to implement the required functionality. By encapsulating your athlete code and data within a custom class, you’ve created a much more maintainable piece of software. You will thank yourself for doing this when, in six months, you need to amend your code. Well done. This is really coming along! you are here 4 201 reinventing the wheel Emmm...maybe I’m missing something, but isn’t your Athlete class wasteful? I mean, you’ve extended it with functionality that’s already in lists, which feels a little like reinventing the wheel to me... Yes, your Athlete class is much like a list. Your Athlete class does indeed behave like a list most of the time, and you’ve added methods to expose some list functionality to the users of your class. But it’s true: you are reinventing the wheel here. Your add_time() method is a thin wrapper around the list append() method and your add_times() method is list’s extend() method in disguise. In fact, your Athlete class only differs from Python’s list due to the inclusion of the name and dob object attributes. 202 Chapter 6 custom data objects Wouldn't it be dreamy if there were a way to extend a built-in class with custom attributes? But I know it's just a fantasy… you are here 4 203 inherit class Inherit from Python’s built-in list Python’s class lets you create a custom class from scratch, just like you did with your Athlete class. However, class also lets you create a class by inheriting from any other existing class, including Python’s built-in data structure classes that provide you with list, set, and dict. Such classes are referred to as subclasses. 
What’s really nice is that when you inherit from an existing class (such as list), you are given all of the existing functionality for free. As your existing class is really nothing more than a list with added attributes, perhaps a better design is to kill off your Athlete class and replace it with a class that inherits from the built-in list class? It’s certainly worth considering, isn’t it? Sorry to hear about your Athlete class. But, according to my files, you’re in line to inherit a mountain of functionality from the built-in list class. Congratulations, you’re rich! Slippery lawyer-type 204 Chapter 6 custom data objects Tonight’s talk: Inheritance, a.k.a. He looks just like his father. Custom Class: Inherited Class: Programmers like me because they get to control everything in their code…and you know programmers: they love to code. Yes, they do. But sometimes writing everything from scratch is not the best design decision. Design! Phooey! Real programmers eat, sleep, dream, snore, and exhale code. All that design talk is for people who can’t code! Is it really? So, you’re saying it’s much better to do everything from scratch and repeat the work of others, because your way is the best way. Are you serious?!? No, no, no: you’re not listening. It’s all done with control. When you build everything from the ground up, you’re in control, as it’s all your code. And you’re happy to reinvent the wheel, even though someone else solved that problem eons ago? Of course, especially when there are custom requirements to be taken into consideration. In that case, a brand-spanking new custom class is the only way to go. Not if you can extend someone else’s class to handle your custom requirements. That way, you get the best of both worlds: inheritied functionality (so you’re not reinventing the wheel) together with the custom bits. It’s a win-win situation. Yeah, right…it’s a win-win for you, not me. 
But it’s not about us: it’s to do with making the life of the programmer easier, even the ones that live to code, right? I guess so, although I’m still a fan of custom code… you are here 4 205 idle session Let’s see what’s involved in inheriting from Python’s built-in list class. Working in IDLE’s shell, start by creating a custom list derived from the built-in list class that also has an attribute called name: >>> class NamedList(list): def __init__(self, a_name): list.__init__([]) self.name = a_name Provide the name of the class that this new class derives from. Initialize the derived from class, and then assign the argument to the attribute. With your NamedList class defined, use it to create an object instance, check the object’s type (using the type() BIF), and see what it provides (using the dir() BIF): >>> johnny = NamedList("John Paul Jones") >>> type(johnny) Create a new “NamedList” object instance. Yes, “johnny” is a “NamedList”. <class '__main__.NamedList'> >>> dir(johnny) ['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__dict__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', 'append', 'count', 'extend', 'index', 'insert', 'name', 'pop', 'remove', 'reverse', 'sort'] “johnny” can do everything a list can, as well as store data in the “name” attribute. Use some of the functionality supplied by the list class to add to the data stored in johnny: Add data to the “NamedList” using the methods provided by the list built in. 
>>> johnny.append("Bass Player") >>> johnny.extend(['Composer', "Arranger", "Musician"]) >>> johnny ['Bass Player', 'Composer', 'Arranger', 'Musician'] Access the list data, as well as the attribute data. >>> johnny.name 'John Paul Jones' Because johnny is a list, it’s quite OK to do list-type things to it: >>> for attr in johnny: print(johnny.name + " is a " + attr + ".") John Paul Jones is a Bass Player. John Paul Jones is a Composer. John Paul Jones is a Arranger. John Paul Jones is a Musician. 206 Chapter 6 Confirmation: John’s a busy boy. § “johnny” is like any other list, so feel free to use it wherever you’d use a list. custom data objects Here is the code for the now defunct Athlete class. In the space provided below, rewrite this class to inherit from the built-in list class. Call your new class AthleteList. Provide a few lines of code to exercise your new class, too:) def add_times(self, list_of_times): self.times.extend(list_of_times) Write your new class code here. Exercise your code here. you are here 4   207 new athletelist Here is the code for the now defunct Athlete class. In the space provided below, you were to rewrite this class to inherit from the built-in list class. You were to call your new class AthleteList, as well as provide a few lines of code to exercise your new class:) The class name has changed. def add_times(self, list_of_times): self.times.extend(list_of_times) These methods aren’t needed anymore. Inherit from the built-in list class. class AthleteList(list): def __init__(self, a_name, a_dob=None, a_times=[]): Nothing new list.__init__([]) here… this code is very self.name = a_name similar to the “NamedList” init self.dob = a_dob code. self.extend(a_times) def top3(self): return(sorted(set([sanitize(t) for t in self]))[0:3]) Use the new class’s name. Now that you’re inheriting from the built-in list, you can use its methods to get your work on. 
208 Chapter 6 vera = AthleteList(‘Vera Vi’) vera.append(‘1.31’) print(vera.top3()) vera.extend([‘2.22’, “1-21”, ‘2:22’]) print(vera.top3()) This code does a good job of exercising your new class. The data itself is the timing data, so the “times” attribute is gone. custom data objects Do this! In your code, replace your Athlete class code with your new AthleteList class code, and don’t forget to change get_coach_data() to return an AthleteList object instance as opposed to an Athlete object instance. Q: Sorry…but not three minutes ago you were telling me not to expose the inner workings of my class to its users, because that was fundamentally a bad idea. Now you’re doing the exact opposite! What gives? A: Well spotted. In this particular case, it’s OK to expose the fact that the class is built on top of list. This is due to the fact that the class is deliberately called AthleteList to distinguish it from the more generic Athlete class. When programmers see the word “list” in a class name, they are likely to expect the class to work like a list and then some. This is the case with AthleteList. Q: A: Q: And I can inherit from any of the built-in types? Yes. What about inheriting from more than one class…does Python support multiple interitance? A: Q: A: Can I inherit from my own custom classes? Of course, that’s the whole idea. You create a generic class that can then be “subclassed” to provide more specific, targeted functionality. Q: A: Can I put my class in a module file? Yes, that’s a really good idea, because it lets you share your class with many of your own programs and with other programmers. For instance, if you save your AthleteList class to a file called athletelist.py, you can import the into your code using this line of code: from athletelist import AthleteList then use the class as if it was defined in your current program. And, of course, if you create a really useful class, pop it into its own module and upload it to PyPI for the whole world to share. 
Yes, but it’s kind of scary. Refer to a good Python reference text for all the gory details. you are here 4 209 test drive Test Drive One last run of your program should confirm that it’s working to specification now. Give it a go in IDLE to confirm. Your entire program now produces the output the coach wants. 210 Chapter 6 custom data objects Coach Kelly is impressed That looks great! I can’t wait to show this to my young athletes and see their reaction... By basing your class on built-in functionality, you’ve leveraged the power of Python’s data structures while providing the custom solution your application needs. You’ve engineered a much more maintainable solution to Coach Kelly’s data processing needs. Good job! you are here 4 211 python toolbox You’ve got Chapter 6 under your belt and you’ve added some key Python techiques to your toolbox. go Python yL” -ina built-in data ionar • “Dict llows you toeys. a t a h t e r u hk struct a values wit t a d e t ia c o ass up part of k o lo e h t • “Key” ary. t ic e the d ion part of th e, a t a d e h t valu ”• “Value can be any ure). h ic h w ( y r dictiona other data struct including an More Python Lingo • “self” - a method argument that always refers to the curren t object instance. Create a empty dictionary using the dict() factory function or using {}. To access the value associated with the key Name in a dictionary called person, use the familiar square bracket notation: person['Name']. Like list and set, a Python’s dictionary dynamically grows as new data is added to the data structure. Populate a dictionary as you go: new_d = {} or new_d = dict() and then d['Name'] = 'Eric Idle' or do the same thing all in the one go: new_d = {'Name': 'Eric Idle'} The class keyword lets you define a class. Class methods (your code) are defined in much the same way as functions, that is, with the def keyword. Class attributes (your data) are just like variables that exist within object instances. 
The __init__() method can be defined within a class to initialize object instances. Every method defined in a class must provide self as its first argument. Every attribute in a class must be prefixed with self. in order to associate its data with its instance. Classes can be built from scratch or can inherit from Python’s built-in classes or from other custom classes. Classes can be put into a Python module and uploaded to PyPI. 212 Chapter 6 CHAPTER 6 CHAPTER 6 Your Python Toolbox 7 web development Putting it all together This Web thing will never catch on...especially now that I have my trusty Underwood to keep me company.... this is a new chapter 213 caring is sharing It’s good to share The coach showed us your program running on his laptop...any chance me and my friends could also get access to our list of times? I’d love to show them to my dad... Coach Kelly’s young athletes You’re a victim of your own success. The new requests come flooding in right after Coach Kelly starts showing off your latest program. It appears that everyone wants access to the coach’s data! The thing is: what’s the “best way” to do this? 214 Chapter 7 web development You can put your program on the Web You’ll want to be able to share your functionality with lots of people... ...but you probably want only one version of your program “out there” that everyone accesses... ...and you need to make sure updates to your program are easy to apply. A “webapp” is what you want. If you develop your program as a Web-based application (or webapp, for short), your program is: • Available to everyone who can get to your website • In one place on your web server • Easy to update as new functionality is needed But…how do webapps actually work? you are here 4 215 anatomy of a web request Webapps Up Close No matter what you do on the Web, it’s all about requests and responses. A web request is sent from a web browser to a web server as the result of some user interaction. 
On the web server, a web response (or reply) is formulated and sent back to the web browser. The entire process can be summarized in five steps. Step 1: Your user enters a web address, selects a hyperlink, or clicks a button in her chosen web browser. Step 2: The web browser converts the user’s action into a web request and sends it to a server over the Internet. I just type the web address into my browser’s location bar and press Enter... The Internet Here comes a web request. Hey, hello there...what’s this? A web request just for me? How nice... Deciding what to do next Web Server Step 3: The web server receives the web request and has to decide what to do next. 216 Chapter 7 One of two things happens at this point. If the web request is for static content—such as an HTML file, image, or anything else stored on the web server’s hard disk—the web server locates the resource and returns it to the web browser as a web response. If the request is for dynamic content—that is, content that must be generated—the web server runs a program to produce the web response. web development Here you go...a web response generated just for you. Enjoy! Step 4: The web server processes the web request, creating a web response, which is sent back over the Internet to the waiting web browser. Here comes a web response. Web Server The Internet The (potentially) many substeps of step 4 In practice, step 4 can involve multiple substeps, depending on what the web server has to do to produce the response. Obviously, if all the server has to do is locate static content and send it back to the browser, the substeps aren’t too taxing, because it’s all just file I/O. However, when dynamic content must be generated, the substeps involve the web server locating the program to execute, executing the located program, and then capturing the output from the program as the web response…which is then sent back to the waiting web browser. 
This dynamic content generation process has been standardized since the early days of the Web and is known as the Common Gateway Interface (CGI). Programs that conform to the standard are often referred to as CGI scripts. That’s exactly what I need. Thanks! Step 5: The web browser receives the web response and displays it on your user’s screen. you are here 4 217 webapp requirements What does your webapp need to do? Let’s take a moment to consider what you want your webapp to look like and how it should behave on your user’s web browser. You can then use this information to help you specify what your webapp needs to do. I guess I need a nice, friendly home page to kick things off, eh? Yeah...and I want to be able to get at my times easily... 218 Chapter 7 ...and once I’ve selected mine, I want them to look nice on my screen, so I can print them for my mom. web development There’s nothing like grabbing your pencil and a few blank paper napkins to quickly sketch a simple web design. You probably need three web pages: a “welcome” page, a “select an athlete” page, and a “display times” page. Go ahead and draw out a rough design on the napkins on this page, and don’t forget to draw any linkages between the pages (where it makes sense). you are here 4 219 back-of-the-napkin sketch There’s nothing like grabbing your pencil and a few blank paper napkins to quickly sketch a simple web design. You probably need three web pages: a “welcome” page, a “select an athlete” page, and a “display times” page. You were to draw out a rough design on the napkins. You were to draw any linkages between the pages (where it made sense). § The home page displays a friendly graphic and a link to start the web app. go to Click on the home page’s linkoftoall the list a page that displays a athlete’s coach’s athletes. Click on an“Se lect” the n radio button and the a. 
button to see the dat Select an athlet e from this list Kelly’s Website ch oa C to e m co Wel find here is my ll u’ yo t ha t l al , For now data. athlete’s timing rack! See you on the t Sarah James Julie Mikey Select Timimg data for Sarah: 2.18 2.21 2.22 220 Chapter 7 The third web page displays the selected athlete’s data and provides links back to the other two pages. Select another athlete. to work with: web development Design your webapp with MVC Now that you have an idea of the pages your webapp needs to provide, your next question should be: what’s the best way to build this thing? Ask 10 web developers that question and you’ll get 10 different answers; the answer often depends on whom you ask. Despite this, the general consensus is that great webapps conform to the Model-View-Controller pattern, which helps you segment your webapp’s code into easily manageable functional chunks (or components): The Model The code to store (and sometimes process) your webapp’s data The View The code to format and display your webapp’s user interface(s) The Controller The code to glue your webapp together and provide its business logic By following the MVC pattern, you build your webapp in such as way as to enable your webapp to grow as new requirements dictate. You also open up the possibility of splitting the workload among a number of people, one for each component. Let’s build each of the MVC components for your webapp. you are here 4 221 build a model Model your data Your web server needs to store a single copy of your data, which in this case is Coach Kelly’s timing values (which start out in his text files). When your webapp starts, the data in the text files needs to be converted to AthleteList object instances, stored within a dictionary (indexed by athlete name), and then saved as a pickle file. Let’s put this functionality in a new function called put_to_store(). While your webapp runs, the data in the pickle needs to be available to your webapp as a dictionary. 
Let’s put this functionality in another new function called get_from_store(). When your webapp starts: james.txt julie.txt While your webapp runs: The single pickle with all of the coach’s data stored in a dictionary mikey.txt .txt The get_from_store() function The put_to_store() function ': AthleteList... , 'James': AthleteList... , 'Julie': AthleteList... , 'Mikey': AthleteList... } The single pickle with all of the data stored in a dictionary 222 Chapter 7 s returned A dictionary of AthleteList)” function re( sto from the “get_from_ web development Here is the outline for a new module called athletemodel.py, which provides the functionality described on the previous page. Some of the code is already provided for you. Your job is to provide the rest of the code to the put_to_store() and get_from_store() functions. Don’t forget to protect any file I/O calls. import pickle from athletelist import AthleteList def get_coach_data(filename): # Not shown here as it has not changed since the last chapter. def put_to_store(files_list): You need code in here to populate the dictionary with the data from the files. all_athletes = {} This function is called with a list of filenames as its sole argument. And don’t forget to save the dictionary to a pickle (and check for file I/O errors). return(all_athletes) def get_from_store(): all_athletes = {} Get the dictionary from the file, so that it can be returned to the caller. Both functions need to return a dictionary of AthleteLists. return(all_athletes) you are here 4 223 model module Here is the outline for a new module called athletemodel.py, which provides the functionality described on the previous page. Some of the code is already provided for you. Your job was to provide the rest of the code to the put_to_store() and get_from_store() functions. You were not to forget to protect any file I/O calls. 
import pickle from athletelist import AthleteList def get_coach_data(filename): # Not shown here as it has not changed since the last chapter. def put_to_store(files_list): all_athletes = {} Take each file, turn it into an AthleteList object instance, and add the athlete’s data to the dictionary. for each_file in files_list: ath = get_coach_data(each_file) all_athletes[ath.name] = ath try: Each athlete’s name is used as the “key” in the dictionary. The “value” is the AthleteList object instance. with open(‘athletes.pickle', ‘wb') as athf: Save the entire dictionary of AthleteLists to a pickle. pickle.dump(all_athletes, athf) except IOError as ioerr: print(‘File error (put_and_store): ' + str(ioerr)) And don’t forget a try/except to protect your file I/O code. return(all_athletes) def get_from_store(): all_athletes = {} Simply read the entire pickle into the dictionary. What could be easier? try: with open(‘athletes.pickle', ‘rb') as athf: all_athletes = pickle.load(athf) except IOError as ioerr: print(‘File error (get_from_store): ' + str(ioerr)) return(all_athletes) 224 Chapter 7 Again…don’t forget your try/except. web development Let’s test your code to ensure that it is working to specification. Type your code into an IDLE edit window and save your code into a folder that also includes the coach’s text files. Press F5 to import your code to the IDLE shell, and then use the dir() command to confirm that the import has been successful: >>> dir() ['AthleteList', '__builtins__', '__doc__', '__name__', '__package__', 'get_coach_data’, 'get_from_store', 'pickle', 'put_to_store'] Create a list of files to work with, and then call the put_to_store() function to take the data in the list of files and turn them into a dictionary stored in a pickle: Here’s all of the AthleteLists. 
>>> the_files = ['sarah.txt', 'james.txt', 'mikey.txt', 'julie.txt'] >>> data = put_to_store(the_files) >>> data {'James Lee': ['2-34', '3:21', '2.34', '2.45', '3.01', '2:01', '2:01', '3:10', '2-22', '201', '2.01', '2:16'], 'Sarah Sweeney': ['2:58', '2.58', '2:39', '2-25', '2-55', '2:54', '2.18', '2:55', '2:55', '2:22', '2-21', '2.22'], 'Julie Jones': ['2.59', '2.11', '2:11', '2:23', '310', '2-23', '3:10', '3.21', '3-21', '3.01', '3.02', '2:59'], 'Mikey McManus': ['2:22', '3.01', '3:01', '3.02', '3:02', '3.02', '3:22', '2.49', '2:38', '2:40', '2.22', '2-31']} At this point, the athletes.pickle file should appear in the same folder as your code and text files. Recall that this file is a binary file, so trying to view it in IDLE or in your editor is not going to make much sense. To access the data, use the dictionary returned by the put_to_store() or get_from_store() functions. Use the existing data in the data dictionary to display each athlete’s name and date of birth: >>> for each_athlete in data: print(data[each_athlete].name + ' ' + data[each_athlete].dob) b” By accessing the “name” andthe“dorest of at attributes, you can get the AthleteList data. James Lee 2002-3-14 Sarah Sweeney 2002-6-17 Julie Jones 2002-8-17 Mikey McManus 2002-2-24 Use the get_from_store() function to load the pickled data into another dictionary, then confirm that the results are as expected by repeating the code to display each athlete’s name and date of birth: >>> data_copy = get_from_store() >>> for each_athlete in data_copy: print(data_copy[each_athlete].name + ' ' + data_copy[each_athlete].dob) James Lee 2002-3-14 Sarah Sweeney 2002-6-17 Julie Jones 2002-8-17 Mikey McManus 2002-2-24 The data in the returned dictio is as expected, exactly the samenary that produced by put_to_store( as ). you are here 4 225 interface view View your interface With your model code written and working, it’s time to look at your view code, which creates your webapp’s user interface (UI). 
On the Web, UIs are created with HTML, the Web’s markup technology. If you are new to HTML, it is worth taking some time to become familiar with this critical web development technology. There’s lots of material on the Web and more than a few good books out there. [Note from Marketing: This is the book that we recommend for quickly getting up to speed with HTML…not that we’re biased or anything. § ]. Hey, we hear you are getting into web development? We have a small module that we put together that might help you generate HTML. It’s a little rough, but it works. You’re more than welcome to use it for your projects, if you like. YATE: Yet Another Template Engine Your friends over at the Head First Code Review Team heard you’re planning to write some code to generate HTML for your webapp’s UI. They’ve sent over some code that they swear will make your life easier. It’s a small library of HTML-generating helper functions called yate. The code was produced quickly and was originally designed to be “throw away,” so the team has provided it as is. It’s somewhat raw, but it should be OK. 226 Chapter 7 (Most of) the Head First Code Review Team web development from string import Template def start_response(resp="text/html"): return('Content-type: ' + resp + '\n\n') def include_header(the_title): with open('templates/header.html') as headf: head_text = headf.read() header = Template(head_text) return(header.substitute(title=the_title)) There’s not much help here, just code. No comments, explanations the documentation, or anything! , def include_footer(the_links): with open('templates/footer.html') as footf: foot_text = footf.read() link_string = '' for key in the_links: link_string += '<a href="' + the_links[key] + '">' + key + '</a> ' footer = Template(foot_text) return(footer.substitute(links=link_string)) def start_form(the_url,') def end_form(') 227 template engine code Let’s get to know the yate code before proceeding with the rest of this chapter. 
For each chunk of code presented, provide a written description of what you think it does in the spaces provided: from string import Template to Take a momentem plate” “T look up the hon’s module in Pyt set. documentation def start_response(resp="text/html"): One has already been done for you. return('Content-type: ' + resp + '\n\n') This function takes a single (optional) string as its argument and uses it to create a CGI “Content-type:” line, with “text/html” as the default. def include_header(the_title): with open('templates/header.html') as headf: head_text = headf.read() header = Template(head_text) return(header.substitute(title=the_title)) def include_footer(the_links): with open('templates/footer.html') as footf: foot_text = footf.read() link_string = '' for key in the_links: link_string += '<a href="' + the_links[key] + '">' + key + '</a> ' footer = Template(foot_text) return(footer.substitute(links=link_string)) 228 Chapter 7 Write your explanations in the spaces. web development def start_form(the_url,') def end_form(</form>')   229 template engine described Let’s get to know the yate code before proceeding with the rest of this chapter. For each chunk of code presented, you were to provide a written description of what you think it does: from string import Template Note the default for “resp”. Import the “Template” class from the standard library’s “string” module. This allows for simple string-substitution templates. def start_response(resp="text/html"): return('Content-type: ' + resp + '\n\n') This function takes a single (optional) string as its argument and uses it to create a CGI “Content-type:” line, with “text/html” as the default. Open the template file (which is HTML), read it in, and substitute in the provided “title”. 
def include_header(the_title): with open('templates/header.html') as headf: head_text = headf.read() header = Template(head_text) return(header.substitute(title=the_title)) Open the template file (which is HTML), read it in, def and substitute in the provided dictionary of HTML links in “the_links”. This function takes a single string as its argument and uses at the title for the start of a HTML page. The page itself is stored within a separate file in “templates/header.html”, and the title is substituted in as needed. include_footer(the_links): with open('templates/footer.html') as footf: foot_text = footf.read() link_string = '' for key in the_links: link_string += '<a href="' + the_links[key] + '">' + key + '</a> ' Turn the dictionary of links into a string, which is then substituted into the template. 230 Chapter 7 footer = Template(foot_text) return(footer.substitute(links=link_string)) This looks a little weird, but it’s an HTML hack for forcing spaces into a string. Similar to the “include_header” function, this one uses its single string. web development This is typically either “POST” or “GET”. def start_form(the_url,') This function returns the HTML for the start of a form and lets the caller specify the URL to send the form’s data to, as well as the method to use. def end_form(</form>') This function returns the HTML markup, which terminates the form while allowing the caller to customize the text of the form’s “submit” button. def radio_button(rb_name, rb_value): return('<input type="radio" name="' + rb_name + '" value="' + rb_value + '"> ' + rb_value + '<br />') Given a radio-button name and value, create a HTML radio button (which is typically included within a HTML form). Note: both arguments are required. def u_list(items): A simple “for” loop does the trick. u_string = '<ul>' for item in items: u_string += '<li>' + item + '</li>' u_string += '</ul>' return(u_string) Given a list of items, this function turns the list into a HTML unnumbered list. 
A simple “for” loop does all the work, adding a LI to the UL element with each iteration. def header(header_text, header_level=2): return('<h' + str(header_level) + '>' + header_text + '</h' + str(header_level) + '>') Create and return a HTML header tag (H1, H2, H2, and so on) with level 2 as the default.. The “header_text” argument is required. def para(para_text): return('<p>' + para_text + '</p>'): A: And you did this because you are using MVC? Partly, yes. The reason the MVC pattern is being followed is to ensure that the model code is separate from the view code, which are both separate from the controller code. No matter the size of the project, following MVC can make your life easier. Q: A: But surely MVC is overkill for something this small? default CGI response header , plus variations on a theme. 'Content-type: text/plain\n\n' >>> start_response("application/json") 'Content-type: application/json\n\n' The include_header() function generates the start of a web page and let’s you customizee its title: >>> include_header("Welcome to my home on the web!") '<html>\n<head>\n<title>Welcome to my home on the web!</title>\n<link type="text/css" rel="stylesheet" href="/coach.css" />\n</head>\n<body>\n<h1>Welcome to my home on the web!</ h1>\n' , but don’t worry; it’s meant This all looks a little bit messybro wser, NOT by you. Your web to be processed by your web y working with this HTML. Note browser will have no difficult file (more on this in a bit). the inclusion of a link to a CSS 232 Chapter 7 web development The include_footer() function produces HTML that terminates a web page, providing links (if provided as a dictionary). An empty dictionary switches off the inclusion of the linking HTML: >>> include_footer({'Home': '/index.html', 'Select': '/cgi-bin/select.py'}) '<p>\n<a href="/index.html">Home</a> <a href="/cgi-bin/select. 
py">Select</a> \n</p>\n</body>\n</html>\n' >>> include_footer({}) '<p>\n\n</p>\n</body>\n</html>\n' With links included, and without. The start_form() and end_form() functions bookend a HTML form, with the parameter (if supplied) adjusting the contents of the generated HTML: The argument allows you to specif the name of the program on the y server to send the form’s data to. >>> start_form("/cgi-bin/process-athlete.py") '<form action="/cgi-bin/process-athlete.py" method="POST">' >>> end_form() '<p></p><input type=submit</form>' >>> end_form("Click to Confirm Your Order") '<p></p><input type=submit</form>' HTML radio buttons are easy to create with the radio_button() function: >>> for fab in ['John', 'Paul', 'George', 'Ringo']: Which one is your favorite? io Select from the list of rad buttons. radio_button(fab, fab) '<input type="radio" name="John" value="John"> John<br />' '<input type="radio" name="Paul" value="Paul"> Paul<br />' '<input type="radio" name="George" value="George"> George<br />' '<input type="radio" name="Ringo" value="Ringo"> Ringo<br />' Unordered list are a breeze with the u_list() function: u_list(['Life of Brian', 'Holy Grail']) '<ul><li>Life of Brian</li><li>Holy Grail</li></ul>' Again, not too easy on your eye, but fine as far as your web browse r is concerned. The header() function lets you quickly format HTML headings at a selected level (with 2 as the default): >>> header("Welcome to my home on the web") '<h2>Welcome to my home on the web</h2>' >>> header("This is a sub-sub-sub-sub heading", 5) '<h5>This is a sub-sub-sub-sub heading</h5>' it works Nothing too exciting here, but e. her as expected. Same goes for Last, but not least, the para() function encloses a chunk of text within HTML paragraph tags: >>> para("Was it worth the wait? We hope it was...") '<p>Was it worth the wait? 
We hope it was...</p>' you are here 4 233 controller code Control your code Your model code is ready, and you have a good idea of how the yate module can help you with your view code. It’s time to glue it all together with some controller code. First things first: you need to arrange your webapp’s directory structure to help keep things organized. To be honest, anything goes here, although by giving it a little thought, you can enhance your ability to extend your webapp over time. Here’s one folder structure that Head First Labs recommends. You can call your top-level folder anything you like. As well as containing the subfolders, this folder contains your webapp’s “index.html” file, your “favicon.ico” icon, style sheets, and anything else that doesn’t fit neatly into one of the subfolders. webapp cgi-bin data Do this! Head on over to the Head First Python support website, download webapp.zip, and unpack it to your hard disk. 234 Chapter 7 images templates Any code that you write for your webapp needs to reside in a specially named folder called “cgi-bin”. Let’s keep the coach’s data files in a separate folder by putting all of the TXT files in here. If your webapp has any image files (JPGs, GIFs, PNGs, and so on), pop them into their own folder to help keep things organized. The templates that came with the “yate.py” download can go in here. 
CGI Web Server So...to run my webapp, I need a web server with CGI enabled. More on this in a little bit. All webapps need to run on web servers. Practically every web server on the planet supports CGI. Whether your running Apache, IIS, nginx, Lighttpd, or any of the others, they all support running CGI scripts written in Python. But using one of these tools here is overkill. There’s no way the coach is going to agree to download, unpack, install, configure, and manage one of these industry heavyweights. As luck would have it, Python comes with its very own web server, included in the http.server library module. Check the contents of the webapp.zip download: it comes with a CGIenabled web server called simplehttpd.py. Import the HT server and CGI TP modules. Specify a port. Create a HTTP server. ly Display a friendart st d message an your server. Here are the five lines of cod build a web server in Python. e needed to from http.server import HTTPServer, CGIHTTPRequestHandler port = 8080 httpd = HTTPServer(('', port), CGIHTTPRequestHandler) print("Starting simple_httpd on port: " + str(httpd.server_port)) httpd.serve_forever() you are here 4 235 generate list Display the list of athletes Let’s create a program called generate_list.py which, when executed by the web server, dynamically generates a HTML web page that looks something like this: It wouldn’t hurt to add a title to this web page, would it? This is a paragraph. Select an athlete from this list There’s one radio button for each athlete. Sarah to work with: James Julie Mikey Select A “submit” button When your user selects an athlete by clicking on her radio button and clicking Select, a new web request is sent to the web server. This new web request contains data about which radio button was pressed, as well as the name of a CGI script to send the form’s data to. Recall that all of your CGI scripts need to reside in the cgi-bin folder on your web server. 
With this in mind, let’s make sure your generate_list.py CGI script sends its data to another program called: cgi-bin/generate_timing_data.py 236 Chapter 7 All of this is contained within an HTML form. web development Pool Puzzle Your job is to take the code from the pool and place them into the blank lines in the CGI script. You may not use the same line of code more than once. Your goal is to make a CGI script that will generate a HTML page that matches the hand-drawn design from the previous page. I’ve started things off for you. import athletemodel import yate import glob Import the modules that you nee d. You’ve already met “athletemodel” and “ya The “glob” module lets you query te”. operating system for a list of fileyour names. data_files = glob.glob("data/*.txt") athletes = athletemodel.put_to_store(data_files) ction Use your “put_to_store()” funlet es ath to create a dictionary of s. from the list of data file Let’s add a link to the bottom of the generated HTML page that takes your user home. print(yate.include_footer({"Home": "/index.html"})) Note: each thing from the pool can be used once! print(yat e.start_f orm("gene print(yat rate_timi e.para("S ng_data.p elect an y")) a t h l ete from print(yat the list e.include print(yat t _ o h e work with ader("Coa e.radio_b :")) ch Kelly' utton("wh s List of ich_athle te", athl A t h l e t e s")) etes[each for each_athlete in athletes: _athlete] ) ) ( p e r s i n . n o n t p a ( s m yate.end_ e e)) form("Sel e.start_r ect")) print(yat you are here 4 237 cgi script Pool Puzzle Solution Your job was to take the code from the pool and place them into the blank lines in the CGI script. You were not to use the same line of code more than once. Your goal was to make a CGI script that generates a HTML page that matches the hand-drawn design. import athletemodel import yate import glob Always start with a Content-type line. Start generating the form, providing the name of the serverside program to link to. 
Generate a radiobutton for each of your athletes.:")) A paragraph telling your user what to do for each_athlete in athletes: print(yate.radio_button("which_athlete", athletes[each_athlete].name)) print(yate.end_form("Select")) End the form generation with a custom “Submit” button. print(yate.include_footer({"Home": "/index.html"})) Cool…an empty pool. 238 Chapter 7 Start generating the web page, providing an appropriate title. web development What you need to do next depends on the operating system you’re running your web server on. If you are running on Windows, stop reading right now and proceed to the Test Drive. However, if you are running a Unixbased system (such as Linux, Mac OS X, or BSD) you need to do two things to prepare your CGI script for execution: 1. Set the executable bit for your CGI using the chmod +x command. 2. Add the following line of code to the very top of your program: #! /usr/local/bin/python3 Test Drive dow, type From your terminal win_l ist. te ra chmod +x gene bit. You e abl cut py to set the exe e. onc need do this only To test drive your CGI script, you need to have a web server up and running. The code to simplehttpd.py is included as part of the webapp.zip download. After you unpack the ZIP file, open a terminal window in the webapp folder and start your web server: File Edit Window Help WebServerOnWindows c:\Python31\python.exe simplehttpd.py Starting simple_httpd on port: 8080 Use this command on Windows-based systems. File Edit Window Help WebServerOnUNIX $ python3 simple_httpd.py Starting simple_httpd on port: 8080 Use this command on Unix-based systems. you are here 4 239 test drive Test Drive, continued With your web server running, let’s load up Coach Kelly’s home page and get things going. You’ve started your web server running on port 8080 on your computer, so you need to use the following web address in your web browser:. The coach’s home page appears in your browser. 
It’s called “index.html” and it is included in the “webapp.zip” download. …and your web server springs into life, logging (to the screen) any and all web requests that it processes. File Edit Window Help DisplayingHomePage The “timing data” hyperlink is waiting for you to click it. 240 Chapter 7 $ python3 simple_httpd.py Starting simple_httpd on port: 8080 localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:30:03] "GET "GET "GET "GET / HTTP/1.1" 200 /coach.css HTTP/1.1" 200 /images/coach-head.jpg HTTP/1.1" 200 /favicon.ico HTTP/1.1" 200 - web development Sure enough, clicking on the home page’s link runs the generate_list.py program on the web server, which displays Coach Kelly’s athletes as a list of radio buttons. Looking good…the web page has been generated correctly… …and your web server logs the web request to run the “generate_list.py” CGI script. File Edit Window Help DisplayingHomePage $ python3 simple_httpd.py Starting simple_httpd on port: 8080 localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:30:03] localhost - - [12/Sep/2010 14:45:16] "GET "GET "GET "GET "GET / HTTP/1.1" 200 /coach.css HTTP/1.1" 200 /images/coach-head.jpg HTTP/1.1" 200 /favicon.ico HTTP/1.1" 200 /cgi-bin/generate_list.py HTTP/1.1" 200 - You can click the Home hyperlink to return to the coach’s home page, or select an athlete from the list (by clicking on their radio-button), before pressing the Select button to continue. Select an athlete and press Select. What happens? you are here 4 241 no such cgi script The dreaded 404 error! Whoops! Your web server has responded with a “404” error code, which is its way of telling you that something was wrong with your request. 
The web server is in fact telling you that it can’t locate the resource that your web browser requested, so it’s telling you that you made a mistake: Yikes! The web server can’t find the “/cgi-bin/generate_timing_dat a.p y” CGI script, which triggers the 40 4. Check the web server’s console window to confirm that your attempt to post your form’s data to generate_timing_data.py resulted in failure. Which isn’t really that surprising seeing as you have yet to write that code! So…things aren’t as bad as they first appear. The “404” error is exactly what you would expect to be displayed in this situation, so your generate_list.py CGI is working fine. What’s needed is the code to the other CGI script. If you create the required CGI script, you’ll be back on track. 242 Chapter 7 web development Tonight’s talk: To be CGI or not to be CGI, that is the question. A Python Program: A Python CGI Script: Listen: you’re really not all that different than me; you just work on a web server, whereas I can work anywhere. Yes. I like to think of myself as special. Special?!? But you only work on the Web, nowhere else. How’s that “special”? Nonsense! The truth is that you work only on the Web and break pretty quickly when used elsewhere. You don’t even have control over your own I/O. Like [sniggers] generating text in the form of HTML? That’s really taxing… Oh, get over yourself ! You’re a regular program, just like me. I can generate HTML, too, I just choose not to. I guess so… Because all the cool stuff works on the Web these days and I’m designed, optimized, tailored, and engineered for the Web. Because the Web’s a cool place, it follows that I must be cool, too. See: special. I don’t need control over my input and output. I have a friendly web server to take care of that for me. My input comes from the web server and my output goes to the web server. This arrangement allows me to concentrate on the important stuff. 
Smirk all you want; HTML makes the World Wide Web go around and I’m a master at generating it dynamically, on demand, and as needed. Without me, the Web would be a pretty static place. And if you did generate HTML, you’d want it displayed somewhere…like in a browser? And to do that you’d need to rely on the services of a friendly web server, right? Ummmm…I guess so. Which would make you a CGI script. So, you’d be special, too. Q.E.D. you are here 4 243 yet another cgi script Create another CGI script Let’s take a moment to recall what is required from the generate_ timing_data.py CGI script. Based on your hand-drawn sketch from earlier, you need to generate a new HTML page that contains the top three times for the selected athlete: This looks like an HTML heading for the page, right? It’s probably a good idea to add a title to this page. Timing data for Sarah: This might be best rendered as an unordered HTML list. 2.18 2.21 2.22 Select another athlete. home Two hyperlinks: one jumps to the home page, whereas the other returns to the previous selection page. But how do you know which athlete is selected? When you click on a radio-button and then press the Select button, a new web request is sent to the server. The web request identifies the CGI script to execute (in this case, that’s generate_timing_data.py), together with the form’s data. The web server arranges to send the form’s data to your CGI script as its input. Within your code, you can access the form data using Python’s cgi module, which is part of the standard library: Import the “cgi” library. import cgi form_data = cgi.FieldStorage() Grab all of the form data and put it in a dictionary. athlete_name = form_data['which_athlete'].value Access a named piece of data from the form’s data. 244 Chapter 7 Let’s include the athlete’s full name and DOB here. web development Write the code to your new CGI script here. It’s time to exercise your newly acquired web-coding chops. 
Grab your pencil and write the code for the generate_timing_data. py CGI script. It’s not too different from the generate_list.py code, so you should be able to reuse a lot of your existing code. you are here 4 245 revised script It’s time to exercise your newly acquired web-coding chops. You were to grab your pencil and write the code for the generate_ timing_data.py CGI script. It’s not too different from the generate_list.py code, so you should be able to reuse a lot of your existing code. #! /usr/local/bin/python3 This line is needed on Unix-based systems only. import cgi Import the libraries and modules you intend to use. import athletemodel import yate Get the data from the athletes = athletemodel.get_from_store() model. Which athlete’s data are you working with? Nothing new here or here. form_data = cgi.FieldStorage() athlete_name = form_data['which_athlete'].value print(yate.start_response()) print(yate.include_header("Coach Kelly's Timing Data")) print(yate.header("Athlete: " + athlete_name + ", DOB: " + athletes[athlete_name].dob + ".")) print(yate.para("The top times for this athlete are:")) The bottom of this web page has two links. print(yate.u_list(athletes[athlete_name].top3())) Grab the athlete’s name and DOB. Turn the top three list into an unordered HTML list. print(yate.include_footer({"Home”: "/index.html", "Select another athlete": "generate_list.py"})) A link back to the previous CGI script. 246 Chapter 7 web development Test Drive Note: If you are on a Unix-based system, don’t forget to add “chmod +x generate_timing_data.py” to set the executable bit. Your web server should still be running from earlier. If it isn’t, start it again. In your web browser, return to the coach’s home page, then select the hyperlink to display the list of athletes, select Sarah, and then press the button. This all looks OK. Ah, phooey! Something’s not quite right here. Where’s Sarah’s top three times? 
File Edit Window Help HoustonWeHaveAProblem Does the web server’s logging information tell you anything? $ python3 simple_httpd.py Starting simple_httpd on port: 8080 localhost - - [12/Sep/2010 14:30:03] "GET / HTTP/1.1" 200 localhost - - [12/Sep/2010 14:30:03] "GET /coach.css HTTP/1.1" 200 localhost - - [12/Sep/2010 14:30:03] "GET /images/coach-head.jpg HTTP/1.1" 200 localhost - - [12/Sep/2010 14:30:03] "GET /favicon.ico HTTP/1.1" 200 localhost - - [12/Sep/2010 14:45:16] "GET /cgi-bin/generate_list.py HTTP/1.1" 200 localhost - - [12/Sep/2010 16:12:27] “GET /cgi-bin/generate_list.py HTTP/1.1” 200 localhost - - [12/Sep/2010 16:12:29] “POST /cgi-bin/generate_timing_data.py HTTP/1.1” 200 Traceback (most recent call last): File “/Users/barryp/HeadFirstPython/chapter7/cgi-bin/generate_timing_data.py”, line 21, in <module> print(yate.u_list(athletes[athlete_name].top3())) TypeError: ‘list’ object is not callable localhost - - [12/Sep/2010 16:12:29] CGI script exit status 0x100 Your CGI has suffered from a TypeError exception, but other than looking at the web server’s logging screen, it’s not clear on the web browser screen that anything has gone wrong. What do you think is the problem here? Take a moment to study the error message before flipping the page. you are here 4 247 track down cgi errors Enable CGI tracking to help with errors The CGI standard dictates that any output generated by a server-side program (your CGI script) should be captured by the web server and sent to the waiting web browser. Specifically, anything sent to STDOUT (standard output) is captured. When your CGI script raises an exception, Python arranges for the error message to display on STDERR (standard error). The CGI mechanism is programmed to ignore this output because all it wants is the CGI script’s standard output. When your CGI works, I’ll fill your STDOUT with lovely HTML. When your CGI fails, it’s a case of—POOF!—gone for good. Sorry, but that’s the way the CGI cookie crumbles... 
Web Server This behavior is fine when the webapp is deployed, but not when it’s being developed. Wouldn’t it be useful to see the details of the exception in the browser window, as opposed to constantly having to jump to the web server’s logging screen? Well…guess what? Python’s standard library comes with a CGI tracking module (called cgitb) that, when enabled, arranges for detailed error messages to appear in your web browser. These messages can help you work out where your CGI has gone wrong. When you’ve fixed the error and your CGI is working well, simply switch off CGI tracking: import cgitb cgitb.enable() 248 Chapter 7 Add these two lines near the start of your CGI scripts to enable Python’s CGI tracking technology. web development Test Drive Wow! Look at all of this detail. Add the two CGI tracking lines of code near the top of your generate_timing_data.py CGI script. Press the Back button on your web browser and press the Select button again. Let’s see what happens this time. Notice that the CGI tracking module tries to identify exactly where the problem with your code lies. What’s this? top3() is undefined?!? you are here 4 249 small fix, big difference A small change can make all the difference The CGI tracking output indicates an error with the use of the top3() method from the AthleteList code. A quick review of the code to the AthleteList class uncovers the source of the error: the top3() method has been redesignated as a class property. @property def top3(self): This decorator allows you to acc returned by “top3()” as if it weress the data e a class attribute. return(sorted(set([self.sanitize(t) for t in self]))[0:3]) The use of the @property decorator allows the top3() method to appear like an attribute to users of the class. 
So, instead of calling the top3() method like this: A method call always needs the parentheses… print(yate.u_list(athletes[athlete_name].top3())) Treat the top3() method as if it was another class attribute, and call it like this: print(yate.u_list(athletes[athlete_name].top3)) It’s a small change, but it’s an important one When a change is made to the way a class is used, you need to be careful to consider what impact the change has on existing programs, both yours and those written by others. At the moment, you are the only one using the AthleteList class, so it’s not a big deal to fix this. But imagine if thousands of programmers were using and relying on your code… Let’s fix your CGI script and try again. 250 Chapter 7 …unless the method is declared to be an “@property”, in which case parentheses are NOT required. web development Test Drive Make the small edit to your code to remove the brackets from the call to the top3() method, press your web browser’s Back button, and press the Select button one last time. Whoo hoo! This time the selected athlete’s data is displayed on screen. Nice, eh? Now that you’ve solved that problem, be sure to switch off CGI tracking. Q: A: What happens if the coach recruits new athletes? All Coach Kelly needs do is create a new text file similar to the others, and your webapp handles the rest by dynamically including the new athlete the next time your webapp runs, which occurs when someone clicks on the home page’s “timing data” hyperlink. Q: Shouldn’t the server’s data be in a database as opposed to a pickle? Surely that would be better, right? A: In this case, it’s probably overkill to use a database, but it might be worth considering sometime in the future. you are here 4 251 successful webapp Your webapp’s a hit! This is great! Now I can share my data with the kids, and they can access their times without bugging me... Wait until my mom sees my times...she’ll be so proud of me! 
By moving your program to the Web, you’ve made it a no-brainer for Coach Kelly to share his data with not only his athletes, but with anyone else that needs to access his data. By conforming to the MVC pattern and using CGI, you’ve built a webapp in such a way that it’s easy to extend as new requirements are identified. Congratulations! You’re a web developer. 252 Chapter 7 Hey, this is super cool and the coach is right... if we train hard, we can improve on these times! web development Your Python Toolbox The Model-View-Controller pattern lets you design and build a webapp in a maintainable way. The model stores your webapp’s data. Python Lingo • “@proper ty” lets you arrange a decorator that appear as if it is for a class method to a class attribute. Web Lingo m that runs on ra og pr a p” • “webap the Web. eb sent from the w ” st ue q re eb • “w web server. browser to the nt from the web se ” se on sp re se • “web browser in repson eb w he t to er serv . to a web request eway the Common Gateb server to • “CGI” allows a w Interface, which e program. run a server-sid her name for a ot an ” pt ri sc I • “CG ram. server-side prog The view displays your webapp’s user interface. The controller glues everything together with programmed logic. The standard library string module includes a class called Template, which supports simple string substitutions. The standard library http.server module can be used to build a simple web server in Python. The standard library cgi module provides support for writing CGI scripts. The standard library glob module is great for working with lists of filenames. Set the executable bit with the chmod +x command on Linux and Mac OS X. The standard library cgitb module, when enabled, lets you see CGI coding errors within your browser. Use cgitb.enable() to switch on CGI tracking in your CGI code. Use cgi.FieldStorage() to access data sent to a web server as part of a web request; the data arrives as a Python dictionary. 
you are here 4 253 CHAPTER 7 You’ve got Chapter 7 under your belt and you’ve added some key Python techiques to your toolbox. 8 mobile app development Small devices This had better be a smartphone running Honeycomb or Mr. Smooth is history!. this is a new chapter 255 going mobile The world is getting smaller Coach Kelly is continuing to use his webapp every day, but he’s having a problem with his new smartphone. I can access my timing data over WiFi on my phone, but it’s so small it’s all but impossible to read, let alone click on links or buttons. Can you take a look at it for me? Gotta dash. I’ve got another 5K to do before breakfast... There’s more than just desktop computers out there. Who knew that your users would try to interact with your webapp using something other than a desktop computer or laptop? It’s a diverse computing environment out there. 256 Chapter 8 mobile app development Coach Kelly is on Android The coach has a lovely new smartphone that’s running Google’s Android operating system. Sure enough, when you check it out, the webapp is way too small and not much use on the coach’s three-inch screen: Does anyone ha magnifying glass?ve a And don’t go telling me to do all that twofingered zoom and doubletapping thing. That just drives me crazy! Obviously, the coach needs to access his data and run his webapp on his phone…but what’s the best way to do this if not through the phone’s browser? Open your web browser on your desktop computer (or phone) and enter “Python for Android” into your favorite search engine. Make a note in the space below of the most promising site from your search results: you are here 4 257 scripting layer for android Is this the one you found? You were to open your web browser on your desktop computer (or phone) and enter “Python for Android” into your favorite search engine. You were then to make a note in the space below of the most promising site from your search results: (the home of the SL4A project.) 
Run Python on the coach’s smartphone A quick search of the Web uncovers a pleasant surprise: Python runs on Android. At least a version of Python runs on Android. A project called Scripting Layer for Android (SL4A) provides technology to let you run Python on any Android device. But there’s a catch. Ummmm...I just checked the SL4A website, and it looks like it supports Python 2.6.2, not Python 3. Phooey! Yes. SL4A ships with Python 2, not 3. Python 3, this book’s preference, is the best version of Python yet, but it achieves its greatness at the cost of a lack of backward compatibility. There’s some stuff in 3 that will never work in 2 and vice versa. Is this fact alone a show-stopper? 258 Chapter 8 mobile app development Don’t worry about Python 2 The fact that Python 2 is available for Android and you’ve learned Python 3 in this book is nothing to lose sleep over. Python 2 is still Python, and the differences between Python 2 and Python 3 are easy to manage. I’m quite happy to run Python 3 all day long... Think about your webapp for a minute. Right now, the model, view, and controller code resides on the web server, which is running Python 3. Your web browser runs here. Web Server The Internet If you move the user interaction to the smartphone, the model and some of the controller code stay on the server (and continue to run on Python 3), whereas the view code and the rest of the controller code move to the smartphone, where they need to be rewritten to run on Python 2. All of your webapp code runs here. Python 3 is still going strong... Run Python code? No problem...just so long as it’s Python 2. Web Server The Internet Half your webapp’s code runs here… …and the other half runs here. you are here 4 259 android sdk Set up your development environment Understandably, the coach won’t let you have his phone to work on until you have something that works. 
Thankfully, Google provides a cross-platform Android emulator that lets you develop for the phone as needed, even though you don’t own any hardware. Download the Software Development Kit (SDK) Let’s get started developing for Android. Visit this website and download the SDK for your computer and operating system: Do this! Follow along with these instructions to ensure you have your Android development environment correctly set up on your computer. The Android SDK website Despite what this website might look like it’s telling you, you do not need to install Eclipse to run the Android emulator. However, you do need to have a Java Runtime Environment installed. If you are unsure about this, don’t worry: the Android emulator will advise your best course of action if it spots that Java is missing. 260 Chapter 8 id SDK Note: This is how the Androtim e of the download page looks at le litt a k this writing. It might loo es: just rri different for you. No wo of the download the latest version SDK. mobile app development Configure the SDK and emulator You need to do two things to configure the SDK and emulator: add an Android Platform and create an Android Virtual Device (known as an AVD). Add an Android platform The coach is running Android 2.2 on his phone, so let’s add a 2.2 platform to mimic this setup. Open up the Android SDK and AVD Manager tool, select Available Packages, and pick 2.2 for installation. ns The Android download contaithe n Ru ”. ols a folder called “to “android” program within this folder. This is the only version of the SDK that you need. This might take a minute or tw depending on the speed of your o, network connection. Create a new Android Virtual Device (AVD) With the 2.2 platform downloaded and installed, create a new Android Virtual Device. Give your AVD a name, and select a target. Set the size of the virtual SDcard: 512 is more than enough. Click on “Create AVD”. Your AVD is a simulated Android phone. 
you are here 4 261 emulate sl4a Install and configure Android Scripting With the emulator ready, use the AVD Manager to start your 2.2 device. Click on the emulator’s browser (the little globe), surf to this web address: and tap on the “boxed” bar code near the bottom of the page: These instructions work on a “re phone, too. Just be sure to enableal” “Unknown sources” to allow for Market application downloads. non- Don’t worry if it takes you em ulator a minute or two to start. Ther em ula slower than the actual phone… tor is On the emulator, tap on the “boxed” bar code to start the SL4A download. When the download completes, select the emulator’s Menu button ➝ More ➝ Downloads, and then tap on the sl4a_r2.apk file to install the SL4A package on the emulator. When the install completes, tap Done. ht be The version available to you : mig nload the dow rry different, but don’t wo latest release. ferent than The version you see might bethedifmost recent. this. Don’t worry; yours in 262 Chapter 8 mobile app development Add Python to your SL4A installation Return to the emulator’s web browser, double-tap on the screen to zoom in, and select the Downloads tab. Double-tap again and tap the following link: python_for_android_r1.apk Tap the download link, and tap on the package name to download it. Select Menu ➝More ➝ Downloads, and tap on the newly downloaded package. The Python for Android app runs. When you are ready, tap Open -> Install to complete the installation. This downloads, extracts, and installs the Python support files for Android, which can take a few minutes to complete. When it does, Python 2.6.2 and Python for Android are installed on your emulator and ready for action. ht Again, the version you see mig the ect Sel s. be different than thi most recent file. This last bit is really important. Let’s confirm everything is working with a quick test. 
you are here 4 263 your script on android Test Python on Android Return to your emulator’s main screen and find an app called SL4A added to your list of app icons. Tap this app to display the list of Python scripts preinstalled with Python for Android. Simplty tap on any script name to execute it: The “menu” button. Take your Android emulator for a spin Here’s a four-line Python script that you can create to test your installation. Let’s call this script mydroidtest.py: Import the “android” library and create a new app object instance. Create an appropriate message and display it on screen. Be sure to set the SL4A rotation mode to automatic. Your screen might switch to landscape by default the first time you run a script. To fix this, choose Menu ➝ Preferences, scroll down to Rotation mode, and set its value to Automatic. import android app = android.Android() msg = "Hello from Head First Python on Android" app.makeToast(msg) To transfer your script to the emulator, you need to copy it to the emulator’s virtual SD card. Another program within the tools folder called adb helps with this: Issue this command at your min al window to transfer your scrter ipt to the emulator. tools/adb push mydroidtest.py /sdcard/sl4a/scripts Your script should now appear on the list of scripts available to SL4A. 264 Chapter 8 mobile app development Test Drive Let’s confirm that your Android setup is working. With the SL4A app open, simply tap on your script’s name to run it, and then click the run wheel from the menu. Click your app’s name… …then click the “run wheel.” And there’s your message. It works! Your Android emulator with SL4A is working, and it’s running your Python code. you are here 4 265 what to do? Define your app’s requirements Let’s think a little bit about what your Android app needs to do. Nothing’s really changed...you just have to get the web data onto the phone. Frank: Well…first off, the view code no longer has to generate HTML, so that makes things interesting. 
Jill: In fact, you need the web server only to supply your data on request, not all that generated HTML. Joe: Ah ha! I’ve solved it. Just send the pickle with all the data from the server to the Android phone. It can’t be all that hard, can it? Jill: Sorry, guys, that’ll cause problems. The pickle format used by Python 3 is incompatible with Python 2. You’ll certainly be able to send the pickle to the phone, but the phone’s Python won’t be able to work with the data in the pickle. Frank Frank: Darn…what are our options, then? Plain data? Jill Joe Joe: Hey, good idea: just send the data as one big string and parse it on the phone. Sounds like a workable solution, right? Jill: No, that’s a potential disaster, because you never know in what format that stringed data will arrive. You need an data interchange format, something like XML or JSON. Frank: Hmm…I’ve heard XML is a hound to work with…and it’s probably overkill for this simple app. What’s the deal with JSON? Joe: Yes, of course, I keep hearing about JSON. I think they use it in lots of different places on the Web, especially with AJAX. Frank: Oh, dear…pickle, XML, JSON, and now AJAX…I think my brain might just explode here. Jill: Never worry, you only need to know JSON. In fact, you don’t even need to worry about understanding JSON at all; you just need to know how to use it. And, guess what? JSON comes standard with Python 2 and with Python 3…and the format is compatible. So, we can use JSON on the web server and on the phone. Frank & Joe: Bonus! That’s the type of technology we like! 266 Chapter 8 mobile app development JSON Exposed This week’s interview: The Data Interchange Lowdown Head First: Hello, JSON. Thanks for agreeing to talk to us today. JSON: No problem. Always willing to play my part in whatever way I can. Head First: And what is that, exactly? JSON: Oh, I’m just one of the most widely used data interchange formats on the Web. When you need to transfer data over the Internet, you can rely on me. 
And, of course, you’ll find me everywhere. Head First: Why’s that? JSON: Well…it’s really to do with my name. The “JS” in JSON stands for “JavaScript” and the “ON” stands for “Object Notation.” See? Head First: Uh…I’m not quite with you. JSON: I’m JavaScript’s object notation, which means I’m everywhere. Head First: Sorry, but you’ve completely lost me. JSON: The first two letters are the key ones: I’m a JavaScript standard, which means you’ll find me everywhere JavaScript is…which means I’m in every major web browser on the planet. Head First: What’s that got to do with Python? JSON: That’s where the other two letters come into play. Because I was initially designed to allow JavaScript data objects to be transferred from one JavaScript program to another, I’ve been extended to allow objects to be transferred regardless of what programming language is used to create the data. By using the JSON library provided by your favorite programming language, you can create data that is interchangeable. If you can read a JSON data stream, you can recreate data as you see fit. Head First: So I could take an object in, say, Python, use JSON to convert it to JSON’s object notation, and then send the converted data to another computer running a program written in C#? JSON: And as long as C# has a JSON library, you can recreate the Python data as C# data. Neat, eh? Head First: Yes, that sounds interesting…only [winks] why would anyone in their right mind want to program in C#? JSON: [laughs] Oh, come on now: be nice. There’s plenty of reasons to use different programming languages for different reasons. Head First: Which goes some of the way to explain why we have so many great programming titles, like Head First C#, Head First Java, Head First PHP and MySQL, Head First Rails, and Head First JavaScript. JSON: Was that a shameless, self-serving plug? Head First: You know something…I think it might well have been! [laughs]. JSON: [laughs] Yes, it pays to advertise. 
Head First: And to share data, right? JSON: Yes! And that’s exactly my point: when you need a language-neutral data interchange format that is easy to work with, it’s hard to pass me by. Head First: But how can you be “language neutral” when you have JavaScript in your name? JSON: Oh, that’s just my name. It’s what they called me when the only language I supported was JavaScript, and it kinda stuck. Head First: So they should really call you something else, then? JSON: Yes, but “WorksWithEveryProgramming LanguageUnderTheSunIncludingPythonObject Notation” doesn’t have quite the same ring to it! you are here 4 267 leaving pickle on the plate This is NOT cool... I spent all that time learning to use pickles and now you’re abandoning them in favor of this “JSON” thing. You’ve got to be joking...? You are not exactly “abandoning” pickle. The JSON technology is a better fit here for a number of reasons. First of all, it’s a text-based format, so it fits better with the way the Web works. Second, it’s a standard that works the same on Python 2 and Python 3, so there are no compatibility issues. And third, because JSON is language-neutral, you open up the possibility of other web tools written in other programming languages interacting with your server. If you use pickle here, you lose all this. 268 Chapter 8 mobile app development JSON is an established web standard that comes preinstalled with Python 2 and Python 3. The JSON API is not that much different to the one used by pickle: >>> import json Import the JSON library. >>> names = ['John', ['Johnny', 'Jack'], 'Michael', ['Mike', 'Mikey', 'Mick']] Create a list of lists. >>> names ['John', ['Johnny', 'Jack'], 'Michael', ['Mike', 'Mikey', 'Mick']] >>> to_transfer = json.dumps(names) Transform the Python list-of-lists into a JSON list of lists. 
>>> to_transfer '["John", ["Johnny", "Jack"], "Michael", ["Mike", "Mikey", "Mick"]]' >>> from_transfer = json.loads(to_transfer) >>> from_transfer The format is similar, but different. Transform the JSON list of lists back into one that Python understands. ['John', ['Johnny', 'Jack'], 'Michael', ['Mike', 'Mikey', 'Mick']] >>> names The new data is exactly the same as the original list of lists. ['John', ['Johnny', 'Jack'], 'Michael', ['Mike', 'Mikey', 'Mick']] Add a new function to the athletemodel module that, when called, returns the list of athlete names as a string. Call the new function get_names_from_store(). you are here 4   269 athletemodel function You were to add a new function to the athletemodel module that, when called, returns the list of athlete names as a string. You were to call the new function get_names_from_store(). def get_names_from_store(): Extract a list of athlete names from the data. athletes = get_from_store() Get all the data from the pickle. response = [athletes[each_ath].name for each_ath in athletes] return(response) Return the list to the caller. So...rather than running a CGI script to create an HTML web page, you want me to deliver just the data, right? That’s OK. Not a problem—just be sure to tell me which script to run... Web Server 270 Chapter 8 
you are here 4 271 json-generating cgi script With your new function written and added to the athletemodel module, you were to create a new CGI script that, when called, returns the data from the get_names_from_store() function to the web requester as a JSON data stream. You were to call your new script cgi-bin/generate_names.py. Don’t forget this “magic” first line if you’re running on Linux or Mac OS X. #! /usr/local/bin/python3 import json Do your imports. import athletemodel import yate Start with the appropriate “Content-type”: line. names = athletemodel.get_names_from_store() print(yate.start_response('application/json')) print(json.dumps(sorted(names))) Take care testing your JSON-generating CGI code. The behavior you see when testing your JSONgenerating CGI script will differ depending on the web browser you are using. For instance, Firefox might attempt to download the generated data as opposed to display it on screen. 272 Chapter 8 Get the data from the model. Sort “names”, then convert to JSON and send to STDOUT. mobile app development Test Drive If it is not already running, start your web server and be sure to set the executable bit with the chmod +x cgi-bin/generate_names.py command (if on Linux or Mac OS X). When you’re ready, grab your favorite web browser and take your new CGI for a spin. I in your Enter the web address of the CG browser’s location bar. Hey! It looks like the coach added two new athletes. has File Edit Window Help GeneratingJSON The web server’s logging information confirms that the CGI executed. 
$ python3 simple_httpd.py Starting simple_httpd on port: 8080 localhost - - [18/Sep/2010 06:31:29] localhost - - [18/Sep/2010 06:35:29] localhost - - [18/Sep/2010 06:35:35] localhost - - [18/Sep/2010 06:35:38] localhost - - [18/Sep/2010 06:35:40] localhost - - [18/Sep/2010 06:35:49] "GET /cgi-bin/generate_names.py HTTP/1.1" 200 "GET /cgi-bin/generate_list.py HTTP/1.1" 200 "POST /cgi-bin/generate_timing_data.py HTTP/1.1" 200 "GET /cgi-bin/generate_list.py HTTP/1.1" 200 "GET /index.html HTTP/1.1" 200 "GET /cgi-bin/generate_names.py HTTP/1.1" 200 - That worked! Now all you have to do is arrange for the Android emulator to request the data within a Python script and display the list of names on the smartphone’s screen. How hard can that be? you are here 4 273 two apis The SL4A Android API The SL4A technology provides a high-level API to the low-level Android API, and SL4A’s API is documented in the online API reference: Recall the code from earlier, which demonstrated a minimal Android SL4A app: Import the “android” library and create a new app object instance. Create an appropriate message and display it on screen. import android app = android.Android() msg = "Hello from Head First Python on Android" app.makeToast(msg) Six calls to the Android API let you create a list of selectable items in a dialog, together with positive and negative buttons, which are used to indicate the selection your user made. Note how each of the calls to the Android “dialog” API results in something appearing on screen. Always start with an import. import android Create an Android app object. app = android.Android() app.dialogCreateAlert("Select an athlete:") app.dialogSetSingleChoiceItems(['Mikey', 'Sarah', 'James', 'Julie']) app.dialogSetPositiveButtonText("Select") app.dialogSetNegativeButtonText("Quit") app.dialogShow() resp = app.dialogGetResponse().result Display your dialog on the phone. 
274 Chapter 8 se Wait for a resp.on from your user mobile app development Android Code Magnets is to rearrange the magnets to complete the program. import android import json import time All of this program’s messages are in one place. The name of the CGI script to run on the web server from urllib import urlencode Do the usual imports…these ones pull in web client functionality.)) else: page = urlopen(url) return(page.read().decode("utf8")) This code’s a mess…can you fix it? Change this to the web address that’s running your web server. This function takes both a web address (url) and some optional data (post_data) and sends a web request to your web server. The web response is returned to the caller. get_names_cgi))) athlete_names = sorted(json.loads(send_to_server(web_server + resp = app. status_update(qu dialogGetR it_msg) esponse(). result app.dialogShow() app.dialogCreateAlert(list_title) ): e(msg, how_long=2 def status_updat g) app.makeToast(ms ong) time.sleep(how_l app.dialogSetNegativeButtonText('Quit') status_update(he llo_msg) app.dialogSetPositiveBut tonText('Select') app = android.An droid() app.dialogSetSingleChoiceItems(athlete_names) you are here 4 275 android query Android Code Magnets Solution was to rearrange the magnets to complete the program.)) Create an Android app object. else: page = urlopen(url) return(page.read().decode("utf8")) app = android.Android() def status_update(msg, how_long=2): app.makeToast(msg) Say “hello”. This is a little function for displaying short messages on the phone. 
time.sleep(how_long) status_update(hello_msg) get_names_cgi))) athlete_names = sorted(json.loads(send_to_server(web_server + app.dialogCreateAlert(list_title) app.dialogSetSingleChoiceItems(athlete_names) app.dialogSetPositiveButtonText('Select') app.dialogSetNegativeButtonText('Quit') app.dialogShow() resp = app.dialogGetResponse().result status_update(quit_msg) 276 Chapter 8 Send the web request to your server, then turn the JSON response into a sorted list. Say “bye bye.” Create a two-buttoned dialog from the list of athlete names. Wait for the user to tap a ton , then assign the result to “rebut sp”. mobile app development Test Drive Recall that (for now) your Android Python scripts run within the emulator, not within IDLE. So use the tools/adb program to copy your program to the emulator. Call your program coachapp.py. When the code is copied over, start SL4A on your emulator, and then tap your script’s name. Tap your app’s name, and then tap the “run wheel." And there they are…Coach Kelly’s athletes. This is looking really good! Your app has communicated with your web server, requested and received the list of athlete names, and displayed the list on your emulator. If you app doesn’t run, don’t panic. Check your code for typos. Run your app again in the Python terminal by tapping on the little terminal icon to the left of the “run wheel” within SL4A. If your code raises an error, you’ll see any messages on the emulator’s screen, which should give you a good idea of what went wrong. you are here 4 277 positive or negative Select from a list on Android When your user taps on a button, the “result” of the call to dialogGetResponse() is set to positive if the first button is tapped or negative if the second button is tapped. In your code, you can check the value of resp, which is a dictionary, and the which key is set to either positive or negative. A subsequent call to dialogGetSelectedItems() returns the index value of the selected list item. 
Index item 0 Index item 1 Index item 2 Index item 3 Index item 4 The “positive” button The “negative” button So…if the positive button is tapped, you can index into the list of athlete names to see which athlete was selected from the displayed list. The selected name can then be sent to the web server to request the rest of the athlete’s data using the send_ to_server() function. You can use this behavior in the next version of your code. 278 Chapter 8 mobile app development 1 Assume that you have a CGI script called cgi-bin/ generate_data.py, which, when called, requests the data for a named athlete from the server. Provide the code (which includes a call to thensend_to_ server() function) to implement this functionality: 2 Additionally, write the code required to display the list of times returned from the server within an Android dialog. Hints: Use the dialogSetItems() method from the Android API to add a list of items to a dialog. Also, remember that the data arriving over the Internet will be formatted using JSON. you are here 4   279 ask for an athlete 1 You were to assume that you have a CGI script called cgi-bin/ generate_data.py, which, when called requests the data for a named athlete from the server. Provide the name of You were to provide the code (which includes a call to the send_to_server() function) to implement this the CGI to run. functionality: get_data_cgi = '/cgi-bin/generate_data.py' Send the request send_to_server(web_server + get_data_cgi, {'which_athlete': which_athlete}) to the web server, together with the Include the data. athlete name. 2 Additionally, you were to write the code required to display the list of times returned from the server within an Android dialog: Wh ich button was pressed? if resp['which'] in ('positive'): When your user ” ive sit “po the s tap selected_athlete = app.dialogGetSelectedItems().result[0] button…work out the index value chosen. 
which_athlete = athlete_names[selected_athlete] Look up the athlete’s name using the index value. athlete = json.loads(send_to_server(web_server + get_data_cgi, Dynamically create the dialog’s title. {'which_athlete': which_athlete})) athlete_title = which_athlete + ' top 3 times:' app.dialogCreateAlert(athlete_title) The user needs to see only the data this time, so you need to use “dialogSetItems()”. Wait for a tap from the user. 280 Chapter 8 app.dialogSetItems(athlete['Top3']) app.dialogSetPositiveButtonText('OK’) app.dialogShow() resp = app.dialogGetResponse().result Set the single button’s text. The index value is in the first element of the list of results returned from the dialog. Send a new web request to the server to fetch the athlete’s data. mobile app development The athlete’s data CGI script Here’s the code for the cgi-bin/generate_data.py CGI script, which takes a web request and returns the indicated athlete’s data from the model: #! /usr/local/bin/python3 import cgi import json import athletemodel import yate Process the data sent with the request and extract the athlete’s name. Get all the data from the model. athletes = athletemodel.get_from_store() form_data = cgi.FieldStorage() athlete_name = form_data['which_athlete'].value print(yate.start_response('application/json')) Start a web response, with JSON as the data type. print(json.dumps(athletes[athlete_name])) The complete Android app, so far Include the indicated athlete’s data in the web . response, formatted by JSON You’ve made quite a few changes to your program at this stage. Before you test it on the Android emulator, take a moment to look at your code in its entirety:' get_data_cgi The rest of your code is on the following page. 
= '/cgi-bin/generate_data.py' you are here 4 281 app code, continued def send_to_server(url, post_data=None): if post_data: page = urlopen(url, urlencode(post_data)) else: page = urlopen(url) return(page.read().decode("utf8")) app = android.Android() def status_update(msg, how_long=2): app.makeToast(msg) time.sleep(how_long) 282   Chapter 8 mobile app development Test Drive Let’s give the latest version of your app a go. Copy the app to your emulator, and put the new CGI script in your cgi-bin folder on your web server (remember to set the executable bit, if needed). What happens when you run your latest app using the emulator’s Python shell as opposed to the “run wheel”? You are dumped into the Python shell with a rather nasty error message. You’re getting a “TypeError”. After reading the error message, click “Yes” to return to the SL4A script listing. Yikes! Your code has a TypeError, which is crashing your app when you try to display the selected athlete’s timing data. Why do you think this is happening? you are here 4 283 debugging data The data appears to have changed type Look at the CGI code...it gets the data from the model and sends it to the web browser... ...ummm, I see. But somehow, the data that arrives isn’t an AthleteList. Let’s add a debugging line of code to your CGI script to try and determine what’s going on. Recall that the CGI mechanism captures any output your script sends to standard output by default, so let’s use code like this to send your debugging message to the web server’s console, which is displaying on standard error: Import “sys” from the standard library. import sys print(json.dumps(athletes[athlete_name]), file=sys.stderr) Redirect the output from “print()” to “stderr”, rather than the default, which is “stdout”. Run your app again and, of course, it still crashes with a TypeError. However, if you check your web server’s console screen, you’ll see that the data being sent as the JSON web response is clearly visible. 
Notice anything? File Edit Window Help JustWhatsInTheData This is a list of athlete timing values…but where’s the name and DOB values? 284 Chapter 8 $ python3 simple_httpd.py Starting simple_httpd on port: 8080 192.168.1.33 - - [18/Sep/2010 17:40:04] "GET /cgi-bin/generate_names.py HTTP/1.1" 200 192.168.1.33 - - [18/Sep/2010 17:40:08] "POST /cgi-bin/generate_data.py HTTP/1.1" 200 ["2-44", "3:01", "2.44", "2.55", "2.51", "2:41", "2:41", "3:00", "2-32", "2.11", "2:26"] mobile app development JSON can’t handle your custom datatypes Unlike pickle, which is smart enough to pickle your custom classes, the JSON library that comes with Python isn’t. This means that the standard library’s JSON library can work with Python’s built-in types, but not with your AthleteList objects. The solution to this problem is straightforward: add a method to your AthleteList class to convert your data into a dictionary, and send that back to the app. Because JSON supports Python’s dictionary, this should work. Q: A: What’s the purpose of this @property thing again? The @property decorator lets you specify that a method is to be presented to users of your class as if it were an attribute. If you think about things, your to_dict() method doesn’t change the state of your object’s data in any way: it merely exists to return the object’s attribute data as a dictionary. So, although to_dict() is a method, it behaves more like an attribute, and using the @property decorator lets you indicate this. Users of your class (that is, other programmers) don’t need to know that when they access the to_dict attribute they are in fact running a method. All they see is a unified interface: attributes access your class’s data, while methods manipulate it. you are here 4 285 data to dictionary. Decorate your new method with “@property”. @property def as_dict(self): Create a new method. return({‘Name’: self.name, ‘DOB’: self.dob, ‘Top3’: self.top3}) Return a dictionary of the object’s data attributes. 
Did you remember to use “self”? Do this! As well as updating your AthleteList class code, be sure to change cgi-bin/ generate-data.py to return a dictionary, rather than the object instance, when servicing its web request. While you’re making changes, adjust the coachapp.py app code to include the athlete’s name and DOB values in the second dialog’s title. 286 Chapter 8 mobile app development Test Drive With your changes applied to AthleteList.py, cgi-bin/generate_data.py and coachapp.py, use the adb tool to copy the latest version of your app to the emulator. Let’s see how things work now. Here’s the code that your app uses in response to an athlete selection. Tap! Success. Your app displays the selected athlete’s top three times on screen. How cool is that? you are here 4 287 file transfer over wifi Run your app on a real phone Now that your app is running successfully on your emulator, it’s time to try it on a real phone. This is where things get interesting. There are many options when it comes to copying your code to a real device: • Use file transfer over Bluetooth. • Use file transfer with a USB connection. • Use the Android SDK’s adb tool with USB. • Use a file transfer tool over WiFi. Unfortunately, which technique to use (and which work) depends very much on your phone. At Head First Labs, we’ve had the greatest and most consistent success with the last option: use a file transfer tool over WiFi. These instructions do not work on the emulator. The Android emulator does not currently support Google’s Android Market, which you’ll need access to use when following along with the instructions on these pages. Step 1: Prepare your computer To transfer files securely between your Android phone and your computer, enable SSH file transfers by running an SSH server on your computer. How you do this depends on the operating system you are running: • Windows: download one of the many free SSH servers. • Mac OS X: enable remote logins. 
• Linux: install and enable OpenSSH Server. Step 2: Install AndFTP on your Android phone Use the Android Market on your phone to find and install the AndFTP app. This excellent tool lets you transfer files to and from your Android phone over FTP, SFTP, and FTPS. To use it with the SSH server running on your computer, you’ll want to select SFTP as the file transfer protocol within the app, because AndFTP defaults to using the FTP protocol. Let’s take a look at what’s involved. 288 Chapter 8 The AndFTP app is one of our faves. mobile app development Configure AndFTP With AndFTP running on your phone, configure it to connect to your computer (Hostname) using SFTP as the transfer protocol (Type). Leave the Port, Username, Password, and Remote dir entries as they are, but change the Local dir entry to /sdcard/sl4a/scripts. Change this entry to be the web name or address of your SSH server. Set this to “/sdcard/sl4a/scripts ” which ensures files transferred from your server are added to SL4A. Be sure to set this to “SFTP”. The value for “Port” should change to 22. Be sure to tap “Save”. With the connection set up, tap AndFTP’s Connect button to establish a connection to your SSH server, entering your Username and Password when prompted. With the connection to the server established, navigate to the server folder containing the file(s) you want to transfer to the phone, mark the files for download, and tap the Download button. When the download completes, click Disconnect to terminate the connection between the phone and your computer. If you transferred a Python program, it should now be added to the list of scripts within SL4A. Your app is ready! It’s time to let Coach Kelly take a look. you are here 4 289 app complete (almost) The coach is thrilled with his app That’s looking great! I knew you could do it... now all I need is a way to add a new timing value directly from my phone. That would be awesome! The coach’s app running on the coach’s phone. 
Welcome to the future! You’ve delivered a solution that automates interaction with your website while providing a modern interface on an Android phone. Your app allows your users to access web data directly on their mobile device. The fact that your server code runs on Python 3 and your Android client code runs on Python 2 makes very little difference: it’s all just Python code, after all. All that’s left to do is write some code to satisfy Coach Kelly’s latest request, and you’ll get to that in the next chapter. This is great work. 290 Chapter 8 mobile app development Your Python Toolbox Python Lingo • “Python 2” - the previous release of Python, which has compatibility “issues” with Python 3 (and which are not worth getting worked up over). Android Lingo • “SL4A” - the Scripting Layer for Android lets you run Python on your Android device. • “AVD” - an Android Virtual Device which lets you emulate your Android device on your computer. CHAPTER 8 You’ve got Chapter 8 under your belt and you’ve added some key Python techniques to your toolbox. The json library module lets you convert Python’s built-in types to the text-based JSON data interchange format. Use json.dumps() to create a stringed version of a Python type. Use json.loads() to create a Python type from a JSON string. Data sent using JSON needs to have its Content-Type: set to application/json. The urllib and urllib2 library modules (both available in Python 2) can be used to send encoded data from a program to a web server (using the urlencode() and urlopen() functions). The sys module provides the sys.stdin, sys.stdout and sys.stderr input/output streams. you are here 4 291 9 manage your data Handling input Input this, input that...that’s all I ever hear...input, input, input, input...all day long. It’s enough to drive me mad! this is a new chapter 293 add data anywhere Your athlete times app has gone national We love what you did for Coach Kelly, but it would be great if we could add times for an athlete no matter where we are. Is this possible? The National Underage Athletics Committee (NUAC) took one look at your Android app and realized it’s just what they need…almost. There are many ways to improve your webapp, but for now, let’s concentrate on the committee’s most pressing need: adding a new time value to an existing athlete’s data set. Adding new data to text files isn’t going to work: there are just too many coaches around the country adding data. The committee wants something that’s user friendly from any web browser or Android phone. Can you help? 
this is a new chapter 293 add data anywhere Your athlete times app has gone national We love what you did for Coach Kelly, but it would be great if we could add times for an athlete no matter where we are. Is this possible? The National Underage Athletics Committee (NUAC) took one look at your Android app and realized it’s just what they need…almost. There are many ways to improve your webapp, but for now, let’s concentrate on the committee’s most pressing need: adding a new time value to an existing athlete’s data set. Adding new data to text files isn’t going to work: there are just too many coaches around the country adding data. The committee wants something that’s user friendly from any web browser or Android phone. Can you help? 294 Chapter 9 manage your data Use a form or dialog to accept input Simply use the standard <FORM> and <INPUT> tags within your HTML web page to get input from your users... ...or if you are on your phone, a call to the “dialogGetInput()” function will do the trick. On the Web, your user interacts with your web form and enters data. When she presses the submit button, the web browser gathers up all of the form’s data and sends it to the web server as part of the web request. On your Android phone, you can use the dialogGetInput() method to get input from the user, then mimic the behavior of the web form’s submit button in code. In fact, you’ve done this already: check out this line of code from your coachapp.py app, which sends the selected athlete name to your web server: Here’s where the data is included with the web request. athlete = json.loads(send_to_server(web_server + get_data_cgi, {'which_athlete': which_athlete})) you are here 4 295 form action Create an HTML form template Let’s extend yate.py to support the creation of a HTML form. Take a look a this simple form, together with the HTML markup used to produce it. The name of the CGI script to send the form’s data to. 
<form action="cgi-bin/process-time.py" method="POST"> Enter a timing value: <input type="Text" name="TimeValue" size=40> <br /> Click the “Send” button to submit the form’s data to your web server. <input type="Submit" value="Send"> </form> When your user clicks on the Send button, any data in the input area is sent to the web server as part of the web request. On your web server, you can access the CGI data using the facilities provided by the standard library’s cgi module: import cgi Get the data sent from the form as part of the web request. form = cgi.FieldStorage() timing_value = form["TimeValue"].value Access the value associated with the form’s data. The cgi module converts the data associated with the web request into a dictionary-like object that you can then query to extract what you need. 296 Chapter 9 the “TimeValue” key from manage your data Let’s turn the HTML form from the previous page into a template within the yate.py module. 1 Start by creating a new template called templates/form.html that allows you to parameterize the form’s CGI script name, method, input tags, and submit button text: 2 With the template ready, write the code for two functions you intend to add to yate.py. The first, called create_inputs(), takes a list of one of more strings and creates HTML <INPUT> tags for each string, similar to the one that accepts TimeValue on the previous page. The second, called do_form(), uses the template from Part 1 of this exercise together with the create_ inputs() function to generate a HTML form. Given def create_inputs(inputs_list): Return the generated tags to the caller. return(html_inputs) a list of <INPUT> tag names. The name of the CGI script a list of <INPUT> tag names are requirand ed arguments. def do_form(name, the_inputs, method="POST", text="Submit"): The HTTP method and texte to the “Submit” button hav sensible default values. Substitute the arguments and generated <INPUT> tags into the template to create the form. 
return(form.substitute(cgi_name=name, http_method=method, list_of_inputs=inputs, submit_text=text)) you are here 4 297 html form template You were to turn the HTML form into a template within the yate.py module. 1 You were to start by creating a new template called templates/form.html that allows you to parameterize the form’s CGI script name, method, input tags, and submit button text. The CGI script’s name and associated HTTP method are parameterized. 2 <form action=$cgi_name method=$http_method> Enter a timing value: $list_of_inputs<br /> <input type="Submit" value=$submit_text></form> The list of <INPUT> tags and the submit button’s text is also parameterized. With the template ready, you were to write the code for two functions to add to yate.py. The first, called create_inputs(), takes a list of one of more strings and creates HTML <INPUT> tags for each string, similar to the one that accepts TimeValue. The second, called do_form(), uses the template from Part 1 of this exercise together with the create_ inputs() function to generate a HTML form: def create_inputs(inputs_list): html_inputs = '' Take each name and create an <INPUT> tag. for each_input in inputs_list: This “continuation” character lets you split a long line of code over multiple lines. html_inputs = html_inputs + ‘<input type= “Text" name="' + \ each_input + '" size=40>' return(html_inputs) def do_form(name, the_inputs, method="POST", text="Submit"): Grab the template from your disk. Create a template form. with open(‘templates/form.html') as formf: form_text = formf.read() inputs = create_inputs(the_inputs) form = Template(form_text) Create the list of <INPUT> tag s. return(form.substitute(cgi_name=name, http_method=method, list_of_inputs=inputs, submit_text=text)) 298 Chapter 9 manage your data Test Drive Here’s the code to a CGI script called cgi-bin/test-form.py, which generates the HTML form from earlier. As you can see, there’s nothing to it. #! 
/usr/local/bin/python3 import yate Always start with a CGI response. Dynamically create the form, ed. supplying any arguments as requir print(yate.start_response('text/html')) print(yate.do_form('add_timing_data.py', ['TimeValue'], text='Send')) Set the executable bit (if required on your OS) using chmod + x test_form.py , and then use your browser to confirm that your HTML form-generating code is working. I script into Enter the URL for the CGbar your web browser’s location . Use your browser’s “View Source” menu option to confirm that the generated form is exactly what you need. The generated HTML form appears within the browser’s window. Great. You’ve extended yate.py to support the creation of a simple data entry form. Now all you need to do is to decide what happens once the data arrives on your server. you are here 4 299 data delivery The data is delivered to your CGI script In addition to running your webapp, the web server also arranges to deliver any submitted form data to your waiting CGI script. Python’s cgi library converts the data into a dictionary and, as you already know, provides you with convenient access to the submitted data: All of your form’s data has been added to the “form” dictionary. import cgi form = cgi.FieldStorage() Additional information about the web request is also available to you via the web server’s environment. Typically, you won’t need to access or use this data directly. However, occasionally, it can be useful to report on some of it. Here is some code that takes advantage of Python’s built-in support for querying your CGI script’s environment using the os library, assuming the environment values have been set by a friendly web server. Note that the data in the enviroment is available to your code as a dictionary. import os import time import sys Be sure to include the “os” library in your list of imports. 
addr = os.environ['REMOTE_ADDR'] host = os.environ['REMOTE_HOST'] method = os.environ['REQUEST_METHOD'] cur_time = time.asctime(time.localtime()) Display the queried data on standard error. les Query three environment variab les. iab var to ues val ir and assign the Get the current time. print(host + ", " + addr + ", " + cur_time + ": " + method, file=sys.stderr) Let’s exploit both code snippets on this page to log the data sent from a form to your web server’s console. When you are convinced that the data is arriving at your web server intact, you can extend your code to store the received data in your model. Let’s write a CGI to display your form’s data. 300 Chapter 9 manage your data CGI Magnets. Rearrange the magnets to produce a working program. Don’t forget this line if you are running on Max OS X or Linux. #! /usr/local/bin/python3 There’s not much of a response for now…so just send back plain text to the waiting web browser. import cgi import os import time import sys import yate There’s really nothing new here. print(yate.start_response('text/plain')) addr = os.environ['REMOTE_ADDR'] host = os.environ['REMOTE_HOST'] method = os.environ['REQUEST_METHOD'] cur_time = time.asctime(time.localtime()) print(host + ", " + addr + ", " + cur_time + ": " + method + ": ", end='', file=sys.stderr) print('OK.') end=' ', form = cgi.FieldStorage() print(each_form_item + '->' + form[each_form_item].value, print(file=sys.stderr) for each_form_item in form.keys(): file=sys.s tderr) you are here 4 301 add timing data CGI Magnets Solution. You were to rearrange the magnets to produce a working program. #! 
/usr/local/bin/python3 import cgi import os import time import sys import yate print(yate.start_response('text/plain')) addr = os.environ['REMOTE_ADDR'] host = os.environ['REMOTE_HOST'] method = os.environ['REQUEST_METHOD'] cur_time = time.asctime(time.localtime()) print(host + ", " + addr + ", " + cur_time + ": " + method + ": ", end='', file=sys.stderr) form = cgi.FieldStorage() for each_form_item in form.keys(): Ensure that this “print()” function does NOT take a newline. print(each_form_item + '->' + form[each_form_item].value, end=' ', print(file=sys.stderr) print('OK.') 302 Chapter 9 file=sys.stderr) Take a newline on standard error. manage your data Test Drive Let’s use your form-generating CGI script from earlier to try out add_timing_data.py. As you enter data in the form and press the Send button, watch what happens on the web server’s console. Enter some data into your web form. The web browser displays a very basic response. All is “OK”. File Edit Window Help DisplayingPOSTs $ The web server’s logging screen displays the data that arrived, as well as the name associated with it. That worked perfectly. The data entered into the form is delivered to your CGI script on the your server. Your next challenge is to provide the same user input experience on an Android phone. you are here 4 303 android data entry Ask for input on your Android phone When you ask for user input on Android, the dialog that your user sees looks something like this example, which asks your user to confirm or change the web address and port for your server. The input dialog has a title. There’s some additional descriptive text (or message). An “OK” button confirms the entry. A space for data entry, when tapped, bring ups the “soft” keyboard. A “Cancel” button lets you change your mind. A single Android call creates this interface for you using the dialogGetInput() method: title = 'Which server should I use?' 
message = "Please confirm the server address/name to use for your athlete's timing data:" data = '' resp = app.dialogGetInput(title, message, data).result Pressing the Ok button sets resp to the data entered into the input area. Pressing the Cancel button sets resp to None, which is Python’s internal null-value. Let’s create some Android data-entry dialogs. 304 Chapter 9 The result of your user’s interaction with the dialog is assigned to “resp”. manage your data Let is provided for you. Your job is to complete the program. Write the code you think you need under this code, and call your program get2inputsapp.py: import android from urllib import urlencode from urllib2 import urlopen There’s nothing new here…you’ve seen all of this code before.: page = urlopen(url) return(page.read().decode("utf8")) you are here 4 305 user interaction You were was provided for you. Your job was to complete the program by writing the code you think you need under this code and call your program get2inputsapp.py. import android from urllib import urlencode from urllib2 import urlopen: The first dialog asks your user to confirm the web address and port to use. page = urlopen(url) return(page.read().decode("utf8")) resp = app.dialogGetInput(server_title, server_msg, web_server).result if resp is not None: web_server = resp If your user did NOT tap on the Cancel button… …the second dialog asks for a new timing value. resp = app.dialogGetInput(timing_title, timing_msg).result if resp is not None: new_time = resp Again, if your user did NOT tap on the Cancel button… …the app sends the data to the web server. send_to_server(web_server + add_time_cgi, {‘TimingValue’: new_time}) 306 Chapter 9 manage your data Test Drive Let’s copy get2inputsapp.py to the emulator using the adb tool: tools/adb push get2inputsapp.py /sdcard/sl4a/scripts The get2inputsapp.py app appears on the list of scripts within SL4A. 
Go ahead and give it a tap: Your new app starts, and you can edit the web server address and port. When you click on the input area, Android’s “soft” keyboard pops up. Enter a new timing value, and then tap “Ok”. File Edit Window Help InputsFromAndroid $ 192.168.1.33 - - [21/Sep/2010 20:50:30] "POST /cgi-bin/add_timing_data.py HTTP/1.1" 200 localhost, 192.168.1.33, Tue Sep 21 20:50:30 2010: POST: TimingValue->2:56 The web server’s log confirms the data was sent from your emulator. Perfect. That’s working, too. Regardless of where your data originates—on the Web or a phone—your app can send it to your web server. you are here 4 307 update which dataset? It’s time to update your server data Yikes! I think there’s a problem here... your server data is in two places: within your pickle and in the NUAC’s text files. The question is: which one do you update? Which of your two datasets should you update? If you update the pickle, the next time the put_to_store() function runs, your most recent update will vanish as put_ to_store() recreates the pickle from the data in the text files. That’s not good. If you update the appropriate athlete’s text file, the data in the pickle will be stale until put_to_store() runs again. If another process calls the get_from_store() function in the meantime, the update to the pickle might not have been applied and will appear to be missing for anyone reading your data. That’s not good, either. Oh, look, how lovely: I have a new timing value to add to the system. Who’s going first? No, no, no, he’s busy. Just update me! Update me, then I’ll update him. Web Server text file 308 Chapter 9 Your pickle file manage your data Avoid race conditions Of course...I could write to the text file and then immediately call “put_to_store()” to update the pickle, right? Yes, that’s one possible solution, but it’s a poor one. 
You might think it highly unlikely…but it is possible for another process to call the get_from_store() function between the text file update and the pickle recreation, resulting in a short period of data inconsistency. These types of situations are known as race conditions and are hard to debug when they occur. It’s best to keep them from ever happening if you can. The basic problem here is that you have one update with one piece of data that results in two file interactions. If nothing else, that’s just wasteful. Hey, thanks for the update! Your up-to-date text file What update?!? It’s been all quiet over here... temporarily inconsistent pickle file you are here 4 309 avoid race conditions You need a better data storage mechanism Your initial text files and pickle design is fine when only one user is accessing the data. However, now that more than one person can access the data at any time, and from anywhere, your design is in need of improvement. Above all, you need to avoid that race condition. This is very upsetting...I appear to be missing an update. Listen, bud, it’s not my fault...until someone, somewhere runs the “put_to_store()” function without someone, somewhere else running the “get_from_store()” function, you’ll have to do without that data update. I’m not a miracle worker...I just do what I’m told! inconsistent and upset pickle file Q: Surely you should have thought about this problem long ago and designed this “properly” from the start? A: That’s certainly one way to look at things, and hindsight is always a wonderful thing! However, programs have a tendency to start out small, then grow to provide more features, which can introduce complexity. Recall that the coach’s app started life as a simple “standalone” text-based program, which was then moved to the Web to support multiple users. Part of the app was then redeveloped for use on an Android phone. And yes, if we’d known all of this ahead of time, we might have designed it differently.
310 Chapter 9 Web Server Q: A: So I’m facing a rewrite of large chunks of my code? Let’s see. You did build your program using the MVC pattern, and you are using Python, so those two facts should take the sting out of any potential rewrite, assuming a rewrite is what’s required here. manage your data Wouldn't it be dreamy if I could put my data in only one place and support all my app’s requirements? But I know it's just a fantasy... you are here 4 311 which database management system? Use a database management system You need to move away from your text file and pickle combination and use a real database management system. You have plenty of choices here… Back ends based on Oracle’s technology are rock solid. If you want rock-solid without the corporate bloat, it has to be PostgreSQL. MS SQL Server is used everywhere! I really like MySQL and MariaDB. All of these fine technologies will work, but they are overkill for your app’s data requirements. And besides, some of these are way beyond the NUAC’s budget, let alone their ability to set up, run, and maintain such a system. What you need is something that’s effectively hidden from the NUAC yet lets you take advantage of what a database management system has to offer. If only such a technology existed… 312 Chapter 9 manage your data Python includes SQLite Python 3 comes preinstalled with Release 3 of SQLite, a full-featured, zero-config, SQL-based data management system. To use SQLite, simply import the sqlite3 library and use Python’s standardized database API to program it. There’s really nothing to it: no database setup, no config, and no ongoing maintenance. With your data stored in SQLite, rewrite your webapp’s model code to use SQL to access, manipulate, and query your data. You can plan to move to one of the bigger database systems if and when your application needs dictate such a move. SQLite sounds perfect for the NUAC’s data, doesn’t it?
Geek Bits The material in this chapter assumes you are comfortable with SQL database technology. If you are new to SQL (or just need a quick refresher), take a look at Head First SQL, which comes highly recommended. [Note from Marketing: Available anywhere good books are sold and to anyone with a valid credit card.] you are here 4 313 database connection process Exploit Python’s database API The Python Database API provides a standard mechanism for programming a wide variety of database management systems, including SQLite. The process you follow in your code is the same regardless of which back-end database you’re using. Connect Establish a connection to your chosen database back end. Create Create a cursor to communicate through the connection to your data. Interact Using the cursor, manipulate your data using SQL. Commit Tell your connection to apply all of your SQL manipulations to your data and make them permanent. Poof! Close Destroy the connection to the database back end. 314 Chapter 9 Rollback Tell your connection to abort your SQL manipulations, returning your data to the state it was in before your interactions started. When you close your connection, your cursor is destroyed, too.
You can include code like this in your program. It is also possible to interact with you SQLite data from within IDLE’s shell. Whichever option you choose, you are interacting with your database using Python. It’s great that you can use a database to hold your data. But what schema should you use? Should you use one table, or do you need more? What data items go where? How will you design your database? Let’s start working on the answers to these questions. you are here 4 315 design your database A little database design goes a long way Let’s consider how the NUAC’s data is currently stored within your pickle. Each athlete’s data is an AthleteList object instance, which is associated with the athlete’s name in a dictionary. The entire dictionary is pickled. { Sarah: AthleteList James: AthleteList Julie: AthleteList Mikey: AthleteLi st The pickled dictionary has any num ber of AthleteLists within it. Each AthleteList has the following attributes: Sarah: AthleteList The athlete’s name The athlete’s DOB The athlete’s list of times With this arrangement, it is pretty obvious which name, date of birth, and list of times is associated with which individual athlete. But how do you model these relationships within a SQL-compliant database system like SQLite? You need to define your schema and create some tables. 316 Chapter 9 ... } manage your data Define your database schema Here is a suggested SQL schema for the NUAC’s data. The database is called coachdata.sqlite, and it has two related tables. The first table, called athletes, contains rows of data with a unique ID value, the athlete’s name, and a date-of-birth. The second table, called timing_data, contains rows of data with an athlete’s unique ID and the actual time value. This is a new attribute that should make it easy to guarantee uniqueness. 
coachdata.sqlite CREATE TABLE athletes ( id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, name TEXT NOT NULL, dob DATE NOT NULL ) CREATE TABLE timing_data ( athlete_id INTEGER NOT NULL, value TEXT NOT NULL, FOREIGN KEY (athlete_id) REFERENCES athletes) Note how this schema “links” the two tables using a foreign key. There can be one and only one row of data for each athlete in the athletes table. For each athlete, the value of id is guaranteed to be unique, which ensures that two (or more) athletes with the same name are kept separate within the system, because they have different ID values. Within the timing_data table, each athlete can have any number of time values associated with their unique athlete_id, with an individual row of data for each recorded time. Let’s look at some sample data. you are here 4 317 athletes and values What does the data look like? If the two tables were created and then populated with the data from the NUAC’s text files, the data in the tables might look something like this. This is what the data in the “timing_data” table might look like, with multiple rows of data for each athlete and one row for each timing value. This is what the data in the “athletes” table might look like, with one row of data for each athlete. If you create these two tables then arrange for your data to be inserted into them, the NUAC’s data would be in a format that should make it easier to work with. Looking at the tables, it is easy to see how to add a new timing value for an athlete. Simply add another row of data to the timing_data table. Need to add an athlete? Add a row of data to the athletes table. Want to know the fastest time? Extract the smallest value from the timing_data table’s value column. Let’s create and populate these database tables. 318 Chapter 9 There’s more data in this table than shown here. manage your data SQLite Magnets Let’s create a small Python program that creates the coachdata.
sqlite database with the empty athletes and timing_data tables. Call your program createDBtables.py. The code you need is almost ready. Rearrange the magnets at the bottom of the page to complete it. import sqlite3 cursor.execute("""CREATE TABLE athletes ( athlete_id INTEGER NOT NULL, value TEXT NOT NULL, FOREIGN KEY (athlete_id) REFERENCES athletes)""") connection.commit() connection.close() dob DATE NOT NU LL )""") cursor = connec tion.cursor() cursor.execute(" ""CREATE TABLE timing_data ( connection = sqlite3.connect('coachdata.sqlite') name TEXT NOT NULL, id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, you are here 4 319 create database tables SQLite Magnets Solution Your job was to create a small Python program that creates the coachdata.sqlite database with the empty athletes and timing_data tables. You were to call your program createDBtables.py. The code you needed was almost ready, and you were to rearrange the magnets at the bottom of the page to complete it. import sqlite3 connection = sqlite3.connect('coachdata.sqlite') cursor = connection.cursor() cursor.execute("""CREATE TABLE athletes ( id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, name TEXT NOT NULL, dob DATE NOT NULL )""") cursor.execute("""CREATE TABLE timing_data ( athlete_id INTEGER NOT NULL, value TEXT NOT NULL, FOREIGN KEY (athlete_id) REFERENCES athletes)""") connection.commit() connection.close() 320 Chapter 9 The commit isn’t always required most other database systems, butwith it is with SQLite. manage your data Transfer the data from your pickle to SQLite As well as writing the code to create the tables that you need, you also need to arrange to transfer the data from your existing model (your text files and pickle combination) to your new database model. Let’s write some code to do that, too. You can add data to an existing table with the SQL INSERT statement. 
Assuming you have data in variables called name and dob, use code like this to add a new row of data to the athletes table: The data in these variables is substituted in place of the “?” placeholders. cursor.execute("INSERT INTO athletes (name, dob) VALUES (?, ?)",(name, dob)) ut supplying a value for You don’t need to worry aboLit provides one for you the “id” column, because SQ e automatically. Ready Bake Python Code Connect to the new database. Grab the data from the existing model. Here’s a program, called initDBathletes.py, which takes your athlete data from your existing model and loads it into your newly created SQLite database. import sqlite3 connection = sqlite3.connect('coachdata.sqlite') cursor = connection.cursor() import glob import athletemodel data_files = glob.glob("../data/*.txt") athletes = athletemodel.put_to_store(data_files) Get the athlete’s name and DOB from the pickled data. Use the INSERT statement to add a new row to the “athletes” table. for each_ath in athletes: name = athletes[each_ath].name dob = athletes[each_ath].dob cursor.execute("INSERT INTO athletes (name, dob) VALUES (?, ?)", (name, dob)) connection.commit() Make the change(s) permanent. connection.close() you are here 4 321 names and numbers What ID is assigned to which athlete? You need to query the data in your database table to work out which ID value is automatically assigned to an athlete. With SQL, the SELECT statement is the query king. Here’s a small snippet of code to show you how to use it with Python, assuming the name and dob variables have values: Again, the placeholders indica where the data values are te substituted into the query. cursor.execute("SELECT id from athletes WHERE name=? AND dob=?", (name, dob)) If the query succeeds and returns data, it gets added to your cursor. You can call a number of methods on your cursor to access the results: • cursor.fetchone() returns the next row of data. • cursor.fetchmany() returns multiple rows of data. 
• cursor.fetchall() returns all of the data. Each of these cursor methods return a list of rows. Names alone are not enough anymore...if you want to uniquely identify your athletes, I need to know their IDs. Web Server 322 Chapter 9 manage your data Insert your timing data You’re on a roll, so let’s keep coding for now and produce the code to take an athlete’s timing values out of the pickle and add them to your database. Specifically, you’ll want to arrange to add a new row of data to the timing_data table for each time value that is associated with each athlete in your pickle. Those friendly coders over at the Head First Code Review Team have just announced they’ve added a clean_data attribute to your AthleteList class. When you access clean_data, you get back a list of timing values that are sanitized, sorted, and free from duplicates.The Head First Code Review Team has excellent timing; that attribute should come in handy with your current coding efforts. Grab your pencil and write the lines of code needed to query the your Again, it’s OK to assume in “do b” and ” me “na code that the ues val e hav and variables exist assigned to them. athletes table for an athlete’s name and DOB, assigning the result to a variable called the_current_id. Write another query to extract the athlete’s times from the pickle and add them to the timing_data table. you are here 4 323 database queries You were to grab your pencil and write the lines of code needed to query the athletes table for an athlete’s name and DOB, assigning the result to a variable called the_current_id. You were then to write another query to extract the athlete’s times from the pickle and add them to the timing_data table. Query the “athletes” table for the ID. cursor.execute(“SELECT id from athletes WHERE name=? AND dob=?”, Remember: “fetchone()” returns a list. (name, dob)) the_current_id = cursor.fetchone()[0] It often makes sense to split your execute statement over multiple lines. 
Take each of for each_time in athletes[each_ath].clean_data: the “clean” times and use cursor.execute("INSERT INTO timing_data (athlete_id, value) VALUES (?, ?)”, it, together with the ID, (the_current_id, each_time)) within the SQL “INSERT” Add the ID and the time statement. val ue to the “timing_ the ke ma , ays As alw connection.commit() change(s) permanent. data” table. Do this! Add the code to your initDBathletes.py code from earlier, just after the connection.commit()call. Rename your program initDBtables.py, now that both the athletes and timing_data tables are populated with data by a single program. That’s enough coding (for now). Let’s transfer your pickled data. 324 Chapter 9 manage your data Test Drive You’ve got two programs to run now: createDBtables.py creates an empty database, defining the two tables, and initDBtables.py extracts the data from your pickle and populates the tables. Rather than running these programs within IDLE, let’s use the Python command-line tool instead. If you are running Windows, replace “python3” with this: “C:\Python31\python.exe”. File Edit Window Help PopulateTheTables Be careful to run both programs ONLY once. $ python3 createDBtables.py $ python3 initDBtables.py $ Hello? Something happened there, didn’t it? I ran the programs but nothing appeared on screen...how do I know if anything worked? you are here 4 325 sqlite manager SQLite data management tools When it comes to checking if your manipulations of the data in your database worked, you have a number of options: a Write more code to check that the database is in the state that you expect it. Which can certainly work, but is error-prone, tedious, and way too much work. b Use the supplied “sqlite3” command-line tool. Simply type sqlite3 within a terminal window to enter the SQLite “shell.” To find out which commands are available to you, type .help and start reading. The tool is a little basic (and cryptic), but it works. c Use a graphical database browser. 
There are lots of these; just Google “sqlite database browser” for more choices than you have time to review. Our favorite is the SQLite Manager, which installs into the Firefox web browser as an extension. This is what SQLite Manager looks like. Life really is too short. That’s a period, followed by the word “help”. Great, all of the athletes are in the But how do you integrate your new database into your webapp? 326 Chapter 9 Works great, but only on Firefox. “athletes” table. manage your data Integrate SQLite with your existing webapp So...we just need to change our model code to use SQLite...but what’s involved? Joe: This should be easy. We just have to rewrite the code in athletemodel.py to use the database, while keeping the API the same. Frank: What do you mean by keeping the API the same? Joe: Well…take the get_from_store() function, for instance. It returns an AthleteList dictionary, so we need to make sure that when we update get_from_store() to use our database that it continues to return a dictionary, just as it’s always done. Frank: Ah, now I get it: we can query the database, grab all the data, turn it into a big dictionary containing all of our AthleteList objects and then return that to the caller, right? Joe: Yes, exactly! And the best of it is that the calling code doesn’t need to change at all. Don’t you just love the beauty of MVC? Joe Jim Frank: Ummm…I guess so. Jim: [cough, cough] Frank: What’s up, Jim? Frank Jim: Are you guys crazy? Joe & Frank: What?!? Jim: You are bending over backward to maintain compatibility with an API that exists only because of the way your data model was initially designed. Now that you’ve reimplemented how your data is stored in your model, you need to consider if you need to change your API, too. Joe & Frank: Change our API? Are you crazy?!? Jim: No, not crazy, just pragmatic. If we can simplify the API by redesigning it to better fit with our database, then we should. Joe: OK, but we haven’t got all day, y’know. 
Jim: Don’t worry: it’ll be worth the effort. you are here 4 327 get out of a pickle Let’s spend some time amending your model code to use your SQLite database as opposed to your pickle. Start with the code to your athletemodel.py module.) 328   Chapter 9) Remember: there’s no requirement to maintain the existing API. you are here 4   329 out of a pickle Let’s spend some time amending your model code to use your SQLite database as opposed to your pickle. Start with the code to your athletemodel.py module. You were to) 330   Chapter 9 None of this code is needed anymore, because SQLite provides the data model for you.) This might seem a little drastic...but sometimes a redesign requires you to throw away obsolete code. you are here 4   331 get names from store You still need the list of names Throwing away all of your “old” model code makes sense, but you still need to generate a list of names from the model. Your decision to use SQLite is about to pay off: all you need is a simple SQL SELECT statement. Ready Bake Python Code Here’s the code for your new get_names_from_store() function: import sqlite3 Connect to the database. Extract the data you need. Formulate a response. db_name = 'coachdata.sqlite' def get_names_from_store(): connection = sqlite3.connect(db_name) cursor = connection.cursor() results = cursor.execute("""SELECT name FROM athletes""") response = [row[0] for row in results.fetchall()] connection.close() return(response) Return the list of names to the caller. I guess in this case it actually makes perfect sense to maintain the API for this call. 332 Chapter 9 manage your data Get an athlete’s details based on ID In addition to the list of names, you need to be able to extract an athlete’s details from the athletes table based on ID. Ready Bake Python Code A new function gets the data associated with a specific ID. Get the “name” and “DOB” values from the athletes table. Get the list of times from the “timing_data” table. 
Return the athlete’s data to the caller. Here’s the code for another new function called get_athlete_from_id(): Note the use of the placeholder to indicate where the “athlete_id” argument is inserted into the SQL SELECT query. def get_athlete_from_id(athlete_id): connection = sqlite3.connect(db_name) cursor = connection.cursor() results = cursor.execute("""SELECT name, dob FROM athletes WHERE id=?""", (athlete_id,)) (name, dob) = results.fetchone() results = cursor.execute("""SELECT value FROM timing_data WHERE athlete_id=?""", (athlete_id,)) data = [row[0] for row in results.fetchall()] response = { 'Name': name, 'DOB': dob, 'data': data, 'top3': data[0:3]} Take the data from both query results and turn it into a dictionary. connection.close() return(response) This function is a little more involved than get_names_from_store(), but not by much. It still follows the API used when working with data stored in SQLite. This is coming along nicely. With the model code converted, you can revisit your CGI scripts to use your new model API. Let’s see what’s involved with converting the CGIs. you are here 4 333 use ids internally Isn’t there a problem here? The “get_names_from_store()” function returns a list of names, while the “get_athlete_from_id()” function expects to be provided with an ID. But how does the web browser or the phone know which ID to use when all it has to work with are the athletes’ names? That’s a good point: which ID do you use? Your current CGIs all operate on the athlete name, not the ID. In order to ensure each athlete is unique, you designed your database schema to include a unique ID that allows for your system to properly identify two (or more) athletes with the same name, but at the moment, your model code doesn’t provide the ID value to either your web browser or your phone.
One solution to this problem is to ensure that the athlete names are displayed to the user within the view, while the IDs are used internally by your system to unique identify a specific athlete. For this to work, you need to change get_names_from_store(). 334 Chapter 9 manage your data Here is the current code for your get_names_from_store() function. Rather than amending this code, create a new function, called get_namesID_from_store(), based on this code but including the ID values as well as the athlete names in its response.) you are here 4   335 get name’s id Here is your current code for your get_names_from_store() function. Rather than amending this code, you were to create a new function, called get_namesID_from_ store(), based on this code but including the ID values as well as the athlete names in its response. You were to) def get_namesID_from_store(): connection = sqlite3.connect(db_name) Arrange to include the value of “id” in the SQL “SELECT” query. cursor = connection.cursor() results = cursor.execute(“““SELECT name, id FROM athletes""") response = results.fetchall() connection.close() return(response) Remember: when you close you also destroyed, so you’ll generarteconnection, your cursor is and use “return(results.fetchall( an exception if you try ))”. 336 Chapter 9 There’s no need to process “results” in any way…assign the everything returned from query to “response”. manage your data Part 1: With your model code ready, let’s revisit each of your CGI scripts to change them to support your new model. At the moment, all of your code assumes that a list of athlete names or an AthleteList is returned from your model. Grab your pencil and amend each CGI to work with athlete IDs where necessary. This is the “generate_list.py” CGI script. #! /usr/local/bin/python3 import glob import athletemodel import yate data_files = glob.glob("data/*.txt") athletes = athletemodel.put_to_store(data_files) Note the change to the title. 
print(yate.start_response()) print(yate.include_header("NUAC's List of Athletes")) print(yate.start_form("generate_timing_data.py")) print(yate.para("Select an athlete from the list to work with:")) for each_athlete in sorted(athletes): print(yate.radio_button("which_athlete", athletes[each_athlete].name)) print(yate.end_form("Select")) print(yate.include_footer({"Home": "/index.html"})) This “Sharpen” is continued on the next page, but no peeking! Don’t flip over until you’ve amended the code on this page. y”. This is “generate_timing_data.p #! /usr/local/bin/python3 import cgi import athletemodel import yate athletes = athletemodel.get_from_store() form_data = cgi.FieldStorage() athlete_name = form_data['which_athlete'].value print(yate.start_response()) ange. Another title ch print(yate.include_header("NUAC's Timing Data")) print(yate.header("Athlete: " + athlete_name + ", DOB: " + athletes[athlete_name].dob + ".")) print(yate.para("The top times for this athlete are:")) print(yate.u_list(athletes[athlete_name].top3)) print(yate.para("The entire set of timing data is: " + str(athletes[athlete_name].clean_data) + " (duplicates removed).")) print(yate.include_footer({"Home": "/index.html", "Select another athlete": "generate_list.py"})) you are here 4 337 not done yet This is the “generate_names.py” CGI. Part 2: You’re not done with that pencil just yet! In addition to amending the code to the CGIs that support your web browser’s UI, you also need to change the CGIs that provide your webapp data to your Android app. Amend these CGIs, too. #! /usr/local/bin/python3 import json import athletemodel import yate names = athletemodel.get_names_from_store() print(yate.start_response('application/json')) print(json.dumps(sorted(names))) And here is the “generate_data.py” CGI. #! 
/usr/local/bin/python3 import cgi import json import sys import athletemodel import yate athletes = athletemodel.get_from_store() form_data = cgi.FieldStorage() athlete_name = form_data['which_athlete'].value print(yate.start_response('application/json')) print(json.dumps(athletes[athlete_name].as_dict)) 338 Chapter 9 manage your data Part 1: With your model code ready, you were to revisit each of your CGI scripts to change them to support your new model. At the moment, all of your code assumes that a list of athlete names or an AthleteList is returned from your model. You were to grab your pencil and amend each CGI to work with athlete IDs where necessary. This is the “generate_list.py” CGI script. #! /usr/local/bin/python3 You no longer need the “glob” mo as “get_nameID_from_store()” dule, does all this work for you. import glob import athletemodel import yate data_files = glob.glob("data/*.txt") get_namesID_from_store() athletes = athletemodel.put_to_store(data_files) print(yate.start_response()) print(yate.include_header("NUAC's List of Athletes")) The “athletes” are now a listgetof lists, so amend the code to at the data you need. print(yate.start_form("generate_timing_data.py")) print(yate.para("Select an athlete from the list to work with:")) each_athlete[0], each_athlete[1]) for each_athlete in sorted(athletes): print(yate.radio_button("which_athlete", athletes[each_athlete].name)) print(yate.end_form("Select")) print(yate.include_footer({"Home": "/index.html"})) y”. This is “generate_timing_data.p radio_button_id() ?!? It looks like you might need a slightly different “radio_ button()” function?!? #! /usr/local/bin/python3 import cgi import athletemodel Get the athlete’s data from the model, which returns a dictionary. 
import yate athletes = athletemodel.get_from_store() form_data = cgi.FieldStorage() athlete_name = form_data['which_athlete'].value athlete = athletemodel.get_athlete_from_id(athlete_id) print(yate.start_response()) print(yate.include_header("NUAC's Timing Data")) The rest of this “Sharpen Solution” is on the next page. Use the retur needed, accessinnegd data as the dictionary ke each of get at the athlety/values to e’s data. athlete[‘Name'] + “, DOB: " + athlete[‘DOB'] print(yate.header("Athlete: " + athlete_name + ", DOB: " + athletes[athlete_name].dob + ".")) print(yate.para("The top times for this athlete are:")) print(yate.u_list(athletes[athlete_name].top3)) athlete[‘top3'] str(athlete[‘data']) print(yate.para("The entire set of timing data is: " + str(athletes[athlete_name].clean_data) + " (duplicates removed).")) print(yate.include_footer({"Home": "/index.html", "Select another athlete": "generate_list.py"})) you are here 4 339 cgis for android Part 2: You weren’t done with that pencil just yet! In addition to amending the code to the CGIs that support your web browser’s UI, you also needed to change the CGIs that provide your webapp data to your Android app. You were to amend these CGIs, too. This is the “generate_names.py” CGI. #! /usr/local/bin/python3 import json import athletemodel import yate get_namesID_from_store() names = athletemodel.get_names_from_store() print(yate.start_response(‘application/json’)) print(json.dumps(sorted(names))) And here is the “generate_data.py” CGI. #! /usr/local/bin/python3 The tiniest of changes need to be made to these CGIs, because your Andro is only interested in your webapp id app ’s data, NOT all of that generated HTML . 
import cgi import json import sys import athletemodel import yate athletes = athletemodel.get_from_store() form_data = cgi.FieldStorage() athlete_name = form_data[‘which_athlete’].value athlete = athletemodel.get_athlete_from_id(athlete_id) print(yate.start_response(‘application/json’)) print(json.dumps(athletes[athlete_name].as_dict)) Add this code to “yate.py” to support the creation of radio buttons that provide a value for the button that differs from the button text. 340 Chapter 9 A third argument lets you cif y an ID to go with the radiospe button . def radio_button_id(rb_name, rb_value, rb_id): return('<input type="radio" name="' + rb_name + '" value="' + str(rb_id) + '"> ' + rb_value + '<br />') manage your data Test Drive Start (or restart) your web server. Before you run your amended webapp, be sure to move you SQLite database into the top-level directory of your webapp (that is, into the same folder your index.html file). That way, your model code can find it, so move it into your webapp’s root folder now. When you are ready, take your SQL-powered webapp for a spin. File Edit Window Help StartYourWebEngine $ python3 simple_httpd.py Starting simple_httpd on port: 8080 Click on the link on the home page. And there’s Sally’s timing data. Display the list of athlete names as radio buttons. That worked well. But what about your Android app? you are here 4 341 amend for android You need to amend your Android app, too Unlike your HTML-based webapp, where all of your code resides and is executed on your web server, your Android app runs on your phone and it is programmed to work with a list of names, not a list of names and athlete IDs. When you run coachapp.py on your emulator, weirdness ensues… Here’s your current Android app running on the emulator. This is weird…instead of the names, your app is displaying a list of lists! And if you tap “Select”, your app crashes with a “ValueError”. Bu mmer. 
Just like with the CGI scripts, you need to amend you Android app to work with the data that’s now arriving from your web server—that is, a list of lists as opposed to a list. That shouldn’t take too long, should it? 342 Chapter 9 manage your data Here is your current coachapp.py code, which you need to amend to support the way your webapp’s model now works.() def status_update(msg, how_long=2): # There is no change to this code from the previous chapter.) you are here 4 343 support the new model Here is your current coachapp.py code, which you need to amend to support the way your webapp’s model now works. You were to() Extract the athlete names ONLY from the list of lists. def status_update(msg, how_long=2): # There is no change to this code from the previous chapter. status_update(hello_msg) athletes = athlete_names = sorted(json.loads(send_to_server(web_server + get_names_cgi))) app.dialogCreateAlert(list_title) athlete_names = [ath[0] for ath in athletes] app.dialogSetSingleChoiceItems(athlete_names) This is a cool use of a comprehension. app.dialogSetPositiveButtonText('Select') app.dialogSetNegativeButtonText('Quit') app.dialogShow() resp = app.dialogGetResponse().result Determine the ID associated with the selected athlete. if resp['which'] in ('positive'): selected_athlete = app.dialogGetSelectedItems().result[0] which_athlete = athlete_names[selected_athlete] athletes[selected_athlete][1]) 344 Chapter 9 athlete[‘top3'] A small adjustment to next line is needed to access the “top3” attribute. manage your data Android Pool Puzzle Your job is to take the code from the pool and place it into the blank lines in the program. Your goal is to write the code to have your app provide the user with a mechanism to add a timing value to the server for the currently selected athlete. For now,'): sting Add another button to thenexi your app. of sio dialog in the current ver d, tton that’s tappe Based on the hibung (“pass”) or start a either do not h the user. 
new dialog wit send_to_server(web_server + add_time_cgi,{'Time': new_time, 'Athlete': which_athlete}) If some input is supplied, send it to the web server together with the athlete’s ID. = resp new_time '/cgi-bin/add_timing_data.py' resp = app.dialogGe tInput(timing_titl e, timing_msg).res ult = cgi me_ _ti add 'Provide a new tim ing value ' + ath lete['Name'] + ': ' timing_msg = ' me ti w ne a er le = 'Ent not None: timing_tit if resp is you are here 4 345 allow android input Android Pool Puzzle Solution Your job was to take the code from the pool and place it into the blank lines in the program. Your goal was to write the code to have your app provide the user with a mechanism to add a timing value to the server for the currently selected athlete. For now, you were to'): timing_title = 'Enter a new time' timing_msg = 'Provide a new timing value ' + athlete['Name'] + ': ' add_time_cgi = '/cgi-bin/add_timing_data.py' resp = app.dialogGetInput(timing_title, timing_msg).result if resp is not None: Define the dialog’s titles and specify the CGI to send the data to. g and wait for Display the diatlo. some user inpu new_time = resp send_to_server(web_server + add_time_cgi,{'Time': new_time, 'Athlete': which_athlete}) 346 Chapter 9 manage your data Test Drive Use the tools/adb command to copy your latest app to the emulator, and give your app a go. Select “Vera” from the list of athletes… …to see Vera’s top 3 times, then tap the “Add Time” button… …to enter a new time, which is then sent to your web server. File Edit Window Help DataFromAndroid Great: your data is sent from your Android app to your web server. 
$ python3 simple_httpd.py Starting simple_httpd on port: 8080 198.162.1.34 - - [27/Sep/2010 14:51:47] "GET /cgi-bin/generate_names.py HTTP/1.1" 200 198.162.1.34 - - [27/Sep/2010 14:52:01] "POST /cgi-bin/generate_data.py HTTP/1.1" 200 198.162.1.34 - - [27/Sep/2010 14:52:19] "POST /cgi-bin/add_timing_data.py HTTP/1.1" 200 localhost, 198.162.1.34, Mon Sep 27 14:52:19 2010: POST: Athlete->3 Time->1.33 you are here 4 347 database update Update your SQLite-based athlete data All that’s left is to change the cgi-bin/add_timing_data.py CGI script to write your submitted data to your database, as opposed to the web server’s console screen. At this point, it’s a trivial exercise, because a single SQL INSERT statement will do the heavy lifting. Get the data sent to your browser from your Android web app. INSERT the data into your “timing_data” table. With this version of your CGI script running on your web server, any new times entered by anyone on an Android phone are added to the data in the database. The NUAC no longer has to worry about adding data to text files, because the files are effectively obsoleted by the use of SQLite. You’ve produced a robust solution that is more manageable, scalable, programmable, and extendable. And it’s all thanks to the power of Python, it’s database API and the inclusion of sqlite3 in the standard library. All that’s left to do is sit back, relax and bask in the glory of your latest programming creation… 348 Chapter 9 manage your data The NUAC is over the moon! Of course, your use of SQLite gives you more than just easy insertions of data. With the NUAC’s data in tables, it’s easy to answer some of the questions that have been on their mind. With our data in a database, it’s a breeze to work out the fastest time among all our athletes. And if we need to know who had the fastest time, that’s easy, too. This is just great! I can get instant answers to my many questions in the blink of an eye. All thanks to Python and SQLite. 
To answer these and other queries on the data in the NUAC's database, you'll have to bone up on your SQL. Then it's up to you to take it from there. You've converted your webapp to use an SQL database. As your data management needs increase, you can consider alternative heavy-duty data management technologies as needed. This is great work. Your webapp is ready for the big time. you are here 4 349 python toolbox CHAPTER 9 Your Python Toolbox You've got Chapter 9 under your belt and you've added some key Python tools to your ever-expanding Python toolbox. Python's "Database API" - a standardized mechanism for accessing an SQL-based database system from within a Python program. • Database Lingo • "Database" - a collection of one or more tables. • "Table" - a collection of one or more rows of data, arranged as one or more columns. • "SQL" - the "Structured Query Language" is the language of the database world and it lets you work with your data in your database using statements such as CREATE, INSERT, and SELECT. The FieldStorage() method from the standard library's cgi module lets you access data sent to your web server from within your CGI script. The standard os library includes the environ dictionary providing convenient access to your program's environment settings. The SQLite database system is included within Python as the sqlite3 standard library. The connect() method establishes a connection to your database file. The cursor() method lets you communicate with your database via an existing connection. The execute() method lets you send an SQL query to your database via an existing cursor. The commit() method makes changes to your database permanent. The rollback() method cancels any pending changes to your data. The close() method closes an existing connection to your database. The "?" placeholder lets you parameterize SQL statements within your Python code. 
350 Chapter 9 10 scaling your webapp Getting real It all started with the internal combustion engine, then it was the electric engine, and now there's App Engine. Will this torture never end? So…flip the page and find out how. this is a new chapter 351 a whale of data There are whale sightings everywhere entering data when all you want to do is be out on the water looking for humpbacks… 352 Chapter 10 scaling your webapp The HFWWG needs to automate We need to somehow automate the recording of our sightings... Yeah, but we aren't technical, and we don't have much money. Ideally, a solution that works on the Web would be great. That way, anyone from anywhere could record a sighting. Look! There's one... Suggesting to the HFWWG that they invest in an expensive web hosting solution isn't going to make you any friends. It's way too expensive to buy the capacity they'll need for the busy weekends and a total waste of capacity when sightings are infrequent. Suggesting that they invest in a large, state-of-the-art web server that can be hosted in the central office is also a nonstarter: there's no one to look after a setup like that, and the broadband link required to handle the anticipated traffic would blow their budget right out of the water. Is there another option? you are here 4 353 enter app engine Build your webapp with Google App Engine Google App Engine (GAE) is a set of technologies that lets you host your webapp on Google's cloud computing infrastructure. GAE constantly monitors your running webapp and, based on your webapp's current activity, adjusts the resources needed to serve up your webapp's pages. When things are busy, GAE increases the resources available to your webapp, and when things are quiet, GAE reduces the resources until such time as extra activity warrants increasing them again. On top of this, GAE provides access to Google's BigTable technology: a set of database technologies that make storing your webapp's data a breeze. 
Google also backs up your webapp's data on a regular basis, replicates your webapp over multiple, geographically dispersed web servers, and keeps App Engine running smoothly 24/7. And the best part? GAE can be programmed with Python. And the even better part? You can start running your webapp on GAE for free. That sounds perfect for the HFWWG. What's the catch? Initially, there isn't one. Google provides this webapp hosting service at no charge and will continue to do so until your webapp processes five million page views per month. Once it exceeds this threshold, you'll need to pay Google for the extra capacity used. If you never reach the limit, your use of GAE is not charged. Five million page views? That's a lot of sightings… 354 Chapter 10 scaling your webapp Download and install App Engine When your webapp is ready for deployment, you'll upload it to the Google cloud and run it from there. However, during development, you can run a test version of your webapp locally on your computer. All you need is a copy of the GAE SDK, which is available from here: Download the GAE Python SDK for your operating system. Windows, Mac OS X, and Linux are all supported, and installation is straightforward. After installation, Windows and Mac OS X users will find a nice, graphical front end added to their system. On Linux, a new folder called "google_appengine" is created after a successful install. GAE uses Python 2.5 The version of Python built into GAE is a modified version of the Python 2.5 release. As when you worked with Python for Android, the fact that you aren't running Python 3 isn't such a big deal with GAE, although you do need to ensure Python 2.5 is installed on your computer. Open up a terminal window and type: python2.5 -V If this command gives an error, pop on over to the Python website and grab the 2.5 release for your operating system. Q: Aren't things going backward here? 
First, there was Python 3, then it was Python 2.6 for Android, and now we are dropping down to 2.5 for App Engine? What gives? A: That’s a great question. It’s important to remember to always code to the restrictions placed on you.You might think that it sucks that GAE runs on Python 2.5, but you shouldn’t. Think of it as just another restriction placed on the code you write—that is, it must target Release 2.5 of Python. As with the Android code you created in the previous chapters, the GAE code you are about to write is not all that different than the Python code for 3. In fact, you will be hard pressed to spot the difference. you are here 4 355 testing app engine Make sure App Engine is working The environment supported by GAE within the Google cloud supports standard CGI or Python’s WSGI. To build a GAE-compatible webapp, you need three things: a folder to hold your webapp’s files, some code to execute, and a configuration file. To test your setup, create a folder called mygaetest. Within the folder, create a small CGI you can use to test GAE. Call this CGI sayhello.py. Use this code: print('Content-type: text/plain\n') print('Hello from Head First Python on GAE!') The configuration file must be called app.yaml and it, too, must be in your webapp’s folder. This file tells the Google cloud a little bit about your webapp’s runtime environment. Here’s a basic configuration file: s The “application” line identiefienam e sam the is and your webapp as your folder. “runtime” tells GAE that your webapp is written in and will run on Python. Think of the “handlers” section the configuration file as a top-leof vel webapp routing mechanism. Do this! application: mygaetest version: 1 runtime: python It doesn’t get much easier than this…a plain-text message is displayed within your browser whenever this CGI runs. The “version” line identifies the current version of your web (and usually starts at 1). app The “api_version” indicates the release of GAE you are targeting. 
api_version: 1 handlers: - url: /.* script: sayhello.py This entry tells GAE to route all requests to your webapp to your “sayhello.py” program. sayhello.py mygaetest Go ahead and create the folder called mygaetest and the two files shown here. 356 Chapter 10 app.yaml scaling your webapp Test Drive Click this button to start your webapp. The GAE SDK includes a test web server, so let’s use it to take your test GAE webapp for a spin. If you are running on Windows or Mac OS X, fire up the Google App Engine Launcher front end. This tool makes it easy to start, stop, and monitor your webapp. On Linux, you’ll need to invoke a command to kick things off. If you are using the GAE Launcher, choose File -> Add Existing Application from the menu system to browse and select your webapp’s folder. Also: be sure to edit the Launcher’s Preferences to select Python 2.5 as your preferred Python Path. There is no graphical front end for Linux, so start your GAE webapp from the command line. File Edit Window Help GAEonLinux $ python2.5 google_appengine/dev_appserver.py mygaetest/ This is how the GAE Launcher looks on Mac OS X…it looks similar on Windows. INFO 2010-10-02 12:41:16,547 appengine_rpc.py:149] Server: appengine.google.com INFO 2010-10-02 12:41:16,555 appcfg.py:393] Checking for updates to the SDK. INFO 2010-10-02 12:41:17,006 appcfg.py:407] The SDK is up to date. WARNING 2010-10-02 12:41:17,007 datastore_file_stub.py:657] Could not read datastore data from /tmp/dev_appserver.datastore INFO 2010-10-02 12:41:17,104 dev_appserver_main.py:431] Running application mygaetest on port 8080: With your webapp running and waiting on port 8080, open your favorite web browser and surf on over to the web address. And there it is… the message from your test webapp! you are here 4 357 more work? I don’t believe it. This is actually more work than plain old CGI...and you’re claiming this is better?!? Yes, it is more work. But that’s about to change. 
For now, this is more work than you’re used to, but remember that this is just a quick test to make sure your GAE test environment is up and running (and it is). When you start to work with some of GAE’s web development features, you’ll initially see that there’s a lot more going on behind the scenes than meets the eye. 358 Chapter 10 scaling your webapp App Engine uses the MVC pattern Google has built GAE to conform to the familiar Model-View-Controller (MVC) pattern. Like your webapp from the previous chapter, the model component of a GAE-enabled webapp uses a back-end data storage facility that’s known as the datastore. This is based on Google’s BigTable technology, which provides a “NoSQL” API to your data, as well as a SQL-like API using Google’s Query Language (GQL). The Model The View GAE’s views use templates, but unlike the simple string templates from the previous chapter, GAE uses the templating system from the Django Project, which is one of Python’s leading web framework technologies. In addition to templates, GAE includes Django’s forms-building technology. The Controller And, of course, any controller code is written in Python and can use the CGI or WSGI standards. Unfortunately, you can’t use your yate module with GAE, because it is a Python 3 library (and would need to be extensively rewritten to support Python 2). Not to worry: the facilities provided by GAE “out of the box” are more than enough to build great webapps. So...like any other webapp that I build, with App Engine I define a model for my data, create some templates for my view, and then control it all with code, right? Yes, it’s the same process as any other webapp. Google has worked hard to ensure that the move to App Engine is as painless as possible. If you understand MVC (as you now do), you are well on your way to creating with GAE. It’s just a matter of working out how GAE implements each of the MVC components. 
you are here 4 359 model data Model your data with App Engine App Engine refers to data items stored within its datastore as properties, which are defined within your model code. Think of properties as a way to define the name and types of data within your database schema: each property is like the column type associated piece of data stored in a row, which App Engine refers to as an entity. When you think “row,” I think “entity.” And when your think “column,” I think “property.” Get it? As with traditional SQL-based databases, your GAE datastore properties are of a specific, predeclared type. There are lots to choose from, for instance: • db.StringProperty: a string of up to 500 characters • db.Blob: a byte string (binary data) • db.DateProperty: a date • db.TimeProperty: a time, • db.IntegerProperty: a 64-bit integer • db.UserProperty: a Google account ple data from Here’s some samap ter. the prevoius ch This data is stored as a “db.IntegerProperty”. 360 Chapter 10 For the full list of propertyr types supported, pop on ove to store/ appengine/docs/python/datal and typesandpropertyclasses.htm take a look. Store this data as “db.StringProperty”. This data is stored as a “db.DateProperty”. scaling your webapp Pool Puzzle Your job is to take the properties from the pool and place them in the correct place in the class code, which is in a file called hfwwgDB.py. Your goal is to assign the correct property type to each of the attributes within your Sighting class. Import the “db” module from the GAE extensions. from google.appengine.ext import db class Sighting(db.Model): name = Create a class called “Sighting” that inherits from the GAE “db.Model” cla ss. Each property is assigned to a name. 
date = time = location = fin_type = whale_type = blow_type = wave_type = ) db.StringProperty( db.StringProperty( ) db.DateProperty() db.StringProperty() ) db.StringProperty( ) db.StringProperty( db.TimeProperty() ) db.StringProperty( db.StringProperty( ) you are here 4 361 property types Pool Puzzle Solution Your job was to take the properties from the pool and place them in the correct place in the class code, which is in a file called hfwwgDB.py. Your goal was to assign the correct property type to each of the attributes within your Sighting class. from google.appengine.ext import db class Sighting(db.Model): db.StringProperty() name = 362 Chapter 10 db.StringProperty() date = db.DateProperty() time = db.TimeProperty() location = db.StringProperty() fin_type = db.StringProperty() whale_type = db.StringProperty() blow_type = db.StringProperty() wave_type = db.StringProperty() Everything is a “StringProperty”, except the “date” and “time” fields. scaling your webapp What good is a model without a view? GAE not only lets you define the schema for your data, but it also creates the entities in the datastore. The first time you go to put your data in the datastore, GAE springs to life and makes room for your data. There’s no extra work required by you, other than defining your model in code. It’s useful to think of GAE as executing something similar to a SQL CREATE command on the fly and as needed. But how do you get data into the GAE datastore? The short answer is that you put it there, but you first need to get some data from your webapp’s user…and to do that, you need a view. And views are easy when you use templates. App Engine templates in an instant Recall that the templating technology built into GAE is based on technology from the Django Project. Django’s templating system is more sophisticated than the simple string-based templates used in the previous chapter. 
Like your templates, Django’s templates can substitute data into HTML, but they can also execute conditional and looping code. Here are four templates you’ll need for your HTWWG webapp. Two of them should be familiar to you: they are adaptions of those used in the previous chapter. The other two are new. Go ahead and grab them from this book’s support website. As you can see, rather that using the $name syntax for variable substitution in the template, Django uses the {{name}} syntax: header.html footer.html <p> {{ links }} <html> </p> <head> </body> <title>{{ title }}</title> </head> </html> <body> <h1>{{ title }}</ h1> <form method="PO ST" action="/"> <table> form_start.html form_end.html r> _title }}"></td></t mit" value="{{ sub sub e=" typ ut inp ><td>< <tr><th> </th </table> </form> you are here 4 363 use a template Use templates in App Engine To use a template, import the template module from google. appengine.ext.webapp and call the template.render() function. It is useful to assign the output from template.render() to a variable, which is called html in this code snippet: from google.appengine.ext.webapp import template As usual, start with your import . html = template.render('templates/header.html', {'title': 'Report a Possible Sighting'}) Call “template. render()”… …supplying the template name… t maps values to …as well as a dictionary thale. the named template variab This is similar to the mechanism your yate.py module uses to parameterize the data displayed within your HTML pages. And I can use a bunch of calls like this to create the view that I need for the HTWWG sightings form, right? Yes, create your view with templates. Just like the other webapps that you’ve built, you can create your view in much the same way using Python code. It’s a bummer that you can’t use your yate.py module, but Django’s templates provide most of the functionality you need here. Q: A: Should I create one big template for my entire web page? You could, if you want. 
However, if you build up your view from snippets of HTML in templates, you open up the possibility of reusing those HTML snippets in lots of places. For instance, to maintain a consistent look and feel, you can use the same header and footer template on all of your web pages, assuming of course that your header and footer aren’t already embedded in an entire web page (which can’t be reused). 364 Chapter 10 scaling your webapp 1 This code goes into a new program called “hfwwg.py”. Let’s write the rest of the code needed to create a view that displays a data entry form for your HFWWG webapp. In addition to your web page header code (which already exists and is provided for you), you need to write code that starts a new form, displays the form fields, terminates the form with a submit button, and then finishes off the web page. Make use of the templates you’ve been given and (here’s the rub) do it all in no more than four additional lines of code. from google.appengine.ext.webapp import template html = template.render('templates/header.html', {'title': 'Report a Possible Sighting'}) html = html + Extend the contents of “html” with the rest of the HTML you nee d. Remember: no more than 4 lines of code! 2 Now that you have attempted to write the code required in no more than four lines of code, what problem(s) have you encountered. In the space below, note down any issue(s) you are having. you are here 4 365 data-entry display 1 You were to write the rest of the code needed to create a view that displays a data entry form for your HFWWG webapp. In addition to your webpage header code (which already exists and is provided for you), you were to write code with starts a new form, displays the form fields, terminates the form which a submit button, then finishes off the webpage. You were to make use of the templates you’ve been given and (here’s the rub) you had to do it all in no more than four more lines of code. 
from google.appengine.ext.webapp import template html = template.render('templates/header.html', {'title': 'Report a Possible Sighting'}) The “render()” function always exp don’t need the second one, be sur ects two arguments. If you e to pass an empty dictionary. html = html + template.render('templates/form_start.html’, {}) This is an issue, isn’t it? # We need to generate the FORM fields in here…but how?!? html = html + template.render(‘templates/form_end.html’, {‘sub_title’: ‘Submit Sighting’}) html = html + template.render(‘templates/footer.html’, {‘links’: ''}) 2 Having attempted to write the code required in no more than four lines of code, you were to make a note of any issue(s) you encountered. This is IMPOSSIBLE to do in just four lines of code, because there’s no way to generate the FORM fields that I need. I can’t even use the “do_form()” function from “yate.py”, because that code is not compatible with Python 2.5… this just sucks! You may have written some thing like this…assuming, of course, you haven’t thrown your copy of this book out the near est window in frustration. § 366 Chapter 10 scaling your webapp Wouldn't it be dreamy if I could avoid hand-coding a <FORM> and generate the HTML markup I need from an existing data model? But I know it's just a fantasy… you are here 4 367 more borrowing from django Django’s form validation framework Templates aren’t the only things that App Engine “borrows” from Django. It also uses its form-generating technology known as the Form Validation Framework. Given a data model, GAE can use the framework to generate the HTML needed to display the form’s fields within a HTML table. Here’s an example GAE model that records a person’s essential birth details: This code is in a file called “birthDB.py”. 
from google.appengine.ext import db class BirthDetails(db.Model): name = db.StringProperty() date_of_birth = db.DateProperty() time_of_birth = db.TimeProperty() This model is used with Django’s framework to generate the HTML markup needed to render the data-entry form. All you need to do is inherit from a GAE-included class called djangoforms.ModelForm: from google.appengine.ext.webapp import template from google.appengine.ext.db import djangoforms import birthDB class BirthDetailsForm(djangoforms.ModelForm): class Meta: model = birthDB.BirthDetails ition Import the forms library in add . del mo a to your GAE dat Create a new class by inheriting from the “djangoforms.Model” class, and the n link your new class to your data model. ... html = template.render('templates/header.html', {'title': 'Provide your birth details'}) html = html + template.render('templates/form_start.html', {}) html = html + str(BirthDetailsForm(auto_id=False)) Use your new class to generate your form. html = html + template.render('templates/form_end.html', {'sub_title': 'Submit Details'}) html = html + template.render('templates/footer.html', {'links': ''}) just ’t worry: you’ll get to it inthe don but e… her m fro g sin mis e cod s between There is some trate on understanding the link rk. wo a moment. For now, just concenDj me fra n ango form validatio model, the view code, and the 368 Chapter 10 scaling your webapp Check your form The framework generates the HTML you need and produces the following output within your browser. It’s not the prettiest web page ever made, but it works. Use the View Source menu option within your web browser to inspect the HTML markup generated. By setting “auto_id” to “False” in your code, the form generator uses your model pro perty names to identify your form’s fields. 
rt enough to create The Django framework is sma input fields (based on sensible labels for each of you).r the names used in your model It’s time to tie things all together with your controller code. you are here 4 369 controller code Controlling your App Engine webapp Like your other webapps, it makes sense to arrange your webapp controller code within a specific folder structure. Here’s one suggestion: to be Your top-level folder needs ati on” line plic “ap the named to match in your webapp’s “app.yaml” file. Put all of your webapp’s controller code and configuration files in here. hfwwgapp static templates As you’ve seen, any CGI can run on GAE, but to get the most out of Google’s technology, you need to code to the WSGI standard. Here’s some boilerplate code that every WSGI-compatible GAE webapp starts with: Import App Engine’s “webapp” class. If you have static content, nt, put it in here (at the mome this folder is empty). Put your HTML templates in here. Import a utility that runs your webapp. from google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app This class responds to a web request from your web browser. class IndexPage(webapp.RequestHandler): Create an new “webapp” object for your application. def get(self): pass request This method runs when a GET web is received by your webapp. This is not unlike switching on CGI tracking. app = webapp.WSGIApplication([('/.*', IndexPage)], debug=True) def main(): run_wsgi_app(app) if __name__ == '__main__': main() 370 Chapter 10 Start your webapp. Just use these two lines of code as- is. scaling your webapp App Engine Code Magnets Let’s put everything together. Your model code is already in your hfwwgDB.py file. All you need to do is move that file into your webapp’s top-level folder. Copy your templates folder in there, too.Your webapp’s controller code, in a file called hfwwg.py, also needs to exist in your top-level folder. 
The only problem is that some of the code’s all over the floor. What’s mis sing from in here? All of the imports have survived…so there’s no need to rearrange them. Let’s test how well you’ve been paying attention. There’s no guiding lines on the fridge door. html = template.render('templates/header.html', {'title': 'Report a Possible Sighting'}) html = html + template.render('templates/form_start.html', {}) html = html + template.render('templates/form_end.html’, {'sub_title': 'Submit Sighting'}) html = html + template.render('templates/footer.html', {'links': ''}) app = webapp.WSGIApplication([(‘/.*’, SightingInputPage)], debug=True) def main(): run_wsgi_app(app) There’s only one small change from the boilerplate code in that “IndexPage” is not being linked to. if __name__ == '__main__': main() class SightingForm(djangoforms.ModelForm): ()) htingForm + str(Sig l m t h = html self.response.ou t.write(html) class SightingInputPage(web app.RequestHandler): def get(self): ghting fwwgDB.Si model = h class Meta : import hfwwgDB you are here 4 371 everything together App Engine Code Magnets Solution Let’s put everything together. Your model code is already in your hfwwgDB.py file. You were to move that file into your webapp’s top-level folder, as well as copy your templates folder in there, too.Your webapp’s controller code, in a file called hfwwg.py, also needs to exist in your top-level folder. The only problem is that some of the code’s all over the floor. You were to data Import your GAE model code. import hfwwgDB class SightingForm(djangoforms.ModelForm): Use your model to create a form that inherits from thesighting “django.ModelForm” class. class Meta: model = hfwwgDB.Sighting : class SightingInputPage(webapp.RequestHandler) def get(self): called The connected handler class isvid es a pro “SightingInputPage” and it responds to a method called “get” which GET web request. 
html = template.render('templates/header.html', {'title': 'Report a Possible Sighting'}) html = html + template.render('templates/form_start.html', {}) html = html + str(SightingForm()) Include the generated form in the HTML response. html = html + template.render('templates/form_end.html’, {'sub_title': 'Submit Sighting'}) html = html + template.render('templates/footer.html', {'links': ''}) self.response.out.write(html) You need to send a Did you guess this correctlyg? web browser and this line response back to the waitin of code does just that. app = webapp.WSGIApplication([('/.*', SightingInputPage)], debug=True) def main(): run_wsgi_app(app) if __name__ == '__main__': main() 372 Chapter 10 scaling your webapp Test Drive It’s been a long time coming, but you are now ready to test the first version of your sightings form. If you haven’t done so already, create an app.yaml file, too. Set the application line to hfwwg and the script line to hfwwg.py. One final step is to use the Add Existing Application menu option within the GAE Launcher to select your top-level folder as the location of your webapp. The launcher adds your webapp into its list and assigns it the next available protocol port— in this case, 8081. And here’s your generated HTML form in all its glory. This is looking good. Let’s get a quick opinion from the folks over at the HFWWG. you are here 4 373 make it pretty I know what you’re thinking: “With a shirt like *that*, how can this guy possibly know anything about style?”... But let me just say that your form could do with a bit of, well...color, couldn’t it? Any chance it could look nicer? OK, we get it. Web design is not your thing. Not to worry, you know all about code reuse, right? So, let’s reuse someone else’s cascading style sheets (CSS) to help with the “look” of your generated HTML form. But who can you “borrow” from and not lose sleep feeling guilty over it? 
As luck would have it, the authors of Head First HTML with CSS & XHTML created a bunch of stylesheets for their web pages and have made them available to you. Grab a slightly amended copy of some of their great stylesheets from this book’s support website. When you unzip the archive, a folder called static appears: pop this entire folder into your webapp’s top-level folder. There’s a file in static called favicon.ico. Move it into your top-level folder. Improve the look of your form To integrate the stylesheets into your webapp, add two link tags to your header.html template within your templates folder. Here’s what the tags need to look like: Add these two lines to the top of your “header.html” template. <link type="text/css" rel="stylesheet" href="/static/hfwwg.css" /> <link type="text/css" rel="stylesheet" href="/static/styledform.css" /> GAE is smart enough to optimize the delivery of static content—that is, content that does not need to be generated by code. Your CSS files are static and are in your static folder. All you need to do is tell GAE about them to enable optimization. Do this by adding the following lines to the handers section of your app.yaml file: Provide the URL location for your static content. 374 Chapter 10 - url: /static static_dir: static Switch on the optimization. scaling your webapp Test Drive With your stylesheets in place and your app.yaml file amended, ask your browser to reload your form. Looking good. A little style goes a long way...that’s looking great! you are here 4 375 list of choices Restrict input by providing options At the moment, your form accepts anything in the Fin, Whale, Blow, and Wave input areas. The paper form restricts the data that can be provided for each of these values. Your HTML form should, too. Anything you can do to cut down on input errors is a good thing. As the youngest member of the group, I was “volunteered” to work on data clean-up duties... Providing a list of choices restricts what users can input. 
Instead of using HTML’s INPUT tag for all of your form fields, you can use the SELECT/OPTION tag pairing to restrict what’s accepted as valid data for any of the fields on your form. To do this, you’ll need more HTML markup. That’s the bad news. The good news is that the form validation framework can generate the HTML markup you need for you. All you have to provide is the list of data items to use as an argument called choices when defining your property in your model code. You can also indicate when multiple lines of input are acceptable using the multiline argument to a property. Apply these changes to your model code in the hfwwgDB.py file. Define your lists of values near the top of your code. This naming convention helps identify these lists as containing constant values. _FINS = ['Falcate', 'Triangular', 'Rounded'] _WHALES = ['Humpback', 'Orca', 'Blue', 'Killer', 'Beluga', 'Fin', 'Gray', 'Sperm'] _BLOWS = ['Tall', 'Bushy', 'Dense'] _WAVES = ['Flat', 'Small', 'Moderate', 'Large', 'Breaking', 'High'] 376 Chapter 10 ... location = db.StringProperty(multiline=True) fin_type = db.StringProperty(choices=_FINS) whale_type = db.StringProperty(choices=_WHALES) blow_type = db.StringProperty(choices=_BLOWS) wave_type = db.StringProperty(choices=_WAVES) Switch on multipleline input. Use your lists of values when defining your properties. scaling your webapp Test Drive With these changes applied to your model code, refresh your web browser once more. Your form is not only looking good, but it’s more functional, too. The “location” field is now displayed over multiple lines. Each of the “type” fields now have drop-down selection menus associated with them. Your form now looks great! Go ahead and enter some test data, and then press the Submit Sighting button. What happens? you are here 4 377 checking log console Meet the “blank screen of death” Submitting your form’s data to the GAE web server produces a blank screen. Whoops…that’s not exactly user-friendly. 
To work out what happened (or what didn’t happen), you need to look at the logging information for your GAE webapp. If you are running GAE on Linux, your logging messages are displayed on screen. If you are on Windows or Mac OS X, click the Logs button within the Launcher to open up the Log Console for your webapp. Click! Your request resulted in a 405 status code from the web server. According to the official HTTP RFC standards document, 405 stands for: “Method Not Allowed. The method specified in the Request-Line is not allowed for the resource identified by the Request-URI. The response MUST include an Allow header containing a list of valid methods for the requested resource”. 378 Chapter 10 Your last web request resulted in a 405. Ummm…that’s as clear as mud, isn’t it? scaling your webapp Process the POST within your webapp Listen, bud, I’ll happily process your web requests all day long...just as long as you give me the methods I need! What the 405 status code actually tells you is that posted data arrived at your webapp intact, but that your webapp does not have any way of processing it. There’s a method missing. Take a quick look back at your code: the only method currently defined is called get(). This method is invoked whenever a GET web request arrives at your webapp and, as you know, it displays your sightings form. In order to process posted data, you need to define another method. Specifically, you need to add a new method called post() to your SightingInputPage class. App Engine handles requests as well as responses App Engine Web Server Your get() method produces your HTML form and returns a web response to the waiting web browser using the self.response object and by invoking the out.write() method on it. In additon to helping you with your web responses, GAE also helps you process your web requests using the self.request object. Here are a few lines of code that displays all of the data posted to your web server: Define a new method called “post”. 
Don’t forget to use “self” with all your methods. The “arguments()” method returns a list of the field names used on your form. def post(self): for field in self.request.arguments(): self.response.out.write(field) self.response.out.write(': ') self.response.out.write(self.request.get(field)) self.response.out.write('<br />') The “get()” method returns the value associated with the provided form field name. So…if you know the name of your form field, you can access its value from within your webapp using the self.request.get() method. But what do you do with the data once you have it? you are here 4 379 storing data Put your data in the datastore Your data is sent to your webapp by GAE and you can use the self.request.get() method to access each input field value by name. Recall the BirthDetails model from earlier in this chapter: This code is in a file called “birthDB.py”. from google.appengine.ext import db class BirthDetails(db.Model): name = db.StringProperty() date_of_birth = db.DateProperty() time_of_birth = db.TimeProperty() Assume that an HTML form has sent data to your webapp. The data is destined to be stored in the GAE datastore. Here’s some code to do the heavy lifting: def post(self): Generate an HTML response to say “thanks.” Send your response to the waiting web browser. new_birth = birthDB.BirthDetails() Get each of the form’s data values and assign them to your new object’s attributes. new_birth.name = self.request.get('name') new_birth.date = self.request.get('date_of_birth') new_birth.time = self.request.get('time_of_birth') new_birth.put() html = template.render('templates/header.html', {'title': 'Thank you!'}) Put (save) your data to the GAE datastore.
html = html + "<p>Thank you for providing your birth details.</p>" html = html + template.render('templates/footer.html', {'links': 'Enter <a href="/">another birth</a>.'}) self.response.out.write(html) There’s nothing to it: create a new object from your data model, get the data from your HTML form, assign it to the object’s attributes, and then use the put() method to save your data in the datastore. 380 Chapter 10 Create a new “BirthDetails” object to hold your data. scaling your webapp Based on what you know about how to put your HTML form’s data into the GAE datastore, create the code for the post() method that your webapp now needs. Some of the code has been done for you already. You are to provide the rest. def post(self): Put your code here. html = template.render('templates/header.html', {'title': 'Thank you!'}) html = html + "<p>Thank you for providing your sighting data.</p>" html = html + template.render('templates/footer.html', {'links': 'Enter <a href="/">another sighting</a>.'}) self.response.out.write(html) you are here 4 381 post to datastore Based on what you know about how to put your HTML form’s data into the GAE datastore, you were to create the code for the post() method that your webapp now needs. Some of the code has been done for you already. You were to provide the rest. Create a new “Sighting” object. def post(self): new_sighting = hfwwgDB.Sighting() new_sighting.name = self.request.get(‘name’) new_sighting.email = self.request.get(‘email’) new_sighting.date = self.request.get(‘date’) new_sighting.time = self.request.get(‘time’) new_sighting.location = self.request.get(‘location’) new_sighting.fin_type = self.request.get(‘fin_type’) new_sighting.whale_type = self.request.get(‘whale_type’) For each of the data values received from the HTML form, assign them to the attributes of the newly created object. 
new_sighting.blow_type =self.request.get(‘blow_type’) new_sighting.wave_type = self.request.get(‘wave_type’) new_sighting.put() GAE Store your populated object in the datastore. html = template.render('templates/header.html', {'title': 'Thank you!'}) html = html + "<p>Thank you for providing your sighting data.</p>" html = html + template.render('templates/footer.html', {'links': 'Enter <a href="/">another sighting</a>.'}) self.response.out.write(html) 382 Chapter 10 scaling your webapp Test Drive Add your post() code to your webapp (within the hfwwg.py file) and press the Back button on your web browser. Click the Submit Sighting button once more and see what happens this time. Here’s your form with the data waiting to be submitted. But when you click the button, something bad has happened…your webapp has crashed. blem with the It looks like you might have aty,prodoesn’t it? format of your date proper Phooey…that’s disappointing, isn’t it? At the very least, you were expecting the data from the form to make it into the datastore…but something has stopped this from happening. What do you think is the problem? you are here 4 383 conservative responses to liberal requests Don’t break the “robustness principle” The Robustness Principle states: “Be conservative in what you send; be liberal in what you accept.” In other words, don’t be too picky when requesting data of a certain type from your users, but when providing data, give ’em exactly what they need. If you make it too hard for your users to enter data into your system, things will likely things break. For instance, within your model code, consider how date and time are defined: A date, and NOTHING but a date will do. ... date = db.DateProperty() time = db.TimeProperty() ... You must provide a valid value for time. Anything else is simply UNACCEPTABLE. The trouble is, when it comes to dates and times, there are lots of ways to specify values. Oh, la, la.. c’est temps to toot mon flute! It’s 14:00hr on 24/04/2011. 
384 Chapter 10 Get the low-down on the hoedown: quarter after six on 6/17/2011. I say, old boy, tea is at noon on the first of each month. scaling your webapp Accept almost any date and time If you are going to insist on asking your users to provide a properly formatted date and time, you’ll need to do one of two things: • Specify in detail the format in which you expect the data. • Convert the entered data into a format with which you can work. Both approaches have problems. For example, if you are too picky in requesting a date in a particular format, you’ll slow down your user and might end up picking a date format that is foreign to them, resulting in confusion. If you try to convert any date or time entered into a common format that the datastore understands, you’ll be biting off more than you can chew. As an example of the complexity that can occur, how do you know if your user entered a date in mm/dd/yyyy or dd/mm/yyyy format? (You don’t.) There is a third option If your application doesn’t require exact dates and times, don’t require them of your user. With your sightings webapp, the date and time can be free-format fields that accept any value (in any format). What’s important is the recording of the sighting, not the exact date/time it occurred. Of course, other webapps might not be as fast and loose with dates and times. When that’s the case, you’ll need to revert to one of the options discussed earlier on this page and do the best you can. Use “db.StringProperty()” for dates and times If you relax the datatype restrictions on the date and time fields, not only do you make it easier on your user, but you also make it easier on you. For the sightings webapp, the solution is to change the property type for date and time within the hfwwgDB.py file from what they currently are to db.StringProperty(). ... date = db.StringProperty() time = db.StringProperty() ... It’s a small change, but it’ll make all the difference. Let’s see what difference this change makes.
you are here 4 385 test drive Test Drive OK, folks… let’s try this again. Change the types of date and time within hfwwgDB.py to db.StringProperty(), being sure to save the file once you’ve made your edit. Click Back in your web browser and submit your sightings data once more. Success! It appears to have worked this time. Let’s enter another sighting, just to be sure. By relaxing the restrictions you placed on the types of data you’ll accept, your webapp now appears to be working fine. Go ahead and enter a few sightings by clicking on the link on your thank-you page and entering more data. 386 Chapter 10 scaling your webapp With a few sightings entered, let’s use App Engine’s included developer console to confirm that the sightings are in the datastore. To access the console, enter into your web browser’s location bar and click on the List Entities button to see your data. In addition to viewing your existing data in the datastore, you can use the console to enter new test data. App Engine has assigned a “Key” and an “ID” to each of your entities, which comes in handy when you need to uniquely identify a sighting. There’s all the data you entered, which is in a slightly different order than what you might expect. But it’s all in there. (App Engine stores your properties in alphabetical order, by name.) Your GAE webapp is now ready for prime time. Before you deploy it to Google’s cloud infrastructure, let’s run it by the folk at HFWWG to see if they are happy for their webapp to “go live.” you are here 4 387 restrict to registered users It looks like you’re not quite done yet Man, that’s looking good! There’s just one thing we forgot to tell you... we are worried about spam and need to be sure only registered users can enter a sighting. Is that a big change?
You’ll have to create a new entity to hold your registered user login information, and you’ll also need another form to ask users to provide their registration data (which you’ll need to store in the datastore). With that in place, you’ll need yet another form to ask your users to log in, and then you’ll have to come up with a mechanism to allow only registered and logged-in users to view your webapp’s pages, assuming you can come up with something robust that will work…? Or…as this is GAE, you could just switch on authorization. 388 Chapter 10 scaling your webapp Sometimes, the tiniest change can make all the difference… The engineers at Google designed App Engine to deploy on Google’s cloud infrastructure. As such, they decided to allow webapps running on GAE to access the Google Accounts system. By switching on authorization, you can require users of your webapp to log into their Google account before they see your webapp’s pages. If a user tries to access your webapp and he isn’t logged in, GAE redirects to the Google Accounts login and registration page. Then, after a successful login, GAE returns the user to your waiting webapp. How cool is that? To switch on authorization, make one small change to your app.yaml file: application: hfwwgapp version: 1 runtime: python api_version: 1 handlers: - url: /static static_dir: static - url: /.* script: hfwwg.py login: required That’s all there is to it. Now, when you try to access your webapp, you are asked to log in before proceeding. This is how the login screen looks within the GAE test environment running on your computer. you are here 4 389 log login info Capture your user’s Google ID, too Now that your webapp requires your users to log in, let’s arrange to capture the user login information as part of the sighting. Start by adding the following property to your entity’s list of attributes in your hfwwgDB.py file. Add it right after the wave_type property.
Create a new attribute in your “Sighting” class… which_user = db.UserProperty() …and set its property type. Let’s ensure that Django’s form validation framework excludes this new attribute when generating your HTML form. Within your hfwwg.py file, change your SightingForm class to look like this: Make sure Django doesn’t include the new attribute in your generated form. class SightingForm(djangoforms.ModelForm): class Meta: model = hfwwgDB.Sighting exclude = ['which_user'] Staying within your hfwwg.py file, add another import statement near the top of your program: from google.appengine.api import users Import GAE’s Google Accounts API. In your post() method, right before you put your new sighting to the datastore, add this line of code: new_sighting.which_user = users.get_current_user() Every time a user adds a sighting to the datastore, GAE ensures that the user’s Google Account ID is saved, too. This extra identification information allows the HFWWG to track exactly who reported which sighting, and should (hopefully) cut down on the amount of spam your webapp might attract. All that’s left to do is to deploy your webapp to Google’s cloud. 390 Chapter 10 When you put your data to the datastore, this code includes the Google ID of the currently logged-in user. scaling your webapp Deploy your webapp to Google’s cloud The “Dashboard” button opens your web browser and takes you to the GAE “My Applications page (after you sign in with ” your Google ID). With your webapp developed and tested locally, you are now ready to deploy to the Google cloud. This is a two-step process: register and upload. To register your webapp on the Google cloud, click the Dashboard button on the GAE Launcher. On Linux, use the “appcfg.py” script to deploy. Enter your webapp’s name in the box, then click on the “Check Availability” button. Don’t use “hfwwgapp” as that’s already taken. § r webapp, and then Optionally, enter a title foron”youbutton. 
click the “Create Applicati Assuming all went according to plan and GAE confirmed that your application has been created, all that’s left to do is to deploy. Return to the GAE Launcher and click on the Deploy button. The console displays a bunch of status message while the deployment progresses. If all is well, you’ll be told that “appcfg.py has finished with exit code 0”. Your GAE webapp is now ready to run on Google’s cloud. you are here 4 391 test drive Test Drive, on Google Let’s take your webapp for a spin on Google’s cloud. Open your web browser and surf to a web address that starts with your webapp’s name and ends in .appspot.com. For the HFWWG webapp, the web address is. When you first attempt to go to their webapp, App Engine redirects you to the Google login page. Provide your Google ID and password, or sign up for a new Google account (if you don’t have one already). 392 Chapter 10 scaling your webapp After a successful login, your sighting form appears. Go ahead and enter some test data: The sighting form served from the Google cloud is EXACTLY the same as the form served by your test server. Return to the oogle. com site to log into the consol e. Th is a little different than the tes e UI but you can use the Datastore Viet console, confirm that your data has bee wer to n stored correctly. Click on this link to see your data as stored in the Google cloud. you are here 4   393 a winning webapp Your HFWWG webapp is deployed! I’m all packed up and ready for a weekend of whale watching. I can’t wait to enter my sightings data online! This is waaaay cool. Look at that fabulous webapp. This is exactly what we need. Super work! Lovely... I finally have time to relax, now that I have no more marathon data-entry sessions. This is professional work. You’ve built a great data-entry webapp and deployed it on Google’s cloud. 
No matter how busy things get, whether there’s a handful of sightings per day or tens of thousands, your webapp can handle the load, thanks to Google’s App Engine. And, best of all, the cash-strapped HFWWG doesn’t pay a penny until their sightings activity reaches the level of millions of sightings per month! 394 Chapter 10 Did you even notice that you wrote all of your code using Python 2.5? scaling your webapp Your Python Toolbox Every App Engine webapp must have a configuration file called app.yaml. App Engine Lingo data repository used • “Datastore” - the tly by Google App Engine to permanen store your data. e used for a “row of • “Entity” - the nam data”. e used for a “data • “Property” - the nam value”. Use the GAE Launcher to start, stop, monitor, test, upload, and deploy your webapps. App Engine’s templating technology is based on the one use in the Django Project. App Engine can also use Django’s Form Validation Framework. Use the self.response object to construct a GAE web response. Use the self.request object to access form data within a GAE webapp. When responding to a GET request, implement the required functionality in a get() method. When responding to a POST request, implement the required functionality in a post() method. Store data in the App Engine datastore using the put() method. you are here 4 395 CHAPTER 10 You’ve got Chapter 10 under your belt and you’ve added more great Python technology to your everexpanding Python toolbox. 11 dealing with complexity Data wrangling Once I build up a head of steam, it’s not all that hard to keep on running, and running, and running.... this is a new chapter 397 pace data. Here’s a portion of the Marathon Club’s spreadsheet data. The timed distance is 15km. The predicted marathon goal. 398 Chapter 11 The runner selects a distance, say 15K, and we time her over the length of her run. dealing with complexity Yes, she’s wicked fast! I run the 15K in 45:01. 
My coach looks up the closest match for my time along the 15K row. This benchmark allows me to look up or down the column to select a target time for any other distance, such as a marathon, for example. This spreadsheet is a little intimidating…but don’t worry. You’ll sort it out soon. you are here 4 399 rain delay So…what’s the problem? At the moment, we print our data onto multiple sheets of paper, which we carry with us. Most of the time, this works fine. But when it rains or gets really windy, our pages are either soaked through or they end up all over the place. All these sheets are a pain…especially in the rain. Not to mention: forgetting the sheets, keeping the sheets up to date, and having to flip back and forth through the sheets looking for a closest match. Of course, word of your newly acquired Python programming skills is getting around, especially among the running crowd. Ideally, the Marathon Club needs an Android app that can be loaded onto a bunch of phones and carried in each coach’s pocket. The app needs to automate the lookup and distance predictions. Are you up to the challenge? Do you think you can help? 400 Chapter 11 dealing with complexity Start with the data For now, let’s not worry about creating the Android app; you’ll get to that soon enough. Instead, let’s solve the central data wrangling problem and then, when you have a working solution, we’ll worry about porting your solution to Android. We’ll begin by getting the data into a format you can easily process with Python. Most spreadsheet programs can export data to the widely used CSV format. The club has done this for you and created a file called PaceData.csv, which includes the data for each of the rows from the original spreadsheet. Here’s a sample of the raw data from the start of the CSV: The first line of data is the column headings from the spreadsheet. They look like numbers but are actually headings that represent estimated maximal oxygen consumption (or VO2 Max, ml/kg/min) for the race times in each column. Because they have no effect on the timing data, we’ll just treat them as headings. Do this! Grab a copy of PaceData.csv from this book’s support website. V02,84.8,82.9,81.1,79.3,77.5,75.8,74.2,72.5,70.9,69.4,67.9,66.4,64.9,63.5,62.1,60.7,59.4,58.1,56.8,55. 2mi,8:00,8:10,8:21,8:33,8:44,8:56,9:08,9:20,9:33,9:46,9:59,10:13,10:26,10:41,10:55,11:10,11:25,11:40,1 5k,12:49,13:06,13:24,13:42,14:00,14:19,14:38,14:58,15:18,15:39,16:00,16:22,16:44,17:06,17:30,17:53,18: The first value on each of the rest of the lines is the timed distance or row label. The rest of each line is a list of recorded run times. You somehow have to model the data from the CSV file in your Python program. Can you think of a data structure that might help here? Justify your selection. you are here 4 401 multiple associations You somehow have to model the data from the CSV file in your Python program. You were to think of a data structure that might help here. You were also to justify your selection.
The column headings are on the first line. This string is redundant. The row labels are unique to each row of times. V02, 84.8, 82.9, 81.1, 79.3, 77.5, 75.8, 74.2 2mi, 8:00, 8:10, 8:21, 8:33, 8:44, 8:56, 9:08, 5k, 12:49, 13:06, 13:24, 13:42, 14:00, 14:19, 14:38, The times on each row are associated with their row label, but ALSO with a column heading. But how can we capture all these associations in code? 402 Chapter 11 dealing with complexity Marathon Magnets What needs to go here? See if you can arrange the magnets into their correct order. with open('PaceData.csv') as paces: Process the “column_headings” list here. for each_line in paces: Process “row_data” here. num_cols = len(column_headings) With the data loaded, this code lets you check if it’s all OK. print(num_cols, end=' -> ') print(column_headings) num_2mi = len(row_data['2mi']) print(num_2mi, end=' -> ') print(row_data['2mi']) num_Marathon = len(row_data['Marathon']) print(num_Marathon, end=' -> ') print(row_data['Marathon']) row = each_line.strip().split(',') column_headings = row_data[row_label] = row paces.readline().strip().split(',') row_label = row.pop(0) row_data = {} column_headings.pop(0) you are here 4 403 read table data Marathon Magnets Solution. You were to see if you could arrange the magnets into their correct order. You need to be sure to create an empty dictionary for the row times. row_data = {} Create the column headings from the first line of data. with open('PaceData.csv') as paces: column_headings = paces.readline().strip().split(',') Read a line from the file, strip it of unwanted whitespace, and split the line on the comma. Process the rest of the file. for each_line in paces: row = each_line.strip().split(',') It’s the same deal here: take the line, strip it, and row_label = row.pop(0) then split on comma. column_headings.pop(0) Delete the first heading, the “V02” string—you don’t need it. Extract the row label.
row_data[row_label] = row num_cols = len(column_headings) print(num_cols, end=' -> ') print(column_headings) num_2mi = len(row_data['2mi']) print(num_2mi, end=' -> ') print(row_data['2mi']) num_Marathon = len(row_data['Marathon']) print(num_Marathon, end=' -> ') print(row_data['Marathon']) 404 Chapter 11 Use the row label together with the rest of the line’s data to update the dictionary. dealing with complexity Test Drive Load your code into IDLE and, with the CSV in the same folder as your code, run it to see what you get on screen. Your code in IDLE. The output confirms that each row of data has 50 data items. The column headings The “2mi” row of data The “Marathon” row of data That’s a great start: you’ve managed to read the data from the CSV and put the headings into a list and the data into a dictionary. What’s next? you are here 4 405 link data structures Did you forget to associate each time on each row with its heading? At the moment, the list and the dictionary are disconnected... Yes, the two data structures should be linked. At the moment, the column_headings list and the row_data dictionary are not linked in any way, and they need to be. What we need is some way to connect each of the times in each row with the heading that tops their column of data. What options do you have here? When it comes to linking (or associating) two data items with each other, the Python dictionary is the data structure of choice, isn’t it? 406 Chapter 11 dealing with complexity Store each time as a dictionary Rather than simply storing each time in the row_data dictionary as a number, let’s store the data as a dictionary, with the key set to the time and the value set to the column heading. That way, you can quickly and easily determine for any time which column it is associated with, right?
Here’s a portion of what the data structure looks like in Python’s memory once this association exists: Instead of a single time val eac h row item is now an inner dicue, tio nar that associates the time with its y column heading. The “row_data” dictionary no longer contains a list. row_data['2mi'] ={ 8:00 8:10 8:21 84.8 82.9 81.1 } All you need to do is work out how to populate the inner dictionary with the row data and the associated columns headings…and you’ll have all the data you need. The trick in creating the data structure is to realize that each row, including the column headings, are of a fixed size: 50 items. Knowing this, it’s not much work to create the dictionary you need: row_data = {} No changes are needed here. with open('PaceData.csv') as paces: column_headings = paces.readline().strip().split(',') column_headings.pop(0) for each_line in paces: row = each_line.strip().split(',') Create another, empty dictionary. row_label = row.pop(0) Let’s not hardcode 50, calc the value instead. inner_dict = {} for i in range(len(column_headings)): inner_dict[row[i]] = column_headings[i] With each iteration, “i” is the current column number. Associate the column heading with the time value from the row. row_data[row_label] = inner_dict With the dictionary populated, ass ign it to its label in “row_data”. you are here 4 407 idle session Go ahead and add the extra dictionary populating code to your program. Let’s remove all of those print() statements from the end of your program, because you’ll use the IDLE shell to test your code. Run the code by pressing F5 or by selecting the Run Module option from the Run menu. Use the dir() BIF to confirm that your program code executed and that a collection of variables have been created in Python’s namespace: >>> dir() ['__builtins__', '__doc__', '__name__', '__package__', 'column_headings', 'each_line', 'i', 'inner_dict', 'paces', 'row', 'row_data', 'row_label'] All of your code’s variables exist. 
Take another look at (part of) the spreadsheet data file above, and let’s try to find the column heading associated with the 43:24 time on the 15k row. Let’s then use the column heading to find the predicted time for a 20k run: >>> column_heading = row_data['15k']['43:24'] >>> column_heading '81.1' The associated column heading is correctly identified as “81.1”. >>> prediction = [k for k in row_data['20k'].keys() if row_data['20k'][k] == column_heading] >>> prediction ['59:03'] 408 Chapter 11 A time of “59:03” is correctly predicted, too. dealing with complexity Dissect the prediction code Let’s take a moment to review what just happened at the bottom of the IDLE Session from the last page. This line of code is a double-dictionary lookup on the dictionary-of-dictionaries stored in row_data: This is a dictionary key within “row_data”. column_heading = row_data['15k']['43:24'] And this is another dictionary key, which indexes into the dictionary at “row_data[‘15k’]”. Look up the ‘15k’ row data, then look up the value associated with the ‘43:24’ time, which is then assigned to “column_heading”. Working out the predicted time in the 20k row of data involves finding the key in the row’s dictionary whose value is set to the just-discovered value stored in column_heading. You are interested only in data that satisfies this conditional. prediction = [k for k in row_data['20k'].keys() if row_data['20k'][k] == column_heading] This is the data you’re searching. A conditional list comprehension is put to good use here. Recall that the list comprehension syntax is a shorthand notation for a for loop. The loop searches through the data in the list of keys associated with the dictionary stored at row_data['20k']. If the value associated with the key (in k) is the same as column_heading, the value of k is added to the comprehension’s results, which are then assigned to a new list called prediction. There’s really an awful lot going on in that comprehension.
you are here 4 409 list comprehension Sweet mother of all things Python! What’s going on here? I think my brain is going to explode... Don’t let the list comprehension put you off. Recall that you can always rewrite a list comprehension using an equivalent for loop… Ummm…now there’s an idea. 410 Chapter 11 dealing with complexity Rewrite each of the list comprehensions on this page to use a for loop. times = [t for t in row_data['Marathon'].keys()] headings = [h for h in sorted(row_data['10mi'].values(), reverse=True)] time = [t for t in row_data['20k'].keys() if row_data['20k'][t] == '79.3'] you are here 4   411 for loop You were to rewrite each of the list comprehensions to use a for loop. times = [t for t in row_data['Marathon'].keys()] Start with an empty list. Turn the dictionary’s keys into a list. times = [] for each_t in row_data[‘Marathon’].keys(): times.append(each_t) time With each iteration, append the key (which is a value) onto the “times” list. headings = [h for h in sorted(row_data['10mi'].values(), reverse=True)] Start with an empty list. Turn the dictionary’s values into a list… headings = [] for each_h in sorted(row_data[‘10mi’].values(), reverse=True): …being sure to sort the values in reverse order (biggest first). headings.append(each_h) With each iteration, append the value (which is a column heading) onto the “times” list. time = [t for t in row_data['20k'].keys() if row_data['20k'][t] == '79.3'] Start with an empty list. time = [] 412 Chapter 11 list. for each_t in row_data[‘20k’].keys(): There’s a definite pattern emerging here. § Turn the dictionary’s keys into a if row_data[‘20k’][each_t] == ‘79.3’: time.append(each_t) With each iteration, check to see if the column heading (the value part of the dictionary) equals “79.3” and if it does, append the time to the list. dealing with complexity Get input from your user Now that you have your data within a Python data structure, it’s time to ask your user what it is they are looking for. 
Specifically, you need to know three things: the distance run, the time recorded, and the distance a prediction is required for. When you get to move your app onto Android, you can use a nice graphical dialog to ask your user for input, but for now, let’s quickly create a textbased user interface, which will allow you to develop and test the rest of the functionality required from your application. When you’re done, you’ll create the Android app. Use input() for input Gimme, gimme, gimme...input() !!! >>> Python has the input() BIF that can help here, which is used to display a prompt on screen, and then accept keyboard input, returning what was entered as a string to your code. Using the input() BIF is best demonstrated with some examples: >>> res = input('What is your favorite programming language: ') What is your favorite programming language: Python >>> res 'Python' Provide the prompt to display to your user. The entered data is assigned to “res” and it’s a STRING. The input() BIF returns a string, which has been stripped of any trailing newline character, which would typically be included at the end of any input string. It is important to note that any input is returned as a string, regardless of what type of data you think you might be entering: The entered data is assigned to “age” and it’s a string, even though you might want to treat it like it’s a number. >>> age = input('What is your age: ') What is your age: 21 >>> age '21' Convert the input to the typa.e you need BEFORE using the dat >>> int(age) 21 [Editor’s note: Yeah… dream on, Paul. §] you are here 4 413 input error Getting input raises an issue… It’s not hard to use input() to get the, um, er…input you need. Here’s your code from earlier with three calls to input() added to interact with your user. There’s nothing to this, as ()” user-interaction with “inputn doesn’t get much easier tha this.. 
When your program runs, your user enters some data, and look what happens: A “KeyError” exception has been raised…but why? 414 Chapter 11 dealing with complexity If it’s not in the dictionary, it can’t be found. The data in the row_data dictionary originally comes from the spreadsheet and is read into your program from the CSV file. If the data value entered into the recorded_time variable is in the dictionary, things are going to be fine, because there’s a match. However, if the data entered into the recorded_time variable doesn’t match anything in the dictionary, you’ll get a KeyError. But how is this “problem” handled during training? If we have a match, great. If not, we look for the closest match and work from there... The entered time for a 20k run (59:59) falls between these values on the pace sheet. two you are here 4 415 close enough Search for the closest match All you need to do is search the row of data for the closest match, right? And guess what? The Head First Code Review Team think they have some functions that might help here. There’s nothing better than sharing our code with our fellow Python programmers. Check out our “find_it” module. This code is in a file called “find_it.py” and you can download a copy from this book’s support website. Here’s an example of a nested function, which is allowed in Python. Given two values this function returns the , difference between them. This may not be the most efficient search code ever written, but it works. 416 Chapter 11 The “find_closest” function does a simple linear search, returning the value in “target_data” that most closely matches the “look_for” argument. dealing with complexity Let’s test the find_it.py module to try and determine if it meets the requirements of your application. 
Load the module into IDLE and then press F5 or choose Run Module from the Run menu: >>> find_closest(3.3, [1.5, 2.5, 4.5, 5.2, 6]) 2.5 >>> find_closest(3, [1, 5, 6]) 1 >>> find_closest(3, [1, 3, 4, 6]) 3 >>> find_closest(3.6, [1.5, 2.5, 4.5, 5.2, 6]) 4.5 Given a value to look for and some target data, the “find_closest” function seems to be doing the trick. >>> find_closest(3, [1, 4, 6]) 4 >>> find_closest(2.6, [1.5, 2.5, 4.5, 5.2, 6]) 2.5 Let’s try it with some of data that more closely resembles your CSV data: >>> find_closest('59:59', ['56:29', '57:45', '59:03', '1:00:23', '1:01:45']) Traceback (most recent call last): File "<pyshell#23>", line 1, in <module> find_closest('59:59', ['56:29', '57:45', '59:03', '1:00:23', '1:01:45']) File "/Users/barryp/HeadFirstPython/chapter11/find_it.py", line 15, in find_closest if diff == 0: File "/Users/barryp/HeadFirstPython/chapter11/find_it.py", line 11, in whats_the_difference TypeError: unsupported operand type(s) for -: 'str' and 'str' Yikes! Something’s seriously broken here. What do you think has gone wrong here? Why does the find_closest() function crash when asked to work with data from your CSV file? you are here 4 417 time trials The trouble is with time The data in your CSV file is a representation of timing values. Rather than actual numbers, the values in the CSV are strings. This is great for you, because you understand what the representation means. Python, on the other hand, sees the data only as strings. When you send your data to the find_closest() function, Python attempts to treat your strings as numbers and chaos ensues. What might work would be to convert the time-strings into numbers. But how? When I have to work with times, I always convert my time strings to seconds first... 418 Chapter 11 Yeah, of course! Didn’t we write the “tm2secs2tm” module to handle this sort of thing? dealing with complexity The time-to-seconds-to-time module The Head First Code Review Team’s generosity knows no bounds. 
Sure enough, their rather strangely name tm2secs2tm.py module looks like it might help. Here’s the guy’s “tm2secs2tm.py” module. Grab a copy of this code fro this book’s support website. m atted in This function ensures that all times are form le when doing simp gs thin keep s “HH:MM:SS” format. This help conversions to seconds. Given a “time string”, convert it to a value in seco nds. Convert a value in seconds to a “time string”. The code you need has been started for you. Now that you have the tm2secs2tm.py and find_it.py modules, let’s create a function that uses the facilities provided by these modules to solve your searching problem. Your new function, called find_nearest_time(), takes two arguments: the time to look for and a list of times to search. The function returns the closest time found as a string: from find_it import find_closest from tm2secs2tm import time2secs, secs2time def find_nearest_time(look_for, target_data): Unlike in the previous chapter, it is possible to do what you need to do here in only four lines of code. you are here 4 419 time to string Now that you have the tm2secs2tm.py and find_it.py modules, you were to create a function that uses the facilities provided by these modules to solve your searching problem. Your new function, called find_nearest_time(), takes two arguments: the time to look for and a list of times to search. The function returns the closest time found as a string: Import the team’s code. from find_it import find_closest from tm2secs2tm import time2secs, secs2time def find_nearest_time(look_for, target_data): Convert the time string you are looking for into its equivalent value in seconds. what = time2secs(look_for) Call “find_closest()”, supplying the converted data. res = find_closest(what, where) where = [time2secs(t) for t in target_data] return(secs2time(res)) The function takes two arguments, a time string and a list of time strings. Convert the lines of time strings into seconds. 
Return the closest match to the calling code, after converting it back to a time string. Let’s try out your code at the IDLE shell to see if your time “problems” have been resolved: Here’s some of your pace data. Let’s work with data from the “20k” row. >>> find_nearest_time('59:59’, ['56:29', '57:45', '59:03', '1:00:23', '1:01:45']) ' 420 Chapter 11 Great! This appears to be working fine. dealing with complexity Test Drive With all this code available to you, it’s an easy exercise to put it all together in your program and produce a complete solution to the Marathon Club’s prediction problem. Let’s take it for a test run. This code is used “as is”. Search for Find the nearest time a predicted within the data. time at Extract the column heading. the desired distance and display it on screen. same Try out your program with the . data input from earlier Another “KeyError”… After all that, you’re getting the same error as before. Bummer. you are here 4 421 more time trials The trouble is still with time… Or, to be more precise, with how the tm2secs2tm.py module formats time strings. Take another look at the results from the previous IDLE Session. Do you notice anything strange about the results returned by the call to the find_nearest_time() function? >>> find_nearest_time('59:59’, ['56:29', '57:45', '59:03', '1:00:23', '1:01:45']) All the returned times use the “HH:MM:SS” format. ' When your code takes one of these returned values and tries to index into your dictionary, there’s no match found, because your dictionary’s keys do not confirm to the HH:MM:SS format. The solution to this problem is to ensure that every time you use a time-string in your code, make sure it’s in HH:MM:SS format: Import the “format_time()” function from the “tm2secs2tm.py” module. Use the function to ensure times used internally by your codethe are formatted in “HH:MM:SS” forma t. 422 Chapter 11 dealing with complexity Test Drive Let’s try your code one more time. 
Hopefully, now that all of the time strings within the system conform to HH:MM:SS format, your code will behave itself. This is the previous test, which crashed with a “KeyError”. This time around, your program behaves itself and works fine. Another test confirms that things are working well. And one final test makes sure. This is working well. You’ve solved your application’s central problem: your program reads in the spreadsheet data from the CSV file, turns it into a dictionary of dictionaries, and lets you interact with your user to acquire the recorded time at a particular distance before predicting a time for another distance. Not counting the code provided by the Head First Code Review Team, you’ve written fewer than 40 lines of code to solve this problem. That’s quite an achievement. All that’s left to do is to port your program to the club’s Android phones. And porting to Android won’t take too long, will it? you are here 4 423 android interface Port to Android Your code is working great. Now it’s time to port your text-based Python program to Android. Most of your code doesn’t need to change, only the parts that interact with your user. Obviously, you’ll want to make things as easy to use as possible for users of your latest Android app, providing an interface not unlike this one. 1. Start by picking a distance run… 2. Enter the recorded time… These are both “dialogSetSingleChoiceItems” dialog boxes. This is a “dialogGetInput” dialog box. 3. Select a distance to predict to… 424 Chapter 11 4. After the lookup, display the predicted time. This is a “dialogSetItems” dialog box. dealing with complexity Your Android app is a bunch of dialogs Your Android app interacts with your users through a series of dialogs. Other than the single dialog that requests data from your user, the other three share certain similarities.
You can take advantage of these shared features by creating a utility function which abstracts the dialog creation details: The dialog’s title string. The list of values to display. The dialog creation method name. The text for the buttons, with defaults. def do_dialog(title, data, func, ok='Select', notok='Quit'): app.dialogCreateAlert(title) func(data) app.dialogSetPositiveButtonText(ok) if notok: app.dialogSetNegativeButtonText(notok) app.dialogShow() return(app.dialogGetResponse().result) Display the dialog and then return the selected item. Assume the existence of a list called distances, which contains the row distance labels (2mi, 5k, 5mi, and so on). In the space below, provide the two calls to the do_dialog() function needed to create the two dialogSetSingleChoiceItems shown on the left of the previous page. you are here 4 425 adding dialog You were to assume the existence of a list called distances, which contains the row distance labels. In the space below, you were to provide the two calls to the do_dialog() function needed to create the two dialogSetSingleChoiceItems. do_dialog("Pick a distance", distances, Provide the dialog title. Ditto: do it all again for the other dialog. do_dialog("Pick a distance to predict", distances, app.dialogSetSingleChoiceItems) Provide the type of dialog to use. do_dialog('The predicited time running ' + predicted_distance + Here’s another example. app.dialogSetSingleChoiceItems) Provide the list of items to display. ' is: ', prediction, app.dialogSetItems, "OK", None) Use a different dialog creating method this time. This last one’s a little trickier, because you have to build up the dialog tit from some variables (that you’ll le need to have created first). Override the default values for the dialog’s buttons. Get your Android app code ready To use your dialog creating code, import the necessary libraries, define some constants, create an Android object, and reuse some code from earlier in this book: Do your imports. 
import time import android ... distances = [ '2mi', '5k', '5mi', '10k', '15k', '10mi', '20k', '13.1mi', '25k', '30k', 'Marathon' ] ... hello_msg = "Welcome to the Marathon Club's App" quit_msg = "Quitting the Marathon Club's App." ... app = android.Android() Create an Android app object. def status_update(msg, how_long=2): app.makeToast(msg) time.sleep(how_long) 426 Chapter 11 Create a list of row labels. This function is taken “as-is” from earlier in this book. Define two friendly messages. dealing with complexity Android Pool Puzzle Your job is to take the code from the pool and place it into the blank lines in your Android app code. You can assume that the row_data dictionary exists and has been populated. The variables shown at the bottom of the last page have also been created, and the status_update() and do_dialog() functions are available to you. Your goal is to arrange the code so that it implements the UI interactions you need. status_update(hello_msg) resp = do_dialog("Pick a distance", distances, ) The dialogGetInput() method displays the input dialog box. 
distance_run = distance_run = distances[distance_run] = app.dialogGetInput("Enter a " + distance_run + " time:", "Use HH:MM:SS format:").result closest_time = find_nearest_time(format_time( ), row_data[distance_run]) closest_column_heading = row_data[distance_run][closest_time] resp = do_dialog("Pick a distance to predict", distances, ) predicted_distance = predicted_distance = distances[predicted_distance] prediction = [k for k in row_data[predicted_distance].keys() if row_data[predicted_distance][k] == closest_column_heading] do_dialog('The predicted time running ' + predicted_distance + ' is: ', prediction, app.dialogSetItems, "OK", None) status_update(quit_msg) recorded_time app.dialogGetSelec tedItems().result[ ems 0] eIt oic eCh ngl tSi app.dialogSe ems eIt oic eCh ngl tSi gSe alo app.di if resp['which'] in ('positive'): in ('positive'): if resp['which'] app.dialogGetSelec tedItems().result[ recorded_time 0] you are here 4   427 out of the pool Android Pool Puzzle Solution Your job was to take the code from the pool and place it into the blank lines in your Android app code.You were to assume that the row_data dictionary exists and has been populated. The variables you need also have been created, and the status_update() and do_dialog() functions were available to you. Your goal was to arrange the code so that it implements the UI interactions you need. Ask your user to pick a distance from the list of labels. status_update(hello_msg) resp = do_dialog("Pick a distance", distances, app.dialogSetSingleChoiceItems) Assign the selected distance label distance_run = app.dialogGetSelectedItems().result[0] to “distance_run”. Ask your user enter the distance_run = distances[distance_run] recorded time. recorded_time = app.dialogGetInput("Enter a " + distance_run + " time:", if resp['which'] in ('positive'): "Use HH:MM:SS format:").result closest_time = find_nearest_time(format_time(recorded_time), row_data[distance_run]) Work out what column heading to use. 
Look up the prediction. closest_column_heading = row_data[distance_run][closest_time] resp = do_dialog("Pick a distance to predict", distances, app.dialogSetSingleChoiceItems) if resp['which'] in ('positive'): predicted_distance = app.dialogGetSelectedItems().result[0] predicted_distance = distances[predicted_distance] prediction = [k for k in row_data[predicted_distance].keys() Ask your user to pick a distance from the list of labels to predict to. if row_data[predicted_distance][k] == closest_column_heading] do_dialog('The predicted time running ' + predicted_distance + ' is: ', prediction, app.dialogSetItems, "OK", None) status_update(quit_msg) 428 Chapter 11 Display the predicted time at the selected distance to your user. dealing with complexity Put your app together… You now have all the code you need to create your app: Do your imports. Include your “find_nearest()” function. Declare your constants. NOTE: the location of the data file on the SDCARD is specific to Android. Grab and preprocess your CSV data. Create your Android app object and include your helper functions. Display your UI to your user and process the resulting interaction. you are here 4 429 test drive Test Drive It’s time to test your Android app on the Android Emulator before loading a working application onto a “real” phone. Start your Android emulator and begin by transferring your code and the files it needs onto the emulator’s SDCARD. Use the adb command in the tools folder to copy marathonapp.py, find_it.py, tm2sec2tm.py and PaceData.csv to the emulator, and then take your app for a spin. File Edit Window Help CopyToEmulator $ tools/adb push marathonapp.py /mnt/sdcard/sl4a/scripts 43 KB/s (2525 bytes in 0.056s) $ tools/adb push find_it.py /mnt/sdcard/sl4a/scripts 7 KB/s (555 bytes in 0.069s) Copy your code and its support files to the emulator with these commands. 
$ tools/adb push tm2secs2tm.py /mnt/sdcard/sl4a/scripts 12 KB/s (628 bytes in 0.050s) $ tools/adb push PaceData.csv /mnt/sdcard/sl4a/scripts 59 KB/s (4250 bytes in 0.069s) And there it is… waiting for you to test it. Go on. You know you want to: tap that app! 430 Chapter 11 dealing with complexity Your app’s a wrap! All that’s left to do is transfer your working Android app to the Marathon Club’s phones…and that’s easy when you use AndFTP. When you show off your latest work, the club’s members can’t believe their eyes. This is fantastic! Now I can work with my coach and the other club members to hit my target times at my chosen distances. There’s no stopping me now... And there’s no stopping you! You’ve put your Python skills and techniques to great use here. Whether you’re building an app for the smallest handheld device or the biggest web server, your Python skills help you get the job done. Congratulations! you are here 4 431 python toolbox Your Python Toolbox CHAPTER 11 You’ve got Chapter 11 under your belt and you’ve demonstrated a mastery of your Python toolbox. Congratulations and well done! Python Lingo “conditional” list comprehension ” is one that includes a trailing “ifol which statement, allowing you to contr as the items are added to the new list comprehension runs. can be rewritten • List comprehensions as an equivalent “for” loop. •A 432 Chapter 11 The input() BIF lets you prompt and receive input from your users. If you find yourself using Python 2 and in need of the input()function, use the raw_input() function instead. Build complex data structures by combining Python’s built-in lists, sets, and dictionaries. The time module, which is part of the standard library, has a number of functions that make converting between time formats possible. dealing with complexity It’s time to go… It’s been a blast having you with us here on Lake Python. Come back soon and often. We love it when you drop by. 
This is just the beginning We’re sad to see you leave, but there’s nothing like taking what you’ve learned and putting it to use. You’re just beginning your Python journey and you’re in the driver’s seat. We’re dying to hear how things go, so drop us a line at the Head First Labs website,, and let us know how Python is paying off for YOU! you are here 4 433 appendix: leftovers The Top Ten Things (we didn’t cover) I don’t know about you, but I think it could do with more spam.... this is an appendix 435 pro tools #1: Using a “professional” IDE Throughout this book, you’ve used Python’s IDLE, which is great to use when first learning about Python and, although it’s a little quirky, can handle most programming tasks. It even comes with a built-in debugger (check out the Debug menu), which is surprisingly well equipped. Chances are, however, sooner or later, you’ll probably need a more full-featured integrated development environment. One such tool worth looking into is the WingWare Python IDE. This professional-level development tool is specifically geared toward the Python programmer, is written by and maintained by Python programmers, and is itself written in Python. WingWare Python IDE comes in various licencing flavor: it’s free if you’re a student or working on an open source project, but you’ll need to pay for it if you are working within a for-profit development environment. Written in Python by Python programmers for other Python programmers...what else could you ask for? The WingWare Python IDE More general tools also exist. If you are running Linux, the KDevelop IDE integrates well with Python. And, of course,there are all those programmer editors which are often all you’ll ever need. Many Mac OS X programmers swear by the TextMate programmer’s editor. There’s more than a few Python programmers using emacs and vi (or its more common variant, vim). Your author is a huge fan of vim, but also spends large portions of his day using IDLE and the Python shell. 
436 appendix leftovers #2: Coping with scoping Consider the following short program: A global variable called “name”. Call the function. A function which attempts to read from and write to the global variable called “name”. See what “name” is set to after the function runs. If you try to run this program, Python complains with this error message: UnboundLocalError: local variable ‘name’ referenced before assignment…whatever that means! When it comes to scope, Python is quite happy to let you access and read the value of a global variable within a function, but you cannot change it. When Python sees the assignment, it looks for a local variable called name, doesn’t find it, and throws a hissy fit and an UnboundLocalError exception. To access and change a global variable, you must explicitly declare that’s your intention, as follows: Some programmers find this quite ugly. Others think it’s what comes to pass when you watch Monty Python reruns while designing your programming language. No matter what everyone thinks: this is what we’re stuck with! § you are here 4 437 testing, testing #3: Testing Writing code is one thing, but testing it is quite another. The combination of the Python shell and IDLE is great for testing and experimenting with small snippets of code, but for anything substantial, a testing framework is a must. Python comes with two testing frameworks out of the box. The first is familiar to programmers coming from another modern language, because it’s based on the popular xUnit testing framework. Python’s unittest module (which is part of the standard library) lets you create test code, test data, and a test suite for your modules. These exist in separate files from your code and allow you to exercise your code in various ways. If you already use a similar framework with your current language, rest assured that Python’s implementation is essentially the same. The other testing framework, called doctest, is also part of the standard library. 
This framework allows you to take the output from a Python shell or IDLE session and use it as a test. All you need to do is copy the content from the shell and add it to your module’s documentation strings. If you add code like this to the end of your modules, they’ll be ready for “doctesting”: if __name__ == "__main__": import doctest doctest.testmod() If your code is imported as a module, this code does NOT run. If you run your module from the command line, your tests run. If you then run your module at your operating system’s command line, your tests run. If all you want to do is import your module’s code and not run your tests, the previous if statement supports doing just that. For more on unittest and doctest, search the online Python documentation on the Web or via IDLE’s Help menu. 438 appendix What do you mean: you can’t hear me...I guess I should’ve tested this first, eh? leftovers #4: Advanced language features With a book like this, we knew we’d never get to cover the entire Python language unless we tripled the page count. And let’s face it, no one would thank us for that! There’s a lot more to Python, and as your confidence grows, you can take the time to check out these advanced language features: Anonymous functions: the lambda expression lets you create small, one-line, non-named functions that can be incredibly useful once you understand what’s going on. Generators: like iterators, generators let you process sequences of data. Unlike iterators, generators, through the use of the yield expression, let you minimize the amount of RAM your program consumes while providing iterator-like functionality on large datasets. Custom exceptions: create your own exception object based on those provided as standard by Python. Function decorators: adjust the behavior of a preexisting function by hooking into its start-up and teardown mechanisms. Metaclasses: custom classes that themselves can create custom classes. 
These are really only for the truely brave, although you did use a metaclass when you created your Sightings form using the Django form validation framework in Chapter 10. Most (but not all) of these language features are primarily of interest to the Python programmer building tools or language extensions for use by other Python programmers. I know I look complex, but I really am quite useful. You might never need to use some of these language features in your code, but they are all worth knowing about. Take the time to understand when and where to use them. See #10 of this appendix for a list of my favorite Python books (other than this one), which are all great starting points for learning more about these language features. you are here 4 439 regexes #5: Regular expressions When it comes to working with textual data, Python is a bit of a natural. The built-in string type comes with so many methods that most of the standard string operations such as finding and splitting are covered. However, what if you need to extract a specific part of a string or what if you need to search and replace within a string based on a specific specification? It is possible to use the built-in string methods to implement solutions to these types of problems, but—more times than most people would probably like to admit to—using a regular expression works better. Consider this example, which requires you to extract the area code from the phone_number string and which uses the built-in string methods: Find the opening “(“. Calculate where the area code is in the string. Extract the area code. This code works fine, but it breaks when presented with the following value for phone_number: phone_number = "Cell (mobile): (555)-918-8271" Jeff Friedl’s regular expression “bible”, which is well worth a look if you want to learn more. Look up the “re” module in Python’s docs, too. Why does this phone number cause the program to fail? 
Try it and see what happens… When you use a regular expression, you can specify exactly what it is you are looking for and improve the robustness of your code: This looks a little strange, but this regular expression is looking for an opening “(“ followed by three digits and then a closing “)”. This specification is much more likely to find the area code and won’t break as quickly as the other version of this program. 440 appendix leftovers #6: More on web frameworks When it comes to building web applications, CGI works, but it’s a little old-fashioned. As you saw in Chapter 10, Google’s App Engine technology supports CGI, but also WSGI and a number of web framework technologies. If you aren’t deploying to the cloud and prefer to roll your own, you have plenty of choices. What follows is a representative sample. My advice: try a few on for size and see which one works best for you. Search for the following terms in your favorite search engine: Django, Zope, TurboGears, Web2py, and Pylons. The “old timers”…but don’t let maturity fool you: these are cracking web frameworks. The “new kids on the block”: leaner, meaner and stuffed full of features. you are here 4 441 data mappers and nosql #7: Object relational mappers and NoSQL Working with SQL-based databases in Python is well supported, with the inclusion of SQLite in the standard library a huge boon. Of course, the assumption is you are familiar with SQL and happy to use SQL to work with your data. But what if you aren’t? What if you detest SQL? An object relational mapper (ORM) is a software technology that lets you use an underlying SQL-based database without having to know anything about SQL. Rather than the procedural interface based on the Python database API, ORMs provide an object-oriented interface to your data, exposing it via method calls and attribute lookups as opposed to columns and rows. 
Many programmers find ORMs a much more natural mechanism for working with stored datasets and the Python community creates and supports a number of them. One of the most interesting is SQL Alchemy, which is popular and included in a number of the web framework technologies discussed in #6. Despite being hugely popular anyway, SQL Alchemy is also interesting because it supports both Python 2 and Python 3, which makes it a standout technology (for now). If you find yourself becoming increasingly frustrated by SQL, check out an ORM. Of course, you have already experienced a similar technology: Google App Engine’s datastore API is very similar in style to those APIs provided by the major Python ORMs. There’s NoSQL, too. In addition to database technologies that let you avoid working with the underlying SQL-based database, a new breed of technologies have emerged that let you drop your SQL database in its entirety. Known collectively as NoSQL, these data tools provide an alternative non-SQL API to your data and do not use an SQL-based database management system at all. As these technologies are relatively new, there’s been more activity around Python 2 than Python 3, but they are still worth checking out. CouchDB and MongoDB are the two most closely associated with robust Python implementations. If you like working with your data in a Python dictionary and wished your database technology let you store your data in much the same way, then you need to take a look at NoSQL: it’s a perfect fit. 442 appendix leftovers #8: Programming GUIs In this book, you’ve created text-based interfaces, web-based interfaces and interfaces that ran on Android devices. But what if you want to create a desktop application that runs on your or your user’s desktop computer? Are you out of luck, or can Python help here, too? Well…as luck would have it, Python comes preinstalled with a GUI-building toolkit called tkinter (shorthand for Tk Interface). 
It’s possible to create a usable and useful graphical user interface (GUI) with tkinter and deploy it on Mac OS X, Windows, and Linux. With the latest version of Tk, your developed app takes on the characteristics of the underlying operating system, so when you run on Windows, your app looks like a Windows desktop app, when it runs on Linux, it looks like a Linux desktop app, and so on. You write your Python and tkinter code once, then run it anywhere and it just works. There are lots of great resources for learning to program with tkinter, with one of the best being the last few chapters of Head First Programming, but since plugging that book would be totally shameless, I won’t mention it again. Other GUI-building technologies do exist, with the PyGTK, PyKDE, wxPython, and PyQT toolkits coming up in conversation more than most. Be warned, however, that most of these toolkits target Python 2, although support for Python 3 is on its way. Search the Web for any of the project names to learn more. Oh, look: it’s one of the GUIs created in “Head First Programming”…and yes, I said I wouldn’t mention THAT book again, but isn’t this GUI beautiful? § you are here 4 443 your bugs, my bugs, and threads #9: Stuff to avoid When it comes to stuff to avoid when using Python, there’s a very short list. A recent tweet on Twitter went something like this: “There are three types of bugs: your bugs, my bugs... and threads.” Threads do indeed exist in Python but should be avoided where possible. This has nothing to do with the quality of Python’s threading library and everything to do with Python’s implementation, especially the implementation known as CPython (which is more than likely the one you’re running now). Python is implemented using a technology known as the Global Interpreter Lock (GIL), which enforces a restriction that Python can only ever run on a single interpreter process, even in the presence of multiple processors. 
What all this means to you is that your beautifully designed and implemented program that uses threads will never run faster on multiple processors even if they exist, because it can’t use them. Your threaded application will run serially and, in many cases, run considerably slower than if you had developed the same functionality without resorting to threads. Main message: don’t use threads with Python until the GIL restriction is removed…if it ever is. 444 appendix Don’t you like my threads...? leftovers #10: Other books There are lots of great books that cover Python in general, as well as specifically within a particular problem domain. Here is a collection of my favorite Python books, which we have no hesitation in recommending to you. If you are a sysadmin, then this is the Python book for you. Includes a great case study involving the porting of a complex Python 2 module to Python 3. The best desktop reference on the market. At 1,200 pages, this is the definitive language reference for Python: it’s got everything in it! Includes some examples with big technology: parsing big XML, and advanced language features. you are here 4   445 Index Symbols and Numbers 404 Error, from web server 242 405 Error, from web server 378 >>> (chevron, triple) IDLE prompt 4 : (colon) in for loop 16 in function definition 29 in if statement 20 , (comma) separating list items 7 {} (curly braces) creating dictionaries 180 creating sets 166 = (equal sign) assignment operator 7 in function argument definition 63 (...) (parentheses) enclosing function arguments 29 enclosing immutable sets 91 + (plus sign) addition or concatenation operator 138 # (pound sign) preceding one-line comments 38 @property decorator 250, 253, 285 ? (question mark) parameter placeholder 321, 350 “...” or ‘...’ (quotes) enclosing each list item 7 “””...””” or ‘’’...’’’ (quotes, triple) enclosing comments 37 ; (semicolon) separating statements on one line 38 [...] 
(square brackets) accessing dictionary items 180, 212 accessing specific list items 9, 18 enclosing all list items 7, 18 A “a” access mode 110 access modes for files 110, 133 addition operator (+) 138 Alt-N keystroke, IDLE 5 Alt-P keystroke, IDLE 5 AndFTP app 288–289 Android apps accepting input from 278–282, 295, 304–307 converting from Python code 424–430 creating 274–277, 281–282 data for. See JSON data interchange format integrating with SQLite 342–348 running on phone 288–289 scripting layer for. See SL4A troubleshooting 277 Android emulator installing and configuring 260–262 running scripts on 264–265, 272–273, 283 Android Market 288 Android Virtual Device. See AVD anonymous functions 439 append() method, lists 10, 14 apps. See Android apps; webapps app.yaml file 356, 395 arguments for functions adding 52, 66–68 optional 63–64, 134 arrays associative. See dictionaries similarity to lists 9–10, 17 as keyword 119, 138 assignment operator (=) 7 associative arrays. See dictionaries attributes, class 190, 194, 212 authorization, user 389–393 AVD (Android Virtual Device) 261, 291 this is the index 447 the index B “batteries included” 32 BIFs. See built-in functions BigTable technology 354, 359 blue text in IDLE 4 books Dive Into Python 3 (CreateSpace) 445 Head First HTML with CSS & XHTML (O’Reilly) 374 Head First Programming (O’Reilly) 443 Head First SQL (O’Reilly) 313 Learning Python (O’Reilly) 445 Mastering Regular Expressions (O’Reilly) 440 Programming in Python 3 (Addison-Wesley Professional) 445 Python Essential Reference (Addison-Wesley Professional) 445 Python for Unix and Linux System Administration (O’Reilly) 445 braces. See curly braces brackets, regular. See parentheses brackets, square. See square brackets BSD, running CGI scripts on 239 build folder 42 built-in functions (BIFs). 
See also specific functions displayed as purple text in IDLE 4 help on 21 importing of, not needed 55 namespace for 55 number of, in Python 21 __builtins__ namespace 55, 71 C cascading style sheet (CSS) 374–375 case sensitivity of identifiers 17 cgi-bin folder 234, 235 CGI (Common Gateway Interface) scripts 217, 235, 243, 253. See also WSGI location of 234, 235 running 239 running from Android 264–265, 272–273, 283 448 Index sending data to 300–303 tracking module for 248–249 troubleshooting 242, 247–250 writing 236–238, 244–246 writing for Android. See SL4A cgi library 300 cgitb module 248–249, 253 chaining functions 146, 172 methods 142, 172 chevron, triple (>>>) IDLE prompt 4 chmod command 239, 253 classes 189–191 attributes of 190, 194, 212 benefits of 189 converting data to dictionary 285–286 defining 190–193, 194, 195–196 inherited 204–209, 212 instances of 190, 191, 194, 195–196 metaclasses 439 methods of 190, 195–196, 198–200 in modules 209, 212 class keyword 191, 212 close() method, database connection 315, 350 close() method, files 75, 103 code editors 35, 436. See also IDLE colon (:) in for loop 16 in function definition 29 in if statement 20 comma (,) separating list items 7 comments 37–38 commit() method, database connection 315, 350 Common Gateway Interface scripts. See CGI scripts comprehension, list 154–159, 172, 409–411, 432 concatenation operator (+) 138 conditional list comprehension 409–411, 432 conditions. 
See if/else statement connection, database closing 314, 315 creating 314, 315 the index connect() method, sqlite3 315, 350 context management protocol 120 Controller, in MVC pattern 221 for GAE webapps 359, 370–373 for webapps 234–238, 244–246 “copied” sorting 144, 145–146, 172 CREATE TABLE statement, SQL 317, 319–320 CSS (cascading style sheet) 374–375 CSV format, converting to Python data types 401–405 curly braces ({}) creating dictionaries 180 creating sets 166 cursor() method, database connection 315, 350 custom code 131 custom exceptions 439 D data for Android apps. See JSON data interchange format bundling with code. See classes duplicates in, removing 161–163, 166–167 external. See database management system; files for GAE webapps. See datastore, for GAE nonuniform, cleaning 148–153 race conditions with 309–310 Robustness Principle for 384–387 searching for closest match 416–417 sending to web server 275, 291 sorting 144–147, 172 storing. See persistence transforming, list comprehensions for 154–159 database API 314–315, 350 database management system 312 closing connection to 314, 315 commit changes to data 314, 315, 350 connecting to 314, 315 cursor for, manipulating data with 314, 315 designing 316–318 inserting data into 321, 324, 348 integrating with Android apps 342–348 integrating with webapps 327–341 managing and viewing data in 326 process for interacting with 314–315 querying 322, 332–333 rollback changes to data 314, 315, 350 schema for 317 SQLite for. See SQLite tables in 317, 319–320, 350 data folder 234 data interchange format. See JSON data interchange format data objects. See also specific data objects getting next item from 54 ID for 54 length of, determining 32 names of. 
See identifiers datastore, for GAE 359–360, 380–383, 384–387, 395 data types converting CSV data into 401–405 converting strings to integers 54 in datastore 360 immutable 91, 103, 116, 138 for JSON 285 for list items 8, 12 date and time data format compatibility issues 418–423 property type for 362, 384–385 db.Blob() type 360 db.DateProperty() type 360 db.IntegerProperty() type 360 db.StringProperty() type 360, 385 db.TimeProperty() type 360 db.UserProperty() type 360, 390 decision statement. See if/else statement decorators, function 439 def keyword 29, 191, 212 dialogCreateAlert() method, Android API 274, 276, 280 dialogGetInput() method, Android API 295, 304–306 dialogGetResponse() method, Android API 274, 276, 278, 280 dialogGetSelectedItems() method, Android API 278, 280 dialogSetItems() method, Android API 279, 280 dialogSetNegativeButtonText() method, Android API 274, 276 dialogSetPositiveButtonText() method, Android API 274, 276, 280 you are here 4 449 the index dialogSetSingleChoiceItems() method, Android API 274, 276 dialogShow() method, Android API 276, 280 dict() factory function 180, 212 dictionaries 178–182, 212 accessing items in 180, 212 compared to lists 179 converting class data to 285–286 creating 180, 182, 186 dictionaries within 407–409 keys for 178, 180, 212 populating 180, 212 reading CSV data into 403–404 values of 178, 180, 212 dir() command 225 directory structure. See folder structure dist folder 42 distribution creating 40–42 updating 60–61, 65 uploading to PyPI 48 Dive Into Python 3 (CreateSpace) 445 djangoforms.ModelForm class 368 Django Project Form Validation Framework 368–369, 395 templates 363–366, 395 doctest framework 438 documentation for Python 3 3, 80, 103 dot notation 10, 194, 196 double quotes. See quotes dump() function, pickle 133–134, 138 dumps() function, json 269, 272, 281, 291 dynamic content 216, 217 E Eclipse editor 35 editors 35, 436. See also IDLE elif keyword 108. See also if/else statement else keyword. 
See if/else statement emacs editor 35, 436 450 Index enable() function, cgitb 248, 253 end_form() function, yate 231, 233 entities, in datastore 360, 395 enumerate() built-in function 54 environ dictionary 300, 350 equal sign (=) assignment operator 7 in function argument definition 63 errors. See exception handling; troubleshooting exception handling 88–95, 103. See also troubleshooting benefits of 95, 100 closing files after 114–115, 120–123 custom exceptions 439 defining with try/except statement 89, 91–94 ignoring found errors 93–94 IndexError exception 17 IOError exception 103, 112–114, 117–119 for missing files 96–98 NameError exception 44, 118 PickleError exception 133–134 specific errors, checking for 101–102 specific errors, details about 117–119 TypeError exception 56–57, 116, 247–249, 283–285 ValueError exception 78–79, 81–82, 103 exception objects 119, 138 except keyword. See try/except statement execute() method, cursor 315, 322, 324, 350 extend() method, lists 10 F F5 key, IDLE 39, 44, 49, 71 factory functions 166 favicon.ico file, for webapp 234 fetchall() method, cursor 322 fetchmany() method, cursor 322 fetchone() method, cursor 322 FieldStorage() method, cgi 244, 253, 296, 300, 350 files. See also persistence access modes for 110, 133 appending data to 110 checking for existence of 118 the index closing 75, 110 closing after exception 114–115, 120–123 CSV format, converting to Python data types 401– 405 exceptions involving, determining type of 117–119 flushing 110 missing, exception handling for 96–98 opening 75, 109–110 opening in binary access mode 133 reading data from 75–78, 142–143 rewinding 76 splitting lines in 77–78 writing 110–113 writing, custom formats for 126–130 writing, default format for 124–125 writing, pickle library for. 
See pickle library finally keyword 115, 138 find() method, strings 84–86, 103 Firefox, SQLite Manager for 326 folder structure for distribution 42 for GAE 356, 370 for webapps 234 for loop 15–17, 32 compared to list comprehension 432 nesting 19–22 forms, HTML 295 creating from template 296–299 Form Validation Framework for 368–369 input restrictions for 376–377, 384–387 sending data to CGI scripts 300–303 stylesheets for 374–375 Form Validation Framework 368–369, 395 405 Error, from web server 378 404 Error, from web server 242 Friedl, Jeff (author, Mastering Regular Expressions) 440 from statement 46, 49 functional programming concepts 157 function decorators 439 functions adding arguments to 52, 66–68 anonymous 439 built-in. See built-in functions (BIFs) G chaining 146, 172 creating 28–30, 170–171 optional arguments for 63–64, 134 recursive 31 sharing. See modules GAE (Google App Engine) 354 configuration and setup for 356–357 controller code for 370–373 data modeling with 360–362 datastore for 359, 380–383, 384–387, 395 deploying webapps to Google cloud 391 folder structure for 356, 370 form generation for 368–369 form input restrictions for 376–377, 384–387 form stylesheets for 374–375 MVC pattern used by 359 SDK for, downloading 355 troubleshooting 378 user authorization for 389–393 view for, desigining 363–369 GAE Launcher 357, 391, 395 garbage collection 116 generators 439 get() method, GAE 370, 379, 395 GET web request 370 GIL (Global Interpreter Lock) 444 glob module 237, 253 Google App Engine. See GAE Google BigTable technology 354, 359 GQL (Google Query Language) API 359 green text in IDLE 4 GUI (graphical user interface), building 443 H hashes. 
See dictionaries header() function, yate 231, 233 Head First HTML with CSS & XHTML (O’Reilly) 374 Head First Programming (O’Reilly) 443 you are here 4 451 the index Head First SQL (O’Reilly) 313 help() built-in function 80, 103 HTML generating for webapp interface 230–231 learning 226 templates for, with Django 363–366 HTML forms. See forms, HTML HTTP server 235 http.server module 235, 253 I id() built-in function 54 IDE 436. See also IDLE identifiers 7, 17, 32 IDLE 3–5, 32 colored syntax used in 4 indenting enforced in 4 preferences, setting 5 prompt in (>>>) 4 recalling and editing code statements 5 running or loading code in 39, 44, 49, 71 TAB completion 5 if/else statement 20, 32 elif keyword 108 in list comprehension 432 negating conditions in 86, 103 images folder 234 immutable data types 138 lists 91, 103, 116 numbers 116 strings 116 import statement 43, 46, 49, 71 include_footer() function, yate 230, 232, 233 include_header() function, yate 230, 232 indentation rules enforced in IDLE 4 for for loops 16 for function definitions 29 for if statement 20 452 Index IndexError exception 17 index.html file, for webapp 234 inherited classes 204–209, 212 __init__() method 191, 212 in operator 16, 118, 138 “in-place” sorting 144, 145, 172 input from Android apps 278–282, 295, 304–307 HTML forms for. See forms, HTML from keyboard after screen prompt 413–414, 432 input() built-in function 413–414, 432 insert() method, lists 10, 14 INSERT statement, SQL 321, 324, 348 instances of classes 190, 191, 194, 195–196 int() built-in function 54 integers, converting strings to 54 interface. See View, in MVC pattern IOError exception 103, 112–114, 117–119 I/O (input/output), handling. 
See files isinstance() built-in function 20–22, 32 iterations for loop 15–17, 19–22, 32 generating with range() function 54–56 while loop 16–17 J JSON data interchange format 266–267, 291 API for, using 269–272 browser differences with 272 data types supported by 285 incompatibility with pickle data objects 284–285 K KDevelop IDE 436 keys, in dictionary 178, 180, 212 keywords, displayed as orange text in IDLE 4 the index L locals() built-in function 118, 138 loops. See iterations lambda expression 439 Learning Python (O’Reilly) 445 len() built-in function 10, 32 lib folder 42 Linux code editors for 35 GAE log messages on 378 IDEs for 436 installing Python 3 on 3 running CGI scripts on 239, 272 running GAE Launcher on 357 transferring files to Android device 288 list() built-in function 54 list comprehension 154–159, 172, 409–411, 432 lists 32. See also data objects adding items to 10–14 bounds checking for 17 classes inherited from 204–208 compared to dictionaries 179 compared to sets 167 creating 6–7, 54 data types in 8, 12 duplicates in, removing 161–163 extracting specific item from 175–176 getting next item from 54 identifiers for 7 immutable 91, 103, 116 iterating 15–17, 157 length of, determining 10, 32 methods for 10 nested, checking for 20–22 nested, creating 18–19 nested, handling 23–25, 28–31 numbered, creating 54 reading CSV data into 403–404 removing items from 10 similarity to arrays 9–10, 17 slice of 160, 172 load() function, pickle 133, 138 loads() function, json 269, 276, 280, 291 Mac OS X code editors for 35 GAE log messages on 378 IDEs for 436 installing Python 3 on 3 running CGI scripts on 239, 272 running GAE Launcher on 357 transferring files to Android device 288 __main__ namespace 45 MANIFEST file 42 mappings. See dictionaries Mastering Regular Expressions (O’Reilly) 440 metaclasses 439 methods 190. 
See also specific methods chaining 142, 172 for classes 195–196, 198–200 creating 212 results of, as attributes 250, 253 self argument of 212 ModelForm class, djangoforms 368 Model, in MVC pattern 221 for GAE webapps 359, 360–362 for webapps 222–225 Model-View-Controller pattern. See MVC pattern modules 34–36, 71 adding functionality to 50–52 classes in 209, 212 creating 35 distribution for, creating 40–42 distribution for, updating 60–61, 65 distribution for, uploading to PyPI 48 importing 43–44, 46 loading in IDLE 39, 49, 71 locations for 38, 49 namespaces for 45–46, 71 in Python Standard Library 36 third-party 36 M you are here 4 453 the index Monty Python 17 multiple inheritance 209 MVC (Model-View-Controller) pattern 221, 232, 253, 359 Controller 234–238, 244–246, 370–373 Model 222–225, 360–362 View 226–233, 363–369 N NameError exception 44, 118 names. See identifiers namespaces 45–46, 71 next() built-in function 54 NoSQL 359, 442 NotePad editor 35 not in operator 161–162 not keyword 86, 103 numbered lists 54 O object relational mapper. See ORM (object relational mapper) objects. 
See data objects open() built-in function 75, 103, 109–110 orange text in IDLE 4 ORM (object relational mapper) 442 os module 76, 300, 350 P para() function, yate 231, 233 parentheses ((...)) enclosing function arguments 29 enclosing immutable lists 91 pass statement 93, 103 persistence 105 pickle library for 132–137 reading data from files 222–224 writing data to files 110–113, 222–224 PickleError exception 133–134 454 Index pickle library 132–137, 138 data modeling using 222–224 incompatibility with JSON data types 284–285 transferring data to a database 321–325 plus sign (+) addition or concatenation operator 138 pop() method, lists 10, 175–176 post() method, GAE 379–383, 395 POST web request 379 pound sign (#) preceding one-line comments 38 print() built-in function 10, 32, 124–125 disabling automatic new-line for 56, 71 displaying TAB character with 56 writing to a file 110, 128, 138 Programming in Python 3 (Addison-Wesley Professional) 445 properties, in datastore 360, 395 @property decorator 250, 253, 285 purple text in IDLE 4 put() method, GAE 395 .pyc file extension 42, 49 .py file extension 35 PyPI (Python Package Index) 36 registering on website 47 uploading distributions to 48 uploading modules to 209 Python 2 compared to Python 3 17 raw_input() built-in function 432 running on Android smartphones 258–259, 291 using with Google App Engine 355 Python 3 compared to Python 2 17 documentation for 3, 80, 103 editors for 35, 436 installing 3 interpreter for. See IDLE learning 445 python3 command building a distribution 41 checking for Python version 3 installing a distribution 41 uploading a new distribution 68 the index Python Essential Reference (Addison-Wesley Professional) 445 Python for Unix and Linux System Administration (O’Reilly) 445 Python, Monty 17 Python Package Index. See PyPI Python Standard Library 36 Q querying a database 322, 332–333 question mark (?) 
parameter placeholder 321, 350 quotes (“...” or ‘...’) enclosing each list item 7 quotes, triple (“””...””” or ‘’’...’’’) enclosing comments 37 R “r” access mode 110 race conditions 309–310 radio_button() function, yate 231, 233 range() built-in function 54–56, 71 raw_input() built-in function 432 readline() method, files 76, 103, 142 recursion 31 regular brackets. See parentheses regular expressions 440 re module 440 remove() method, lists 10 render() function, template 364, 366 Robustness Principle 384–387 rollback() method, database connection 315, 350 runtime errors 88. See also exception handling; troubleshooting S schema, database 317 scoping of variables 437 Scripting Layer for Android. See SL4A scripts. See CGI scripts; SL4A sdist command 41 seek() method, files 76, 103 SELECT/OPTION, HTML tag 376 SELECT statement, SQL 322, 332–333 self argument 192–193, 212 self.request object 379, 395 self.response object 372, 379, 395 semicolon (;) separating statements on one line 38 set() built-in function 166, 172 sets 166, 167, 172 setup() built-in function 40 setup.py file 40, 71 single quotes. See quotes SL4A (Scripting Layer for Android) 258, 291 adding Python to 263 Android apps, creating 274–277 automatic rotation mode, setting 264 documentation for 274 installing 262 Python versions supported 258–259 slice of a list 160, 172 smartphones, apps on. 
See Android apps sorted() built-in function 144–147, 153, 158, 172 sort() method, lists 144–145, 153, 172 split() method, strings 77–78, 80–81, 103, 142 SQL Alchemy 442 SQLite 313, 350 closing connection to 314, 315 committing data to 314, 315 connecting to 314, 315 cursor for, manipulating data with 314, 315 designing database 316–318 inserting data into 321, 324, 348 integrating with Android apps 342–348 integrating with webapps 327–341 managing data in 326 process for interacting with 314–315 querying 322, 332–333 rollback changes to data 314 schema for database 317 tables in, creating 319–320 you are here 4 455 the index sqlite3 command 326 sqlite3 library 313, 315, 350 SQLite Manager, for Firefox 326 SQL (Structured Query Language) 313, 350. See also NoSQL; SQLite; ORM square brackets ([...]) accessing dictionary items 180, 212 accessing specific list items 9, 18 enclosing all list items 7, 18 standard error (sys.stderr) 248, 291 standard input (sys.stdin) 291 Standard Library, Python 36 standard output (sys.stdout) 126–128, 291 start_form() function, yate 231, 233 start_response() function, yate 230, 232 static content 216, 217 static folder 370 str() built-in function 119, 138 strings concatenating 138 converting other objects to 119 converting to integers 54 displayed as green text in IDLE 4 finding substrings in 84–86 immutable 116 sorting 148 splitting 77–78, 80–81 substitution templates for 230, 253 strip() method, strings 108, 138, 142 Structured Query Language. 
See SQL stylesheets for HTML forms 374–375 suite 16, 29, 32 sys module 291 sys.stdout file 126–128, 138 456 Index T TAB character, printing 56 TAB completion, IDLE 5 tables, database 317, 319–320, 350 target identifiers, from split() method 77, 91 .tar.gz file extension 42 Template class 230, 253 template module 364 templates folder 234, 370 templates for GAE 363–366, 395 testing code 438 TextMate editor 35, 436 third-party modules 36 threads 444 time data format compatibility issues 418–423 property type for 362, 384–385 time module 419, 432 Tk Interface (tkinter) 443 traceback 88, 103. See also exception handling; troubleshooting tracing code 58–59 triple chevron (>>>) IDLE prompt 4 triple quotes (“””...””” or ‘’’...’’’) enclosing comments 37 troubleshooting. See also exception handling 404 Error, from web server 242 405 Error, from web server 378 Android apps 277 GAE webapps 378 testing code 438 tracing code 58–59 try/except statement 89, 93–94, 101–102, 103 finally keyword for 115, 138 with statement and 120–123 tuples 91, 103, 116 TypeError exception 56–57, 116, 247–249, 283–285 the index U u_list() function, yate 231, 233 unittest module 438 urlencode() function, urllib 291 urllib2 module 291 urllib module 291 urlopen() function, urllib2 291 user authorization 389–393 user input. See forms, HTML; input UserProperty() type, db 390 V ValueError exception 78–79, 81–82, 103 values, part of dictionary 178, 180, 212 variables, scope of 437 vi editor 35, 436 View, in MVC pattern 221 for GAE webapps 359, 363–369 for webapps 226–233 vim editor 436 W “w” access mode 110 “w+” access mode 110 “wb” access mode 133 webapps 215–217, 253 controlling code for 221, 234–238, 244–246 data modeling for 221, 222–225 designing with MVC 221 design requirements for 218–220 directory structure for 234 Google App Engine for. See GAE input data, sending to CGI scripts 300–303 input forms for. See forms, HTML SQLite used with 327–341 view for 221, 226–233 Web-based applications. 
See webapps web frameworks 441. See also CGI; WSGI web request 216, 253, 395 web response 216–217, 253, 395 web server 216–217, 235 Web Server Gateway Interface (WSGI) 356, 370. See also CGI scripts while loop 16–17, 55 WingIDE editor 35 WingWare Python IDE 436 with statement 120–123, 138 WSGI (Web Server Gateway Interface) 356, 370. See also CGI scripts Y yate (Yet Another Template Engine) library 226–233 yield expression 439 you are here 4 457 Download from Wow! eBook <>
https://issuu.com/jrojas/docs/oreilly.head.first.python.nov.2010
CC-MAIN-2018-30
refinedweb
90,942
73.07
Out-of-Core Dataframes in Python: Dask and OpenStreetMap In recent months, a host of new tools and packages have been announced for working with data at scale in Python. For an excellent and entertaining summary of these, I'd suggest watching Rob Story's Python Data Bikeshed talk from the 2015 PyData Seattle conference. Many of these new scalable data tools are relatively heavy-weight, involving brand new data structures or interfaces to other computing environments, but Dask Basemap toolkit to visualize the results on a map. The Data: OpenStreetMap¶ The data we will be looking at here is the extracted database of marked locations in Open Street Map. OpenStreetMap is a free and open, crowd-sourced online mapping tool, meaning that everything on it has been added by an individual user. This has resulted in a compiled collection of nearly ten million distinct points of interest, comprising everything from post boxes to restaurants to historical sites to park benches. An extracted table of data on these points of interest can be found on the OSM-x-tractor site, a free project which compiles these points of interest and makes them available in a variety of common formats. For this post, I downloaded the CSV file for World Points of Interest, and extracted the compressed file into "POIWorld.csv", a standard comma-separated value file. This file has just over nine million lines, each of which corresponds to a specific point of interest: nrows = sum(1 for _ in open('POIWorld.csv')) nrows 9140114 Using command-line tools, we can also see that the size of the file is about 760MB. !ls -lh POIWorld.csv -rw-r--r-- 1 jakevdp staff 761M Jul 14 14:10 POIWorld.csv While this could fit into memory on most modern machines, here we'll take a more scalable approach, utilizing Dask to do our data ingestion and manipulation out-of-core. The benefit is that the approach used here will straightforwardly scale to even larger datasets analyzed across multiple machines. 
Before we begin to look at this data, let's take a look at Dask and how it can help us with this problem. Dask Basics¶ Dask, fundamentally, is a lightweight generator of task graphs for Python. A task graph is a way of describing a sequence of operations so that they can be executed at a later point. By building these task graphs, Dask describes the exact sequence of inputs, operations, and outputs that your algorithm requires, and can send this description to a variety of backends for efficient parallel and/or out-of-core computation. Though the low-level details are interesting, it is the higher-level interfaces that most people will use. These interfaces are: dask.bag: create task graphs using a functional programming style dask.array: create task graphs using a NumPy-like array interface dask.dataframe: create task graphs using a Pandas-like DataFrame interface Each of these provides a familiar Python interface for operating on data, with the difference that individual operations build graphs rather than computing results; the results must be explicitly extracted with a call to the compute() method. As an example of Dask in action, let's take a look at an example of using dask.array. import numpy as np # create an array of normally-distributed random numbers a = np.random.randn(1000) # multiply this array by a factor b = a * 4 # find the minimum value b_min = b.min() print(b_min) -11.4051061336 Dask allows us to use very similar code to build a task graph describing these operations: import dask.array as da # create a dask array from the above array a2 = da.from_array(a, chunks=200) # multiply this array by a factor b2 = a2 * 4 # find the minimum value b2_min = b2.min() print(b2_min) dask.array<x_3, shape=(), chunks=(), dtype=float64> In this case, the result of the computation is not a value, but is instead a Dask array object which contains the sequence of steps needed to compute that value. 
If you have the graphviz package installed ( pip install graphviz), you can visualize this sequence of steps on this chunked data: from dask.dot import dot_graph dot_graph(b2_min.dask) Reading from the bottom to the top, this graph shows exactly what will happen with our computation: first the array x is ingested by dd.from_array and split into five chunks of 200 elements. Each of these chunks is multiplied by 4, and the minimum value is found. Finally, the global minimum is found among these chunk-wise minima, and the result is returned. We can tell Dask to compute the result encoded in this graph using the compute() method of the dask object: b2_min.compute() -11.405106133564583 As expected, we get the same result as the normal, non-lazy method. I'm not going to go into any more technical details of Dask here, but you should be aware that it contains many more options for flexible creation and evaluation of such task graphs. For more information, I'd suggest reading through the dask documentation or watching the excellent Dask tutorial given by Matthew Rocklin at the recent PyData Seattle conference. Dask DataFrames and OpenStreetMap¶ With this framework of task graphs plus lazy evaluation, let's take a look at using Dask in a more interesting context: exploring the OpenStreetMap data. We can start by taking a look at the first few rows of the data using Pandas, just to see what we're working with: import pandas as pd data = pd.read_csv('POIWorld.csv', nrows=5) data.columns Index(['"osmid"', 'name', 'amenity', 'emergency', 'historic', 'leisure', 'man_made', 'office', 'shop', 'sport', 'tourism', 'craft', 'Longitude', 'Latitude'], dtype='object') Each point of interest has latitude and longitude, along with a variety of other data labels. We'll extract these locations, and focus on the "name" and "amenity" columns associated with them. 
This can be done with Dask's version of Pandas' read_csv command: from dask import dataframe as dd columns = ["name", "amenity", "Longitude", "Latitude"] data = dd.read_csv('POIWorld.csv', usecols=columns) data dd.DataFrame<read-csv-POIWorld.csv-ea29fef3df6f73002fe27223a3749930, divisions=(None, None, None, ..., None, None)> Notice here that the read_csv command did not actually open the file and access the data, but simply created a task graph describing the operations needed to access the data. Next let's use a Pandas-style filtering to select two subsets of this data: those rows with "amenity" specified, and those rows with "name" specified: with_name = data[data.name.notnull()] with_amenity = data[data.amenity.notnull()] Once again, we have not yet done any actual computation, but simply specified how to find the part of the data we're interested in. Diving Into the Data: Geography of Coffee¶ One thing we can do with this data is pull-out certain points of interest and compare their distribution on a map. Here we'll try to reproduce a recent Reddit Post which maps the distribution of Dunkin Donuts and Starbucks locations in the US. First, we must further filter the data by the name column. Dask lets us use Pandas-style masking and vectorized string operations, so we can do this as follows: is_starbucks = with_name.name.str.contains('[Ss]tarbucks') is_dunkin = with_name.name.str.contains('[Dd]unkin') starbucks = with_name[is_starbucks] dunkin = with_name[is_dunkin] To see how many results we have, we can use a count() call and pass it to dd.compute() to see the results. 
This is the point when we are finally actually loading the data and computing quantities from the values: dd.compute(starbucks.name.count(), dunkin.name.count()) (5301, 1276) We find about 5300 Starbucks and 1300 Dunkin Donuts locations in the global dataset; this is far fewer than the true numbers for these chains, which are around 12000 Starbucks and 8000 Dunkin Donuts in the United States alone! Evidently, the OpenStreetMap data is not all that complete. From my own anecdotal experience with the dataset, I've found that the data tends to be fairly complete in dense urban areas, while missing many details in more sparsely-populated locations. Despite this incompleteness, let's push-on with the data we have and see what we can discover. We can start by computing and extracting the latitude and longitude from the graphs we have generated. We will do this in a single dd.compute() call, so that the data will be ingested only once: locs = dd.compute(starbucks.Longitude, starbucks.Latitude, dunkin.Longitude, dunkin.Latitude) # extract arrays of values fro the series: lon_s, lat_s, lon_d, lat_d = [loc.values for loc in locs] %matplotlib inline import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap def draw_USA(): """initialize a basemap centered on the continental USA""" plt.figure(figsize=(14, 10)) return Basemap(projection='lcc', resolution='l', llcrnrlon=-119, urcrnrlon=-64, llcrnrlat=22, urcrnrlat=49, lat_1=33, lat_2=45, lon_0=-95, area_thresh=10000) m = draw_USA() # Draw map background m.fillcontinents(color='white', lake_color='#eeeeee') m.drawstates(color='lightgray') m.drawcoastlines(color='lightgray') m.drawcountries(color='lightgray') m.drawmapboundary(fill_color='#eeeeee') # Plot the values in Starbucks Green and Dunkin Donuts Orange style = dict(s=5, marker='o', alpha=0.5, zorder=2) m.scatter(lon_s, lat_s, latlon=True, label="Starbucks", color='#00592D', **style) m.scatter(lon_d, lat_d, latlon=True, label="Dunkin' Donuts", color='#FC772A', 
**style) plt.legend(loc='lower left', frameon=False); Again, these data are far from complete, but we can nevertheless see the general trend that many have noted before: Starbucks favors the west coast, while Dunkin Donuts favors the east coast. For anybody who has spent much time in, say, Seattle and Boston, this overall trend should not be all that surprising! dd.compute(with_amenity.amenity.count()) (5075909,) We see just over five million rows with an amenity label. With Pandas' value_counts() function, we can examine the most common of these labels in the dataset. Here the head() call triggers a computation: with_amenity.amenity.value_counts().head(20) bench 492190 restaurant 400620 place_of_worship 389266 school 335937 parking 282460 fuel 198865 post_box 181858 cafe 156946 bank 152218 fast_food 146962 recycling 135912 pharmacy 127259 waste_basket 119881 grave_yard 118324 bicycle_parking 110657 post_office 102431 drinking_water 94594 pub 94416 toilets 93708 telephone 90894 dtype: int64 Somewhat surprisingly, there are far more benches labeled than any other single labeled object. Down the list a bit, we see the fast_food category, which has around 150,000 global entries. Using a filter plus another value count, we can check which fast food restaurants are most common: is_fastfood = with_amenity.amenity.str.contains('fast_food') fastfood = with_amenity[is_fastfood] fastfood.name.value_counts().head(12) McDonald's 8608 Subway 6784 Burger King 3180 KFC 2812 Wendy's 1301 Taco Bell 1272 Pizza Hut 965 マクドナルド 918 Dairy Queen 744 Domino's Pizza 680 McDonalds 634 Arby's 606 dtype: int64 As an aside, one interesting thing we see is that there are three versions of McDonald's in this list: there are "McDonald's", and "McDonalds", of course, but also マクドナルド (roughly pronounced "Makudonarudo"), which is the Japanese adaptation of the well-known restaurant name. 
If you were attempting to use this dataset to count locations by chain, it would be important to take these multiple similar labels into account! Let's next take a look at the full collection of fast food restaurant locations, extract their latitude and longitude coordinates, and plot their locations on a map of the USA: lat, lon = dd.compute(fastfood.Latitude, fastfood.Longitude) m = draw_USA() m.drawmapboundary(fill_color='#ffffff', linewidth=0) m.fillcontinents(color="#fcfcfc", lake_color='#ffffff', zorder=1) m.scatter(lon.values, lat.values, latlon=True, s=1, marker='o', alpha=0.1, zorder=2); Here I've purposely left-out the geographical borders; we see that with fast food locations alone, we can see the major cities, and even trace-out some of the major interstate routes! I suspect that, like above, this data is far from complete, especially in more rural areas. I would love to see how a full fast-food-nation map would look, but after poking around it seems that most available data on that is proprietary (though FlowingData has an interesting visualization in the same spirit). Pubs of the British Isles¶ Let's take a look at one last example, reproducing a post by Ramiro Gomez, a developer in Berlin whose website is definitely worth clicking around for a bit. Here we will extract all the pub locations from the dataset, and use them to visualize a small island nation with an astounding density of these establishments. We'll start by filtering the amenities for the word "pub" (being careful to use regular expressions which mark word boundaries, so that we don't match things like "public toilet"): is_pub = with_amenity.amenity.str.contains(r'\bpub\b') pubs = with_amenity[is_pub] pubs.amenity.count().compute() 94478 We have about 95,000 world-wide points of interest with "pub" in the label. 
Next, as above, we can extract the longitude and latitude arrays from our data: lon, lat = dd.compute(pubs.Longitude, pubs.Latitude) Finally, with a few lines of Basemap code, we can visualize the results: fig, ax = plt.subplots(figsize=(10, 15)) m = Basemap(projection='mill', lon_0=-5.23636, lat_0=53.866772, llcrnrlon=-10.65073, llcrnrlat=49.16209, urcrnrlon=1.76334, urcrnrlat=60.860699) m.drawmapboundary(fill_color='#ffffff', linewidth=.0) x, y = m(lon.values, lat.values) m.scatter(x, y, s=1, marker=',', color="steelblue", alpha=0.8); The pub locations alone are enough to make out most of the borders and contours of the islands! The above visualizations are fun, but have merely scraped the surface of what can be done with this data – what interesting geographical visualizations can you come up with using these data and tools? Thanks for reading!
https://jakevdp.github.io/blog/2015/08/14/out-of-core-dataframes-in-python/
CC-MAIN-2018-39
refinedweb
2,287
53.31
jQuery Star Rating with ASP.NET MVC The star rating system allows people to easily provide feedback on a blog item if they don't feel like submitting a comment. The rating plugin that I chose can be configured to allow any score to be applied, although I opted to allow people the provide a score from 1 - 5. I also wanted to give accurate information on the current average rating for each item, which meant that I need to show stars partially completed when an item's average rate is not a whole number. I also needed to prevent people rating an item more than once. To begin with, I downloaded the plugin files, which consists of a number of .js files including jquery.MetaData.js, jquery.rating.pack.js and jquery.rating.js. There is also some documentation in the form of an html file, plus a style sheet for the rater and an image file for the stars. First thing I did was check the documentation for details on database integration, and was met with a message that this does not exist. The whole deal is completely up to me. Great. Then again, I suppose that there are so many options for connecting to databases - php to MySQL, ASP.NET to SQL Server via Linq To SQL, ADO.NET, Entity Framework, nHibernate, ColdFusion to whatever - that it would be a bit much to expect the authors to cover all angles. My method will be connecting to SQL Server via the Entity Framework, in keeping with previous articles on developing this site. The rater will appear on the articles page. At the moment, there is nothing in the database to cope with rating articles, so I add two columns to the Articles table - Rating (int) and TotalRaters (int). As each rating comes in, I will increment the Rating value for the article by the score given by the rater. I will also add 1 to the TotalRaters for that article. I can calculate the average rating per article by dividing the Rating by the TotalRaters. I set the default value for both columns at 0. Having done that, I refresh my Model. 
The Article class is enhanced with the two extra properties: I will be using a PartialView for the rater, so I create a class for the strongly typed ViewModel that it will use. The class will only contain 4 properties: namespace MikesDotnetting.Models { public class ArticleRating { public int ArticleID { get; set; } public int Rating { get; set;} public int TotalRaters { get; set; } public double AverageRating { get; set;} } } You may notice that this class is a subset of the full Article class properties (apart from the AverageRating property). You might be wondering why I am not using the full Article class. The reason is that I only need the data in this new class for the PartialView, and don't want to encumber it with more than necessary. Not only that, but exposing just these properties makes the PartialView more reusable. Speaking of the PartialView, here it is: <%@ Control $(function() { $('.star').rating('readOnly', true); $('#rater').hide(); $('#rated').mouseover(function() { $('#rated').hide(); $('#rater').show(); }); $('.auto-submit-star').rating({ callback: function(value, link) { $.ajax({ type: "POST", url: "/Article/Rate", data: $("#rate").serialize(), dataType: "text/plain",.'); } }); } }); }); </script> <div id="rated"> <div style="float:left"> <form id="Form1" method="post" action=""> <input class="star {split:4}" type="radio" value="1" name="rating" <%= Utils.Check(0,0.25,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="2" name="rating" <%= Utils.Check(0.25,0.5,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="3" name="rating" <%= Utils.Check(0.5,0.75,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="4" name="rating" <%= Utils.Check(0.75,1,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="5" name="rating" <%= Utils.Check(1,1.25,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="6" name="rating" <%= 
Utils.Check(1.25,1.5,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="7" name="rating" <%= Utils.Check(1.5,1.75,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="8" name="rating" <%= Utils.Check(1.75,2,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="9" name="rating" <%= Utils.Check(2,2.25,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="10" name="rating" <%= Utils.Check(2.25,2.5,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="11" name="rating" <%= Utils.Check(2.5,2.75,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="12" name="rating" <%= Utils.Check(2.75,3,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="13" name="rating" <%= Utils.Check(3,3.25,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="14" name="rating" <%= Utils.Check(3.25,3.5,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="15" name="rating" <%= Utils.Check(3.5,3.75,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="16" name="rating" <%= Utils.Check(3.75,4,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="17" name="rating" <%= Utils.Check(4,4.25,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="18" name="rating" <%= Utils.Check(4.25,4.5,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="19" name="rating" <%= Utils.Check(4.5,4.75,Model.AverageRating) %>/> <input class="star {split:4}" type="radio" value="20" name="rating" <%= Utils.Check(4.75,5,Model.AverageRating) %>/> </form> </div> <p id="currentlyrated" style="float:left;padding-left:20px;"> <%= Model.AverageRating > 0 ? "Currently rated " + Model.AverageRating.ToString("f") + " by " + Model.TotalRaters + " people" : "<span style=\"color:red\">Not yet rated. 
Be the first to rate this article!</span>"%> </p> </div> <div style="clear:both"></div> <div id="rater"> <div style="float:left;"> <form id="rate" method="post" action=""> <input class="auto-submit-star" type="radio" name="score" value="1"/> <input class="auto-submit-star" type="radio" name="score" value="2"/> <input class="auto-submit-star" type="radio" name="score" value="3"/> <input class="auto-submit-star" type="radio" name="score" value="4"/> <input class="auto-submit-star" type="radio" name="score" value="5"/> <input type="hidden" name="ArticleID" value="<%=Model.ArticleID %>" /> </form> </div> <p style="float:left;padding-left:20px;"> Rate Now! </p> </div> <div style="clear:both"></div> Right - before you simply copy and paste this and hope it will run, there is a fair amount to explain. I'll start with the second of the two divs. This is the 5 star rater which users will ise to score articles. I've used the option with a class of auto-submit-star, which according to the documentation is the one that does not need a submit button. It does need a bit of AJAX, which I will get to shortly. As well as the 5 stars for rating, the radio buttons are housed in a form which also contains a hidden field representing the ArticleID of the current article. The previous div is the one that contains the split stars and will be shown to users when they first load an article. It will feature the current rating for that item. There are 20 of these, which when split into 4 represents 5 whole stars. Each star is capable of displaying ¼, ½, ¾ or a full star. That's as accurate as I need for display purposes. You will notice the <%= Utils.Check(0,0.25,Model.AverageRating) %> which appears in each input tag. This refers to a utility method that returns a string: checked="checked" if the average rating falls between two values. Here is the code for the method. 
It's dead simple: public static string Check(double lower, double upper, double toCheck) { return toCheck > lower && toCheck <= upper ? " checked=\"checked\"" : null; } I tried a number of ways to set the input as checked from client script, but in the end gave up. My lack of javascript knowledge eventually defeated me. I need to spend a lot more time examining the jquery.rater.js file, along with the MetaData.js file, I suspect. Both the jquery.rater.pack.js file and the jquery.MetaData.js file are linked to in the main View, which is why they are not referenced in the Partial. The first segment of jQuery code sets the current rater (the first div) to disabled so that the user cannot use it to score with. It also hides the "live" rater, but adds a mouseover event to the disabled one which reveals the live rater, while hiding the disabled one. The message alongside the rater will change from a summary of the current score to one that invites users to Rate Now! The second part of the jQuery code manages the AJAX form submission. Both the values from the form (the rate and the ArticleID) are serialized and posted to the Rate action on the Article Controller: [AcceptVerbs("post")] public ActionResult Rate(FormCollection form) { var rate = Convert.ToInt32(form["Score"]); var id = Convert.ToInt32(form["ArticleID"]); if (Request.Cookies["rating" + id] != null) return Content("false"); Response.Cookies["rating" + id].Value = DateTime.Now.ToString(); Response.Cookies["rating" + id].Expires = DateTime.Now.AddYears(1); ArticleRating ar = repository.IncrementArticleRating(rate, id); return Json(ar); } Having taken the posted form values, the Action checks to see if there is a cookie called rating with the id of the article as part of its name, which would indicate that the user has rated this article previously. If there is, it sends back "false", but does not record the submitted score. 
If not, it creates a cookie showing that this article has been rated, and sets the expiry date of the cookie for one year's time. It then calls a method in the repository: public ArticleRating IncrementArticleRating(int rate, int id) { var article = de.ArticleSet .Where(a => a.ArticleID == id) .First(); article.Rating += rate; article.TotalRaters += 1; de.SaveChanges(); var ar = new ArticleRating() { ArticleID = article.ArticleID, Rating = article.Rating, TotalRaters = article.TotalRaters, AverageRating = Convert.ToDouble(article.Rating)/Convert.ToDouble(article.TotalRaters) }; return ar; } which increments the Rating value by the score submitted, and the TotalRaters by 1. The updated values are then sent back to the Rate action to be serialized as JSON before being returned to the jQuery's success callback (repeated here to save you scrolling back up the page):.'); } If the response is not "false", it will be JSON. This is parsed using eval(), and while an alert is shown to the user confirming successful logging of their score, the paragraph containing the average rate is updated, and the rater is shown again. Improvements There are some things that I would like to have included. The first is the ability to set the initial state of the split star rater via javascript so that I don't need the Check() helper method. That would also apply when an article has just been rated so that I can update the stars as well as the paragraph containing the revised average rating. However, I tried a number of ways to achieve this without success. Simply using jQuery to set the attribute on the relevant radio didn't work. It just stopped the starts being shown. Using FireFox to view the generated DOM didn't help much either. I really need to know more javascript than I do. The other thing that I would like to have done is to apply a mouseout to the second div - the one that allows people to rate. 
Again, just applying a mouseout to the div with the id of rater had the desired effect, until the mouse moved from one star to the next. It seemed that instead of being applied to the div itself, the mouseout had been applied to each individual star (or generated div containing a star). The plugin code actually generates some DOM which consists of a span containing a series of divs and <a> elements which house the star images. I could not understand why the mouseout did not apply to the containing elements, and could not find a solution. Currently rated 3.78 by 2807 people Rate Now! Date Posted: Tuesday, September 8, 2009 1:40 PM Last Updated: Friday, October 10, 2014 9:12 PM Posted by: Mikesdotnetting Total Views to date: 52005, October 8, 2009 5:49 PM from Jean Thank you for your post, great input. For one of the improvements you suggested: $('#rated').mouseenter(function() { $('#rated').hide(); $('#rater').show(); }); $('#rater').mouseleave(function() { $('#rater').hide(); $('#rated').show(); }); Have a nice day, Jean Thursday, October 8, 2009 9:14 PM from april sorry, I clicked kick it button mistakenly, thought it was a part of output. the article rocks.thanks! Friday, October 9, 2009 10:54 AM from hamid Great Artice , please also provide the dowbloadable code with your articles Tuesday, November 3, 2009 7:28 AM from emad your articles are great i hope you provide a working code to be downloaded thanks Sunday, December 6, 2009 1:44 AM from Iain Would moving some of the rating generation code into a loop (jquery or asp) be a good idea? Wednesday, March 24, 2010 1:04 PM from Dany If user will clear all the cookies, then it will be available him to rate again. Friday, May 7, 2010 8:28 AM from Adeel i want to ask you that from where i download the style sheet . because i downloaded the jquery.rating.css but there is no class named "auto-submit-star" . please help me out. 
can u give me full source code file or only this css file that contain "auto-submit-star" class Tuesday, May 11, 2010 7:40 AM from Mike @Adeel There is no css declaration for the .auto-submit-star option because I applied no styles to it. It is simply a convenient way to use jQuery selectors to create a wrapped set. Tuesday, October 19, 2010 10:12 AM from Berra Bertsson Nice article, one question though. After a rate is submitted, the stars aren't updated/repainted, only the text with the current average is replaces. Any ideas on how to implement that? Friday, March 25, 2011 8:14 AM from norman thank's for the idea but it's simple.. Tuesday, July 26, 2011 12:47 PM from sivasankari good Thursday, July 12, 2012 12:48 PM from mayuri Great Article. Can you please provide me source code. Saturday, July 14, 2012 9:21 PM from Mike @mayuri All the code you need is included in the article.
http://www.mikesdotnetting.com/Article/114/jQuery-Star-Rating-with-ASP.NET-MVC
CC-MAIN-2014-42
refinedweb
2,445
57.06
This tutorial teaches you two advanced Python skills: closures and decorators. Mastering them will make you a better coder today—so, let’s dive right into them! Closures Every function in Python is first class, because they can be passed around like any other object. Usually, when a programming language creates a function just like other data types, that programming language supports something called Closures. A closure is a nested function. It is defined within an outer function. def outer_hello_fn(): def hello(): print("Hello Finxter!") hello() Here, we have an outer function called outer_hello_fn, which has no input arguments. The function hello is a nested function defined within the outer function. The hello function is a closure. Try It Yourself: Exercise: What’s the output of this code snippet? Run the code to test if you’re correct.
hello_fn Output: <function __main__.get_hello_fn.<locals>.hello> As you can see in the structure, it is a locally defined function within get_hello_fn, that is, a function defined within another function, that is a closure. Now, this closure can be invoked by using the hello_fn variable. hello_fn() Output: Hello Finxter! Invoke hello_fn() will print out Hello Finxter! to screen. A closure is something more than just an inner function defined within an outer function. There is more to it. Here is another example: def hello_by_name(name): def hello(): print("Hello!", name) hello() return hello Here, the outer function is called hello_by_name, which takes in one input argument, the name of an individual. Within this outer function, there is the hello inner function. It prints to the screen Hello!, and the value of the name. The name variable is an input argument to the outer function. It is also accessible within the inner hello function. The name variable here can be thought of as a variable that is local to the outer function. Local variables in the outer function can be accessed by closures. Here is an example of passing an argument to the outside function: greet_hello_fn = hello_by_name("Chris") The function hello is returned and it is stored in the greet_hello_fn variable. Executing this prints out Hello! Chris to screen. That is because we invoked the closure from within the outer function. We have a reference to the closure that was defined by the outer function. greet_hello_fn() Output: Hello! Chris Notice something interesting here. Chris is available in the variable name which is local to the hello_by_name function. Now, we have already invoked and exited hello_by_name but the value in the name variable is still available to our closure. And this is another important concept about closures in Python. They hold the reference to the local state even after the outer function that has defined the local state has executed and no longer exists. 
Here is another slightly different example illustrating this concept. def greet_by_name(name): greeting_msg = "Hi there!" def greeting(): print(greeting_msg, name) return greeting The outer function, greet_by_name, takes in one input argument, name. Within the outer function, a local variable called greeting_msg is defined which says, “Hi there!”. A closure called greeting is defined within the outer function. It accesses the local variable greeting_msg as well as the input argument name. A reference to this greeting closure is returned from the outer greet_by_name function. Let’s go ahead and invoke greet_by_name and store the function object that it returns in the greet_fn variable. We will use this function object to greet Ray by name. Go ahead and invoke the greet_fn() by specifying parentheses. And it should say, Hi there! Ray. Observe how the closure has access not just to the name Ray but also to the greeting message, even after we have executed and exited the outer function. greet_fn = greet_by_name("Ray") greet_fn() Output: Hi there! Ray Closures carry around information about the local state. Let’s see what happens when the greet_by_name function is deleted, so you no longer have access to the outer function. del greet_by_name Now, remember that name and greeting message are both variables that were defined in the outer function. What happens to them? Now if you try to invoke greet by name. greet_by_name("Ray") Output: NameError: name 'greet_by_name' is not defined What about the greet_fn? Remember that greet_fn is a reference to our closure. Does this still work? greet_fn() Output: Hi there! Ray Not only does it work, but it still has access to the local variables that were defined in the outer function. The outer function no longer exists in Python memory, but the local variables are still available along with our closure. Decorators – Code Modification Decorators help to add functionality to existing code without having to modify the code itself. 
Decorators are so-called because they decorate code, they do not modify the code, but they make the code do different things using decoration. Now that we have understood closures, we can work our way step by step to understanding and using decorators. def print_message(): print("Decorators are cool!") Here is a simple function that prints a message to screen. print_message() Output: Decorators are cool! Each time you invoke this function it will always print the same message. I want to use a few characters to decorate the original message, and I do this using the highlight function. import random def highlight(): annotations = ['-', '*', '+', ':', '^'] annotate = random.choice(annotations) print(annotate * 50) print_message() print(annotate * 50) The outer function highlight has no input arguments. Within the highlight function, a random choice of annotations is used to decorate the original message. The message will be highlighted with a random choice between the dash, the asterisk, the plus, the colon, and the caret. The output will have an annotation of 50 characters before and after the message which is inside the print_message function. Try It Yourself: Exercise: What’s the output of this code snippet? Run the code to test your understanding! highlight() Output: :::::::::::::::::::::::::::::::::::::::::::::::::: Decorators are cool! :::::::::::::::::::::::::::::::::::::::::::::::::: Here is another function with a different message, print_another_message. def print_another_message(): print("Decorators use closures.") Now if I want to highlight this message as well, the existing highlight function will not work because it has been hardcoded to invoke the print_message function. So how do I change this highlight function so that it is capable of highlighting any message that I want printed out to screen? 
Remember that functions are first-class citizens in Python, which means whatever print function you have, you can pass it as an input argument to the highlight function. Here is a redefined highlight function, make_highlighted. def make_highlighted(func): annotations = ['-', '*', '+', ':', '^'] annotate = random.choice(annotations) def highlight(): print(annotate * 50) func() print(annotate * 50) return highlight The only difference here is that make_highlighted takes in an input argument that is a function. This function is what will print out the message to be displayed. The next change is that within the highlight closure, the function object that was passed in is invoked. That is the function object that will print out the message. Now we have two print functions so far. print_message() print_another_message() And now with the help of make_highlighted function, any printed message can be highlighted. For example: highlight_and_print_message = make_highlighted(print_message) highlight_and_print_message() Output: ++++++++++++++++++++++++++++++++++++++++++++++++++ Decorators are cool! ++++++++++++++++++++++++++++++++++++++++++++++++++ To print a different message and have it highlighted, simply pass a different function object to the make_highlighted function. highlight_and_print_another_message = make_highlighted(print_another_message) highlight_and_print_another_message() Output: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Decorators use closures. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It is clear that the make_highlighted function is very generic, you can use it to highlight any message that you want printed to screen. The function make_highlighted is a decorator. Why is it a decorator? Well, it takes in a function object and decorates it and changes it. In this example, it highlights the function with random characters. Decorators are a standard design pattern, and in Python, you can use decorators more easily. 
Instead of passing in a function object to make_highlighted, accessing the closure, and then invoking the closure, you can simply decorate any function by using @ and placing the decorator before the function to decorate. @make_highlighted def print_a_third_message(): print("This is how decorators are used") The use of the decorator @make_highlighted will automatically pass the function print_a_third_message as an input to make_highlighted and highlight the message. print_a_third_message() Output: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is how decorators are used ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Now you can use the decorator to highlight any messages. @make_highlighted def print_any_message(): print("This message is highlighted!") And now if you invoke print_any_message, you will find that the result that is displayed to screen is highlighted. print_any_message() Output: ++++++++++++++++++++++++++++++++++++++++++++++++++ This message is highlighted! ++++++++++++++++++++++++++++++++++++++++++++++++++ Decorators – Customization Let’s see another example of a Decorator that will do some work. It will do some error checking for us. Here are two functions that will be the input to our decorator def square_area(length): return length**2 def square_perimeter(length): return 4 * length We assume that the value of the radius passed in is positive and correct. square_area(5) Output: 25 What if I invoke the square_area and pass in -1? square_area(-1) Output: -4 The input -1 doesn’t make sense as a value for the length. The function should have thrown an error or told us in some way that negative values of length are not valid. Now, if you were to perform an error check for each of these functions, we would have to do it individually. We would have to have an if statement within the area function as well as the perimeter function. Instead of that, let’s write a decorator that will perform this error checking for us. 
The decorator safe_calculate takes in one input argument that is a function object. def safe_calculate(func): def calculate(length): if length <= 0: raise ValueError("Length cannot be negative or zero") return func(length) return calculate This is the function object that will perform the calculation. Within the safe_calculate outer function, the inner function called calculate is the closure. calculate takes in one input argument, the length. It checks to see whether length is less than or equal to 0. If yes, it throws an error. And the way it throws an error is by simply calling a raise ValueError, “Length cannot be negative or zero”. Once we raise this error, Python will stop the execution. But if length is positive, it will invoke func and pass in length as an input argument. The safe_calculate is our decorator, which takes as its input a function object and returns a closure that will perform the safe calculation. square_area_safe = safe_calculate(square_area) Let’s test it first: square_area_safe(5) This is safe and I get the result here on the screen. 25 Invoking it with a negative number will raise an error square_area_safe(-1) Output: ValueError: Length cannot be negative or zero Let’s decorate the perimeter function as well with the safe_calculate. square_perimeter_safe = safe_calculate(square_perimeter) square_perimeter(10) Output: 40 But if you were to call square_perimeter_safe with a negative value for length well, that is a ValueError. square_perimeter_safe(-10) Output: ValueError: Length cannot be negative or zero Now that you have a decorator, you should decorate your functions rather than use the way that we have been using so far. @safe_calculate def square_area(length): return length**2 @safe_calculate def square_perimeter(length): return 4 * length Now, the next time square_area or the square_perimeter is called, the safety check will be performed. 
square_perimeter(3) Output: 12 If you try to calculate the perimeter for a negative value of the length, you will get a ValueError. The safe_calculate function that we set up earlier has a limitation, and you will see what it in a future example. square_perimeter(-3) Output: ValueError: Length cannot be negative or zero What happens when you have more than one input? Here is a function that calculates the area of a rectangle. @safe_calculate def rectangle_area(length, width): return length * width Within our safe_calculate function, we had invoked the func object which performs the calculation with just one input argument, with just the variable length. This is going to cause a problem when we use the safe_calculate decorator for the rectangle_area function. Once I have decorated this function, I’m going to invoke it with 4, 5. rectangle_area(4, 5) Output: TypeError: calculate() takes 1 positional argument but 2 were given The problem is with the way we had defined the closure inside the safe_calculate function. The calculate closure takes in just one input argument. If a function has multiple input arguments, then safe_calculate cannot be used. A redefined safe_calculate_all function is shown below: def safe_calculate_all(func): def calculate(*args): for arg in args: if arg <= 0: raise ValueError("Argument cannot be negative or zero") return func(*args) return calculate. It takes in one input argument that is the function object that is to be decorated. The main change is in the input arguments that are passed into the calculate closure. The function calculate now takes in variable length arguments, *args. The function iterates over all of the arguments that were passed in, and checks to see whether the argument is less than or equal to 0. If any of the arguments are less than or equal to 0, a ValueError will be raised. Remember, *args will unpack the original arguments so that the elements of the tuple are passed in individually to the function object, func. 
You can now use this safe_calculate_all decorator with functions that have any number of arguments. @safe_calculate_all def rectangle_area(length, width): return length * width rectangle_area(10, 3) Output: 30 Let’s try invoking the same function, but this time one of the arguments is negative. Width is negative and that gives me a ValueError, thanks to our safe_calculate_all decorator. rectangle_area(10, -3) When you invoke this function, it will check all arguments. ValueError: Argument cannot be negative or zero It doesn’t matter which argument is negative, you still get the ValueError. Here the length is negative: rectangle_area(-10, 3) Output: ValueError: Argument cannot be negative or zero Chaining Decorators You can have a function decorated using multiple decorators. And these decorators will be chained together. Here are two decorators, one prints asterisks and the other plus signs def asterisk_highlight(func): def highlight(): print("*" * 50) func() print("*" * 50) return highlight def plus_highlight(func): def highlight(): print("+" * 50) func() print("+" * 50) return highlight The print_message_one is decorated with the asterisk_highlight. @asterisk_highlight def print_message_one(): print("Decorators are cool!") print_message_one() Output: ************************************************** Decorators are cool! ************************************************** Now let’s define another print function, but this time we will decorate it using two decorators, the plus_highlight and the asterisk_highlight. @plus_highlight @asterisk_highlight def print_message_one(): print("Decorators are cool!") What you see here is an example of chaining decorators together. But how are they chained? Which decoration comes first, the asterisk_highlight, or the plus_highlight? Whichever decorator is the closest to the function definition is what is executed first, and then the decorator which is further away from the function definition. 
This means that the message will be first highlighted with the asterisk, then the plus. print_message_one() Output: ++++++++++++++++++++++++++++++++++++++++++++++++++ ************************************************** Decorators are cool! ************************************************** ++++++++++++++++++++++++++++++++++++++++++++++++++ If you change the order of the decorators, the decorations order will change as well. @asterisk_highlight @plus_highlight def print_message_one(): print("Decorators are cool!") You will have the same function print_message_one, but the decorator that is closest to the function definition is the plus_highlight and then the asterisk_highlight. print_message_one() Output: ************************************************** ++++++++++++++++++++++++++++++++++++++++++++++++++ Decorators are cool! ++++++++++++++++++++++++++++++++++++++++++++++++++ ************************************************** Use of kwargs in Decorators In this example we are using kwargs to display different messages for a decorator that times the execution of a function def timeit(func): def timed(*args, **kw): if 'start_timeit_desc' in kw: print(kw.get('start_timeit_desc')) ts = time.time() result = func(*args, **kw) te = time.time() if 'end_timeit_desc' in kw: print('Running time for {} is {} ms'.format(kw.get('end_timeit_desc'), (te - ts) * 1000)) return result return timed The timeit decorator is used for the test function. Three parameters are passed to the function test: a, b and, **kwargs. The parameters a and b are handled in the decorator with *args as we have seen before. The **kwargs parameter is used to pass descriptions for the function. These parameters are start_timeit_desc and end_timeit_desc. These two parameters are checked inside the timed closure and will display the messages that are in them. 
@timeit def test(a,b, **kwargs): return a * b result = test(10,20, start_timeit_desc = "Start of test(10,20)...", end_timeit_desc = "End of test(10,20)") print("result of test(10,20) = " + str(result)) Output: Start of test(10,20)... Running time for End of test(10,20) is 0.0 ms result of test(10,20) = 200
https://blog.finxter.com/closures-and-decorators-in-python/
CC-MAIN-2021-43
refinedweb
2,883
57.06
Ivy retrieve does not honor validate="false" from ivysettings ------------------------------------------------------------- Key: IVY-992 URL: Project: Ivy Issue Type: Bug Components: Ant, Core Affects Versions: 2.0-RC2 Reporter: Martin Eigenbrodt Attachments: ivybug.zip I tested this against rc2 and current trunk. If you call <ivy:retrieve> and one of the modules to retrieve contains custom attributes within the default namespace, Ivy fails, although the ivysettings have the validate flag set to false. How to Reproduce: Download the zip file and extract the files. Put an ivy jar in the same directory or change the path within build.xml. Run 'ant publish'. Afterwards run 'ant retrieve'. The retrieve fails. -- This message is automatically generated by JIRA. - You can reply to this email to add a comment to the issue online.
http://mail-archives.apache.org/mod_mbox/ant-notifications/200901.mbox/%3C1568335077.1231248884628.JavaMail.jira@brutus%3E
CC-MAIN-2018-30
refinedweb
127
68.87
On 08/04/2010 09:45 AM, Borislav Petkov wrote:> > 2. Do not use swapper_pg_dir to boot secondary CPUs like 64-bit> does.> > This patch implements solution 2. It introduces a trampoline_pg_dir> which has the same layout as swapper_pg_dir with low_mappings. This page> table is used as the initial page table of the booting CPU. Later in the> bringup process, it switches to swapper_pg_dir and does a global TLB> flush. This fixes the crashes in our test cases.> I would like to keep around a page directory with the low mappingsaround -- and not use it for kernel threads -- at all times *anyway*.This means we can remove any current hacks that we have to do around S3entry and exit, for example.--- a/arch/x86/kernel/head_32.S+++ b/arch/x86/kernel/head_32.S@@ -328,7 +328,7 @@ ENTRY(startup_32_smp) /* * Enable paging */- movl $pa(swapper_pg_dir),%eax+ movl pa(initial_page_table), %eax movl %eax,%cr3 /* set the page table pointer.. */ movl %cr0,%eax orl $X86_CR0_PG,%eax@@ -608,6 +608,8 @@ ignore_int: .align 4 ENTRY(initial_code) .long i386_start_kernel+ENTRY(initial_page_table)+ .long pa(swapper_pg_dir) /* * BSS section@@ -623,6 +625,10 @@ ENTRY(swapper_pg_dir) #endif swapper_pg_fixmap: .fill 1024,4,0+#ifdef CONFIG_X86_TRAMPOLINE+ENTRY(trampoline_pg_dir)+ .fill 1024,4,0+#endifI don't really see why this makes sense, though. It would make moresense that the initial page table we set up becomes trampoline_pg_dir;we can then set up and change to swapper_pg_dir almost immediately.I realize this isn't how the 64-bit code works at the moment, but in alot of ways I think it would be better if it did. -hpa
http://lkml.org/lkml/2010/8/4/507
CC-MAIN-2016-50
refinedweb
265
66.84
As I’ve mentioned recently, I’m currently working on a NativeScript Vue applicationfor a client and as part of that work, I’m dealing with custom forms. My initial approach made use of custom components, which Vue made pretty trivial, but I’ve decided to give NativeScript UI a try. This is a set of free components covering the following features: - Calendar - Chart - ListView - DataForm - SideDrawer - Gauge - AutoComplete Specifically, DataForm looked like it could be useful. While it’s not too difficult to build forms with NativeScript, DataForm attempts to automate as much as possible of the process. As an example, consider the following data: { "name":"Raymond Camden", "yearBorn":1973, "cool":true, "email":"raymondcamden@gmail.com" } Now imagine we tie this to a dataform control: <RadDataForm ref="dataForm" : And if we literally leave it at this - the control will automatically render a nice form for us: Notice how the control looked at my data properties and figured out what controls to use as well as how to create labels. yearBorn for example becomes Year Born. This all happens by default and is freaking cool, but you can control all of this as well if you don’t care for their defaults. All in all a neat little control, but I ran into some issues right away as soon as I started trying some of the more advanced features. Part of this is due to some poor docs (I’ve already sent reports in!) and I hope this post can help others avoid the issues I ran into. Install with Vue Issues So the docs tell you to install the relevant plugin, but right after that things go a bit awry. The “Getting Started” for the Vue docs and DataForm, which isn’t even labelled that (in the nav it’s called “Provide the Source” tell you to do this: “Add this to the main Javascript or Typescript file, usually called main.js or main.ts:” import RadDataForm from 'nativescript-ui-dataform/vue'; Vue.use(RadListView); Vue.use(RadDataForm); Ok, I did that. 
Then it says: “Before proceeding, make sure that the nativescript-ui-dataform/vue module is required inside your application. This module handles the registration of the custom directives and elements required by nativescript-vue. After that simply add the RadDataForm tag to the HTML and set its source accordingly:“ So that first paragraph didn’t make sense. I mean, didn’t I already do that? To make matters worse, the code sample below doesn’t provide any additional help. I was only able to get things working by going to the NativeScript Playground, dragging a DataForm control on the page, and looking at what it did. Based on that, this is what worked for me: Do not add code to main.js/main.ts. From what I can see it wasn’t necessary. In your component, do require the dataform like so: Edit on 11/7/2018, a mere hour after posting… @bundyo reduced the original 5 lines of code I had to just one: import 'nativescript-ui-dataform/vue'; Looking at that code, the paragraph I quoted above makes sense now, but I had no idea what code to even use. If the code sample on the page had included this, it would have saved me about two hours - I kid you not. Working with Groups Alright - so the main reason I even looked at the dataform control was to make use of the “groups” feature. This lets you take a large form and create groups that can ben opened and collapsed. It isn’t an “accordion” control per se, but it achieves the same purpose. (For folks curious, there is a NativeScript Accordion control but it has certain restrictions that made it unusable for my case.) Here are two screenshots I stole from the docs - first the Android version: And then iOS: So, while cool, the docs on this were pretty slim, especially in regards to providing dynamic groups, by that I mean groups defined in data and not hard coded as tags on the page. I spent a heck of a lot of time trying to get this to work and finally gave up and asked for help on the NS Slack group. 
Thankfully @bundyo came to the rescue. What follows is his solution, not mine. My data is still hard coded but you can see where it could be modified to support data loaded from Ajax or some such. <template> <Page class="page"> <ActionBar title="Demo" class="action-bar" /> <ScrollView> <RadDataForm ref="dataForm" : </RadDataForm> </ScrollView> </Page> </template> <script> import { RadDataForm, PropertyGroup } from 'nativescript-ui-dataform'; require("nativescript-vue").registerElement( "RadDataForm", () => require("nativescript-ui-dataform").RadDataForm ); export default { data() { return { groups:[], album: { bandName: "Edwhat Sheeran", albumName: "X", year: "2017", owned: true, myRating: 9.5, }, md:{ } }; }, created() { this.md = { propertyAnnotations:[ { name:"bandName", displayName:"My band name", required:true, groupName:"Group One" }, { name:"albumName", displayName:"My album", required:true }, { name:"year", required:true, groupName:"Group One" }, { name:"owned", required:true, groupName:"Group Two" }, { name:"myRating", required:true, groupName:"Group Two" } ] }; let pg = new PropertyGroup(); pg.name = "Group One"; pg.collapsible = true; pg.collapsed = false; this.groups.push(pg); pg = new PropertyGroup(); pg.name = "Group Two"; pg.collapsible = true; pg.collapsed = true; this.groups.push(pg); } }; </script> <style scoped> </style> Let’s break it down. First, look at the dataform: <RadDataForm ref="dataForm" : </RadDataForm> There’s two new attributes here - metadata and groups. So metadata is where you can do overrides on the default behaviors of the control. Don’t like the label it selects for your property value? You can tweak it here. Want to use a custom drop down with specific values? You can set it here. We use this feature to specify the groups for each field. (And again, it’s hard coded here but it could be dynamic.) The next part is creating the groups. 
In this case we use an instance of PropertyGroup, one for each group, and ensure that the names match the names used in metadata. If you want to see, and play with, a slimmer version, check out the Playground @bundyo made here: It really does a nice job of setting up the groups and fields all in one fell swoop. And because it’s on the Playground, you can point the NativeScript Playground app at it and have it running on your device in 10 seconds. Anyway, I hope this helps. As I said, the docs here were a bit painful, but I’ve sent multiple reports in to the NativeScript folks so hopefully it gets improved soon. If you have any questions, just drop me a line below! Discussion
https://dev.to/raymondcamden/using-nativescript-dataform-with-vuejs---some-tips-4db0
CC-MAIN-2020-50
refinedweb
1,112
71.95
Use Azure Data Lake Store to capture data from Event Hubs Learn how to use Azure Data Lake Store to capture data received by Azure Event Hubs. Prerequisites An Azure subscription. See Get Azure free trial. An Azure Data Lake Store account. For instructions on how to create one, see Get started with Azure Data Lake Store. An Event Hubs namespace. For instructions, see Create an Event Hubs namespace. Make sure the Data Lake Store account and the Event Hubs namespace are in the same Azure subscription. Assign permissions to Event Hubs In this section, you create a folder within the account where you want to capture the data from Event Hubs. You also assign permissions to Event Hubs so that it can write data into a Data Lake Store account. Open the Data Lake Store account where you want to capture data from Event Hubs and then click on Data Explorer. Click New Folder and then enter a name for folder where you want to capture the data. Assign permissions at the root of the Data Lake Store. a. Click Data Explorer, select the root of the Data Lake Store heirarchyindividually to each folder in the path to your final destination folder. Click OK. Assign permissions for the folder under Data Lake Store account where you want to capture data. a. Click Data Explorer, select the folder in the Data Lake Store. Configure Event Hubs to capture data to Data Lake Store In this section, you create an Event Hub within an Event Hubs namespace. You also configure the Event Hub to capture data to an Azure Data Lake Store account. This section assumes that you have already created an Event Hubs namespace. From the Overview pane of the Event Hubs namespace, click + Event Hub. Provide the following values to configure Event Hubs to capture data to Data Lake Store. a. Provide a name for the Event Hub. b. For this tutorial, set Partition Count and Message Retention to the default values. c. Set Capture to On. 
Set the Time Window (how frequently to capture) and Size Window (data size to capture). d. For Capture Provider, select Azure Data Lake Store and the select the Data Lake Store you created earlier. For Data Lake Path, enter the name of the folder you created in the Data Lake Store account. You only need to provide the relative path to the folder. e. Leave the Sample capture file name formats to the default value. This option governs the folder structure that is created under the capture folder. f. Click Create. Test the setup You can now test the solution by sending data to the Azure Event Hub. Follow the instructions at Send events to Azure Event Hubs. Once you start sending the data, you see the data reflected in Data Lake Store using the folder structure you specified. For example, you see a folder structure, as shown in the following screenshot, in your Data Lake Store. Note Even if you do not have messages coming into Event Hubs, Event Hubs writes empty files with just the headers into the Data Lake Store account. The files are written at the same time interval that you provided while creating the Event Hubs. Analyze data in Data Lake Store Once the data is in Data Lake Store, you can run analytical jobs to process and crunch the data. See USQL Avro Example on how to do this using Azure Data Lake Analytics.
https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-archive-eventhub-capture
CC-MAIN-2018-26
refinedweb
578
72.97
Tools that honor the Maintain Curve Segments environment will maintain input curve segments in the output. Usage notes - Maintaining curve segments can be time-consuming in some situations. The number of curve segments, as well as the complexity of the curve segments, could require more processing time. - Depending on the arc of the curve, tool output may differ slightly when maintaining curve segments, compared to using densified segments. More or less features could interact, causing differences in output. Dialog syntax Maintain Curve Segments—Determines if curve segments in the input will be maintained as curves in the output. - Unchecked—Input curve segments will be densified in the output. This is the default. - Checked—Input curve segments will be maintained in the output as is. Scripting syntax arcpy.env.maintainCurveSegments = Boolean Script example import arcpy # Maintain input curve segments if they are needed in the output. arcpy.env.maintainCurveSegments = True
https://pro.arcgis.com/en/pro-app/latest/tool-reference/environment-settings/maintain-curve-segments.htm
CC-MAIN-2022-33
refinedweb
149
50.02
how to write a thread in ROS I would like to know how to write a async thread in ROS. An example would be greatly appreciated. Edwin I would like to know how to write a async thread in ROS. An example would be greatly appreciated. Edwin Assuming you are looking for C++ solution, here is what the Wiki page on Callbacks and Spinning says:. Please also see this question with answers. In short: you can choose any threading library you like; ROS nodes and callbacks are thread-safe. However, to avoid extra dependencies it is a good idea to use Boost threads (ROS utilises Boost for many internal components). Here are the docs and examples for Boost threads. Also, if c++11 compiler is available then you can try std::thread. Or you even can use POSIX Threads. EDIT: The following example uses Boost threads with ROS. It publishes empty messages to two topics with different rates, which you can easily check with rostopic hz <topic_name>. #include <ros/ros.h> #include <std_msgs/Empty.h> #include <boost/thread/thread.hpp> void do_stuff(int* publish_rate) { ros::NodeHandlePtr node = boost::make_shared<ros::NodeHandle>(); ros::Publisher pub_b = node->advertise<std_msgs::Empty>("topic_b", 10); ros::Rate loop_rate(*publish_rate); while (ros::ok()) { std_msgs::Empty msg; pub_b.publish(msg); loop_rate.sleep(); } } int main(int argc, char** argv) { int rate_b = 1; // 1 Hz ros::init(argc, argv, "mt_node"); // spawn another thread boost::thread thread_b(do_stuff, &rate_b); ros::NodeHandlePtr node = boost::make_shared<ros::NodeHandle>(); ros::Publisher pub_a = node->advertise<std_msgs::Empty>("topic_a", 10); ros::Rate loop_rate(10); // 10 Hz while (ros::ok()) { std_msgs::Empty msg; pub_a.publish(msg); loop_rate.sleep(); // process any incoming messages in this thread ros::spinOnce(); } // wait the second thread to finish thread_b.join(); return 0; } Please start posting anonymously - your entry will be published after you log in or create a new account. 
Asked: 2015-09-21 08:59:43 -0500 Seen: 8,971 times Last updated: Sep 21 '15 Running a parallel function ROS2 Best practices: multiple nodes in the same process How to run ros::spin in the background ImageTransport throws Segmentation Fault Multithreaded nested service calls Calling ros::spin from a new Thread Gazebo world reset and Duration sleep
https://answers.ros.org/question/217960/how-to-write-a-thread-in-ros/
CC-MAIN-2019-39
refinedweb
368
55.64
WADL Submitted to W3C For those of you who've been patiently waiting for WADL to be submitted to a standards organization, I'm pleased to announce that WADL is now a W3C Member Submission. The specification submitted to the W3C is a reformatted version of the draft update I blogged about here. If you already switched to that version then you are current. If not, now would be a good time to think about updating to the latest grammar, the changes are fairly minimal but did require a new namespace since they were not backwards compatible. - Printer-friendly version - mhadley's blog - 4018 reads
https://weblogs.java.net/node/300029/atom/feed
CC-MAIN-2015-27
refinedweb
105
59.43
Very The book Rapid GUI Development with QtRuby (for Qt version 3.x) is available. Being Smoke-based bindings means that they offer full access to most KDE 4.x and Qt 4.x classes. Hello world example: require 'Qt4' a = Qt::Application.new(ARGV) hello = Qt::PushButton.new("Hello World!") hello.resize(100, 30) hello.show a.exec Hello Qt example in a more 'Rubyish' way: require 'Qt4' Qt::Application.new(ARGV) do Qt::Widget.new do self.window_title = 'Hello QtRuby v1.0' resize(200, 100) button = Qt::PushButton.new('Quit') do connect(SIGNAL :clicked) { Qt::Application.instance.quit } end label = Qt::Label.new('Hello Qt in the Ruby way!') self.layout = Qt::VBoxLayout.new do add_widget(label, 0, Qt::AlignCenter) add_widget(button, 0, Qt::AlignRight) end show end exec end()'.. For slots and signals without arguments you can use Ruby symbols: slots :slotLoad signals :clicked Currently C++ type signatures must be used, a future version of QtRuby will allow ruby type signatures instead. (see class MyObject < Qt::Object signals "mySignal(QVariant)" def doEmit # since KDE 4.5: # emit mySignal(Qt::Variant.fromValue(ruby_object)) emit mySignal(ruby_object.to_variant) end slots "mySlot(QVariant)" def mySlot(variant) ruby_object = variant.value end end: ok = Qt::Boolean.new font = Qt::FontDialog.getFont(ok, Qt::Font.new("Helvetica [Cronyx]", 10), self) if !ok.nil? else end Use 'nil?' to test the value returned in the Boolean In some cases Qt/KDE object "takes ownership" over Ruby String passed as char* argument type. Programmer needs to make sure that Ruby String is not being garbage collected or changed for the time it's being used by Qt/KDE object. It is also quite possible that Qt/KDE object will change and eventually free it(memory used internally by Ruby String to store its data). Be very careful when you call this kind of methods and make sure that there is no overloaded version witch accepts QString or QByteArray first! 
Very few functions (as QImage::bits()) return a uchar* to directly manipulate data. These functions are not supported in Ruby and will throw an ArgumentError. More information on the mail list. If a method call can't be matched in the Smoke library giving a 'method_missing' error, first check that you are passing correct class instance that is properly initialized (with super method called in constructors of custom Qt classes descendants). You can also Use the bin/rbqtapi tool to discover which methods are available in the QtRuby api. This command: $ rbqtapiexamples and korundum/examples. Using Ruby allows you the power to leverage testing frameworks such as RSpec in order to unit and integration test your classes. Most classes such as Models can be easily tested via the APIs they expose. Below is a small extract with some interesting tests from the mingle_mover project (). The class being tested is a TableModel Note how we create a stub to pass to QAbstractTableModel::data, which accepts a QModelIndex as an argument. 
describe CardModel do it "Should Implement QAbstractTableModel" do CardModel.ancestors.should include Qt::AbstractTableModel end it "Should Not Be Editable" do @model.data(mock_index(1,2), Qt::EditRole).should_not be_valid @model.headerData(nil, nil, Qt::EditRole).should_not be_valid flags = @model.flags(nil) flags.should have_flag Qt::ItemIsEnabled flags.should have_flag Qt::ItemIsSelectable flags.should_not have_flag Qt::ItemIsEditable end it "Should not return any vertical headers" do @model.headerData(1, Qt::Vertical).should_not be_valid end it "Should color a row according to the status" do @model.data(mock_index(1,2), Qt::ForegroundRole).value.color.should == Qt::Color.new(Qt::red) @model.data(mock_index(0,2), Qt::ForegroundRole).value.color.should == Qt::Color.new(Qt::green) @model.data(mock_index(0,0), Qt::ForegroundRole).should_not be_valid end def mock_index(row, col) stub(:row => row, :column => col) end def have_flag(flag) return simple_matcher("A flag that matches " + flag.to_s) { |given| (given & flag) != 0 } end end Below is a simple class which can be used to test out a class via the signals it emits: class RubySignalSpy < Qt::Object def self.create(*args, &block) Class.new(self).new(*args, &block) end def count(name) @calls[name].size end def params(name, invocation = 0) @calls[name][invocation] end def method_missing(name, *args, &block) @calls[name.to_sym] << args exec_action_for(name, args) end def responds_to?(name) true end private def initialize @calls = {} def @calls.[](index) super || self[index] = [] end @actions = {} super end def mocked_slots(*names, &block) slots *names names.each { |name| @actions[name] = block } end def exec_action_for(name, args) @actions[name].call(self, args) if @actions[name] end def slots(*args) self.class.slots(*args) end end it "Should be able to emit a signal when called" do reciever = RubySignalSpy.create do slots "recieved(int, int)" # Explicitly name slots with parameters mocked_slot :some_other_slot do 
|spy, params| # Pass a block to be executed when called end # You must call mocked_slot with a symbol end class ClassWeAreTesting < Qt::Object signals "sending(int, int)" def broadcast emit sending(4, 2) end end sender = ClassWeAreTesting.new Qt::Object.connect(sender, SIGNAL("sending(int, int)"), reciever, SLOT("recieved(int, int)")) sender.broadcast reciever.count(:recieved).should == 1 # Get count of calls reciever.params(:recieved, 0).should == [4, 2] # Get the parameters of nth invocation end It is much more difficult to test UI classes. In most cases, you will need to instantiate a QApplication so that you can create your widgets. Some rudimentary tests can be written by simulating clicks are various locations. Watch this space as more tests get written describe SomeTableView do before(:all) do @app = Qt::Application.new(ARGV) @view = SomeTableView.new end it "Should accept a mouse click on second row" @row2 = @view.rowViewportPosition 1 @view.mousePressEvent(Qt::MouseEvent.new(Qt::Event::MouseButtonPress, Qt::Point.new(0,@row2), Qt::LeftButton, Qt::LeftButton, Qt::NoModifier)) # Assert something here, such as that a signal has been emitted end after(:all) do @app.dispose! end end Instead of require 'Qt4', use require 'korundum4'. There is a ruby translation of Qt Tutorial #1, and the corresponding ruby code is in qtruby/rubylib/tutorial/t1 to t14. And a Qt4 version of the same tutorial translated to Ruby by Darshan Ishaya Qt4 Ruby Tutorial Qt Tutorial #2, a Charting Application with ruby code in qtruby/rubylib/examples/qt-examples/chart. The Qt Designer 3.0 tutorial originally written for C++ by Antonio Larrosa Jiménez. The sources are in korundum/rubylib/tutorials/p1 to p9. The book Rapid GUI Development with QtRuby is now available. There is also an approach to create an Ruby-Qt/KDE Book under a free license. The content will be created in this wiki. The book made with latex will be derived from the content in the wiki. Any Questions? Contact me!):
https://techbase.kde.org/index.php?title=Development/Languages/Ruby&oldid=52682
CC-MAIN-2015-35
refinedweb
1,115
51.75
The. The velocity and position are calculated from the acceleration at each time step of the simulation, which is taken to be due to the force of gravity only (a ballistic trajectory): $$ \mathbf{a} = -\frac{GM}{r^3}\mathbf{r}, $$ where $M$ is the mass of the planet. Note that atmospheric drag and the Earth's own rotation are neglected. import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Circle from scipy.constants import G # Convert Newtonian constant of gravitation from m3.kg-1.s-2 to km3.kg-1.s-2 G /= 1.e9 # Planet radius, km R = 6371 # Planet mass, kg M = 5.9722e24 fac = G * M def calc_a(r): """Calculate the acceleration of the rocket due to gravity at position r.""" r3 = np.hypot(*r)**3 return -fac * r / r3 def get_trajectory(h, launch_speed, launch_angle): """Do the (very simple) numerical integration of the equation of motion. The satellite is released at altitude h (km) with speed launch_speed (km/s) at an angle launch_angle (degrees) from the normal to the planet's surface. """ v0 = launch_speed theta = np.radians(launch_angle) N = 100000 tgrid, dt = np.linspace(0, 15000, N, retstep=True) tr = np.empty((N,2)) v = np.empty((N,2)) # Initial rocket position, velocity and acceleration tr[0] = 0, R + h v[0] = v0 * np.sin(theta), v0 * np.cos(theta) a = calc_a(tr[0]) for i, t in enumerate(tgrid[1:]): # Calculate the rocket's next position based on its instantaneous velocity. r = tr[i] + v[i]*dt if np.hypot(*r) < R: # Our rocket crashed. break # Update the rocket's position, velocity and acceleration. 
tr[i+1] = r v[i+1] = v[i] + a*dt a = calc_a(tr[i+1]) return tr[:i+1] # Rocket initial speed (km.s-1), angle from local vertical (deg) launch_speed, launch_angle = 2.92, 90 # Rocket launch altitute (km) h = 200 tr = get_trajectory(h, launch_speed, launch_angle) def plot_trajectory(ax, tr): """Plot the trajectory tr on Axes ax.""" earth_circle = Circle((0,0), R, facecolor=(0.9,0.9,0.9)) ax.set_facecolor('k') ax.add_patch(earth_circle) ax.plot(*tr.T, c='y') # Make sure our planet looks circular! ax.axis('equal') # Set Axes limits to trajectory coordinate range, with some padding. xmin, xmax = min(tr.T[0]), max(tr.T[0]) ymin, ymax = min(tr.T[1]), max(tr.T[1]) dx, dy = xmax - xmin, ymax - ymin PAD = 0.05 ax.set_xlim(xmin - PAD*dx, xmax + PAD*dx) ax.set_ylim(ymin - PAD*dy, ymax + PAD*dy) fig, axes = plt.subplots(nrows=2, ncols=2) for i, launch_speed in enumerate([3, 6.5, 7.7, 8]): tr = get_trajectory(h, launch_speed, launch_angle) ax = axes[i//2,i%2] plot_trajectory(ax, tr) ax.set_title('{} km/s'.format(launch_speed)) plt.tight_layout() plt.savefig('orbit.png') plt.show() Comments are pre-moderated. Please be patient and your comment will appear soon. There are currently no comments New Comment
https://scipython.com/blog/reaching-orbit/
CC-MAIN-2019-51
refinedweb
485
53.58
Forum:The dwindling of Uncyclopedia From Uncyclopedia, the content-free encyclopedia Along with fatigue at the many editing and policy "wars" and the manner in which they are fought, and the recent overt declaration of our hosts that (initially, for things found obscene) our creativity is to be subordinated to the needs of corporate legal staff, an article in The Atlantic suggests that the reason Wikipedia is seeing the same decline is simply that the job is nearing completion. And an increasing number of Uncyclopedia articles are done well enough that the average rank amateur does not think he can improve on them (and if he is sufficiently rank, we revert him when he tries), and an increasing number of these have been Featured Articles and implicitly resist alleged improvement. Something separate is that the Internet continues to grow and diversify. Like any amateur starting a Web-based (or not Web-based) business, once your Bright Idea has been put into effect, you achieve traffic--usually through advertising--or the business dies. It is just not enough any more to be the world's best parody of an on-line encyclopedia that is itself becoming less notable among web pastimes. People have to care about it. Failing that, there is an ever-increasing number of alternative websites begging for hours of amateur effort. Spıke ¬ 10:36 5-Nov-12 - I dunno, man. "Job complete"? We certainly have plenty of quantity, but precious little quality. A random sampling of pages (Halloween, Paul McCartney, Shark, Oklahoma, Russia) turns up a cornucopia of mediocrity. We have 2,000+ featured articles; let's say we've got three times that number that are decent or better. That leaves 20,000 stinkers that are doing double duty as "placeholders".I think what's doing this place in is Facebook, Twitter, YouTube, Hulu, Crackle, etc. When the site started in '05, our target demographic didn't really have other major, organized distractions besides Wikipedia. Twitter didn't exist. 
Facebook had 5 million users. MySpace had less than 30 million. Hulu didn't exist; YouTube didn't go public until May; and streaming video was still a buggy mess. The iPhone itself wasn't even released until mid-2007, and Android handsets over a year later. (These facts amaze me.)So when the original base of users who joined from 2005-08 grew up, graduated, or got "work" or "lives", or got eaten by lions, their replacements were being drawn to brain-droppings on Twitter, or cat videos on UBoob. When the user base didn't replenish itself after 2008, Uncyc was set up for the constant and now-accelerating attrition. Unlike, say, The Onion or Cracked.com, we have no marketing, so the actions of Wikia are just driving the damned knife in.I agree with you, SPIKE, that there needs to be some diversification, but how to accomplish that is beyond me, especially with our Corporate Overlords. The only way of getting more users (and viewers) here is by word-of-mouth, pulling them in ourselves, kicking and screaming. I'm not giving up just yet, but I'll be sending out prayers to St. Jude, the patron of hopeless causes. ~ BB ~ (T) ~ Mon, Nov 5 '12 16:40 (UTC) - There's always been new discoveries, but not as much groundbreaking ones though. And new events come and go, and comedy is not going to die out just yet. When comedy goes extinct, we go extinct. |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 06:45, November 9, 2012 (UTC) A Cornucopia of Mediocrity Chart showing the current composition of Uncyclopedia articles. Stolen from Wikipedia's version, because fuck originality. ~ BB ~ (T) ~ Thu, Nov 8 '12 13:45 (UTC) Uncyclopedia: A Cornucopia of Mediocrity is our motto. Always has been. Besides, comedy's dead. The next big things are Korean rappers dancing like they're riding horses and shorting Facebook stock. Sir Modusoperandi Boinc! 03:30, November 6, 2012 (UTC) - I thought it was Uncyclopedia: Thank God for Ceftriaxone. 
~ BB ~ (T) ~ Tue, Nov 6 '12 4:38 (UTC) - A new joke mine needs to be discovered. A new Columbus required....-- RomArtus*Imperator ® (Orate) 12:46, November 6, 2012 (UTC) The next Big Thing There are far more "my sojourn" jokes out there than there are "suddenly raccoons". Maybe what Uncyclopedia needs is a new forced meme. -- Simsilikesims(♀UN) Talk here. 15:48, November 8, 2012 (UTC) - Promote Navelism? -- RomArtus*Imperator ® (Orate) 23:03, November 8, 2012 (UTC) Add a section about "userspace" to the Beginner's Guide Userspace is no longer mentioned in the Welcome message, nor is it mentioned anywhere in the Beginner's Guide. This should be fixed, so that new users can take more time to practice writing articles in userspace before getting them stomped on in mainspace. -- Simsilikesims(♀UN) Talk here. 06:35, November 9, 2012 (UTC) - Was the Welcome message not slimmed down exactly to be more "user-friendly"?! Spıke ¬ 12:09 11-Nov-12 The notorious {Fix} tag Let's allow the creation of encyclopedia dramatica-like articles, articles about famous wikipedia vandals, articles about famous memes, and stop deleting funny articles like Uwe boll. In fact revive most articles that were killed by fix tag abuse in 2011-2012. that will help a lot.--fcukman LOOS3R! 08:05, November 8, 2012 (UTC) - Sounds like a bad idea. ~Sir Frosty (Talk to me!) 08:06, November 8, 2012 (UTC) - Also, don't forget to exodus from wikia. LIKE RIGHT NOW--fcukman LOOS3R! 09:40, November 8, 2012 (UTC) - Illogicopedia tried that...their new server was so slow that they lost even more users. -- Simsilikesims(♀UN) Talk here. 15:37, November 8, 2012 (UTC) Revive most articles killed in the fix tag abuse in 2010-2012 if they are not overly terrible.--fcukman LOOS3R! 03:08, November 9, 2012 (UTC) - @Mr-ex777 if you want to fix up such articles all you have to do is ask an admin to restore it for you, so you can fix it... ~Sir Frosty (Talk to me!) 
05:45, November 9, 2012 (UTC) - I don't think the {{Fix}} tag was indiscriminately applied to good articles, nor that the Way Forward is to encourage a new supply of bad (def.: Encyclopedia Dramatica) ones. But it was offputting, when I returned after a ban, to see that some of my articles had been tagged, making my first task on return either to obey the Admin (in fact, the one who banned me), or remove the Fix tag without repair (which was called for in a couple cases) and thereby openly defy him. That dilemma might have induced users not to return at all. Spıke ¬ 12:09 11-Nov-12 - New Direction: Allow ED-style humour with wit of the Onion. |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 07:50, November 13, 2012 (UTC) I have an intricate plan to potentially bring in a brand new userbase for you guys... ...However it involves bringing in a flood of bronies. Is it worth it? Choose carefully and let me know if, as a last resort, you want me to press the button. It's Mrthejazz... a case not yet solved. 16:15, November 11, 2012 (UTC) - Enough defeatism! This is turning into Vichy Uncyclopedia!! Bah!!! -- RomArtus*Imperator ® (Orate) 21:38, November 11, 2012 (UTC) - We have two options: Die now but have a good legacy, or die later and have a shitty, brony-infested legacy. We already brought in Kirby, and we haven't died yet, so...I guess it's worth a shot. I lost all my brony contacts in the Great Aimsplode Rapture of 2012 (which was significantly more rapture-like than the previous two years), so we'd have to rely on Kirby and THS to recruit. - Seeing as they are both teenagers (*shiver*), but also noting that the majority of our audience are <13 and 14-20, that actually makes pretty good sense. In fact, why don't we have sort of a recruiting competition? We can form teams of three or so, and try and get as many people to the site as possible. 
Whoever wins gets, like, a free UN:STORE item for each member of the team (it will be one of the cheapest things there, of course). That seems like some good initiative. Although I realize this site is a part-time weekend job for most of us, I believe that we can revitalize it sufficiently to the point that we have a constant stream of new articles being put out and fixed up. History never stops - it never will - so we should focus more on ongoing events and recent things than old-time things that people have forgot about. Nobody cares about Y2K anymore, so that's irrelevant: but the upcoming "Fiscal Cliff" that everyone's got a stick up their ass about? We could make some good shit out of that. ←HGA→ (Der_Führerbunker--Wehrmacht Factories) 22:03 11 November 2012 And that was HGA's response that veered wildly off-course and turned into a full-out rant. Maybe someone could make some use out of it. Also note that we should write more about things younger people are in to, not old fogies like us (meaning the people older than 25). - You're going the wrong way. We should try to appeal to old people. Their kids bought them computers, and they're just filling the empty days before they kick off. Plus, once they get going, they're full of inappropriately racist humour. And the best part is, all we need to do to attract them is put out a bowl of mints, kick up the thermostat ten or twenty degrees, and knit cozies for the toaster and spare toilet rolls. Sir Modusoperandi Boinc! 22:27, November 11, 2012 (UTC) - No; and, no. Self-styled leaders of an Uncyclopedia recruitment drive have no greater expertise to target specific age brackets than national leaders have expertise to decide we need to reinvigorate manufacturing versus push solar energy. The energy of the young and the perspective of the old have always combined to give this place vitality. 
Yes, whether Y2K or the resignation of Spiro Agnew, as an event fades from importance, articles get deleted or at least face a greater burden to prove to the new reader that he should care; that's a natural part of staying relevant. Let us not give anyone the feeling that "you are not whom we are after." (I understand that Modusoperandi's tongue is in cheek, and I'm not sure about Aimsplode.) Spıke ¬ 00:32 12-Nov-12 - Bronies are all ages, just saying. I could pull off the plan single-highhandedly. I'm only half joking. Just how desperate are we, anyway? - What I said is serious, Spike. THere are more young with computer access than old. ←HGA→ (Der_Führerbunker--Wehrmacht Factories) 02:31 12 November 2012 - Eh, my experience with bronies is that they're often intelligent, funny people (which is exactly what we need, no?). I'd be willing to look past their kinda weird obsession with a kiddy show and accept them in. At least they're not Twilight fans, right? —Sir Socky (talk) (stalk) GUN SotM UotM PMotM UotY PotM WotM 02:34, 21 November 2012 - Worse. Bronies are in the same level as furries. any brony saying that they won't clop is in denial, the same for furries and yiffing.--fcukman LOOS3R! 04:10, November 21, 2012 (UTC) Advertising, anyone? Why not we move to a new server, like some cloud computing service that can host our shit for us? |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 07:46, November 13, 2012 (UTC) - Someone has to own the site to deal with payments, legal issues (people complaining about articles, copyrights etc) and general work to keep a site running. That is why the original owner of this site sold out so quickly. -- RomArtus*Imperator ® (Orate) 08:24, November 13, 2012 (UTC) - Yup. (Joe, editing articles from home using Wikia software is exactly "cloud computing.") Also: Servers are good, but not yet immortal, and someone has to pay for the repairman. 
Also salaries of software engineers to deal with what I am told are bugs in MediaWiki, also plane fare for junkets to Poland to visit them, also--if some on the site are bigger slaves to fashion than this XP fanatic is--installation and debugging of follow-on MediaWikis. As always, the only sustainable solution is a solution with revenue, either user "subscription" fees (a technique whose use elsewhere induced me to return here) or advertising aimed at visitors (which, back to the start, assumes there are visitors, which there will not be without advertising). At which point, hiring another team of psychiatrists to tell Aleister why Free Speech doesn't mean free servers. - I don't like writing in corporate environments, and site-warnings are only the start of the concern; after a while, writing is done not to communicate but comply. I don't understand the business plan that lets Wikia deal with us so passively and tolerate page-footer advertising space that apparently no one ever buys, unless it is to develop a pool of creative talent for harvesting later, or to use this and other wikis to build a competing mega-social site for those with the desire to build something bigger than a personal page. The next step for them must be to spend real money to actively tout Uncyclopedia as a destination. As in: 15-second radio spots, cryptic ads in magazines, and perhaps another prankster shutting down the Boston subway system, this time based on something he read in UnNews. If Wikia is getting serious about disclaimers, they should get more serious about boosting traffic. Spıke ¬ 12:43 13-Nov-12 - The sad things is, we really can't do anything about what Wikia is doing. Who really has the authority to ask to "buy" Uncyclopedia off of Wikia's servers and move it? Since Wikia "owns" us now, would they even allow us to move? ←HGA→ (Der_Führerbunker--Wehrmacht Factories) 14:51 13 November 2012 - We are allegedly free to "fork" the wiki, i.e. 
take the articles and our current user base somewhere else, leaving this particular site as an abandoned amusement park. However, according to some interview which you can find posted somewhere in these forums, Jimbo Wales "liked uncyclopedia so much that [he] purchased it" (allegedly for many thousands of dollars). So, while Wikia does not own the actual articles, and does not apparently own any trademarks on the "uncyclopedia" brand, they DO own the domain uncyclopedia.org, and they'll be damned if they relinquish it to us. (I assume, I haven't asked.) Someone else owns uncyclopedia.com, and is no longer open to discussions about purchasing it. As for "buying back" the uncyclopedia.org domain (which is almost the only thing about uncyclopedia that isn't freely available to us through a database dump)...I doubt uncyclopedia is for sale, in the first place, and I doubt anyone here has the money to buy it back, even if it were. Our options are (a) continuing to allow Wikia to screw with us, in exchange for freely hosting our drivel, or (b) paying for a move to some other domain, ourselves, a move which will most likely be ignored by Wikia, and even actively covered up. Rock and a hard place. ~ BB ~ (T) ~ Tue, Nov 13 '12 16:27 (UTC) - The smart thing to do here would be to have everyone start researching a different webhost, than decide upon one and pool some money together each month/bill time to pay for the hosting. I would definitely help pay for webhosting, and on the subject, one of my closer friends happens to be a lawyer. I could probably convince him to represent us for free, or maybe some small favors from me on the side. Is anyone even up for changing hosts, if we had the money, and if we had a webhost willing to take us in? Again, I believe the pooling in money system is a good way to go. We could set up a PayPal and everything, sort of like Wikipedia and their fundraising. Together, if we all gave like $5, we could pay for hosting pretty easily. 
As for hosting, we could get, or, or some flashy new domain they have out there (I see an uncyclopedia.xxx in our future...!). Any suggestions/comments on that? ←HGA→ (Der_Führerbunker--Wehrmacht Factories) 01:57 14 November 2012 - I am seeing an uncyclopedia.org in the future. But uncyclopedia.com is a safe bet, and we can get some much needed money from advertisers. Google hits ahoy, like Cracked! (But Cracked was a magazine) |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 04:36, November 15, 2012 (UTC) Don't wanna sound like a broken record here guys - The solution to all of our problems is clear: deletions on a massive scale. Tear down all the old rotten trees, and clear up room for new trees to grow. The logging industry calls it clearcutting, and they practice it for "scientific, safety, and economic reasons." Let's burn all the shit to the ground and save the site! -- Brigadier General Sir Zombiebaron 06:30, November 15, 2012 (UTC) - Homosexual idea right here. ~Sir Frosty (Talk to me!) 06:37, November 15, 2012 (UTC) - Ghey as hell--fcukman LOOS3R! 06:54, November 15, 2012 (UTC) - Why not you tell Wikipedia that? There are 4 million articles already! We only have 30,000 something of them! |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 08:41, November 15, 2012 (UTC) - We are not Wikipedia. Wikipedia should have an article about everything. Uncyclopedia does not need an article about everything, we need funny articles. It takes a certain rare talent to create a full article out of an Uncyclopedia stub, but on Wikipedia it is fairly easy because all you have to do is talk about the facts. It has always been my opinion that it is easier to write Uncyclopedia articles when given a blank slate. -- Brigadier General Sir Zombiebaron 08:50, November 15, 2012 (UTC) - we do my dear.--fcukman LOOS3R! 08:54, November 15, 2012 (UTC) - Advertising and marketing is very much the way to go here. Perhaps Wikipedia itself may be of some help.... 
--Lord Scofield Stark 09:13, November 15, 2012 (UTC) - I never used to be one for mass deletions...but I'm beginning to think there is good reason to put most of the other articles under a pedestal. What I have in mind is keeping the main space for featured articles and perhaps a few more (however that is decided). Then a sandbox space for highly searched articles and reasonable articles in developement (surviving ICU for example), and yes...burn the crap, even if it might have some good ideas. Burn it. In any case, I finaly agree that articles that are immediately visible, should be limited to very good and funny ones, even if it means a limited amount of immediately visible articles. (all other good humour sites are composed only of good featurable funny articles). - The ultimate goal is, that if someone searches for an article and its not a very good one or featurable one, that the article as is, is not shown, but instead an invitation to view the article in its current status and or improve it...which tells the visitor that only the best of the best is shown and that they are welcome to join in and help. People who wanna read funny articles will go on and search somthing else or click on "read a random article". People who are interested in the article will go ahead and read it (which may have a few laughs) and maybe help out and write. - This...I think...would acommadate certain people who we would not name...who don't like the idea of destroying reasonable material good or not, I think Ali would agree. - By the way...I have wet dreams about all of you...sometimes wet dreams about all of you at once. Hot wet dreams that soak my bed. --ShabiDOO 04:44, November 16, 2012 (UTC) - I don't see how this is workable - IP's and users who are new to the site could easily create new, garbage articles in the mainspace, not knowing any better, when a better article already exists in this "Sandbox" namespace. 
So unless we are willing to protect the entirety of the mainspace, I just don't think this idea would work. Besides, sometimes articles of very high quality can actually scare new writers away from writing if they don't think they can do any better. On the other hand, it might attract writers of higher caliber, but there are fewer quality writers than there are mediocre writers, and it seems like you need to attract both the bad and the good in order to find the ones who are good. -- Simsilikesims(♀UN) Talk here. 07:35, November 16, 2012 (UTC) - Shabidoo, stop being a anti-humor nazi. Deletionism is tourneyfaggotry and anti humor, and we don't need them. We have enough of them.--fcukman LOOS3R! 09:42, November 16, 2012 (UTC) - Yeah and you are now banned for your rather rude remark. ~Sir Frosty (Talk to me!) 10:10, November 16, 2012 (UTC) - Sims...I think you have my wrong here. I said...only featured articles (and perhaps excelent articles chosen in some way which I dont know) would be allowed in the main space. All other articles are in another space (call it sandbox) which would have a message the first time viewed saying something along the lines of "work in progress...help make this article better (or create it if non existant" or "check out featured content" or something of the like. Also...ICU stuff that didnt survive would be burnt. - I used to be a pretty big defender of saving all articles, but i just don't see the point anymore of presenting awul awful crud as though its publishable. Is isn't...and it should be sandboxed...out of view from readers and available to would be editors/creators. - And Mr-ex777 ... I love you too. --ShabiDOO 10:41, November 16, 2012 (UTC) . - Well, i have been warned to not argue with you, but many others disagree. Why do something that would disenchant our readers?--fcukman LOOS3R! 11:47, November 16, 2012 (UTC) - Also, VFH is low on votes in a really bad way. Doing this won't help. Even ED does not have such a rule. 
And those said bad articles are sometimes classics and are not that unfunny. Doing this would just disenchant them.--fcukman LOOS3R! 11:54, November 16, 2012 (UTC) - While I didn't really mind your post, I can understand how an admin would find calling someone an nazi in the lines of tourney faggotry (and I have no clue what that means) is not disagreeing with a colleague (and apparantly not the first time they've noticed it), but insulting someone and attacking the person rather than the idea. I've been banned at least a dozen times but I've never inuslted another user. Throwing out names is the least effective way of expressing your view and only makes one look like a dick rather than wise. Note how simsims expressed his (valid) disagreement in a more than agreeable way without throwing feces at me. - As per your recent post, there's no reason why having a minimum standard for main space that it would exclude non featured items (as I've said others can be included in some agreed upon way) nor is there any reason why it would affect VFH as we still feature a new article every day, and that very process is what gets your article into not only the "best of the best" but in the case of a limited main space, an article that's not just given a sticker, but equal standing among other articles that are actually worth reading. And finaly, I don't think uncyclopedia can be compared to ED at all. --ShabiDOO 15:03, November 16, 2012 (UTC) - many articles are wroth reading, but they did not get featured. And they are funnier than most featured articles (in my opinion). Your arguement sums up to we can delete those as well. Well. Most readers, and especially us don't like that.--fcukman LOOS3R! 15:40, November 16, 2012 (UTC) - I feel like a broken record. I have no argument and it cannot be summed up. 
I said three times now...that we can agree on a way to include some article that are not featured as well...meaning...that the articles that you love...that are not featured...can be mainspaced. I agree...that this is important. We are not in disagreement. We love each other. We share everything...including the things that touch us deep within our tiny animal hearts. --ShabiDOO 19:31, November 16, 2012 (UTC) - What if one person loves an article, and another detests it? People like different things. I may never like the article on Rape for example, nor the article on Girls. But that doesn't mean someone else doesn't find it hilarious. You say the method of figuring out which ones that weren't features could go in mainspace is "to be determined", and even if it involves voting, eventually, it could deteriorate into a dramafest ending in someone ragequitting. Especially this would be the case if the vote is divided on an article. Finally, we have trouble enough getting people to vote for featured articles or articles to be deleted - imagine trying to vote on all the articles on the site that aren't features! This seems like a monumentally impossible task. -- Simsilikesims(♀UN) Talk here. 08:00, November 17, 2012 (UTC) - You guys are getting off topic. This header is for discussing mass deletions. -- Brigadier General Sir Zombiebaron 16:45, November 17, 2012 (UTC) - Indeed...only one out of 100 ideas for a new way of thinking are ever implemented. A forest fire month...of double intensity might be the best idea. --ShabiDOO 20:18, November 17, 2012 (UTC) - We do not wish for such things.--fcukman LOOS3R! 03:20, November 18, 2012 (UTC) - When you say WE are you talking for yourself? For certain users of uncyclopedia? As a super natural diety? As the queen of England? As an invisible pink unicorn? --ShabiDOO 04:14, November 18, 2012 (UTC) - My biggest problem with this proposal would be that might not actually change anything. 
Wouldn't new readers be equally turned off by seeing that half their searches are turning up "Work in Progress" messages? They'd probably think this is a parody encyclopedia without much of quality content, which is probably the same way they'll feel if we don't do anything. What I feel is that we need people to be actually interested in contributing to this place and improving upon existing pages if they can. With the way things are, I don't think articles excluded from the mainspace might ever find their way back. It's almost like dying a slow death. --Lord Scofield Stark 14:06, November 19, 2012 (UTC) - Agreed.--fcukman LOOS3R! 03:51, November 20, 2012 (UTC) - I guess we see things differently. I would venture to guess that most people give up searching for articles they'd like to see a parody of after they discover that most of the ones they already searched for are awful, not funny and an utter waste of time. It would be far better to have a few thousand even minimally acceptable articles than tons and tons of searchable crap. All successful commedy sites have limited material and quality standards FAR above "minimal" and maintain a much higher audience than we do. The fact that we are a parody of a very large site with millions of articles doesn't mean we have to write millions of shitty ones in an attempt to resemble them. The goal is to make people laugh and enjoy our articles...not comb through a pile of garbage searching for the rare gem. But I think this is the same old dichotomy that comes up again and again and that forest fire week is the only foreseeable lowest common denominator solution...as it makes few happy as some think every article...even stubs...are sacred...and others think the whole website must be burnt and rebooted. --ShabiDOO 04:33, November 20, 2012 (UTC) - just get your lazy asses up and rewrite them. Like i do.--fcukman LOOS3R! 
06:12, November 20, 2012 (UTC) - I recently attempted a rewrite of an article that was VFD on a site that shall not be named, and was not satisfied with the result. Otherwise I would have copied it back to my sandbox here and had an admin restore the revised article. I wish I were better at rewrites. -- Simsilikesims(♀UN) Talk here. 02:15, November 21, 2012 (UTC) I Have an Answer! Uncyclopedia will likely not start growing again until the shackles Wikia has placed on us are removed. That is all. --Revolutionary, Anti-Bensonist, and TYATU Boss Uncyclopedian Meganew (Chat) (Care for a peek at my work?) (SUCK IT, FROGGY!) 23:51, November 18, 2012 (UTC) - Meganew! I thought you were dead! |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 00:43, November 23, 2012 (UTC) - Nope. I'm alive again. --Revolutionary, Anti-Bensonist, and TYATU Boss UncyclopedianMeganew (Chat) (Care for a peek at my work?) (SUCK IT, FROGGY!) 18:30, November 23, 2012 (UTC) Move back to Wikipedia? I think one final solution to the Uncyclopedia question is this: move back to Wikipedia, and revive BJAODN. Because Wikipedia had its 4 million articles completed. |Si Plebius Dato' (Sir) Joe ang Man on Fire CUN|IC Kill | 00:45, November 23, 2012 (UTC) - Wouldn't that be a bit like Seth Rogan trying to climb back inside his mother's vagina? I mean, I'd pay to watch that shit. But there's no way it would ever work. ~ BB ~ (T) ~ Fri, Nov 23 '12 10:59 (UTC) Better solution Redirect Uncyclopedia.wikia.com to porn.info. ~Sir Frosty (Talk to me!) 11:01, November 23, 2012 (UTC) - Better idea: delete everything, lock up wiki with sysop protection and javascript haxx, call it a life. All in favor? ~ BB ~ (T) ~ Fri, Nov 23 '12 11:06 (UTC) - Against. Then I'd have nothing fun to do. --Revolutionary, Anti-Bensonist, and TYATU Boss UncyclopedianMeganew (Chat) (Care for a peek at my work?) (SUCK IT, FROGGY!) 01:13, November 24, 2012 (UTC) - Against. 
There are still some features on here that I still haven't read. -- Simsilikesims(♀UN) Talk here. 03:30, November 24, 2012 (UTC) For. →A (Fallen Reich)18:29 24 November 2012 Here's MY solution Get a celebrity, any rich or famous or both person to create an account and add quality content to this place. They can also whore our site on the multiple public platforms they use! I don't care who it is, it could be Seth Rogan, Peter Dinklage, Justin Bieber or even Donald Trump for all I know! Only the cult of the famous person can give us back what was taken from us! --Lord Scofield Stark 13:21, November 24, 2012 (UTC) - If you can find their email addresses, send them a link. -- RomArtus*Imperator ® (Orate) 19:26, November 24, 2012 (UTC) - There is only one celebrity who will do well on Uncyclopedia. Charlie Sheen for Sysop! --Revolutionary, Anti-Bensonist, and TYATU Boss UncyclopedianMeganew (Chat) (Care for a peek at my work?) (SUCK IT, FROGGY!) 22:22, November 24, 2012 (UTC)
http://uncyclopedia.wikia.com/wiki/Forum:The_dwindling_of_Uncyclopedia?t=20121115091328
CC-MAIN-2014-23
refinedweb
5,367
72.76
4.2. PBURG example¶ Here is another method to estimate an AR model, based on arburg(). This example is inspired by an example found in Marple's book. This is very similar to the previous example, where you will find more explanation (see Yule-Walker tutorial). from pylab import * import scipy.signal from spectrum import * # Define AR filter coefficients a = [1, -2.2137, 2.9403, -2.1697, 0.9606]; [w,H] = scipy.signal.freqz(1, a, 256) Hp = plot(w/pi, 20*log10(2*abs(H)/(2.*pi)),'r') x = scipy.signal.lfilter([1], a, randn(256)) AR, rho, ref = arburg(x, 4) PSD = arma2psd(AR, rho=rho, NFFT=512) PSD = PSD[len(PSD):len(PSD)/2:-1] plot(linspace(0, 1, len(PSD)), 10*log10(abs(PSD)*2./(2.*pi))) xlabel('Normalized frequency (\times \pi rad/sample)')
http://www.thomas-cokelaer.info/software/spectrum/html/user/tutorial_pburg.html
CC-MAIN-2018-39
refinedweb
137
53.68
SIGSETOPS(3) BSD Programmer's Manual SIGSETOPS(3) sigemptyset, sigfillset, sigaddset, sigdelset, sigismember - manipulate signal sets #include <signal.h> int sigemptyset(sigset_t *set); int sigfillset(sigset_t *set); int sigaddset(sigset_t *set, int signo); int sigdelset(sigset_t *set, int signo); int sigismember(const sigset_t *set, int signo); These functions manipulate signal sets stored in a sigset_t. Either sigemptyset() or sigfillset() must be called for every object of type sigset_t before any other use of the object. sigemptyset() and sigfillset() are provided as macros, but actual functions are available if their names are undefined (with #undef name). The sigemptyset() function initializes a signal set to be empty. sigfillset() initializes a signal set to contain all signals. sigaddset() adds the specified signal signo to the signal set. sigdelset() deletes the specified signal signo from the signal set. sigismember() returns whether a specified signal signo is contained in the signal set. The sigismember() function returns 1 if the signal is a member of the set and 0 otherwise. The other functions return 0 upon success. A -1 return value indicates an error occurred and the global variable errno is set to indicate the reason. These functions may fail if one of the following occurs: [EINVAL] The specified signal signo is not a valid signal number. kill(2), sigaction(2), sigpending(2), sigprocmask(2), sigsuspend(2) These functions are defined by IEEE Std 1003.1-1990 ("POSIX").
https://www.mirbsd.org/htman/i386/man3/sigfillset.htm
CC-MAIN-2015-32
refinedweb
206
57.37
Filtfilt¶ This sample code demonstrates the use of the function scipy.signal.filtfilt, a linear filter that achieves zero phase delay by applying an IIR filter to a signal twice, once forwards and once backwards. The order of the filter is twice the original filter order. The function also computes the initial filter parameters in order to provide a more stable response (via lfilter_zi). For comparison, this script also applies the same IIR filter to the signal using scipy.signal.lfilter; for these calculations, lfilter_zi is used to choose appropriate initial conditions for the filter. Without this, these plots would have long transients near 0. As it is, they have long transients near the initial value of the signal. Code¶ from numpy import sin, cos, pi, linspace from numpy.random import randn from scipy.signal import lfilter, lfilter_zi, filtfilt, butter from matplotlib.pyplot import plot, legend, show, hold, grid, figure, savefig # Generate a noisy signal to be filtered. t = linspace(-1, 1, 201) x = (sin(2 * pi * 0.75 * t*(1-t) + 2.1) + 0.1*sin(2 * pi * 1.25 * t + 1) + 0.18*cos(2 * pi * 3.85 * t)) xn = x + randn(len(t)) * 0.08 # Create an order 3 lowpass butterworth filter. b, a = butter(3, 0.05) # Apply the filter to xn. Use lfilter_zi to choose the initial condition # of the filter. zi = lfilter_zi(b, a) z, _ = lfilter(b, a, xn, zi=zi*xn[0]) # Apply the filter again, to have a result filtered at an order # the same as filtfilt. z2, _ = lfilter(b, a, z, zi=zi*z[0]) # Use filtfilt to apply the filter. y = filtfilt(b, a, xn) # Make the plot. figure(figsize=(10,5)) hold(True) plot(t, xn, 'b', linewidth=1.75, alpha=0.75) plot(t, z, 'r--', linewidth=1.75) plot(t, z2, 'r', linewidth=1.75) plot(t, y, 'k', linewidth=1.75) legend(('noisy signal', 'lfilter, once', 'lfilter, twice', 'filtfilt'), loc='best') hold(False) grid(True) show() #savefig('plot.png', dpi=65) Section author: Unknown[1], Unknown[48], Unknown[49], Unknown[50], Unknown[51], WarrenWeckesser
http://scipy-cookbook.readthedocs.io/items/FiltFilt.html
CC-MAIN-2017-39
refinedweb
350
70.6
Xerces-C++ is a very robust XML parser that offers validation, plus SAX and DOM APIs. XML validation is well supported for a Document Type Definition (DTD), and essentially complete open-standards support for W3C XML Schema was added in December 2001. Xerces-C++: a capsule bio Xerces-C++ originated as the XML4C project at IBM. XML4C was a companion project to XML4J, which likewise was the origins of Xerces-J -- the Java implementation. IBM released the source for both projects to the Apache Software Foundation, where they were renamed Xerces-C++ and Xerces-J, respectively. These two are core projects of the Apache XML group. (If you see "Xerces-C" instead of "Xerces-C++", it's the same thing; the project was written in C++ from the start.) The XML4C project continues at IBM, based on Xerces-C++. XML4C's distinguishing merit relative to Xerces-C++ is better out-of-the-box support for a huge number of international character encodings in the version that I explored (see Resources). The two principal means of specifying the structure of an XML document are the DTD and W3C XML Schema, with DTD being the much older of the two. XML Schema is basically a DTD expressed as XML. Xerces-C++ offers great out-of-the-box validation capabilities for ensuring that an XML document conforms to a DTD. Xerces-C++ is made available under the terms of the Apache Software License (see Resources), which happens to be one of the more readable open-source licenses around. It compares very well to the BSD license. Essentially, you can use Xerces-C++ in your (or your company's) software royalty free at the mere expense of disclosing to your customers and users that your software includes Apache code, and including the proper copyright notice. Check the Web page for the exact text of the license. SAX, as you may know, is an event-oriented programming API for parsing XML documents. 
A parsing engine consumes XML sequential data and makes callbacks into the application as it discovers the structure of the incoming XML data. These callbacks are referred to as event handlers. SAX is actually two APIs: SAX 1.0 is the original, and SAX 2.0 is the current revised specification. The two are similar, but different enough that most applications based on SAX 1.0 break when they are moved to the newer specification. The SAX API specification was moved to SourceForge as a project of its own (see Resources). The SAX examples I give later in this article make use of SAX 2.0. DOM: the Document Object Model Unlike SAX, the DOM API permits editing and saving an XML document back to a file or stream. It also permits programmatically constructing a new XML document from scratch. The reason for this is that DOM provides an in-memory model for the document. You can traverse the document tree, prune nodes, or graft on new ones. DOM is a family of W3C technical recommendations affectionately called tech wrecks. DOM has three levels, with Levels 1 and 2 at full technical recommendation status and Level 3 at working draft status. The DOM Level 1 Core defines most of what is needed for basic XML functionality: the ability to construct a representation of an XML document. The DOMString type is explicitly specified to consist of wide UTF-16 characters. Level 1 goes on to define the interfaces for programmatically interacting with the various pieces of a DOM tree. Serialization of XML is intentionally omitted from Level 1. Just beyond the Level 1 core is the DOM Level 1 HTML definition. This area attempts to resolve DOM Level 1 core with the earlier Dynamic HTML object model (loosely referred to as Level 0). The DOM Level 2 adds namespaces, events, and iterators, plus view and stylesheet support. 
You need DOM Level 2 for some applications: For instance, assigning an XML Schema to a namespace is essential for applications like RDF, where XML tags come from different schemas and the chance for a name collision is high. Level 2 adds a pair of createDocument methods to the DOMImplementation interface. One of the examples will show why this is important. Just when you thought you were safe from the callbacks and event handlers found in SAX, here they are again in the Event interface. Unlike the SAX events, which are for parsing, DOM events can reflect user interactions with a document as well as changes to a live document. DOM events that reflect the change in the structure of a document are called mutation events. TreeWalkers and NodeIterators enhance DOM tree traversal. Programs can inspect style information through the StyleSheet interface. Finally, view support allows an XML application to examine a document in both original and stylesheet rendered forms. These before and after views are called the document and abstract views. DOM Level 3 Core adds the getInterface method to the DOMImplementation interface. In a Level 3 document, you can specify the document's character encoding or set some of its basic XML declarations like version and standalone. Level 2 doesn't permit moving DOM nodes from one document to another. Level 3 drops this limitation. Level 3 adds user data -- extra application data that can be optionally attached to any node. Level 3 has a number of other advanced features, but the W3C committee is still working on the Level 3 drafts. Check Resources for a link to read up on the committee's progress. You can download Xerces-C++ as a zipped tarball or a precompiled binary (see Resources). Script users accessing the library through Perl, Python, VBScript, or JavaScript can download the binary for their platform to get a jumpstart on installation. 
C++ programmers will most likely prefer to go with building their own binaries from the source tarball. The building instructions on the Apache XML group Web site are well written; a little farther on in this article I discuss a couple of subtle issues that I have discovered -- a pthreads linking problem and a fix for potential memory leaks on Windows platforms. Part 2 will include a tip for specifying a DOCTYPE in the SVG example. If you want to build the library as you read this, look at the Xerces build documentation found on the Apache site (see Resources) first and then come back here to read about linking Xerces to your own applications. You can download the tarball and work offline (with a laptop, for example). The full HTML documentation is included in the tarball, so you don't need to keep referring back to the Web site for the instructions. The steps for installing the software on Visual Studio dot-NET or Win64 are nearly identical to these steps for building on Win32. - Unzip and untar the Xerces source tarball to a working directory. Xerces-C++ has its own directory structure, so you should make sure you preserve relative path names during this step. - Using Windows Explorer or your favorite file manager, drill down to the \xerces-c-src_2_3_0\Projects\Win32\VC6\xerces-all\folder and click the xerces-all.dswworkspace file to launch Microsoft Developer Studio. Note: These instructions assume that you're building Win32 applications in Visual Studio 6. For Visual Studio dot-NET or Win64 applications, repeat steps 1 and 2 in the Win64 or VC7 variants of the directory. - From Developer Studio, make XercesLib the current active project and press F7 to build the DLL. On last year's hardware this takes a minute or two. - Add a path to the Xerces header files into your project. (Applications wanting to link against Xerces-C++ need to either include the XercesLib DSP project file in their workspace or add the LIB file in their project file to permit linking.) 
Select Project>Settings to bring up the project settings dialog box. Select All Configurations from the Settings combo box, click the C++ tab, select the Preprocessor category, and add the Xerces include path (something like \xerces-c-sr2_2_0\src) to the Additional include directories text box. - If you have added the XercesLib DSP to your workspace, remember to mark your own project as dependent upon the XercesLib project; otherwise, you will be greeted with link errors. - Create a stub C++ source file that does nothing but contain a line that reads #include <xercesc/sax/HandlerBase.hpp>. If you can compile this one-line C++ file, your include paths are probably right. Save your workspace after doing that. To run and debug your application, place a copy of the Xerces DLL in the working directory. Build the Xerces-C++ shared library by following the thorough instructions in the doc/html folder. The commands below illustrate how to build the Xerces-C++ library from the zipped source. This assumes that the xerces-c-src_2_3_0.tar.gz file is present in a directory like /home/user. Whatever directory you choose should match the XERCESCROOT variable; the configure script requires it. For the rest of this example, I'll assume the source tree is under the /home/user/xerces-c-src_2_3_0 directory. If all goes well, the shared library should appear in the lib folder. If you have problems, review the build instructions in the /doc/html folder. At this point, you can either copy the library (and symlinks) to /usr/lib or define the appropriate environment variable so that the loader can locate your newly-compiled library. The easy way to test out your new library is to build and run one of the samples: I tripped over a small problem building one of the samples on a fresh installation of Slackware Linux 9.0. The linker complained of some missing pthreads-related exports. I edited the Makefile.in file to include a reference to -lpthread and ran configure again. 
The second time around, typing make all worked. Once you know the library works, you can start your own Xerces-C++ project. Use the -I compiler option to help the compiler locate the Xerces header files. Use the -L and -l linker options to help the linker locate the Xerces-C++ library. Listing 1 gives you a working minimal makefile to get started. Listing 1. A minimal makefile The command to kick off Listing 1 is make or gmake. You can change the APP variable to whatever source file suits you. The examples in this article use similar makefiles. Xerces C++ added C++ namespace support (not to be confused with XML namespaces) as of Version 2.2.0. If you have code that works on 2.1.0 and you'd like to take advantage of the newer version, add the following three lines to your code, just after including the Xerces C++ headers. Listing 2. Xerces C++ namespace support You could, of course, just prefix all of your Xerces-C++ objects with the XERCES_CPP_NAMESPACE:: namespace. To keep things interesting as I explain the basics of using Xerces-C++, I'm going to create a simple bar graph using XML as the data format. To dodge the cross-platform bullet of platform GUI specifics, I'm doing the bar graph using ASCII art. This is, after all, an article on XML and not GTK, OpenGL, or Direct-X. If you are interested in using an XML representation of graphical data, look at SVG and SMIL (see Resources). The DOM example that I describe in Part 2 outputs SVG. I'll start with the simple text-only app. Listing 3 is the DTD for the data. Next I'll construct a program to load the data, determine what scale to use, and then actually plot the data to the screen. Listing 3. DTD for sample application data Listing 4 shows a sampling of what the data might look like. Listing 4. Sample input XML data Listing 5 is a baseline SAX implementation. This isn't a complete program because it is missing the handler implementation, but it does show what exactly is needed to put the framework into place. 
The calls to XMLPlatformUtils::Initialize() and XMLPlatformUtils::Terminate() are very important. The library guards against applications that fail to initialize the library properly by throwing an exception. To make the program in Listing 5 a complete application, you need to add the event-handler class in Listing 6. SAX2 comes with a default event-handler class called DefaultHandler, defined in the C++ header file of the same name. The default handler does nothing -- it is just a stub implementation -- but it is complete, and so I'm using it here as a base class for the graphing event-handler class. This file in Listing 7 is the actual implementation of the event-handler class in Listing 6. While the rest of the program is pretty much just boilerplate code to get the SAX2 parser running, the part in Listing 7 defines the application's personality. Xerces-C++ uses XMLCh as a typedef'd character representation. On some platforms it is compatible with the C type wchar_t, which is usually two -- but sometimes four -- bytes wide. Because of that possibility, the docs discourage the practice of interchanging wchar_t and XMLCh. You can get away with it on some platforms, but it will break on others. Xerces-C++ uses this larger character representation to exchange text as UTF-16 as opposed to UTF-8 or ISO-8859. To debug this program, I'm using the XMLString::transcode function to convert the wide character strings for display on a console, as shown in Figure 1. Figure 1. Screen shot of SAX parser output I discovered a problem using the Xerces internal string class on Microsoft Windows. The functions in XMLString.hpp require the caller of replicate and other similar functions to release the memory returned. The problem comes from linking your application against the Xerces-C++ library as a DLL. The strings are allocated from the DLL's local heap. If both your application and the XercesLib DLL use the exact same C runtime (CRT) library DLL, then all is well. 
If, however, your program uses the single-threaded CRT and XercesLib uses the multithreaded CRT, DLL problems happen. When your program attempts to release the string memory, the C runtime notices that the memory did not come from your application's local heap. For debug builds it throws an exception, but for release builds it may silently leak memory. The sample programs found in earlier versions of Xerces (like 1_5_1) avoided this by simply not releasing the memory. My fix for this was to add a pair of static discard functions to the XMLString class. Because the string memory is released by code executing inside the DLL, the correct local heap is used, and no debug assertion results. I was pleased to see that Xerces developer Tinny Ng added this to the XMLString class and went a step further to null the string pointer (see Resources). The other nice feature of this is that programmers don't need to worry about how the implementation of XMLString allocates memory. Instead of guessing whether they should be using delete[] or free, they can just call XMLString::release. You can, of course, just make sure the CRT that your application expects is the same as the CRT used by the XercesLib DLL. Here in Part 1, you've seen how to link the Xerces-C++ XML library into applications written in Linux and Windows, and I've demonstrated parsing with the SAX API by creating a bar graph in ASCII art. In Part 2, I'll show you how to load, manipulate, or synthesize a DOM document, and create the same bar graph using Scalable Vector Graphics (SVG). Information about download methods - Download the source code and figures for this article. - In "Make the most of Xerces-C++, Part 2" by the author, learn to load, manipulate, or synthesize a Document Object Model (DOM) document, and how to recreate the bar graph in Part 1 using Scalable Vector Graphics (SVG) (developerWorks, August 2003). 
- Find out more about IBM's XML4C++ parser project, which is based on Xerces-C++, and available on alphaWorks. - Download the Xerces-C++ XML parser library from the Apache site. While you're). - Read "Serialize XML data" by IBMer and Xerces developer Tinny Ng .
http://www.ibm.com/developerworks/xml/library/x-xercc/
crawl-003
refinedweb
2,719
62.78
To understand Euclidean Distance with an example, consider there is a group of four people and corresponding to each of them we have some data, in our case we have some ratings provided by them for some fruits, 5 means best and 0 means worst. Now, if we want to compare John with Mathew, we can simply calculate the Euclidean Distance between the ratings they have provided corresponding to the same item. Here Mango, Strawberry and Pineapple are the fruits which are common for both of them. Consider the rating corresponding to each fruit to be the distance elements. Lower the distance, higher is the similarity. Euclidean Distance Theory This formula helps in calculating the Euclidean Distance, where ‘n’ is the total number of elements, ‘x’ and ‘y’ are the two distance elements. In our example, total elements ‘n’ = 3 Value of ‘x’ corresponds to the ratings of fruits of John and value of ‘y’ corresponds to the ratings of fruits of Mathew. Euclidean distance for both of them is = 1.2747548783981961. Now, we need to normalize it, for that we can do the following Result = (1 / (1 + Euclidean Distance)) For our example it comes out to be 0.439607805437114. ‘Result’ value always lies between 0 and 1, the value 1 corresponds to highest similarity. import pandas as pd from math import sqrt class testClass(): def create_csv(self): df = pd.DataFrame.from_dict(choices, orient='index') df.to_csv('fruits.csv') #Finding Similarity among people using Euclidean Distance Formula def choice_distance(self, cho, per1, per2): sample_data = [items for items in cho[per1] if items in cho[per2]] #If both persons do not have any similarity or similar items return 0 if len(sample_data)==0: return 0 #Calculating Euclidean Distance final_sum = sum([pow(cho[per1][items]-cho[per2][items],2) for items in cho[per1] if items in cho[per2]]) return(1/(1+sqrt(final_sum))) #Value being returned above always lies between 0 and 1 #Value 1 is added to sqrt to prevent 1/0 division and to normalize result. 
def main(): ob = testClass() ob.create_csv() print(ob.choice_distance(choices, ‘John’, ‘Nick’)) print(ob.choice_distance(choices, ‘John’, ‘Martha’)) print(ob.choice_distance(choices, ‘John’, ‘John’)) if __name__ == “__main__”: main() [/sourcecode] Output There are many other mathematical models for calculating this type of similarity. In next article we will learn about Pearson Correlation Score, which is a bit complex way for finding out similarity among people. Stay tuned and keep learning!! For more updates and news related to this blog as well as to data science, machine learning and data visualization, please follow our facebook page by clicking this link. 2 thoughts on “Euclidean Distance for finding Similarity”
https://mlforanalytics.com/2018/04/01/euclidean-distance-for-finding-similarity/
CC-MAIN-2021-21
refinedweb
422
52.9
(For more resources on Python, see here.) There are numerous functions in Python that perform a task or calculate a result on certain objects without being methods on the class. Their purpose is to abstract common calculations that apply to many types of classes. This is applied duck typing; these functions accept objects with certain attributes or methods that satisfy a given interface, and are able to perform generic tasks on the object. Len The simplest example is the len() function. This function counts the number of items in some kind of container object such as a dictionary or list. For example: >>> len([1,2,3,4]) 4 Why don't these objects have a length property instead of having to call a function on them? Technically, they do. Most objects that len() will apply to have a method called __len__() that returns the same value. So len(myobj) seems to call myobj.__len__(). Why should we use the function instead of the method? Obviously the method is a special method with double-underscores suggesting that we shouldn't call it directly. There must be an explanation for this. The Python developers don't make such design decisions lightly. The main reason is efficiency. When we call __len__ on an object, the object has to look the method up in its namespace, and, if the special __getattribute__ method (which is called every time an attribute or method on an object is accessed) is defined on that object, it has to be called as well. Further, __getattribute__ for that particular method may have been written to do something nasty like refusing to give us access to special methods such as __len__! The len function doesn't encounter any of this. It actually calls the __len__ function on the underlying class, so len(myobj) maps to MyObj.__len__(myobj). Another reason is maintainability. 
In the future, the Python developers may want to change len() so that it can calculate the length of objects that don't have a __len__, for example by counting the number of items returned in an iterator. They'll only have to change one function instead of countless __len__ methods across the board. Reversed The reversed() function takes any sequence as input, and returns a copy of that sequence in reverse order. It is normally used in for loops when we want to loop over items from back to front. Similar to len, reversed calls the __reversed__() function on the class for the parameter. If that method does not exist, reversed builds the reversed sequence itself using calls to __len__ and __getitem__. We only need to override __reversed__ if we want to somehow customize or optimize the process: normal_list=[1,2,3,4,5] class CustomSequence(): def __len__(self): return 5 def __getitem__(self, index): return "x{0}".format(index) class FunkyBackwards(CustomSequence): def __reversed__(self): return "BACKWARDS!" for seq in normal_list, CustomSequence(), FunkyBackwards(): print("\n{}: ".format(seq.__class__.__name__), end="") for item in reversed(seq): print(item, end=", ") The for loops at the end print the reversed versions of a normal list, and instances of the two custom sequences. The output shows that reversed works on all three of them, but has very different results when we define __reversed__ ourselves: list: 5, 4, 3, 2, 1, CustomSequence: x4, x3, x2, x1, x0, FunkyBackwards: B, A, C, K, W, A, R, D, S, !, Note: the above two classes aren't very good sequences, as they don't define a proper version of __iter__ so a forward for loop over them will never end. Enumerate Sometimes when we're looping over an iterable object in a for loop, we want access to the index (the current position in the list) of the current item being processed. 
The for loop doesn't provide us with indexes, but the enumerate function gives us something better: it creates a list of tuples, where the first object in each tuple is the index and the second is the original item. This is useful if we want to use index numbers directly. Consider some simple code that outputs all the lines in a file with line numbers: import sys filename = sys.argv[1] with open(filename) as file: for index, line in enumerate(file): print("{0}: {1}".format(index+1, line), end='') Running this code on itself as the input file shows how it works: 1: import sys 2: filename = sys.argv[1] 3: 4: with open(filename) as file: 5: for index, line in enumerate(file): 6: print("{0}: {1}".format(index+1, line), end='') The enumerate function returns a list of tuples, our for loop splits each tuple into two values, and the print statement formats them together. It adds one to the index for each line number, since enumerate, like all sequences is zero based. Zip The zip function is one of the least object-oriented functions in Python's collection. It takes two or more sequences and creates a new sequence of tuples. Each tuple contains one element from each list. This is easily explained by an example; let's look at parsing a text file. Text data is often stored in tab-delimited format, with a "header" row as the first line in the file, and each line below it describing data for a unique record. A simple contact list in tab-delimited format might look like this: first last email john smith jsmith@example.com jane doan janed@example.com david neilson dn@example.com A simple parser for this file can use zip to create lists of tuples that map headers to values. These lists can be used to create a dictionary, a much easier object to work with in Python than a file! 
import sys filename = sys.argv[1] contacts = [] with open(filename) as file: header = file.readline().strip().split('\t') for line in file: line = line.strip().split('\t') contact_map = zip(header, line) contacts.append(dict(contact_map)) for contact in contacts: print("email: {email} -- {last}, {first}".format(**contact)) What's actually happening here? First we open the file, whose name is provided on the command line, and read the first line. We strip the trailing newline, and split what's left into a list of three elements. We pass '\t' into the split method to indicate that the string should be split at tab characters. The resulting header list looks like ["first", "last", "email"]. Next, we loop over the remaining lines in the file (after the header). We split each line into three elements. Then, we use zip to create a sequence of tuples for each line. The first sequence would look like [("first", "john"), ("last", "smith"), ("email", "jsmith@example.com")]. Pay attention to what zip is doing. The first list contains headers; the second contains values. The zip function created a tuple of header/value pairs for each matchup. The dict constructor takes the list of tuples, and maps the first element to a key and the second to a value to create a dictionary. The result is added to a list. At this point, we are free to use dictionaries to do all sorts of contact-related activities. For testing, we simply loop over the contacts and output them in a different format. The format line, as usual, takes variable arguments and keyword arguments. The use of **contact automatically converts the dictionary to a bunch of keyword arguments (we'll understand this syntax before the end of the chapter). Here's the output: If we provide zip with lists of different lengths, it will stop at the end of the shortest list. There aren't many useful applications of this feature, but zip will not raise an exception if that is the case. 
We can always check the list lengths and add empty values to the shorter list, if necessary. The zip function is actually the inverse of itself. It can take multiple sequences and combine them into a single sequence of tuples. Because tuples are also sequences, we can "unzip" a zipped list of tuples by zipping it again. Huh? Have a look at this example: >>> list_one = ['a', 'b', 'c'] >>> list_two = [1, 2, 3] >>> zipped = zip(list_one, list_two) >>> zipped = list(zipped) >>> zipped [('a', 1), ('b', 2), ('c', 3)] >>> unzipped = zip(*zipped) >>> list(unzipped) [('a', 'b', 'c'), (1, 2, 3)] First we zip the two lists and convert the result into a list of tuples. We can then use parameter unpacking to pass these individual sequences as arguments to the zip function. zip matches the first value in each tuple into one sequence and the second value into a second sequence; the result is the same two sequences we started with! Other functions Another key function is sorted(), which takes an iterable as input, and returns a list of the items in sorted order. It is very similar to the sort() method on lists, the difference being that it works on all iterables, not just lists. Like list.sort, sorted accepts a key argument that allows us to provide a function to return a sort value for each input. It can also accept a reverse argument. Three more functions that operate on sequences are min, max, and sum. These each take a sequence as input, and return the minimum or maximum value, or the sum of all values in the sequence. Naturally, sum only works if all values in the sequence are numbers. The max and min functions use the same kind of comparison mechanism as sorted and list.sort, and allow us to define a similar key function. 
For example, the following code uses enumerate, max, and min to return the indices of the values in a list with the maximum and minimum value: def min_max_indexes(seq): minimum = min(enumerate(seq), key=lambda s: s[1]) maximum = max(enumerate(seq), key=lambda s: s[1]) return minimum[0], maximum[0] The enumerate call converts the sequence into (index, item) tuples. The lambda function passed in as a key tells the function to search the second item in each tuple (the original item). The minimum and maximum variables are then set to the appropriate tuples returned by enumerate. The return statement takes the first value (the index from enumerate) of each tuple and returns the pair. The following interactive session shows how the returned values are, indeed, the indices of the minimum and maximum values: >>> alist = [5,0,1,4,6,3] >>> min_max_indexes(alist) (1, 4) >>> alist[1], alist[4] (0, 6) We've only touched on a few of the more important Python built-in functions. There are numerous others in the standard library, including: - all and any, which accept an iterable and returns True if all, or any, of the items evaluate to true (that is a non-empty string or list, a non-zero number, an object that is not None, or the literal True). - eval, exec, and compile, which execute string as code inside the interpreter. - hasattr, getattr, setattr, and delattr, which allow attributes on an object to be manipulated as string names. - And many more! See the interpreter help documentation for each of the functions listed in dir(__builtins__). Summary In this article we took a look at many useful built-in functions. Further resources on this subject: - Python Graphics: Animation Principles [Article] - Animating Graphic Objects using Python [Article] - Python 3: When to Use Object-oriented Programming [Article] - Objects in Python [Article]
https://www.packtpub.com/books/content/python-built-functions
CC-MAIN-2015-18
refinedweb
1,902
62.38
09 May 2011 22:40 [Source: ICIS news] TORONTO (ICIS)--A new survey of consumer attitudes toward “bio-based” products in the ?xml:namespace> Speaking on Monday at the World Congress of Industrial Biotechnology and Bioprocessing, de Ruiter said he was nonetheless pleased by the results, which showed that a solid minority of consumers in both countries are familiar with the concept of bio-based products, as well as the more familiar “green” label. In both the US and Canada, a majority of respondents reported recently purchasing green products, although the proportion was higher in Canada, at 71%, than in the US, at 53%, a difference attributed to a difference in the purchase of green household cleaning products. The study found some public recognition of the term “bio-based” in regard to household products, as indicated by 40% of American consumers and 35% of Canadians. However, there was only limited understanding of what types of product fit the category, fuel ethanol and cleaning products being the most common. “We thought it would be a good idea to have a reality check, and see what the consumers are actually thinking of this,” said de Ruiter of the decision to do the survey. “I must actually say, I was very pleased with the results, because it shows there is clearly recognition that bio-based products in household goods are environmentally sustainable. “We also learned that there is still quite a ways to go, because it is still difficult for the consumer to specify what bio-based exactly means,” he added. “It's also clear that there is still some skepticism whether these products are actually good for the environment.” De Ruiter said that the study, the first of its kind, would serve as a baseline against which to trace evolving consumer attitudes in future surveys which would take place every two or three years. He also said the survey would also be
http://www.icis.com/Articles/2011/05/09/9458318/genencor-survey-tests-public-awareness-of-bio-based.html
CC-MAIN-2014-15
refinedweb
320
51.21
Rewriting number field related Magma code in Sage I have the following Magma code, which I want to rewrite in Sage: G := Sz(8); T := CharacterTable(G); M := GModule(T[2]:SparseCyclo := false); N := AbsoluteModuleOverMinimalField(M); Currently, I have something like this: from sage.all import * proof.arithmetic(False) G = SuzukiGroup(8) T = gap(G).CharacterTable() print(gap.eval("Display(%s)"%T.name())) Though, I do not know how to rewrite the rest in Sage. Sz in Magma is the Suzuki group. The result of M here is GModule M of dimension 14 over Cyclotomic Field of order 52 and degree 24. Also, the result of T[2] in Magma is T[2] = ( 14, -2, 2*zeta(4)_4, -2*zeta(4)_4, -1, 0, 0, 0, 1, 1, 1 ). AbsoluteModuleOverMinimalField is defined here. Unfortunately, it is hard to extract the needed information from the documentation of GModule starting from here: GModule The following extracts the character table: with results: and T[1] has for instance for its last component What is the question in this context?
https://ask.sagemath.org/question/41224/rewriting-number-field-related-magma-code-in-sage/
CC-MAIN-2019-18
refinedweb
174
55.03
This topic describes how to use MaxCompute to analyze IP sources. The procedure includes downloading and uploading data from an IP address library, writing a user-defined function (UDF), and writing a SQL statement. Background HTTP requests are not directly allowed in MaxCompute. However, you can query IP addresses in MaxCompute using one of the following methods: - Run a SQL statement and then initiate an HTTP request. This method is inefficient. The request will be rejected if the query frequency is lower than 10 QPS. - Download the IP address library to the local server. This method is inefficient and will affect the data analysis in data warehouses. - Maintain the IP address library regularly and upload it to MaxCompute. This method is relatively effective. However, you need to maintain the IP address library regularly. The following further describes the third method. Download an IP address library - You need to obtain data from an IP address library. This section provides a demo of an incomplete UTF-8 IP address library. - Download the UTF-8 IP address library and check the data format, as shown in the following figure. The first four strings of data are the starting and ending IP addresses, among which the first two are decimal integers and the second two are expressed in dot-decimal notation. The decimal integer format is used to check whether an IP address belongs to the target network segment. Upload data from the IP address library - Create a table data definition language (DDL) on the MaxCompute client, or create a table on the GUI in DataWorks. DROP TABLE IF EXISTS ipresource ; CREATE TABLE IF NOT EXISTS ipresource ( start_ip BIGINT ,end_ip BIGINT ,start_ip_arg string ,end_ip_arg string ,country STRING ,area STRING ,city STRING ,county STRING ,isp STRING ); - Run the Tunnel commands to upload the ipdata.txt.utf8 file, which is stored on the D drive. 
odps@ workshop_demo>tunnel upload D:/ipdata.txt.utf8 ipresource; You can use the select count(*) from ipresource;SQL statement to view the uploaded data. Generally, the quantity of data increases in the library due to regular updates and maintenance. - Use the select * from ipresource limit 10;SQL statement to view the first 10 pieces of data in the ipresource table, as shown in the following figure. Write a UDF - Choose Resource and choose . In the displayed dialog box, enter the name of the Python resource, select Upload to ODPS and click OK, as shown in the following figure.. Right-click - Write code for the Python resource. The following is an example: from odps.udf import annotate @annotate("string->bigint") class ipint(object): def evaluate(self, ip): try: return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.'))) except: return 0Click Submit and Unlock. - Choose Function and select Create Function.. Right-clickSet the function class name to ipint.ipint, and the folder to the resource name, and click Submit and Unlock. - Create an ODPS SQL node and run the SQL statement to check whether the ipint function works as expected. The following is an example. After uploading the resource, use the client to register the function.After uploading the resource, use the client to register the function. odps@ MaxCompute_DOC>add py D:/ipint.py; OK: Resource 'ipint.py' have been created. odps@ MaxCompute_DOC>create function ipint as ipint.ipint using ipint.py; Success: Function 'ipint' have been created. select ipint('1.2.24.2');on the client to test the function. - Create a package named ipint. odps@ MaxCompute_DOC>create package ipint; OK - Add the UDF to the package. odps@ MaxCompute_DOC>add function ipint to package ipint; OK - Allow a bigdata_DOC project to install the package. odps@ MaxCompute_DOC> allow project bigdata_DOC to install package ipint; OK - Switch to a bigdata_DOC project that needs to use the UDF and install the package. 
odps@ MaxCompute_DOC>use bigdata_DOC; odps@ bigdata_DOC>install package MaxCompute_DOC.ipint; OK - Then, the UDF can be used. If a user (such as Bob) of the bigdata_DOC project wants to access the resource, the administrator can grant the access permission to the user by using the ACL. odps@ bigdata_DOC>grant Read on package MaxCompute_DOC.ipint to user aliyun$bob@aliyun.com; --Use the ACL to grant the package access permission to Bob. Use the IP address library in SQL select * from ipresource WHERE ipint('1.2.24.2') >= start_ip AND ipint('1.2.24.2') <= end_ip To ensure the data accuracy, you can regularly obtain data from the Taobao IP address library to maintain the ipresource table.
https://www.alibabacloud.com/help/doc-detail/97075.htm
CC-MAIN-2019-26
refinedweb
746
59.19
Covariance and contravariance in generic types February 10th, 2020 Static typing is awesome. It helps to detect bugs, acts as in-code documentation and makes development more enjoyable. Recently I've started to use Python's typing module to add static typing to all of my Python projects. Python's typing system may not be as as powerful as one might hope, but I think once you go typed, you don't go back. It is, however, easy to run into non-intuitive errors when defining types for generic types such as lists and dictionaries. Assume, for example, that a DogContainer takes a List of Dogs in the constructor: from typing import List class Dog: ... class DogContainer: def __init__(self, dogs: List[Dog]): self.dogs = dogs Now let's add some corgis to the container: class Corgi(Dog): ... corgis: List[Corgi] = [Corgi(), Corgi()] container = DogContainer(dogs=corgis) # ERROR! Type-checker raises the following error: Argument of type 'List[Corgi]' cannot be assigned to parameter 'dogs' of type 'List[Dog]': 'Corgi' is incompatible with 'Dog'. Why doesn't this work? Surely one can substitute a list of corgis for a list of dogs? Covariance Let's step back and ponder why we think we can substitute List[Corgi] for List[Dog]. Because Corgi is a subtype of Dog (we write Corgi <: Dog, meaning we can use an instance of Corgi wherever an instance of Dog is expected), we think it should follow that List[Corgi] <: List[Dog]. This property is called covariance. Generic class C[T] is covariant in type variable T if the following holds: A <: B ⇒ C[A] <: C[B] We saw above that List[Corgi] is not a subtype of List[Dog]. Therefore, List is not covariant in its type variable. Why is List not covariant? The problem is that one can add new objects to List, i.e., it's mutable. Or to put it another way, List is not covariant because it acts as a sink of its type variable. Think of the following code: class StBernard: ... 
def do_stuff(dogs: List[Dog]) -> None: """Function that secretly adds Beethoven to the input list. """ dogs.append(StBernard(name="Beethoven")) corgis: List[Corgi] = ... do_stuff(corgis) last_corgi = corgis.pop() # BOOM, type-safety broken! You think you have a corgi but you have Beethoven The problem here is the append method. What if we removed any methods from List that make it mutable? That would essentially give us a Sequence, an immutable list. Sequence is different from List in that it's a pure source of data. Intuitively, it is clear that any piece of code expecting a source of dogs would be happy to receive a source of corgis instead. Therefore, we can state that A <: B ⇒ Sequence[A] <: Sequence[B] Therefore, Sequence[T] is covariant in T. Another example of an immutable container is Mapping, a read-only version of Dict. Pure sources are covariant Let us generalize the previous discussion to the following rule of thumb: If a generic class C[T] is a pure source of instances of type T, it is covariant in T. Why are sources covariant? Assume you have A <: B. Now imagine you have any code that depends on Source[B]. Can you substitute Source[A]? Yes, because Source[A] produces instances of type A that are valid instances of B. For example, think of a class Breeder[T] that produces dogs of type T. Covariance means that Breeder[Corgi] <: Breeder[Dog]. In other words, any code expecting a Breeder[Dog] would be happy to receive a Breeder[Corgi]. This makes sense intuitively. The following piece of code shows how we could define a Breeder class and mark it covariant in its type variable with covariant=True: import abc from typing import Generic, TypeVar DogType_co = TypeVar('DogType_co', bound=Dog, covariant=True) class Breeder(abc.ABC, Generic[DogType_co]): @abc.abstractmethod def get(self) -> DogType_co: ... 
class CorgiBreeder(Breeder[Corgi]): def get(self) -> Corgi: print("Breeding a corgi") return Corgi() breeder: Breeder[Dog] = CorgiBreeder() # cast allowed because of covariance Contravariance If you look at the code from above where type-safety was broken, you may notice that another way to achieve type-safety would be to prevent reading from corgis. In other words, List should be write-only or a sink. As a concrete example of a sink, consider the Photographer class where the generic type must be a Dog: from typing import Generic, TypeVar DogType = TypeVar('DogType', bound=Dog) class Photographer(Generic[DogType]): def photograph(self, dog: DogType): print("Photographing dog") class CorgiPhotographer(Photographer[Corgi]): def photograph(self, corgi: Corgi): self.feed_sausage_to(corgi) super().photograph(corgi) def feed_sausage_to(self, corgi: Corgi): print("Feeding sausage to corgi") class DogPhotographer(Photographer[Dog]): def photograph(self, dog: Dog): super().photograph(dog) Can we assume Photographer[Corgi] <: Photographer[Dog]? This would mean that anywhere we need a dog photographer, we can use a corgi photographer. This is not the case: a corgi photographer cannot photograph a St. Bernard. However, the inverse does hold: If one needs a photographer of corgis, a photographer of generic dogs definitely can handle the job. Formally, Photographer[Dog] <: Photographer[Corgi]. 
Photographer is an example of a class that is contravariant in its type variable. The formal definition is: Generic class C[T] is contravariant in type variable T if the following holds: A <: B ⇒ C[A] :> C[B] The way to mark the class to be contravariant in its type variable is to set contravariant=True: DogType_contra = TypeVar('DogType_contra', bound=Dog, contravariant=True) Thanks to contravariance, we can perform casts as follows: dog_photographer: Photographer[Corgi] = DogPhotographer() # Photographer[Dog] <: Photographer[Corgi] Pure sinks are contravariant Photographer is contravariant in its type variable because it's a pure sink for dogs. It does not produce new dogs, but only consumes them (eww). We arrive at the following rule of thumb: If a generic class C[T] is a pure sink of instances of type T, it is contravariant in T. Why are sinks contravariant? Assume you have A <: B and imagine you have any code that depends on Sink[A]. Can you substitute a Sink[B]? Yes, because Sink[B] is prepared to consume any B and, because A is a subtype of B, it can also consume instances of type A. Invariant types Generic types that are neither covariant nor contravariant are invariant in their type variables. We saw above that List is an example of an invariant generic type. As another example of an invariant generic type, consider a RescueShelter[D] class. An instance of RescueShelter[D] accepts new poorly treated dogs of type D and delivers them to their new homes. RescueShelter is both a source and a sink of dogs, so it's neither contravariant nor covariant. One cannot substitute RescueShelter[Corgi] for RescueShelter[Dog], because RescueShelter[Corgi] does not accept dogs other than corgis. Similarly, one cannot substitute RescueShelter[Dog] for RescueShelter[Corgi], because a person looking for a corgi from a rescue shelter would not be happy to get a St Bernard dog. This example illustrates the danger of relying too much on intuition. 
Intuitively, it might seem clear that a rescue shelter of corgis "is" a rescue shelter of dogs, and one might substitute the former for the latter. We saw above that this is only true if the rescue shelter only acts as a source of dogs. Conclusion In conclusion, it's generally easier to work with generic types that are either covariant or contravariant. Next time you define a function or data structure depending on List or Dict, think carefully if you need to mutate the object. If not, you should probably use Sequence or Mapping instead. Similarly, if you do not need to read from your generic type, you may want to define it as write-only and mark the type variable as contravariant. Thanks for reading! If there's anything here that seems wrong or inaccurate, please leave a comment. Remember to enjoy corgis responsibly.
https://kimmosaaskilahti.fi/blog/2020-02-10-covariance-and-contravariance-in-generic-types/
CC-MAIN-2022-40
refinedweb
1,328
55.95
PTHREAD_EXIT(3) Linux Programmer's Manual PTHREAD_EXIT(3) pthread_exit - terminate calling thread #include <pthread.h> void pthread_exit(void *retval); Compile and link with -pthread. This function does not return to the caller. This function always succeeds. │ pthread_exit() │ Thread safety │ MT-Safe │ └───────────────┴───────────────┴─────────┘ POSIX.1-2001, POSIX.1-2008. pthread_create(3), pthread_join(3), pthreads(7) This page is part of release 4.13 of the Linux man-pages project. A description of the project, information about reporting bugs, and the latest version of this page, can be found at https://www.kernel.org/doc/man-pages/. Linux 2017-09-15 PTHREAD_EXIT(3) Pages that refer to this page: prctl(2), pthread_cancel(3), pthread_cleanup_push(3), pthread_create(3), pthread_detach(3), pthread_join(3), pthread_tryjoin_np(3), proc(5), pthreads(7)
http://man7.org/linux/man-pages/man3/pthread_exit.3.html
CC-MAIN-2017-47
refinedweb
117
52.97
Type: Posts; User: gaby32 All working now, code attached below:wave: //#include <stdafx> #include <iostream> #include <string> #include <iomanip> // setprecision() funcion #include <conio.h> // _getch() function... updated code, program still does not work, i'm pulling my hair out now ahhhhh, enough for the day. #include "stdafx.h" #include <iostream> #include <string> #include <iomanip> //... Added while(choice !=5); see below }// End of switch statement } // End of loop while(choice !=5); } //End of 'display_main_menu' method void open_account() #include <iostream> #include <string> #include <iomanip> // setprecision() funcion #include <conio.h> // _getch() function #include <ctime> // add system time and date using namespace std; ... closing bracket for problem 2 still presets, can't see any silliy mistakes } //End of 'display_main_menu' method ms v studio 2008 express edition thanks for that tip. but why is #include "stdafx.h" highlighted in red? update problem 3 sorted added ; at end of getch() so it becomes getch(); update problem 4 sorted, i had wrong bracket above display_main_menu(); void main() { display_main_menu(); } hello Thanks i got that: balance = balance + deposit; but I'm still having the above 4 problems in my first post // Title: Bank Account Program // Author: // Descripition: A bank account program that lets the user open an account by adding a // customer name then adding a balance to the account, also lets... Problem fixed last line void pr_string() { cout << "my name is " << first_name << " " << surname << name << "\n"; } final code still can not get space in between the first and surname // Enter first and surname and then display the full name. #include<iostream> #include<string> #include<cctype> using namespace std;... ok it works now only one wee minor problem, when you enter your first name then enter surname enter the full name is displayed, but no space in between first and surname, example johnsmith should be... 
Ok made some changes when i press ctrl and f5 runs program asks for my first_name then surname then i press enter system says name isfirst_namesurname // Enter first and surname and then display... ok i understabd a little bit, if you were to right the same program, what would your code be? Hi I'am new to C++, i done various basic programs in C++, but one program is giving me a major headache: enter first and surname and then display the full name: code below: // Enter first...
http://forums.codeguru.com/search.php?s=41574fc852683c047e71654c2aaff3e0&searchid=6780517
CC-MAIN-2015-18
refinedweb
383
66.37
XML::RSS::Parser - A liberal object-oriented parser for RSS feeds. #!/usr/bin/perl -w use strict; use XML::RSS::Parser; my $p = new XML::RSS::Parser; my $feed = $p->parsefile('/path/to/some/rss/file'); # output some values my $title = XML::RSS::Parser->ns_qualify('title',$feed->rss_namespace_uri); print $feed->channel->children($title)->value."\n"; print "item count: ".$feed->item_count()."\n\n"; foreach my $i ( $feed->items ) { map { print $_->name.": ".$_->value."\n" } $i->children; print "\n"; } XML::RSS::Parser is a lightweight liberal parser of RSS feeds that is derived from the XML::Parser::LP module the I developed for mt-rssfeed -- a Movable Type plugin. This parser is "liberal" in that it does not demand compliance of a specific RSS version and will attempt to gracefully handle tags it does not expect or understand. The parser's only requirements is that the file is well-formed XML and remotely resembles RSS. few assumptions to "normalize" the parse tree into a more consistent form. For instance, it forces channel and item into a parent-child relationship and locates one (if any) of the known RSS Namespace URIs and maps them into a common form.. Your feedback and suggestions are greatly appreciated. See the "TO DO" section for some brief thoughts on next" for just a sampling of what I mean.) To ease working with RSS data in different formats, the parser does not create the feed's parse tree verbatim. Instead it makes a few assumptions to "normalize" the parse tree into a more consistent form. rssor RDFin the tree. Namespace declaration information is still extracted. channeland iteminto a parent-child relationship. In versions 0.9 and 1.0, channeland itemtags are siblings. valuein most cases. These instances include direct descendants of itemand imagein addition to direct descedents of channelnot mentioned. The following objects and methods are provided in this package. Constructor. Returns a reference to a new XML::RSS::Parser object. 
Inherited from XML::Parser, the SOURCE parameter should either open an::Element, XML::RSS::Parser::Feed, XML::Parser, XML::RAI The Feed Validator What is RSS? Raising the Bar on RSS Feed Quality "/ rssfeedquality.html" in http: The myth of RSS compatibility The software is released under the Artistic License. The terms of the Artistic License are described at. Except where otherwise noted, XML::RSS::Parser is Copyright 2003-2004, Timothy Appnel, cpan@timaoutloud.org. All rights reserved.
http://search.cpan.org/~tima/XML-RSS-Parser-2.15/lib/XML/RSS/Parser.pm
crawl-002
refinedweb
402
51.04
nvm_hal.c File Reference Non-Volatile Memory Wear-Leveling driver HAL_hal.c. #include <stdbool.h> #include " em_msc.h" #include " nvm.h" #include " nvm_hal 318 of file nvm_hal.c. De-initialize NVM . This function is run when the API deinit function is run. This should be done before any graceful halts. Definition at line 147 of file nvm_hal.c. References MSC_Deinit(). Initialize NVM driver. This function is run upon initialization, at least once before any other functions. It can be used to call necessary startup routines before the hardware can be accessed. Definition at line 134 of file nvm_hal.c. References MSC_Init(). Referenced by NVM_Init(). Erase a page in the NVM. This function calls MSC_ErasePage and converts the return status. - Parameters - - Returns - Returns the result of the erase operation. Definition at line 289 173 210 of file nvm_hal.c. References MSC_WriteWord(), and mscReturnOk. Referenced by NVM_Erase(), and NVM_Write().
https://docs.silabs.com/mcu/5.9/efm32zg/nvm-hal-c
CC-MAIN-2021-04
refinedweb
147
72.32
This excerpt is from ActionScript 3.0 Design Patterns. At the lowest cognitive level, they are processes of experiencing, or, to speak more generally, processes of intuiting that grasp the object in the original.--Edmund Husserl The idea of design patterns is to take a set of patterns and solve recurrent problems. At the same time (even in the same breath), the patterns reflect good object-oriented programming (OOP) practices. So, we cannot separate OOP from design patterns, nor would we want to do so. In answering the question of why bother with design patterns, we are really dealing with the question of why bother with OOP. The standard response to both design patterns and OOP often points to working with a team of programmers and speaking the same language. Further, it's easier to deal with the complexities involved with programming tasks requiring a division of labor for a large project using an object metaphor and practices. In addition to coordinating large projects, programmers use both OOP and design patterns to deal with change. One key, important element, of design patterns is that they make changing a program much easier. The bigger a program and the more time you've spent developing it, the greater the consequences in making a change to that program. Like pulling a string in a sweater that unravels it, changing code in a program can have the same unraveling consequences. Design patterns and good OOP ease the task of making changes in complex programs, and reduce the changes or global problems. Team coordination and application update and maintenance are reasons enough to learn design patterns. However, this is not the case if most programs you write are relatively short, you don't work with teams, and you don't worry about change. Then rewriting the short program is neither time-consuming nor difficult. What possible reason would you then have for learning design patterns? 
Beside the fact that ActionScript 3.0 is based on ECMAScript and is not likely to have major changes with each new release of Flash or Flex as in the past, you have a far more personal reason for learning design patterns. Alexander Nakhimovsky and Tom Myers, in writing about using OOP with JavaScript (Wrox, 1998), point out the value in the pleasure derived from doing something well. Like any endeavor, whether it be skateboarding or building a house, people can find pleasure in doing a task well. By "doing something well," we do not mean an obsessive perfectionism—especially since perfectionism often leads to task paralysis. Rather, like any task that one can see an outcome and experience a process of accomplishment, when it's done right, you experience the craftsman's pleasure of the creative process and results. If you've never heard of sequential programming, that's the kind of programming you've most likely been doing. Most amateur programmers just write one statement after another, and any program that has the correct sequence of statements works just fine. However, as programs became more complex, programmers ran into an unruly jumble of code often called spaghetti programs. To remedy the jumble effect of sequential programming, programmers began organizing programs into a set of procedures and set of rules, and procedural programming was born. Instead of willy-nilly GOTO statements jumping all over a program, subroutines modularly defined program flow with appropriate GOSUB/RETURN procedures to keep everything tidy. The RETURN statements back then were different from what they are today. A RETURN meant to return to the position in a sequence of code where the GOSUB had originated. In ActionScript, a return statement means that an operation sends back information generated in the operation [the method or procedure]. 
Also, from procedural programming came the concept of scope so that variables in functions and subroutines could be reused and one procedure would not contaminate another. The great majority of programming languages today are considered procedural in that they have the concepts and syntax that support it. The different versions of BASIC are procedural, as are languages like ColdFusion, PHP and Perl. However, C++ is a procedural language, as is ECMAScript (ActionScript 3.0) and Ada, languages many consider object-oriented. Languages like Java are considered true OOP languages. Without going into a lot of detail, the reason Java is considered a true OOP language and the others are not is because the only kind of procedure in Java is a class method. Its structure forces procedures to be class methods, and doesn't allow other procedures to operate outside the class structure. You might be surprised at how heated a discussion can get when it comes to a language being an OOP language or not. Two versions of OOP criteria exist. One is fairly inclusive and allows any language with certain features that can generate OOP code to be considered OOP. (ActionScript 3.0 is among those.) The other version has a restrictive criterion that includes those languages that only allow methods as procedures to be admitted to the exclusive club of OOP languages. Both versions have valid points. However, we will sidestep the issue by not taking a position, but note that both sides agree that you can create good OOP code with a procedural language. To make a long story short, this does not mean that the other languages are unable to generate true OOP programs. Well before Java was even available, developers were creating OOP programs. Some languages, especially those with the ability to use class structures and override methods, such as ActionScript 3.0, are more OOP friendly than others. 
As ActionScript has matured from a few statements to a true ECMAScript language, it has become more OOP friendly. Changing from sequential or procedural programming to OOP programming is more than picking up a language that gives you little choice in the matter, as is the case with Java. However, certain changes in a language can make it more amenable to OOP, even if it's not considered a true OOP language by some criterion. In the following sections, some new features in Flash CS3 provide a summary of relevant changes to the way ActionScript is used. For the ActionScript veterans whose introduction to programming was writing little sequential scripts or procedures using the on statements associated with MovieClip or Button objects, you're probably aware that the latest version of Flash doesn't allow script embedded in either. Built-in State Machines While most programmers welcomed the demise of movie clip and button embedded scripts, one astute programmer observed that Flash used to have built-in state machines. Jonathan Kaye, PhD, co-author of Flash MX for Interactive Simulation: How to Construct and Use Device Simulations (Delmar Learning, 2002), noted that the button and movie clip scripts often served to create state machines. Each button or movie clip could serve as an encapsulated, context-sensitive trigger for changing state. (See how design patterns deal with State Machine in Chapter 10.) In general, the demise of movie clip and button scripts is seen as a boon to better programming, especially OOP programming. Keeping track of the isolated button and movie clip codes could be a headache, even with relatively small applications. For structuring larger programs, where OOP and Design Patterns are most useful, having movie clips and buttons floating around with their own code moves the problem from the realm of headache to nightmare. 
So, for learning design patterns, be glad you don't even have to think about little scripts isolated in movie clips and buttons. Another kind of scripting you'll be seeing less of in Flash are those embedded in your Timeline. For the most part, placing scripts in the Timeline probably left a lot to be desired in the first place, but worked out to be a convenient location. In ActionScript 2.0, you were able to place a script in a class and call it from a script embedded in the Timeline, and so all that the Timeline code was really used for was to call a class that would launch a script in an ActionScript file ( .as). That being the case, the Flash CS3 .fla file has a little window where you can add the name of the class to call. (See the next section.) So, if all you want to do is to call a program and compile it into an SWF file, you no longer need to use the Timeline for your code at all. However, Flash CS3 doesn't do away with code in the Timeline. You can still use it, but in this book, we use it selectively only with movie clips that are called from a class outside the movie clip or button class. (See the section "Movie Clip and Button Classes.") You won't be placing much, if any, code in the Timeline using ActionScript 3.0. Rather than using an object with a Timeline script, you can now compile your .as files by entering the name of the class name you want to launch your application. Figure 1.1, “Document class window” shows how to use the Document class window in the Properties panel to enter the name of the class you want to launch: You can still use the Timeline, but unless there's a good reason to do so, there's no need. Most of the examples in this book use the Sprite object instead of the MovieClip class. A Sprite object has no Timeline, but a MovieClip class does. So using Sprite objects save a bit of extra weight that the Timeline has. 
In Flash CS3, MovieClip and Button objects you create using the Symbol dialog box and store in the Library can be made available to work with ActionScript 3.0. Unlike ActionScript 2.0 where MovieClip and Button symbols could be associated with a class, with Flash CS3, they can be made into classes themselves. The object's name entered into the Name window when the symbols are created becomes the class name for the object. (In past versions, references to a movie clip are made through an instance name. You can still make those references can still be made, but in a different context.) The advantage of this new procedure is that the symbol objects can be instantiated just like any other class through the code, as long as the symbols are in the Library. You don't have to place them on the stage. They can be dynamically instantiated and placed into a display array just like a dynamically generated object. Further, objects contained within the MovieClip or Button can be addressed as a property just like any other class. While this book is in no way an introduction to Flash CS3, walking through one example of this new way of creating a class with movie clips and buttons may be useful to those new to Flash and experienced users alike. The following steps walk you through this new feature: Open a new Flash document and save it as rocket.fla. Select Insert→New Symbol from the menu bar to open the Create New Symbol Dialog box. Enter "Rocket" in the Name window, and Click OK to enter the Symbol Edit Mode. In the Symbol Edit Mode, draw a rocket on the stage with the dimensions W=89, H=14, as shown in Figure 1.2, “Rocket drawing”. Once finished, position the drawing at X=0, Y=0. Click the Scene 1 icon to exit the Symbol Edit Mode. Select Insert→New Symbol from the menu bar to open the Create New Symbol Dialog box. Enter "FireRocket" in the Name window, select Movie clip as Type, and click the Export for ActionScript checkbox. 
Once you've clicked the checkbox, Figure 1.3, “Setting a MovieClip class” shows what the dialog box looks like. Notice that the Base class is flash.display.MovieClip. The base class is the reference to the package required for ActionScript to display a MovieClip object. Click OK to enter the Symbol Edit Mode Drag a copy of the Rocket movie clip from the Library to the center of the stage. Move the center point of the movie clip to the rear of the rocket and position it at X=0, Y=0. Click on Frame 40 of the Timeline and press F5 to create 40 frames. Click Frame 40 again and press F6 to insert a keyframe. Click on the keyframe in frame 40 and move the rocket to X=400, Y=0. Click on the first keyframe, and, in the tween drop-down menu in the Properties inspector, select Motion. You should now see a blue arrow in the Timeline. Move the playhead from left to right to make sure that the motion tween is working right. Figure 1.4, “Rocket in motion tween” shows what you should see: Open the Actions panel. Click on a blank area of the stage to make sure you don't have any objects selected, and then click on Frame 1. In the Actions panel, type in the stop() statement. Save the Rocket.fla file. Open a new ActionScript file and save it as TestRocket.as in the same folder as the Rocket.fla file. Enter the script in Example 1.1, “TestRocket.as” in the TestRocket.as file, and save the file once again: Example 1.1. TestRocket.as package { import flash.display.Sprite; public class TestRocket extends Sprite { private var fireRocket:FireRocket; public function TestRocket() { fireRocket=new FireRocket(); fireRocket.x=50; fireRocket.y=100; addChild(fireRocket); fireRocket.gotoAndPlay(2); } } } Finally, open the Rocket.fla file, and in the Document class window in the Properties panel, type in TestRocket and save the file. Then test the movie by pressing Ctrl + Enter (Command + Return on the Mac). 
You should see the rocket move from left to right across the screen and then return to its original position. Using Flash in a more or less traditional manner to create movie clips is still an important part of using ActionScript, but it has changed. You can no longer attach a class to a movie clip as was the case in previous versions. However, in creating applications using design patterns, you can still integrate different aspects created in the Flash IDE. So while ActionScript 3.0 has made the leap to a true ECMAScript language, it has not abandoned its roots in animated graphics.OOP Basics If you're familiar with OOP and have been practicing good OOP for some time now, you might want to skip this section or just skim over it to see if we've added anything new, or if there's something new as far as ActionScript is concerned. Later in this chapter, we introduce good practices in OOP on which design patterns are based. These more advanced concepts depend on understanding these basics. However, this short discussion is no substitute for a more in-depth understanding of OOP. If this is your first exposure to OOP, you will definitely want to supplement your understanding of OOP with an introductory book dedicated to OOP. Throughout the book, you will find references to how a design pattern employs different basic and design pattern OOP principles. Each chapter includes a section on key OOP concepts, and so what you read in this introductory chapter is only the first of many times an OOP concept will be described. This is intentional. By looking at an OOP concept from different angles, we believe you will have a better understanding of OOP's many nuances. We ourselves were surprised at how different design patterns brought out different perspectives on the same OOP concept and helped further clarify it. 
To get started, we'll review the four basic OOP concepts: Abstraction Encapsulation Inheritance Polymorphism Each of these concepts needs reflection, and if you're new to OOP, don't worry about getting it right the first time. We go over these concepts time and again in the design pattern chapters. In general, an abstraction is a model or ideal. You don't have all of the details, but you have the general parameters that can be filled in with details. Further, an abstraction is clear enough for you to tell one abstraction from another. Take, for example, two jobs your company is trying to fill. One's for a Web designer and the other's for a programmer. To advertise for the position, you would not describe the person as a specific person but instead in terms of the characteristics you want for the position. You might have the two abstractions representing the two different positions: Two Positions Open: Programmer Experienced with multi-programmer projects Experienced with middleware and database programming ECMAScript programming background OOP and Design Pattern programming skills Web designer Experienced with creating Web graphics Familiar with animation graphics Can work with vector graphics Client-centered approach You can tell the difference between the two positions and their general requirements (properties), but the details are left fairly open. A programmer is unlikely to apply for the Web designer position and a designer is just as unlikely to apply for the programmer position. However, a pool of applicants could have a wide range of skills that would provide the concrete details for each position. For example, one programmer may have PHP middleware skills and/or MySQL database skills, while another may be experienced in using ASP.NET, C# and MS SQL. The abstraction is in the job description and the details are provided by the applicants' unique sets of skills and experience. 
In Object-Oriented Design with Applications (Benjamin/Cummings), Grady Booch, one of the design pattern pioneers, provides the following definition of an abstraction that is both clear and succinct: An abstraction denotes the essential characteristics of an object that distinguish it from all other kinds of object and thus provide crisply defined conceptual boundaries, relative to the perspective of the viewer. Booch's definition pretty well describes the two job descriptions. The descriptions provide the essential characteristics of the position and they distinguish one from the other. Turning now to abstractions in ActionScript programming, we'll take a simple video player for an example. This player will be made of certain elements that we need; so we start by listing them as abstractions: A Net connection A video screen A stream An FLV file to play If we put these together just right, we'll be able to play a video. However, instead of starting with an abstraction, we want to start with something concrete that works for us right away. Enter the code in Example 1.2, “PlayVideo.as” saving the file using the name in the caption: Throughout the book, with a few exceptions, caption names represent the name used for the file. Example 1.2. PlayVideo.as package { import flash.net.NetConnection; import flash.net.NetStream; import flash.media.Video; import flash.display.Sprite; public class PlayVideo extends Sprite { public function PlayVideo() { var nc:NetConnection=new NetConnection(); nc.connect(null); var ns:NetStream = new NetStream(nc); var vid:Video=new Video(); vid.attachNetStream(ns); ns.play("adp.flv"); addChild(vid); vid.x=100; vid.y=50; } } } You'll need an FLV file named adp.flv—any FLV file with that name will work. Open a new Flash document file, enter PlayVideo in the Document class window, and test it. 
To change this to an abstract file, take out all specific references to any values with the exception of the null value in the NetConnection.connect() method. (We could pass that value as a string, but we're leaving it to keep things simple.) Example 1.3, “PlayVideoAbstract.as” shows essentially the same application abstracted to a "description" of what it requires to work. Example 1.3. PlayVideoAbstract.as package { import flash.net.NetConnection; import flash.net.NetStream; import flash.media.Video; import flash.display.Sprite; public class PlayVideoAbstract extends Sprite { public function PlayVideoAbstract(nc:NetConnection, ns:NetStream,vid:Video,flick:String,xpos:uint,ypos:uint) { nc=new NetConnection(); nc.connect(null); ns= new NetStream(nc); vid=new Video(); vid.attachNetStream(ns); ns.play(flick); vid.x=xpos; vid.y=ypos; addChild(vid); } } } All the values for the different elements (with the exception of null) have been abstracted to describe the object. However, like a job description that abstracts requirements, so too does the PlayVideoAbstract class. All the particulars have been placed into one long set of parameters: PlayVideoAbstract(nc:NetConnection,ns:NetStream,vid:Video,flick:String, xpos:uint,ypos:uint) The abstract parameters in the constructor function let us add any concrete elements we want, including the specific name of a video we want to play. Example 1.4, “PlayAbstract.as” shows how concrete instances are implemented from an abstract class: Example 1.4. 
PlayAbstract.as package { import flash.display.Sprite import flash.net.NetConnection; import flash.net.NetStream; import flash.media.Video; public class PlayAbstract extends Sprite { private var conn:NetConnection; private var stream:NetStream; private var vid:Video; private var flick:String="adp.flv"; public function PlayAbstract() { var playIt:PlayVideoAbstract=new PlayVideoAbstract(conn,stream,vid, flick,100,50); addChild(playIt); } } } All the entire class does is to create a single instance of the PlayVideoAbstract class and place it on the stage. Private variables serve to provide most of the concrete values for the required parameters. Literals provide the data for both the horizontal ( x) and vertical ( y) positions of the video. To test it, just change the Document class name in the Flash document (FLA) file to PlayAbstract. We can see two key reasons that abstractions are important for both OOP and Design Patterns. Rather than being dogged by minutiae of the problem, abstraction helps to focus on what parts, independent of their details, are required to solve the problem. Does this mean that you ignore the details? Not at all. Rather, the details are handled by adding them just when they're needed. For instance, in the example in the previous section, the exact video file is unimportant. All that's important is that some video name (a detail) be provided when we're ready to play it. We don't need to build a theater around a single movie. Likewise, we don't need to build a class around a single video file. The second advantage of abstraction is flexibility. If you're thinking that in the previous section the Example 1.2, “PlayVideo.as” was easier and took less code and classes, you're right. However, suppose you want to place four videos on the stage. Then, all you would need to do is to create four instances using the abstract class instead of re-writing three more classes. In other words, the second method using abstraction is more flexible. 
In addition to adding more video instances, we can easily change the video file we choose to play. Encapsulation is what makes a code object an object. If you have a tail, four legs, a cold nose and a bark, you do not have a dog. You just have a collection of parts that make up a dog. When you bring all of the doggy parts together, you know that each part is a part but collectively, you do not think of parts but a reality sui generis. That is, a dog is an object unto itself and not doggy parts that happen to hang together. Encapsulation has a similar effect on a collection of operations and properties. Encapsulation has been used synonymously with other terms such as component and module. In the context of OOP, encapsulation is often called a black box, meaning you can see it do certain things but you cannot see the inner workings. Actually, a lot of things we deal with all the time are black boxes, such as our dog. We can see the dog do a lot of different things, and we can interact with the dog. However, we really don't know (or usually care) about how the physiology of the dog works—dogs are not transparent. They're black boxes. The good thing about the concept of a black box is that we don't have to worry about the inner workings or parts. We just have to know how we can deal with it, secure in the knowledge that whatever makes the black box work is fine as long as it works as we think it should. To see why you might want to encapsulate your data, we'll take a look at two programs. One is not encapsulated, leading to unwanted consequences, and the other is encapsulated, preventing strange results. If you make a dog object, you may want to include an operation that includes the way a dog communicates. For purposes of illustration, we'll include a method called dogTalk that will let the dog make different sounds. 
The dog's communication will include the following: Woof Whine Howl Grrrr We'll start off with bad OOP to illustrate how you may end up with something you don't want in your dog's vocabulary. Example 1.5, “NoEncap.as” is not encapsulated and will potentially embarrass your dog object: Example 1.5. NoEncap.as package { //This is BAD OOP -- No encapsulation import flash.text.TextField; import flash.display.Sprite; public class NoEncap extends Sprite { public var dogTalk:String="Woof, woof!"; public var textFld:TextField=new TextField(); public function NoEncap() { addChild(textFld); textFld.x=100; textFld.y=100; } function showDogTalk() { textFld.text=dogTalk; } } } As a black box, you should not be able to change the internal workings of a class, but this class is wide open, as you will see. You can interact with an encapsulated object through its interface, but you should not allow an implementation to make any changes it wants. Example 1.6, “TestNoEncap.as” breaks into the object and changes it in ways you don't want: Example 1.6. TestNoEncap.as package { import flash.display.Sprite; public class TestNoEncap extends Sprite { public var noEncap:NoEncap; public function TestNoEncap() { noEncap=new NoEncap(); noEncap.dogTalk="Meow"; noEncap.showDogTalk(); addChild(noEncap); } } } Open a new Flash document file, and, in the Document class window, type in TestNoEncap. When you test the file, you'll see "Meow" appear on the screen. Such a response from your dog object is all wrong. Dogs don't meow and cats don't bark. However, that's what can happen when you don't encapsulate your class. When you multiply that by every un-encapsulated class you use, you can imagine the mess you might have. So let's find a fix for this. The easiest way to insure encapsulation is to use private variables. The private statement in ActionScript 3.0, whether it's used with variables, constants or methods (functions) makes sure that only the class that defines or declares it can use it. 
This not only shuts out implementations that attempt to assign any value they want, but it also excludes subclasses. (This is a difference from ActionScript 2.0; so watch out for it if you're converting an application from ActionScript 2.0 to ActionScript 3.0.) To see how the private statement will change how the application works, Example 1.7, “Encap.as” changes the NoEncap class by adding the private statement to the variables: Example 1.7. Encap.as package { //This is GOOD OOP -- It has encapsulation import flash.text.TextField; import flash.display.Sprite; public class Encap extends Sprite { private var dogTalk:String="Woof, woof!"; private var textFld:TextField=new TextField(); public function Encap() { addChild(textFld); textFld.x=100; textFld.y=100; } function showDogTalk() { textFld.text=dogTalk; } } } Also, minor changes have to be made to the test file. The supertype implemented must be changed. Example 1.8, “TestEncap.as” shows the new test class, TestEncap, for the Encap class. Example 1.8. TestEncap.as package { import flash.display.Sprite; public class TestEncap extends Sprite { public var encap:Encap public function TestEncap() { encap=new Encap(); encap.dogTalk="Meow"; encap.showDogTalk(); addChild(encap); } } } Go ahead and test it by changing the Document class name to TestEncap. This time, though, you'll get the following error in the Compiler Errors panel: Line 11: 1178: Attempted access of inaccessible property dogTalk through a reference with static type Encap. Source: encap.dogTalk="Meow"; It shows the source of the error to be the line: encap.dogTalk="Meow"; That error reflects the fact that it attempted to access a private variable outside the class. To fix the script, comment out the offending line: //encap.dogTalk="Meow"; Try testing it again. This second time, everything works fine, except, you don't get the dog object expressing "Meow." You see "Woof, woof." You may be thinking that private variables really limit what you can do. 
Suppose you want the dog object to howl, growl or whimper? How do you make the changes dynamically? Preventing an encapsulated object from doing something wrong is one thing, but how can an object be set up to accept variables? In this book, you will find the term interface used in different contexts, and each context gives the term a slightly different meaning. (Thanks a lot!) Up to this point, you're probably familiar with terms like UI (user interface) or GUI (graphic user interface). These terms refer to different tools you use to interact with a program. For example, a button is a common UI in Flash. When you click a button, something predictable happens. You may not know how it happens (or care), but you know that if you press the button, the video will play, a different page will appear, or an animation will start. So if you understand the basic concept of a UI, you should be able to understand how an interface works with an object. With a UI, the black box is the application you're using, whether it's shopping at eBay or using a word processor. If you follow certain rules and use the different UIs in the appropriate manner, you get what you want. In the same way, an encapsulated object is a black box, and the interface describes the ways you can interact with it programmatically. It's the UI for the object. Design Patterns: Elements of Reusable Object-Oriented Software (page 13) nicely clarifies object interfaces and their signatures. An object's signature is its operation name, parameters and return datatype. Figure 1.5, “Object's signature” graphically shows the makeup of a typical object's signature. All of an object's signatures defined by its operations is the interface. In this context then, the interface for the object constitutes the rules for access, list of services, and controls for it. Wait! There's more! 
Later in this chapter you will find an interface statement as part of ActionScript 3.0's lexicon, which is a whole different use of the term, interface. You will also find the term used synonymously with supertype elsewhere in this book. All of the different uses of interface will be explained in time. Before getting bogged down in contextual definitions, it's time to move on to see what an encapsulated object's interface looks like. The following section does just that. The most common way to enforce encapsulation but to give implementations access to an object is with getter and setter interfaces. The object controls both access to it and what is allowed. Keeping our example of a dog object, we know that the dog has a limited vocabulary, and it's not one that includes "Meow." So, we'll just make a setter that allows only the dog's limited vocabulary. A setter method includes parameter variables of some kind, and an algorithm that allows certain things and excludes others. However, most setters just assign the parameter value to a private member regardless of its value. The algorithm and everything else in the function that makes up the method is invisible to the implementation, and if the wrong parameter or wrong datatype is entered, either an error message appears or the value will not be passed. The following shows the general structure of a setter: function setterMethod(parameter) { if(parameter=="OK") { private variable = parameter; } } So the trick is to have the implementation pass any data to the object as a parameter, and if the parameter is acceptable, the private variable is assigned the parameter's value. This is a very general overview of the setter, but it shows the essentials of how it works. Compared to setters, getter methods are pretty straightforward. In Example 1.7, “Encap.as”, the showDogTalk() method is the getter function. The getter method's job is to provide data for the implementation. 
Thus, while the original example doesn't have a setter method, it does have a getter. The setter makes sure that the client gets only what it's supposed to get. In Example 1.9, “EncapSet.as”, the private variable, dogTalk, is not assigned a default value. However, the variable is still used in both the setter and getter methods. As you will see when you test the new class, EncapSet, the implementation has access to the private variable through the setter's interface. Example 1.9. EncapSet.as package { //This is BETTER OOP -- It's got encapsulation //plus a decent interface for an object import flash.text.TextField; import flash.display.Sprite; public class EncapSet extends Sprite { private var dogTalk:String; private var textFld:TextField=new TextField(); public function EncapSet() { addChild(textFld); textFld.x=100; textFld.y=100; } //Setter function setDogTalk(bowWow:String) { switch (bowWow) { case "Woof" : dogTalk=bowWow; break; case "Whine" : dogTalk=bowWow; break; case "Grrrr" : dogTalk=bowWow; break; case "Howl" : dogTalk=bowWow; break; default : dogTalk="Not dog talk!"; } } //Rendering value function showDogTalk() { textFld.text=dogTalk; } } } As you can see in Example 1.9, “EncapSet.as”, the setter method allows only the four expressions we had listed for a dog. Anything else (including "Meow") is not allowed. Next, Example 1.10, “TestEncapSet.as” shows how to interface with the encapsulated dog object: Example 1.10. TestEncapSet.as package { import flash.display.Sprite; public class TestEncapSet extends Sprite { private var encapSet:EncapSet public function TestEncapSet() { encapSet=new EncapSet(); encapSet.setDogTalk("Howl"); encapSet.showDogTalk(); addChild(encapSet); } } } Enter TestEncapSet in the Document class of the FLA file and test it. As you will see, the string "Howl" is perfectly acceptable. Now, test it again using "Meow." This time, you will see that the object rejected the kitty cat sound as "Not dog talk." 
This arrangement represents the best of both worlds; encapsulation and a way to execute operations within an encapsulated object. Another way to maintain encapsulation and hide the information in your objects is to use the ActionScript 3.0 get and set methods. Some programmers find it awkward to create their own getter and setter methods as we did in the previous section, preferring the simplicity of the get accessor and set mutator. Accessors and mutators versus Frankenstein: they sound like something from a horror flick, but the terms accessor and mutator are used to describe getters and setters. The accessor (getter) does access or get information, and that's perfectly reasonable. But mutators? Well, if we think about it, a mutation does refer to a change, and when we set information, we do indeed change it. It too makes perfect sense. So if you're happily reading an article on design patterns and you see the terms accessor or mutator, don't let it creep you out. In looking at how get and set are used, a key feature is the absence of parentheses. Changes (setting) are not accomplished by adding values. Rather, the getters and setters are treated like properties where values are assigned or retrieved through assignment. Example 1.11, “FlowerShop.as” and Example 1.12, “Send Flowers.as” show how you can lock up your encapsulated objects using getters and setters with the get and set methods. Example 1.11. FlowerShop.as package { public class FlowerShop { private var buds:String; public function FlowerShop():void {} //Getter function public function get flowers():String { return buds; } //Setter function public function set flowers(floral:String):void { buds=floral; } } } In Example 1.12, “Send Flowers.as”, keep in mind that flowers is a method, and not a property. However, setting and getting values using the flowers() method looks exactly like setting and getting a property value. Example 1.12. 
Send Flowers.as package { import flash.display.Sprite; public class SendFlowers extends Sprite { public function SendFlowers() { var trueLove:FlowerShop = new FlowerShop(); //Set values trueLove.flowers="A dozen roses"; //Get values trace(trueLove.flowers); //Set different values trueLove.flowers="And a dozen more...."; //Get the changed values trace(trueLove.flowers); } } } This section on encapsulation has been fairly long. The reason for the attention to encapsulation is because of its importance to good OOP; it's a crucial element in design patterns. Of the 24 original design patterns, only 4 have a class scope and the remaining 20 have an object scope. Rather than relying on inheritance (which is discussed in the next section), the great majority of design patterns rely on composition. Later in this chapter, in the section, "Favor Composition," you will see how design patterns are made up of several different objects. For the design patterns to work the way they are intended, object encapsulation is essential. The third key concept in good OOP is inheritance. Inheritance refers to the way one class inherits the properties, methods and events of another class. If Class A has Methods X, Y, and Z, and Class B is a subclass (extends) Class A; it too will have Methods X, Y and Z. This saves a lot of time and adds functionality to your programming projects. If you've done virtually any programming using ActionScript 3.0, you've probably extended the Sprite class as we've done in Example 1.1, “TestRocket.as” through Example 1.10, “TestEncapSet.as”. Because of inheritance, the new classes (subclasses) derived from the Sprite class have all of the functionality of the Sprite class, in addition to anything you add to the subclass. The best place to start looking at how inheritance works is with ActionScript 3.0. Open your online ActionScript 3.0 Language Reference. In the Packages window, click flash.display. 
In the main window that opens the Package flash.display information, click MovieClip in the Classes table. At the very top of the Class MovieClip page, you will see the Inheritance path: MovieClip→Sprite→DisplayObjectContainer→InteractiveObject→ DisplayObject→ EventDispatcher→Object That means that the MovieClip class inherited all of the characteristics from the root class, Object, all the way to Sprite object and everything in between. Scroll down to the Public Properties section. You will see nine properties. Click on the Show Inherited Public Properties link. Now you should see 43 additional properties! So of the 52 properties in the MovieClip class, you can see that only 9 are unique to MovieClip class. The rest are all inherited. Likewise, the methods and properties we added are unique to the class—the rest are inherited from Sprite. To see the effect of inheritance and the effect of using one class or another, change the two references to Sprite to MovieClip in Example 1.9, “EncapSet.as”. Because the MovieClip class inherits everything in the Sprite class, the application should still work. As you will see, it works just fine. The reason that Sprite is used instead of MovieClip is that we did not want to have any unnecessary baggage—just the minimum we needed. If you change Sprite to the next class it inherits from, DisplayObjectContainer, you will see that the application fails. This means that the application requires one of the Sprite class properties that is not inherited. One byte over the line: in Example 1.9, “EncapSet.as”, if you substitute the MovieClip for Sprite classes for the parent class, you will find that your SWF file is larger than when you tested it with Sprite (708 bytes versus 679 bytes). The 29 byte difference probably won't bloat your program significantly, but with added classes in design pattern applications, an unnecessary byte here and one there might add up. 
(When you win a contract because your application was 1 byte less than the competition's, you'll be glad we had this little chat.) In addition to seeing what is inherited in ActionScript 3.0 in the Language Reference, you might also want to note what's in the packages you import. If you import flash.display.* you can bring in everything in the display package. That's why importing just what you need, such as flash.display.Sprite or flash.display.Shape, is far more frugal and less cluttering. As can be seen from looking at the inheritance structure of ActionScript 3.0, a well-planned application benefits greatly from a well-organized plan of inheritance. To see how inheritance works from the inside, the next example provides a simple inheritance model for our four-legged friends. Even with this simple example, you can see what is inherited and what is unique to an application. Example 1.13, “QuadPets.as” through Example 1.16, “TestPets.as” make up the application illustrating inheritance. The first class, QuadPets, is the parent or superclass with a constructor that indicates when an instance of the class is instantiated using a trace statement. Any class that inherits the QuadPets class gets all of its methods and interfaces. Example 1.13. QuadPets.as package { public class QuadPets { public function QuadPets():void { trace("QuadPets is instantiated"); } public function makeSound():void { trace("Superclass:Pet Sound"); } } } Any class that uses the extends statement to declare a class inherits the class characteristics it extends. The class that's extended is the superclass (or parent). The class that extends another class is the subclass. Both the Dog and Cat classes are subclasses of the QuadPets class. They inherit all of the superclass' functionality. To see how that happens, we'll have to first create the two subclasses. Example 1.14. 
Dog.as package { public class Dog extends QuadPets { public function Dog():void { } public function bark():void { trace("Dog class: Bow wow"); } } } Example 1.15. Cat.as package { public class Cat extends QuadPets { public function Cat():void { } public function meow():void { trace("Cat class: Meow"); } } } To see how the Dog and Cat classes inherit the operations of the superclass, the TestPets class simply invokes the single method ( makeSound) from the superclass. As you can see from the Dog and Cat classes, no such method can be seen in their construction, and so you can see that it must have originated in the QuadPets class. Example 1.16. TestPets.as package { import flash.display.Sprite; public class TestPets extends Sprite { public function TestPets():void { var dog:Dog=new Dog(); dog.makeSound(); dog.bark(); var cat:Cat=new Cat(); cat.makeSound(); cat.meow(); } } } In addition to invoking the makeSound() method, the Dog and Cat instances invoke their own methods, bark() and meow(). Also, when you test the application, you will see: QuadPets is instantiated That output is caused by the line: trace("QuadPets is instantiated"); placed in the constructor function of the QuadPets class. It fires whenever an instance of the class is invoked. So in addition to having the capacity to use methods from the superclass, subclasses inherit any actions in the constructor function of the superclass. Open a Flash document, and type TestPets in the Document class window. When you test it, you should see the following in the Output window: QuadPets is instantiated Superclass:Pet Sound Dog class: Bow wow QuadPets is instantiated Superclass:Pet Sound Cat class: Meow Looking at the output, both the dog and cat instances display two superclass messages ( QuadPets is instantiated, Superclass:Pet Sound) and one message unique to the respective subclasses ( Dog class: Bow wow, Cat class: Meow.) 
These examples show how inheritance works, but in practical applications, and in design patterns, inheritance is planned so that it cuts down on redundancy and helps build a program to achieve an overall goal. Inheritance can also be linked to two other structures: interfaces and abstract classes. However, the connection between the interface structure (a statement in ActionScript 3.0) or the abstract class and inheritance is a bit different from the one with the class structure we've examined thus far. First of all, in this context, interface refers to an ActionScript 3.0 statement and not the object interface discussed in the "Encapsulation and Design Patterns" section. While a class can be said to be an abstraction of an object, an interface is an abstraction of methods. They are widely used in design patterns. Beginning in Chapter 5 with the adapter pattern, you will see interfaces at work in several of the other design patterns. To begin to see what an interface does, a simple example illustrates the use of one. However, once you start seeing how they're used in design patterns, you will better see their utility. Example 1.17, “BandFace.as” shows how a typical interface is created. The application is made up of Example 1.17, “BandFace.as” to Example 1.20, “MakeSound.as”. The first thing we'll do is to make our interface. As you can see in Example 1.17, “BandFace.as”, the single function is quite simple and devoid of content. It does have a name, parameter, and return type, but note that the function is only a single line. Example 1.17. BandFace.as package { //Interface public interface BandFace { function playInstrument(strum:String):void; } } Each implementation of the interface must have exactly the same structure in all of the methods in the interface, and if anything is not the same, you'll get an error message. As long as the signature for the methods is the same, everything should work fine. 
Example 1.18, “Guitar.as” is the first implementation of the BandFace interface. Example 1.18. Guitar.as package { public class Guitar implements BandFace { public function Guitar() {} public function playInstrument(strum:String):void { trace("Playing my air "+ strum); } } } Looking at Example 1.19, “Bongo.as” and the Bongo class, at first you may think that the method is built incorrectly. It's wholly different from the Guitar class in its details, but the method's signature is identical. Example 1.19. Bongo.as package { import flash.media.Sound; import flash.media.SoundChannel; import flash.net.URLRequest; public class Bongo implements BandFace { public function Bongo(){} private var sound:Sound; private var playNow:SoundChannel; private var doPlay:URLRequest; public function playInstrument(strum:String):void { sound=new Sound(); doPlay=new URLRequest(strum); sound.load(doPlay); playNow=sound.play(); } } } Remember, when working with interfaces, the number of methods in an interface can be many or few, but as long as each implementation of the interface includes every method in the interface and maintains the structure, everything works fine. You may be wondering where the inheritance is. Given the fact that you must build all of the interface's methods, it looks more like a customization than an inheritance. However, the BandFace subclasses all inherit its interfaces. So essentially, the subclasses inherit the interface but not the implementation. Finally, to test the application, the MakeSound class, listed in Example 1.20, “MakeSound.as”, tests both classes and their very different constructions of the single method from the BandFace interface. You'll need an MP3 file named bongo.mp3 (use any handy MP3 file) saved in the same folder as the MakeSound.as file. Example 1.20. 
MakeSound.as package { import flash.display.Sprite; public class MakeSound extends Sprite { private var guitar:BandFace; private var bongo:BandFace; public function MakeSound():void { guitar=new Guitar(); guitar.playInstrument("Gibson"); bongo=new Bongo(); bongo.playInstrument("bongo.mp3"); } } } Note that both instances, guitar and bongo, are typed to the supertype, BandFace, and not to either the Guitar or the Bongo classes. This practice follows the first principle of reusable object-oriented design: Program to an interface, not an implementation. The purpose of doing so is to maintain flexibility and reusability. This principle will be fully explored elsewhere in this chapter and in Chapter 8, but for now, just note that fact. A word about the interface and abstract class naming conventions used in this book: with the focal point of this book on object-oriented programming and design patterns, we use naming conventions that best reflect and clarify the structure of the different design patterns. As a result, we don't always follow some of the naming conventions. One convention is to name interfaces beginning with a capital I. So following that convention, the BandFace interface would have been named IBandFace. Where using the I does not interfere with clarifying a design pattern structure, we use it, but where we have several interfaces in a design pattern, often we forego that convention. Another convention is to name abstract classes using Abstract+ Something. So, AbstractHorses would be a name for a class you'd otherwise name Horses. Again, our focus on revealing structure supersedes using these conventions. We differentiate abstract from concrete classes using comments in the code. Throughout the book, however, we attempt to be as clear as possible in naming the different classes. You may want to adopt some of these more common conventions to aid in keeping your code clear once you better understand them. 
As you become familiar with design patterns, you'll see more and more use of interfaces and its close cousin the abstract class. In ActionScript 3.0 the abstract class is a little problematic because no class can be actually defined as abstract. While you can use the public statement to make a class public, ActionScript 3.0 (and ECMAScript) chose not to include abstract classes in the language, as does Java. However, you can create an abstract class in ActionScript 3.0. All you have to do is create a regular class and treat it as an abstract class. Like interfaces, abstract classes can have abstract methods that are not directly implemented. Rather, abstract classes are subclassed and any abstract methods are overridden and implemented very much like methods are in using interfaces. However, abstract classes can have implemented methods as well; so when you need both abstract and implemented methods, abstract classes are key to such design patterns as the Factory Method (Chapter 2), Decorator (Chapter 4) and the Composite (Chapter 6) as well as others. You know from inheritance that when one class subclasses another, the subclass inherits the methods of the superclass. With an abstract class, you do not implement the class but instead subclass it, and then implement the subclass and its methods. Because of the abstract nature of at least some of the methods in the abstract class, you must override them. Using the override statement, an overridden class maintains its signature but can have its own unique details. You must be sure that the name, number, type of parameters, and the return type are the same. In other words, when you override a method from an abstract class, you treat the method exactly the same as an interface method. Example 1.21, “AbstractClass.as” through Example 1.23, “ImplementSub.as” make up an application that illustrates how an abstract class works. The abstract class has two methods; a concrete one and an abstract one. Example 1.21. 
AbstractClass.as package { //Abstract class public class AbstractClass { function abstractMethod():void {} function concreteMethod():void { trace("I'm a concrete method from an abstract class") } } } The subclass inherits the methods and interface from the abstract class, and it provides details for the abstract method by overriding it. However, the subclass leaves the concrete class as is and does not attempt to instantiate the superclass. Example 1.22. Subclass.as package { //Subclass of Abstract class public class Subclass extends AbstractClass { override function abstractMethod():void { trace("This is the overidden abstract method"); } } } When the application finally implements the methods originating in the abstract class, it does so by programming to the interface ( AbstractClass) but instantiates through the subclass ( Subclass). So the instance, doDemo, is typed as AbstractClass but instantiated as Subclass. Example 1.23. ImplementSub.as package { //Implement Subclass of Abstract class import flash.display.Sprite; public class ImplementSub extends Sprite { private var doDemo:AbstractClass; public function ImplementSub() { doDemo=new Subclass(); doDemo.abstractMethod(); doDemo.concreteMethod(); } } } The following shows what appears in the Output window when you test the program: This is the overidden abstract method I'm a concrete method from an abstract class At this point you may be scratching your head wondering why you should go through this kind of convolution to do what you could do with a non-abstract class. Instead of subclassing the abstract class, overriding one of the methods, and then implementing the subclass, we could have just written both methods the way we wanted them in the first place and not have to override anything. The next section attempts to answer that question. To understand why to use interfaces and abstract classes, we need to consider the whole purpose of design patterns. It's the ability to reuse object-oriented software. 
We've been using fairly simple examples to help clarify the concepts. However, typical software is usually far more complex, and the algorithms more sophisticated. Once you complete a project, you're likely to have to make a change. The larger and more complex the project, the more difficult it is to reuse the assets you've developed, maintain the interconnections and generally make any kind of change without unraveling the whole thing or introducing code that may make future change impossible. To illustrate what this means, consider the application in Example 1.21, “AbstractClass.as” to Example 1.23, “ImplementSub.as” in the previous section. Multiply the complexity of the class AbstractClass by a factor of 10. Do the same to the number of subclasses. Now you're dealing with some serious complexity. Next, consider that you need to maintain the functionality of the class, Subclass, and yet change the abstract method in the AbstractClass for a new functionality. Also, you have to maintain all of the interfaces and connections you've built. Because you used an abstract class, you can create a new subclass that overrides the abstract function and uses it in a different way. To see how this all works, we'll make a new subclass of the AbstractClass and change the abstract method. We're not changing anything else in the entire application, so we don't have to worry about everything working together because we can separate our new subclass and method and only implement them where needed. Other subclasses of the AbstractClass are unaffected. Example 1.24, “SubclassChange.as” and Example 1.25, “ImplementSubChange.as” show the two new classes created to make the change. Example 1.24. 
SubclassChange.as package { //A New Subclass of Abstract class with a change public class SubclassChange extends AbstractClass { override function abstractMethod():void { trace("This is the new abstractMethod!!") trace("Made just one little important change."); trace("But this still works just fine!"); } } } Note that instead of having a single trace statement, Example 1.24, “SubclassChange.as” uses three. This modification simulates a more complex change. However, Example 1.25, “ImplementSubChange.as”, which implements the application, is identical to Example 1.23, “ImplementSub.as”. Example 1.25. ImplementSubChange.as package { //ImplementSubChange of Abstract class import flash.display.Sprite; public class ImplementSubChange extends Sprite { private var doDemo:AbstractClass; public function ImplementSubChange() { doDemo=new SubclassChange(); doDemo.abstractMethod(); doDemo.concreteMethod(); } } } All the changes are revealed in the line, doDemo.abstractMethod(); Even though the line is the same as Example 1.23, “ImplementSub.as”, the output window reveals that a good deal has changed : This is the new abstractMethod!! Made just one little important change. But this still works just fine! I'm a concrete method from an abstract class What's important is not that it made the changes. The point that you need to consider carefully is the fact that such changes can be made without disrupting the other elements in your application that you have reused. So without having to rewrite the entire application, you can keep its functionality while making necessary changes. The, “AbstractClass.as” through Example 1.25, “ImplementSubChange.as”,, “Polymorphism.as” through Example 1.31, “PlayMusic.as”, “IBiz.as” through Example 1.36, “DoBusiness.as”, “Plasma.as”, “Plasma.as” with Example 1.34, “MP3Player.as”,, “Plasma.as” is set up to play a video and Example 1.34, “MP3Player.as”, “Computers.as”, “DoBusiness.as”,, “MakeSound.as”.) 
The founding principles of design patterns are laid down in the Gang of Four's (GoF) canon, Design Patterns: Elements of Reusable Object-Oriented Software. The two essential principles are: Program to an interface, not an implementation Favor object composition over class inheritance If you skipped the section on the four basic OOP principles because you already know them, the first principle of Design Pattern OOP was introduced, but only briefly. Not to worry, we'll go over it in detail. Before going further, we need to clarify some basic terms. While these terms are fairly simple, they can trip you up if you don't understand how they're used in context. The subsequent chapters use these terms a good deal, and a misunderstanding at the outset can lead to confusion now and later. For the most part, the term implementation is a noun referring to the details—the actual code—in a program. So when referring to an implementation, the reference is to the actual program as it is coded. Implementation is the internal details of a program, but is often used to refer to a method or class itself. You may run into some confusion when the keyword implements refers to contracting with an interface structure. For example, the line class MyClass implements IMyInterface has the effect of passing the interface of IMyInterface to MyClass. To say that MyClass implements IMyInterface really means that MyClass promises to use all the signatures of IMyInterface and add the coding details to them. So the phrase MyClass implemented IMyInterface means that MyClass took all of the methods in IMyInterface and added the code necessary to make them do something while preserving their signatures. One of the two main principles of design pattern programming is program to an interface, not an implementation. However, you must remember that implementation is employed in different contexts, and its specific meaning depends on these contexts. 
Throughout this book, you will see references to implementation used again and again with different design patterns, and you need to consider the other elements being discussed where the term is used. The term state is not part of the ActionScript 3.0 lexicon (like implements is), but the term is used in discussing design patterns. Essentially, state is used to refer to an object's current condition. For the most part you will see state used to convey the value of a key variable. Imagine a class with a single method with a Boolean value set in the method. The Boolean can either be true or false. So its state is either true or false—which could represent on/off, allow/disallow, or any number of binary conditions. The exact meaning of true or false depends on the position of the class in the rest of the design pattern. A more specific example would be an object that plays and stops an MP3 file. If the MP3 is playing, it's in a play state, while if it's not playing, it's in a stop state. One use of state is in the context of a state machine and the State design pattern you will be seeing in Chapter 10. A state engine is a data structure made up of a state network where changes in state affect the entire application. The State design pattern is centered on an object's behavior changing when its internal state changes. The term state here is a bit more contextually rich because it is the key concept around which the design pattern has been built. In general, though, when you see state used, it's just referring to the current value of an object's variables. In this age of the Internet and Web, the term client is used to differentiate the requesting source from the server from which a request is being made. We think of client/server pairs. Moreover, the term request is used to indicate that a Web page has been called from the server. In the context of design patterns, instead of a client/server pair, think of a client/object pair. 
A request is what the client sends to the object to get it to perform an operation—launch a method. In this context, a request is the only way to get an object to execute a method's operations. The request is the only way to launch an operation since the object's internal state cannot be accessed directly because of encapsulation. Don't forget Flash Media Server clients and servers! If you work with Flash Media Server 2, you're aware of client-side and server-side programs. You can launch a server-side method with a request from a client-side object. However, you can launch a client-side operation with a client-side request as well and a server-side method with a server-side request. So if you're using Flash Media Server 2, you're just going to have to keep the concepts separate. In a nutshell, the client is the source of a request to an object's method. A quick example shows exactly what this looks like. Example 1.37, “MyObject.as” and Example 1.38, “MyClient.as” make up an application that does nothing except show a client making a request to an object. Example 1.37. MyObject.as package { public class MyObject { private var fire:String; public function MyObject():void {} public function worksForRequest():void { fire="This was requested by a client"; trace(fire); } } } The MyObject class' sole method is worksForRequest(). The method is encapsulated, so it should have only a single way to launch its operations. Fortunately, all we need to do is create an instance of the class, and add the method to the instance to make it work, as Example 1.38, “MyClient.as” shows. Example 1.38. MyClient.as package { import flash.display.Sprite; public class MyClient extends Sprite { var myClient:MyObject; public function MyClient():void { myClient=new MyObject(); myClient.worksForRequest(); } } } The output tells the whole story: This was requested by a client The client in this case is simply an instance of the object whose method it requests. 
The client/object relationship is through the request. Often the client adds specific details through a parameter, but the concept of a request is usually nothing more than invoking the method with an instance of the object that owns the method. To understand why the authors of design patterns encourage programming to interfaces over implementations, you need to first understand the general goal of flexibility and reusability. Second, you need to appreciate the problem of managing dependency in large programs. As we have noted in this chapter, the overall goal of design patterns is to create reusable code. In order to meet this overall goal, the code must be flexible. This does not mean that your application runs better or compiles faster. All it does is help you create code that you can reuse in other projects. The more time and effort you spend, and the larger the team engaged in working with the code, the more important this overall goal becomes. The need for software flexibility leads to the need to manage dependency. When your code depends on a specific implementation, and the implementation changes, your client dependency leads to unexpected results or fails to run altogether. By depending on interfaces, your code is decoupled from the implementation, allowing variation in the implementation. This does not mean you get rid of dependency, but instead you just manage it more flexibly. A key element of this approach is to separate the design from the implementation. By doing so, you separate the client from the implementation as well. As we have seen, you can use the ActionScript 3.0 interface structure or an abstract class to set up this kind of flexible dependence. However, when you use either, you must be aware of the way in which to manage the dependency. If you use an interface structure, any change in the interface will cause failure in all of the clients that use the interface. For example, suppose you have five methods in an interface. 
You decide that you need two more. As soon as you add the two new methods to your interface, your client is broken. So, if you use the interface structure, you must treat the interface as set in stone. If you want to add new functionality, simply create a new interface. The alternative to using the interface structure is to use abstract classes. While the abstract class structure is not supported in ActionScript 3.0 as interfaces are, you can easily create a class to do everything an abstract class does. As we saw in several examples in this chapter beginning with Example 1.3, “PlayVideoAbstract.as”, creating and using an abstract class is simply adhering to the rules that abstract classes follow anyway. For example, abstract classes are never directly implemented. The advantage of an abstract class over an interface is that you won't destroy a client when you add methods to the base class. All of the abstract function must be overridden to be used in a unique manner, but if your client has no use for a new method, by doing nothing, the method is inherited but not employed or changed. On the other hand, every single method in an interface structure must be implemented. A further advantage of an abstract class is that you can add default behaviors and even set up concrete methods inherited by all subclasses. Of course the downside of default behaviors and concrete methods is that a subclass may not want or need the default or concrete methods, and a client may end up doing something unwanted and unexpected if any concrete changes are introduced. Whatever the case, though, management of dependency is easier with the flexibility offered by interfaces and abstract classes over concrete classes and methods. So the decision of whether to use an interface or abstract class depends on what you want your design to do. If the ability to add more behaviors easily is most important, then abstract classes are a better choice. 
Alternatively, if you want independence from the base class, then choose an interface structure. No matter what you do, though, you need to think ahead beyond the first version of your application. If your application is built with an eye to future versions and possible ways that it can expand or change, you can better judge what design pattern would best achieve your goals. As you saw in Example 1.36, “DoBusiness.as”, the program typed the instances to the implementation and not the interfaces as was done in Example 1.20, “MakeSound.as”. This was caused by the key methods being part of classes that implemented an interface and extended a class. Because of the way in which the different display objects need to be employed, this dilemma will be a common one in using Flash and ActionScript 3.0. Fortunately, a solution is at hand. (The solution may be considered a workaround instead of the correct usage of the different structures in ActionScript 3.0. However, with it, you can create methods that require some DisplayObject structure and program to the interface instead of the implementation.) If the interface includes a method to include a DisplayObject type, it can be an integral part of the interface. Because Sprite is a subclass of the DisplayObject, its inclusion in the interface lets you type to the interface when the class you instantiate is a subclass of Sprite (or some other subclass of the DisplayObject, such as MovieClip.) To see how this works, the application made up of Example 1.39, “IVid.as” and Example 1.40, “VidPlayer.as” creates a simple video player. The VidPlayer class builds the structural details of the video playing operations. To do so requires that it subclass the Sprite class. By placing a getter method with a DisplayObject type in the IVid interface, the application sets up a way that the client can program to the interface. Example 1.39. 
IVid.as package { import flash.display.DisplayObject; public interface IVid { function playVid(flv:String):void; function get displayObject():DisplayObject; } } The implementation of the IVid interface includes a key element. The displayObject() function is implemented to the DisplayObject class in building the VidPlayer class. Example 1.40. VidPlayer.as package { import flash.net.NetConnection; import flash.net.NetStream; import flash.media.Video; import flash.display.Sprite; import flash.display.DisplayObject; public class VidPlayer extends Sprite implements IVid { private var ns:NetStream; private var vid:Video; private var nc:NetConnection; public function get displayObject():DisplayObject { return this; } public function playVid(flv:String):void { nc=new NetConnection(); nc.connect(null); ns=new NetStream(nc); ns.play(flv); vid=new Video(); vid.attachNetStream(ns); addChild(vid); } } } Keep in mind that a getter method, using the get keyword, looks like a property, and is treated like one. Any reference to displayObject is actually a method request. The trick is to add the instance to the display list, and at the same time call the method that establishes the instance as a DisplayObject. Example 1.41, “DoVid.as” does just that. Example 1.41. DoVid.as package { import flash.display.Sprite; public class DoVid extends Sprite { //Type as Interface private var showTime:IVid; public function DoVid() { //Play the video showTime=new VidPlayer(); showTime.playVid("iVid.flv"); //Include DisplayObject instance addChild(showTime.displayObject); showTime.displayObject.x=100; showTime.displayObject.y=50; } } } In reviewing how the process works, first, the showTime instance is typed to the interface, IVid. Next, showTime instantiates the VidPlayer class that has all the details for playing the video. By doing so, it inherits the Sprite class as well as the IVid interface. Then the showTime client plays the video using the playVid() method. 
Finally, when the showTime instance is added to the display list with the addChild statement, it is added as both the child of the VidPlayer class and the DisplayObject class by using the displayObject getter. Because the getter, displayObject, is included in the display list, you will not get the following error: 1067: Implicit coercion of a value of type IVid to an unrelated type flash. display:DisplayObject. IVid appears in the error because the instance was typed to the interface instead of the implementation. By slipping in the DisplayObject typed getter method, we avoid the error. Throughout, “BaseClass.as” through Example 1.44, “DoHasBase.as”, “Media.as”, “DoHasBase.as”,, “Delegating to different classes”, “Media.as” though Example 1.54, “TestMedia.as”, “Media.as” through Example 1.54, “TestMedia.as” was used to illustrate composition. It also shows inheritance and instantiation at work. To see this relationship better, consider Figure 1.8, “Relationships of composition, inheritance, and instantiation”. In Figure 1.8, “Relationships of composition, inheritance, and instantiation”, you can see that the Media class delegates to the RecordMedia class. It does this by holding a reference to that class in its definition. (See Example 1.45, “Media.as”.). A number of years ago, we built a web site designed for change. At the time, no thought was given to design patterns, but instead we knew that the site would change based on experience, and were acutely aware of making sure that the site was set up for accepting change without having to redo it. This was a case where the plan worked too well. The site is easy to update and as a result, we really haven't bothered to take the time to rework the site to incorporate new concepts. It's starting to look a little old-fashioned, and we'd like to upgrade the version of Flash so that we can optimize video and all the new features in Flash CS3 and ActionScript 3.0. 
However, the plan illustrates a basic truth about software in general and web development in particular. You're going to spend more time on maintenance and changing a site than you are building it in the first place. As a result, you need to plan for maintenance and extensibility, and not just to get things working right in the first place. Had the web site that had been planned for change been done with design patterns, not only would it be able to adapt to change, we wouldn't have to scrap the current design and start all over from scratch to extend the site. That is, only part of the planning process should address change of a static category. You also need to plan for extending the site to incorporate more than what you originally planned to change. That is, you may need to add new materials to change. While the web site described as built for change has persisted, it has not evolved. The site has been easy to maintain because its main function loads an image, a text file, and menu items for a given product. By either changing the label on an existing button or adding a button, changing and adding products is pretty simple as well. So it has a little extensibility in the sense that it's not fixed to a single product. However, the site really isn't set up for robust extensibility. For instance, adding a blog to the site or changing the way that the menus work would take the same amount of re-structuring as it would to start from scratch. So, while changing products in the site is simple, changing the site is not. Structurally, the site looks something like Figure 1.9, “The Big Function”—The Big Function: It doesn't matter whether the big function is directly instantiated, gained through inheritance, or a delegate of composition. It has no flexibility other than the parameter to tell it what to place on the stage. If its algorithm is changed, it could wreak havoc on its use. You can view it as tough and inflexible because it gets one job done in one way. 
Alternatively, the big function can be seen as dainty and fragile because of its dependency on a single routine, and because it is subject to freeze up when interacting with new elements in the application. In any case, it doesn't lend itself to a flexible site, and we should rethink it. The plan using The Big Function, even though it has limited flexibility, is bound to break down and fail in the long run. To avoid getting stuck with an inalterable application, you need to consider some granularity in your design. In this context granularity refers to the amount of functionality each of your classes has. The trade-off between full functionality and granularity is that the more functionality a class has, the more it will do all by itself. After all, most classes we create are developed to add the functionality of several built-in classes. However, sometimes less is better. The less functionality a class has, the more components in your application its functionality can employ. Figure 1.10, “Granular functions” shows how this granularity might work. The Big Function from the last section has been broken down into three smaller (more granular) functions. Using composition, the functionality of the Big Function is duplicated. However, the granularity gives the developer far more options in the future. In the context of developing a real-world application, your design must look over the horizon. That is, you need to plan for both possible and unknown changes. Figure 1.10, “Granular functions” shows some possible future extensions to the application (those with dashed lines). The value of granularity is that the new classes can use some or all of the more granular functions. That would be impossible with the single Big Function from the previous section. Likewise, new functions can be added and used with the old ones. 
Jennifer Tidwell, in talking about interface design, reminds the designer that whatever else is true, the designer/developer is not the audience (or the client for that matter). Before pulling out your program development tools, you really need to get together with the client and find out what the client wants. The image of the programmer as the guy in the basement with a ponytail (whom we don't let talk to our clients) simply isn't a workable model, especially when you're planning for the Web. Because the Web and Web-related technology are always changing, as clearly evidenced by the changes in Flash and ActionScript over recent years, what can and cannot be accomplished by any software tool is always changing. Because the developer is responsible for knowing the limits and possibilities of software better than anyone, she needs to be part of the process. In larger firms, this role is part of a team of graphic designers, interface designers, human-computer interaction (HCI) designers, and information designers. In smaller firms, the developer may have to fill several roles, but whatever the arrangement, developers need to be part of the process interacting with the client whose business or organization depends on accomplishing a goal. The better the developer understands what the client wants and the better he can communicate the opportunities and limitations of the software to accomplish the goals, the more likely the software produced will accomplish what the client needs for success. The role of object-oriented programming and design patterns is to help the software developer plan for creating a site that keeps the client's site healthy. Keeping a site in good shape depends on the capacity to regularly update it, and to expand it when needed. Too often developers think of a web site as static, but sites are dynamic, living entities—or at least need to be conceived that way. 
Design patterns constitute a set of plans—architectural designs if you will—that provide the tools to keep web sites alive. Flexibility is inherent in software reusability. Both OOP and design patterns were developed with the goal of both reusability and flexibility, and if an application is approached in the most practical manner imaginable, then design patterns make a great deal of sense. So rather than being a set of strict rules for creating great software, design patterns represent flexible tools for creating exactly the kind of site your client needs. Choosing the best design pattern for a particular situation is as much an art as it is a formula. Throughout the book, you'll see that we've included a wide variety of examples, and you may even see a few similar examples with different design patterns. The reason is that most development challenges can be approached from more than a single angle. From one angle, a solution seems good and natural, but from a different angle, another solution seems better. For example, a major project employing a design pattern involved a video player that would be able to play, record, stop, and pause a video using Flash Media Server 2. The solution originally seemed to lie in state machine because of a related project. The "fit" between what needed to be done and the concepts in a state machine seemed to be perfect. From there it was a simple step to the State design pattern. It was tested as a solution, and it worked so well, and had the required flexibility, that it was adopted as the right solution. As you go through the examples in the book, you'll see that the patterns have been organized into three parts: creational, structural, and behavioral. The parts in the book describe the general categories for the design patterns. The chapters within the parts explain how to create the designs in ActionScript 3.0, and give examples and explanations of their actual use. 
In addition to organizing the design patterns into the purposes for which the patterns are designed, the Gang of Four also classified the patterns by scope. Scope refers to whether the pattern applies primarily to object or class. In selecting the design patterns for this book, we selected representative patterns from each of the matrices that these class and object classifications represent. Table 1.1, “Design pattern classifications” shows the design patterns chosen for this book organized by purpose and scope. While this chapter has provided an introduction to key OOP concepts for those who are relatively new to OOP, learning the design patterns should prove useful in learning OOP as well. We might even venture to add that if this is your initial introduction to OOP, you will find what some consider the correct way to understand OOP. (Others might even contend that it is the only way to understand OOP.) We spent time on OOP because a sizable portion of ActionScript programmers may not have gotten around to its use yet. After all, ActionScript itself is relatively new to OOP structures. Alternatively, if you're an old trooper with OOP, either from ActionScript or another language such as Java or C++, we hope that our discussion of design patterns will help you better apply OOP concepts in a design pattern. Whatever your background in OOP, the Gang of Four recommend the following design patterns for those of you who are relatively inexperienced in object-oriented programming: Factory Method (Chapter 2) Strategy (Chapter 11) Template Method (Chapter 9) However, if these patterns seem in any way daunting, do not worry. We don't know of anyone who fully grasped design patterns on the first go-around, including the patterns suggested by GoF. We certainly didn't. 
However, by using, changing and experimenting with the examples we have provided, along with going over the explanation of OOP and design pattern concepts in the chapters, we believe that you'll come to see them in the same light as we do—the best friend a programmer could have. If you enjoyed this excerpt, buy a copy of ActionScript 3.0 Design Patterns.
http://www.oreillynet.com/lpt/a/7640
CC-MAIN-2014-41
refinedweb
13,776
55.03
How to set up an obscenely easy, AI-API-using tweetbot in Python Using Tweepy and some image recognition public APIs to help Twitter users with an all-important question — IsItNSFW? I love learning on the go. The approach is particularly useful for me this year. 2019 in my case means a professional transition from humanities (focus on translation) to tech (focus on NLP/ML). Here’s one example of how quickly one can apply freshly-acquired skills. Also, this story comes in a series of threes. A month ago, after a poke from Piotr Migdał, I described how to fool neural networks with seemingly pornographic optical illusions. As a spin-off and to learn a new skill, I created a simple tweetbot. @IsItNSFW, upon receiving an image, will respond with the scores a handful of AI image detection models give it. To create it, I needed to put together three elements: - Twitter interaction via Tweepy, - connecting to AI detector APIs, - and somewhere to host my script. Twitter interaction via Tweepy For the first module, Tweepy was my choice after Googling for accessible Twitter-wise Python libraries. Again, there are three Tweepy-related parts of the code: - linking a Twitter account to the script, - defining a function that will reply to tweets, - setting a stream Prerequisite: Twitter account setup I started by creating an account my bot will be using on Twitter. Then I headed to the developer dashboard in order to create a new app and obtain the API key, API secret key, Access token and Access token secret. I noted them down in a safe place. In fact, I stored them straight away in a separate file called secrets.py. That way I can share the rest of the code on GitHub without needing to worry about the secrecy of my API access keys:

#Twitter API
consumer_key = '...'
consumer_secret = '...'
access_token = '...'
access_secret = '...'

Connecting to Twitter Only now do I start to write the main script (nsfw_bot.py in my case). 
I used the newly-created credentials to connect to Twitter using Tweepy’s OAuthHandler class: auth = tweepy.OAuthHandler(consumer_key, consumer_secret)auth.set_access_token(access_token, access_secret)api = tweepy.API(auth) The api variable will be the entry point for the operations I will perform with Twitter — in this case, posting replies with the image scores. Tweepy’s API class provides access to the entire Twitter RESTful API methods (see API Reference for more information). Replying function Then I set the tweet_image_ratings() function exactly for that. The function accepts — you guessed it — 3 arguments: (pic_url: [the tweeted image source], username: [the username of the user that tweeted the image], status_id: [the id of the tweet]) My function consults the image with all the three image detectors and replies to the tweets with the corresponding scores and a retweet of the image. In order to retweet, I need to temporarily store the image as temp.jpg: def tweet_image_ratings(pic_url, username, status_id): # Take the pic url and tweet the various NSFW ratings clarifai_score = check_clarifai(pic_url) deepai_score = check_deepai(pic_url) sightengine_score = check_sightengine(pic_url) filename = 'temp.jpg' request = requests.get(pic_url, stream=True) if request.status_code == 200: with open(filename, 'wb') as image: for chunk in request: image.write(chunk) api.update_with_media(filename, status='Is it NSFW, @'+username+'?\n' + clarifai_score+deepai_score+sightengine_score, in_reply_to_status_id=status_id) The last Tweepy-related bit to fill in — tweet_image_ratings() arguments, that is, the original tweet data. How to get those? Streaming In order to keep the connection open and be able to respond to all incoming and upcoming tweets, I need the streaming API. Tweepy makes it easier to use the Twitter streaming API by hiding authentication, session handling and reading incoming messages under the hood. I used the streaming API in two steps. 
- I created a class inheriting from StreamListener, overriding the on_status method. BotStreamer captures the username and tweet id. If an image is accompanying the tweet, it passes the three arguments the tweet_image_ratings() function described above: class BotStreamer(tweepy.StreamListener): # Called when a new status arrives which is passed down from the on_data method of the StreamListener def on_status(self, status): username = status.user.screen_name status_id = status.id if 'media' in status.entities: for image in status.entities['media']: tweet_image_ratings(image['media_url'], username, status_id) - Using that class, I instantiated a Stream object and connected to the Twitter API using it: myStreamListener=BotStreamer() stream=tweepy.Stream(auth, myStreamListener) stream.filter(track=['@IsItNSFW']) Connecting to AI detector APIs The last part of nsfw_bot.py I wrote concerns connecting to the nudity detectors and retrieving their scoring for a given picture. It may surprise you, but I chose three: For each of these, I had to create an account and an API key, immediately stored in secrets.py. In all three cases, the pricing of the service includes a free tier that is absolutely sufficient for my light use. As for the code itself, in the case of Clarifai and Sightengine we are presented with a dedicated library. DeepAI uses a simple requests query. In any case, there are pythonically few lines to write: def check_sightengine(pic_url): try: client = SightengineClient(sightengine_user, sightengine_secret) output = client.check('nudity').set_url(pic_url) score = 'Sightengine: ' + \ str(round(100*output['nudity']['raw']))+'% NSFW' except: score = '' return score In all 3 cases, I included a try/except clause for any error handling. This way the bot will only tweet about the successfully retrieved scores. And that’s it! We’re done with a simple bot. Time to test it… Check out the complete code of nsfw_bot.py in this GitHub repo. Where to host my script? 
That was the last issue I had to solve. Running it from my computer was not a sustainable option. Literally the first service I stumbled upon was PythonAnywhere. Straightforward, easy to apply with a fairly generous free use tier and helpful staff — it won my heart from the start. I simply uploaded both nsfw_bot.py and secrets.py to PythonAnywhere. The service comes with a full Python environment installed, so the only thing left to do was open a Bash console in PyAnywhere and run my script. I was still concerned about the infallibility of the bot out in the wild — will it crash on some unavoidable or unpredictable connection error? Googling for a solution, I found the following trick: from subprocess import Popen import sys import datetimefilename = sys.argv[1] while True: print('\n'+str(datetime.datetime.today())+"\nStarting " + filename) p = Popen("python " + filename, shell=True) p.wait() The miniscript I called forever.py opens the [filename] as a new subprocess every time it produces an error. I added a print(datetime.datetime.today()) statement to have a log on the frequency the script crashes. Final thoughts The main purpose of this story is to show how little fuss is needed to set up a minimal working tweetbot connecting to several APIs. I was writing it in a public cafe, and realized a limitation of the @IsItNSFW bot itself — after all, many people may be too embarrassed to upload a (verging on a) kinky photo on their Twitter and have it publicly retweeted. That said, use and abuse the bot, the repo and please share the results here. Maybe set up a bot that writes image captions? Or creates images from text? The APIs are there for you to grab. My thanks go to Piotr Migdał for the initial prod and invaluable feedback.
https://medium.com/@marekkcichy/how-to-set-up-an-obscenely-easy-tweetbot-in-python-7837d776e706?utm_source=aidigest&utm_medium=web&utm_campaign=featured
CC-MAIN-2019-30
refinedweb
1,228
57.27
So you’ve decided to build a React component. You know what it will do, what it’s called, and where it’ll go. But when you open your editor and go to write the first line, you’re confronted with a decision between three different ways to declare it. Why three? And which one should you use? Luckily, this decision doesn’t need to be a hindrance. In fact, with a little history and two simple rules, it’ll only take a few seconds to get on with the job of writing your component. And without further ado, here’s the first rule: Rule 1: If your component needs to access this, use ES6 Classes Stateless functions don’t have a this object; if you need anything defined on this other than props, you’ll need a component class. But this.state and this.refs are important parts of many useful React components. Isn’t it a little weird to have this limitation? Why don’t stateless functional components have this? As you may know, JSX tags compile to plain old JavaScript objects — not HTML. Let’s take a <div /> tag for example. Passing it to the JSX compiler will return this JavaScript: React.createElement('div') And as React’s documentation shows, this just returns a ReactElement – i.e. a plain JavaScript object. The structure of this ReactElement looks something like this: { type: 'div', props: {}, ref: null, key: null } But this presents an interesting problem; if each call to your component’s render function produces a new object, how is it that its this object continues to contain the same state and refs between renders? Indeed, how is it that a plain old JavaScript object is connected to the DOM at all? React Elements and Class Instances Ok, I was being a little tricky above. I purposefully omitted an important step. Remember that when you create a ReactElement with a JSX <div />, it isn’t connected to the DOM. It isn’t until you call ReactDOM.render on an element that it and its children are rendered to the DOM. 
But creating DOM nodes is not all that ReactDOM.render does – it also creates ReactComponent instances. But what are these ReactComponent instances? Think about it this way: if a ReactElement is an instruction to React that you need an instance of your component, a ReactComponent is the actual component. Or to put it another way, rendering a ReactElement lets React know that we need a ReactComponent. Let’s check your understanding with a quick quiz. Given the above explanation, do you know what type of object your component’s this is? Touch or hover your mouse over the box below to check your answer: thisis a ReactComponentinstance. Each ReactComponentholds state, refs, etc. Stateless Functional Components But stateless functional components aren’t classes – they’re functions. They don’t have a persistent this property to attach a ReactComponent instance to. But while a this object expands a component’s possibilities, it also has a cost: keeping a ReactComponent linked up with its ReactElement objects takes work. By using stateless functional components, you’re signalling to React that you don’t need that ReactComponent. From this, it is able to make a bunch of optimisations — which while currently modest, are set to grow. But even before these performance improvements materialise, there are a number of things your components won’t have to do if they’re classless. Which brings us to the second rule. Rule 2: If your components need lifecycle methods, use ES6 classes Without a common this property, stateless functional components wouldn’t be able to communicate with their lifecycle methods. As a result, React doesn’t provide them. And as this enlightening comment in the React codebase shows, this can save a lot of work: /** * ------------------ The Life-Cycle of a Composite Component ------------------ * * - constructor: Initialization of state. The instance is now retained. 
* - componentWillMount * - render * - [children's constructors] * - [children's componentWillMount and render] * - [children's componentDidMount] * - componentDidMount * * Update Phases: * - componentWillReceiveProps (only called if parent updated) * - shouldComponentUpdate * - componentWillUpdate * - render * - [children's constructors or receive props phases] * - componentDidUpdate * * - componentWillUnmount * - [children's componentWillUnmount] * - [children destroyed] * - (destroyed): The instance is now blank, released by React and ready for GC. * * ----------------------------------------------------------------------------- */ You may have noticed that the above two rules have something in common: they’re both about the situations in which stateless functional components can’t be used. As a result, you don’t even really need two rules; they could be merged into one: Golden Rule: If you can use a Stateless Functional Component, then do so. Of course, sometimes you can’t use a stateless functional component. In these cases, I recommend React.Component over React.createClass. And to find out why, let’s learn a little history. A little history As you probably already know, React didn’t have stateless functional components until quite recently. In a way, this helps to explain why you’d want to use them when possible — they wouldn’t have been added unless there was a clear reason to do so. You also may know that React.Component is a relatively recent addition to React. This gives you a clue as to why it’d be preferred over React.createClass. For another clue, look at the naming of their respective locations in the React repository — modern vs. classic. But more concretely, React.Component has a much smaller API than React.createClass. A smaller, equally powerful API Where React.createClass has mixins, ES6 Classes are well suited to Higher Order Components. While these may feel unfamiliar at first, they’ll soon feel far more natural than the alternative. 
HOCs give you more power than mixins in a more intuitive package. And the fact that a HOC is just vanilla JavaScript means that you don’t have to worry about them being deprecated. Similarly, where React.createClass has auto-binding of function methods, Babel and ES6 Classes let you bind only the required methods. This improves performance, while again decreasing the probability of your code being deprecated. And with that, you now have the tools you need to choose how to define your components. In summary: - If you can use stateless functional components, do so - Otherwise, use ES6 Classes But while this will get you over the first hump, there are many more decisions you’ll need to make. For a start, how are you going to style your new component? Will you use inline style, CSS Modules, or maybe Pacomo? And even once you’ve got your styles sorted, do you know how you’ll store your component’s state? Will you use setState, Redux, or maybe something else? Now you could find this all out the hard way: with Google and trial and error. Or, you could just join my Newsletter — next month I’ll be sending out a decision chart covering styling, flux, build systems, and more. Of course, it will be exclusive to subscribers! Some real great reads on your blog, keep them coming :). AFAIK React won’t re-render if a stateless component receives same props & context. Which means “PureRenderMixin” is implied with stateless functional components. So be careful to use this mixin when using classes. See for ES6 & React mixins. I’m not sure what you mean by “be careful to use this mixin when using classes”? It seems to me like a bad idea to add a shallow equals comparison to all classes unless you need the performance improvement. See this GitHub issue on the React repository for more reading – shouldComponentUpdate is more meant as an escape hatch for performance improvements than something which necessarily must be implemented on every component. 
Great post, when you say: ” if a ReactElement is an instruction to React” didn’t you mean, ” if React.createElement is an instruction” ? While its true that React.createElementis an instruction in a way, all it does is return a plain object (i.e. a ReactElement). Passing that ReactElementto ReactDOM.renderis what actually creates the ReactComponent. So, you can think of the ReactElementas the instruction which is passed to ReactDOM.render. Great read – thanks. “if you need anything defined on `this` other than `props`, you’ll need a component class.” `context` is also supplied to a stateless functional component in the 2nd argument. I don’t see a reason why one should use ES2015 class components over React.createClass. Both are well suited to use with HOCs, that’s the beauty of HOCs, not of classes. I’m not against class components, but I don’t see here a real reason which explains why I must use classes instead of React.createClass. I personally prefer the latter, because it’s just a function call and I don’t want to bring new entities into the code if there’s no real benefit of using them. +1 – I think the justification for using ES6 Classes here needs a bit more explanation. You could still use stateless functional components if all you want to do with “this” is to get a handle to the DOM node. const AddTodo = ({ onAddClick }) => { let input; return ( { input = node; }} /> { onAddClick(input.value); input.value = ”; }}> Add Todo ); }; Example from Dan Abramovs video Hey James, thanks for your great article! Helped a lot seeing through the mess. As a beginner in the react world and in the modern web area I could say (I remember like 7 years ago, choosing your tools was easier as we didn’t have that many options), I am having a hard time making choices which is impacting my mood haha Thanks again! Thanks for the knowledge drop. Always appreciated. Doh! 
The code has moved the relevant lines out of the comment your “enlightening comment” links to: Maybe pick a commit hash and link to that? Great post! Thanks for the Post
http://jamesknelson.com/should-i-use-react-createclass-es6-classes-or-stateless-functional-components/
CC-MAIN-2017-34
refinedweb
1,636
65.12
This C Program Counts the Number of Words in a given text Or Sentence. Here is source code of the C Program to Count the Number of Words in a given text Or Sentence. The C program is successfully compiled and run on a Linux system. The program output is also shown below. /* * C Program to Count Number of Words in a given Text Or Sentence */ #include <stdio.h> #include <string.h> void main() { char s[200]; int count = 0, i; printf("enter the string\n"); scanf("%[^\n]s", s); for (i = 0;s[i] != '\0';i++) { if (s[i] == ' ') count++; } printf("number of words in given string are: %d\n", count + 1); } $ cc string2.c $ a.out enter the string welcome to sanfoundry's c-programming class! number of words in given string are: 5 Sanfoundry Global Education & Learning Series – 1000 C Programs. Here’s the list of Best Reference Books in C Programming, Data-Structures and Algorithms If you wish to look at programming examples on all topics, go to C Programming Examples.
http://www.sanfoundry.com/c-program-count-words-in-sentence-2/
CC-MAIN-2014-52
refinedweb
175
73.98
#include <pcap/pcap.h> int pcap_get_selectable_fd(pcap_t *p); Some network devices opened with pcap_create(3PCAP) and pcap_activate(3PCAP), or with pcap_open_live(3PCAP), do not support those calls (for example, regular network devices on FreeBSD 4.3 and 4.4, and Endace DAG devices), so PCAP_ERROR is returned for those devices. In that case, those calls must be given a timeout less than or equal to the timeout returned by pcap_get_required_select_timeout(3PCAP) for the device for which pcap_get_selectable_fd() returned PCAP_ERROR, the device must be put in non-blocking mode with a call to pcap_setnonblock(3PCAP), and an attempt must always be made to read packets from the device when the call returns. If pcap_get_required_select_timeout() returns NULL, it is not possible to wait for packets to arrive on the device in an event loop.. Note that in: select(), poll(), and kevent() do not work correctly on BPF devices; pcap_get_selectable_fd() will return a file descriptor on most of those versions (the exceptions being FreeBSD 4.3 and 4.4), but a simple select(), poll(), or kevent() call will not indicate that the descriptor is readable until a full buffer's worth of packets is received, even if the packet timeout expires before then. To work around this, code that uses those calls to wait for packets to arrive must put the pcap_t in non-blocking mode, and must arrange that the call have a timeout less than or equal to the packet buffer timeout, and must try to read packets after that timeout expires, regardless of whether the call indicated that the file descriptor for the pcap_t is ready to be read or not. (That workaround will not work in FreeBSD 4.3 and later; however, in FreeBSD 4.6 and later, those calls work correctly on BPF devices, so the workaround isn't necessary, although it does no harm.) 
Note also that poll() and kevent() don't work on character special files, including BPF devices, in Mac OS X 10.4 and 10.5, so, while select() can be used on the descriptor returned by pcap_get_selectable_fd(), poll() and kevent() cannot be used on it in those versions of Mac OS X. poll(), but not kevent(), works on that descriptor in Mac OS X releases prior to 10.4; poll() and kevent() work on that descriptor in Mac OS X 10.6 and later. pcap_get_selectable_fd() is not available on Windows.
https://www.tcpdump.org/manpages/pcap_get_selectable_fd.3pcap.html
CC-MAIN-2019-04
refinedweb
396
68.5
13. FAQS¶ 13.1. Installation issues¶ 13.1.1. ValueError: unknown locale: UTF-8 under Mac OS X 10.7 - Lion¶ The installation with PIP is successful but I get a “ValueError: unknown locale: UTF-8” under Mac OS X 10.7 - Lion when typing from bioservices import *. One solution is to fix your environment by typing the following code in a shell: export LANG="it_IT.UTF-8" export LC_COLLATE="it_IT.UTF-8" export LC_CTYPE="it_IT.UTF-8" export LC_MESSAGES="it_IT.UTF-8" export LC_MONETARY="it_IT.UTF-8" export LC_NUMERIC="it_IT.UTF-8" export LC_TIME="it_IT.UTF-8" export LC_ALL= You can check if it works by typing python -c 'import locale; print(locale.getdefaultlocale());' If this works without error, then it is fixed and you should be able to import bioservices. If so, make this solution persistent by adding the code into your environment. For that, just copy and paste the code in a file called .bashrc_profile (or .bashrc) 13.2. General questions¶ 13.2.1. How can I figure out the taxonomy identifier of the mouse ?¶ You can use the Taxon class that uses Ensembl/UniProt/Eutils depending on the tasks. Here, we do not know the scientific name or taxonomy identifier of the mouse. We can use the search_by_name function: Warning Taxon class is not part of BioServices but some utilities have been added to BioKit (github.com/biokit) Changed in version 1.3. In earlier versions of BioServices, you could use: >>> from bioservices import Taxon >>> t = Taxon() >>> t.search_by_name("mouse") u'10090' But this is now in BioKit: >>> from biokit import Taxonomy >>> t = Taxonomy() >>> results = t.fetch_by_name('mouse') >>> results[0]['id'] u'10090'
For instance with Kegg, you can convert all human (hsa) Kegg Id to uniprot Id with: from bioservices import * s = KEGG() kegg_ids, uniprot_ids = s.conv("hsa", "uniprot") Or you can use the uniprot mapping function: from bioservices import * u = UniProt() u.mapping(to="KEGG_ID", fr="ACC", query="ZAP70_HUMAN") 13.3. Specific Usage¶ 13.3.1. Why my uniprot request takes forever ?¶ This may happen. Consider: from bioservices import * u = UniProt() u.search("P53") This request performed on UniProt web sites is actually pretty fast but there are 386 pages of results. In BioServices, the search commands reads the 386 pages of results and then stores the result in a variable. So it may take a while. More generally if a request returns a very long result, it may take a while. You can use the socket module: import socket socket.setdefaulttimeout(5.) After 5 seconds, the read() call will stop returning whatever has been read so far. 13.3.2. KEGG service¶ 13.3.2.1. Is it possible to get the pathway information for multiple proteins ?¶ Currently there is no such function. You can only retrieve pathways given a single protein Id. However, you can easily write such a function. Here is the code for 2 proteins: >>> p1 = k.get_pathway_by_gene("7535", "hsa") # correspond to ZAP70 >>> p2 = k.get_pathway_by_gene("6885", "hsa") # 6885 correspond to MAP3K7 >>> [k1 for k1 in p1.keys() if k1 in p2.keys()] ['hsa04660', 'hsa04064'] There are 2 pathways containing the proteins 7535 and 6885. 13.4. Interest of the BioServices classes REST and WSDL ?¶ There are a few technical aspects covered by BioServices to ease our life when adding new modules such as timeout, long request, headers, and so on. 13.4.1. What is the difference between GET and POST¶ When the user enters information in a form and clicks Submit , there are two ways the information can be sent from the browser to the server: in the URL, or within the body of the HTTP request. The alternative to the GET method is the POST method. 
This method packages the name/value pairs inside the body of the HTTP request, which makes for a cleaner URL and imposes no size limitations on the forms output. It is also more secure.
http://bioservices.readthedocs.io/en/latest/faqs.html
CC-MAIN-2017-30
refinedweb
683
68.97
import range to clone the submission sheet but its not working as beforeAsked by steveclampitt on July 03, 2015 at 07:57 AM. - JotForm Support If I understand you correctly, it seems like the integrated Google spreadsheet is not cloning to one of your spreadsheets. Is that correct? If the integrated spreadsheet is still fetching the submissions to your form, the problem might be more related to the setup you have using the ImportRange. I would advise checking the function you are using, the spreadsheet key or the name, there might have been changes. We'll wait for your response. the new spreadsheet is not pulling the data across until i go onto the integrated spreadsheet (the one created by jotform) and edit a cell then it updates across to my other spreadsheet with the new submission but the past week it has been automated and i have not needed to change the integrated spreadsheet what has changed and my formula is fine: =IMPORTRANGE("1d47MLoDnLX_f51yUbdnhCxncYOx7VAo5q01CJDOZd6A", "responses 1!A:A") as i said its been working up to today, and i have an equivalent set up with Google forms which is picking up and updating instantly... o me it looks like Google is not noticing a change has been made until something is changed in the integrated spreadsheet - JotForm Support I'm sorry, but I'm not a pro with Google's ImportRange function. I've found this solution, please try doing the following. 1. In the spreadsheet, click the "File" option 2. Find and click the "Spreadsheet Settings" 3. Change Recalculation from Onchange to Onchange and Every Minute: I hope this helps. If not, please let us know so we can look into that further. If possible, share with us the spreadsheets so we can check and try it. Thanks im afraid not i already have that set up, google is not picking up the jotform entry and for some reason not updating it does not pick up the data unless an edit is made to the cell, is there a setting on jotform i have missed or could use ? 
- JotForm Support I've found this solution: Or this one: The problem is the same as yours when external file is the source file of the importrange function. I think this should help. Thanks nothing suggested is working this is weird because as i said it was working for a week then just stopped, what i want to know know is there an easy way to clone or edit the submission spreadsheet without it messing up because i need to run functions and scripts hence why im using the clone otherwise i would just do it on the main document. - JotForm Support We do not recommend doing any modification in the form spreadsheet. Otherwise, that will break the integration and you have to start over to fix it and that will create a new sheet and there's no way you can re-use the same spreadsheet later. I'm not sure what's causing that to happened, but based on some results I've found, that is an issue with Google Spreadsheet's ImportRange function. Especially if the data is added through another automation such as the integration. Some used the ImportRange without any issues because the data is coming from the other sheet with manual input so onEdit is triggered. I think you should add a script in the script editor that would trigger the edit when a data is added in a row in the form spreadsheet. I can't seem to find it, but I think that is possible. I will try to find more link about this problem and let you know. Thanks - JotForm Support @peplow.jeremy Hi, for us to better assist you, I went ahead and opened a separate thread for your concern. Please refer to this link instead:. We will address it accordingly. Thank you. The google sheet add-on, Import Sheet, can also solve this problem and auto update your target sheet with the source data. Check in out here:
https://www.jotform.com/answers/602481-I-am-using-import-range-to-clone-the-submission-sheet-but-its-not-working-as-before
CC-MAIN-2017-17
refinedweb
679
68.1
This site works best with JavaScript enabled. Please enable JavaScript to get the best experience from this site. Quote from Tannerbarnman Spiderwebs. Quote from AbrarSyed my tut series... it should help a lot. Quote from Metasponge64 Add sponges. My idea for crafting is like TNT exept replace the gunpowder with wool and the sand with no block. such as Crafting Fire by Combining a torch with a block of wood or Crafting 9 Blocks of water by combining 9 buckets of water and Vice Versa If there is a block you wish for me to make a crafting recipe for then PM Me or post it here and i will try and do what I can Downloads None Yet Screenshots Recipes Combining These Materials Will Yeild 9 Water Blocks Allowing for easy stacking of water Combining These Materials Will Yeild 9 Lava Blocks Allowing for easy stacking of lava Combining These Materials Will Yield 2 Fire Blocks So You Can Finaly Craft ChainMail Armor Combining These Materials Will Yield 8 Ice Blocks Now you can Legitimitly Collect Ice Blocks Combining These Materials Will Yield 3 Cobwebs So that you can Now Obtain Them In Minecraft YAY Combining These Materials will Yield 3 Obsidian You Can Also Use Actual Water Blocks And Actual Lava Blocks In The Same Way to Create Obsidian Via Crafting Combing These Materials Will Yield 8 SoulSand Making it possible to get soulsand without having to go to the Nether (Blank Square Is where you Put a Block of Pure Fire) Combining These Materials Will Yield 2 Netherack YAY Now the Nether Pretty Much is Useless Except for Getting Glowstone Combining These Materials Will Yield you 5 Bedrock Now you can get bedrock legitimatly Combining These Materials Will Give you 2 Saplings Now you can be a Herboligist and do some Herbology and Make Saplings Combining These Somewhat hard to get Materials will get you 1 Mob Spawner OMG Yay Combining These Materials will Give you 9 Mossy Cobblestone A Very easy Way To Harvest Mossy Cobblestone Those Are all the recipes I have made so far If 
someone can tell me how to compile the source code into a set of .class files for a mod that would be greatly helpful please pm how to compile them I Was Orginally Using Mod Maker But Now I Am using a Specific Tutorial for Making recipe mods for Minecraft 1.7.3 If there is any recipes you want me to add Then Post it Now Please As The Mod Download Should Be Up for download within 30 Minutes A Challenge Box that is Eternally Dark! I Have added those they are craftable with 5 slimeballs and 4 string A Challenge Box that is Eternally Dark! Added Recipe Images to The First Post A Challenge Box that is Eternally Dark! A Challenge Box that is Eternally Dark! that would help greatly thanks A Challenge Box that is Eternally Dark! Im Now Using a Tutorial A Challenge Box that is Eternally Dark! it should help a lot. This is My Recipe code so far package net.minecraft.src; import java.util.Random; public class mod_craft extends BaseMod { public mod_craft() { ModLoader.AddRecipe(new ItemStack(block.waterMoving,9), new object [{"1","","", Character.valueOf("1"), item.waterbucket } public String Version() { return "3.14159265"; } } but when i try to recompile it in mcp I Get 8 Errors A Challenge Box that is Eternally Dark! I Can add sponges but Changing the tnt recipe to something like that is a No Because Im trying to make the recipe's as realistic as Possible Like Making Bedrock with 5 stone 3 lava 1 obsidian A Challenge Box that is Eternally Dark! and create them properly A Challenge Box that is Eternally Dark!
https://www.minecraftforum.net/forums/mapping-and-modding-java-edition/minecraft-mods/mods-discussion/1363956-mo-craftables
CC-MAIN-2021-25
refinedweb
627
62.51
PWM Brightness Control You may notice the breathing light on your phone when a new message comes. It smoothly changes from dark to bright and back to dark. In this project, let's try to realize it using an LED. What you need - SwiftIO Feather (or SwiftIO board) - Breadboard - LED - 330ohm resistor - Jumper wires Circuit Let's build the circuit as below. - Place the LED onto the breadboard. - The long leg (anode) of LED goes to PWM4B through a resistor. - The short leg (cathode) of LED connects to the ground. Example code You can find the example code at the bottom left corner of IDE: / GettingStarted / PWMBrightnessControl. // Brighten or dimming the LED by changing the duty cycle of a PWM signal. // Import the library to enable the relevant classes and functions. import SwiftIO // Import the board library to use the Id of the specific board. import MadBoard // Initialize the PWM pin the LED connects, with other parameters set to default. let led = PWMOut(Id.PWM4A) // Initialize a variable to store the value of duty cycle. var value: Float = 0.0 // Change the brightness from on to off and off to on over and over again. while true { // Brighten the LED in two seconds. while value <= 1.0 { led.setDutycycle(value) sleep(ms: 20) value += 0.01 } // Keep the duty cycle between 0.0 and 1.0. value = 1.0 // Dimming the LED in two seconds. while value >= 0 { led.setDutycycle(value) sleep(ms: 20) value -= 0.01 } // Keep the duty cycle between 0.0 and 1.0. value = 0.0 } Background Pulse Width Modulation (PWM) Pulse Width Modulation (PWM) can simulate analog results digitally. The signal is still a square wave that switches between on and off. The duration of the "on-time" is called the pulse width. So this technique will change the duration of high level relative to low level. In this way, it will simulate the voltage between fully open (3.3 volts) and off (0 volts). 
In this case, if you repeat this switching pattern with LEDs fast enough, the signal seems to be a stable voltage between 0 and 3.3v. And the LED would show different brightness. Now come more concepts. A fixed time period consists of on and off time. The duration or period is the inverse of the PWM frequency. For example, when the PWM frequency is 500 Hz, one period is 2 milliseconds.. Why PWM instead of analog signal? Unlike incandescent light bulbs, LEDs (and some other devices) can only operate under certain voltages. Lowering the voltage on LEDs wouldn't result in lower brightness. The LED will turn off if the voltage isn't high enough. However, you can use PWM to control the overall power output of the LEDs. The LED is flashing, but it is imperceptible to your eyes. So it seems that this LED is darker or brighter. BTW, most mobile phones use the same method to control the brightness of the screen. However, if the flashing frequency is too low (the screen is very dark), it may be harmful to your eyes. Code analysis let led = PWMOut(Id.PWM4A) Initialize the PWM pin the LED connects. You may notice the pin names of PWM are a little strange, with "A" or "B" after the number. Since there are 14 pins for PWM in total, some pins are paired, like PWM3A and PWM3B. Two paired pins can only share the same frequency. var value: Float = 0.0 The duty cycle would change all the time to change the brightness of the LED. So you need a variable to store its value. var is the keyword to declare a variable. Just like its name, its value can always change after it has been assigned. The value is explicitly declared as a floating-point number type. This is very important when the type is easy to be confused. For example, 0.0 could be float or double. And each numeric type has a different range of numbers. while value <= 1.0 { led.setDutycycle(value) sleep(ms: 20) value += 0.01 } It ensures the value is not bigger than 1.0 and gradually brightens the LED. 
The setDutycycle(_:) method allows you to set the duty cycle. Each time, you will gradually increase the value by 0.01. The brightening process lasts for 2 seconds. To ensure a smooth brightness change, you need to set an appropriate value change and sleep time. value = 1.0 After finishing the first loop, the value is about 1.01. So value = 1.0 is to keep it in the specified range. The second while loop is similar, but dims the LED. Reference PWMOut - set the PWM signal. init(_:frequency:dutycycle:)- initialize a PWM pin. You need to tell the id of a specified pin to initialize it. The frequency and duty cycle have their default value. You can set their values or change them later. setDutycycle(_:)- set the on-time of the signal to change the average voltage. MadBoard - find the corresponding pin id of your board.
https://docs.madmachine.io/tutorials/general/getting-started/pwm-brightness-control
CC-MAIN-2022-21
refinedweb
843
78.35
Creates a new Slapi_DN structure and sets a DN value. #include "slapi-plugin.h" Slapi_DN *slapi_sdn_new_dn_passin(const char *dn); This function takes the following parameter: The DN value to be set in the new Slapi_DN structure. This function returns a pointer to the new Slapi_DN structure with DN value set to the content of dn. This function creates a new Slapi_DN structure and initializes its DN with the value of dn. The DN of the new structure will point to the string pointed to by dn. The FLAG_DN flag is set and the internal counter is incremented. The memory is allocated by the function itself.
http://docs.oracle.com/cd/E19693-01/819-0996/aainf/index.html
CC-MAIN-2017-04
refinedweb
105
65.12
Countdown to Ext JS 4: Developer Preview On behalf of the entire Ext JS team, I am excited to announce the immediate availability of the first Ext JS 4 Developer Preview. Ext JS 4 is by far the biggest overhaul we’ve ever made to Ext JS, and constitutes the most advanced JavaScript framework ever created. Almost every area of the framework has been upgraded, from the generated HTML to the class system. We’ve unified APIs, added incredible new capabilities, and improved the performance of the entire framework. With Ext JS 4 we’ve been driven by three key goals: speed, robustness and ease of use. We wanted the framework to be as fast and as robust as possible on every browser, and to be easy to learn and use. To achieve this we took the whole framework back to the drawing board, and what we’ve come back with is the fastest, most bullet-proof version of Ext JS we’ve ever created. Best of all, we’ve managed to do it while staying true to the core experience of writing apps “the Ext JS way.” Today’s preview release (and examples) contains almost all of the components that will ship in the final version. We’ve kept a couple back to give them some extra polish, notably Tree and Accordion. We’ve also ported the default Blue Theme from Ext JS 3 — a task that was made easy by the brand new SASS-based theming system. There’s so much to cover that we can’t hope to fit it all into a single blog post so we’ll continue our countdown to Ext JS 4 series over the coming weeks. Ext JS 4 Developer Preview is available to download today. Download Now A Framework you already know Ext JS has long been the leading JavaScript framework for creating web applications, and with version 4 we aim to continue that heritage. One of the most important aspects of any major upgrade is continuity – continuity of the API, of look-and-feel, and of backwards compatibility with previous versions. With Ext JS 4 we’ve taken two enormous steps forward to meet this goal. 
Firstly, Ext JS 4 is completely sandboxed; there is no more native object augmentation or reliance on global variables. This means you can run Ext JS 4 alongside Ext JS 3 on the same page. To demonstrate this we’ve taken the best of both versions and combined them in a single page. We’ve used the Desktop example from Ext JS 3 and loaded Ext JS 4’s brand new charts alongside as if they were part of Desktop itself – check it out for yourself. The second step is to provide as strong a legacy layer as possible to help you upgrade your Ext JS 3.x applications. We’re starting that today with the release of a detailed overview guide to the differences between the versions, and will update this as we continue our rollout. In addition to the guide we will be providing a legacy file that can be dropped in to an Ext JS 3.x application to help you upgrade your app. We’ll be expanding on both of these in another post very soon. Ext JS 3 and Ext JS 4 running in the same page. View Example Awesome New Features Charting Last week we unveiled Ext JS 4’s brand new drawing and charting package. The reception it got was unbelievable and we know it’s one of the most anticipated parts of Ext JS 4. For those who didn’t see the original announcement, Ext JS 4 draws gorgeous charts using SVG, Canvas and VML — with no need for any plugins. The charting system is integrated with the new data package and fully animated in every browser. Data The data package is what loads and saves all of the data in your application, and it saw a massive upgrade for version 4. With the new Model class, it’s easy to handle almost any kind of data on the client side. Validations are performed at the Model level and the new Associations API makes it possible to set up relationships between your models. 
The expanded Proxy class can now be used directly on a Model, meaning you can load and save data without the need for a Store, and the new localStorage Proxy enables you to save data on the client with a single line of code. Multiple sorting, filtering and grouping is now all possible on the client side, and the new Readers even understand nested data sets. The data package underpins most of the components in the framework, and we’ve written extensively about it in recent posts: Grid The grid is one of the centerpieces of Ext JS. It’s an incredibly versatile component that provides a great way to view lots of data at once, formatted exactly how you need it. With Ext JS 4 we have overhauled the grid, making it faster, lighter and easier to customize. We started by drastically reducing the HTML markup for the grid. In previous versions the grid would always render markup that was capable of handling all of the customizations applied to it: row expansion, editors, column locking and all the rest. With version 4, the grid is smarter and only renders the markup it needs based on the features you enable. In most cases this results in 20% less markup generated in Ext JS 4 than in Ext JS 3, which brings tremendous performance benefits. In fact, the grid is so fast that it actually outperforms the (now deprecated) 3.x ListView. There’s a lot more to talk about with the grid, and we’ll have a full post about it shortly, but the last thing to mention is that Tree now extends from Grid. This not only reduces the size of the framework, it also makes it extremely easy to customize Trees the same way we can customize Grid. We’re just putting the finishing touches on the new Tree class and expect to ship it to you in the next release. Forms Along with Grids, Forms are used in almost every Ext JS application. In version 3 we were a little constrained by the FormLayout, which made it difficult to get a form looking just right. 
Ext JS 4 removes the need for FormLayout altogether, enabling you to use any layout imaginable with your forms. Forms are also no longer tied to the DOM, making it easier than ever to have forms spanning multiple tabs without problems. With the addition of the new FieldLayout, any component can be placed into a form and given a label and even error messages. Because the definition of a Field has been moved into a Mixin (see our blog posts on Ext JS’s new class system), you can add a grid, tree or other component into a form and use it as a multi select. There are a number of examples showing off the new form capabilities in this release. Layouts The layout system is one of the most powerful parts of Ext JS. It handles the sizing and positioning of every component in your application and keeps everything in the right place while your user gets their work done. Ext JS 2 had fast layouts, but they weren’t as flexible as we knew they could be. With Ext JS 3 layouts became a lot more flexible with new box layouts, but we gained that flexibility at the cost of some performance. In Ext JS 4 we have the best of both worlds — better performance and more flexibility — as well as brand new layouts like DockLayout. We’ll have a blog post explaining all the upgrade shortly but in the meantime be sure to watch Jamie’s talk on Ext JS 4 Layouts at SenchaCon 2010. Accessibility Writing accessible JavaScript applications has always been difficult. With Ext JS 4 we are taking the pain out of making apps accessible with three main features: - ARIA Support. All components are marked up with ARIA attributes to allow screen readers to understand the page. - Focus Manager. Enables keyboard-driven navigation around any app. - High Contrast Theme. Light text on a dark background is easier to read for users with vision problems. Accessibility and RTL support are both very important to us and will feature heavily in upcoming releases. 
For today we’ve focused on shipping as many components as possible. Theming One of the biggest requests we get is to make it easier to theme applications. With Ext JS 4 we’ve built on our experience with Sencha Touch to make the framework more themable than ever before by unlocking the power of SASS and Compass. Every aspect of the framework’s appearance can be customized, often by simply tweaking a color variable. If you haven’t seen the power and flexibility that SASS and Compass bring be sure to check out Dave Kaneda’s post on theming Sencha Touch. We’ll be revisiting the subject soon with a full post on theming Ext JS 4, but if you can’t wait I highly recommend checking out Rob Dougan’s talk on theming at SenchaCon 2010. The Road to Ext JS 4.0 Today’s release is the first of several before we hit Ext JS 4.0 Final. We expect to ship new releases roughly every week for the next several weeks, with the first beta release hitting your screens about two weeks from now. Ext JS 4 has been a long time coming and I hope you’re as excited as we are about the changes coming to the framework. Check out all of the examples and tell us what you think in the comments! There are 80 responses. Add yours. Nils Dehl4 years ago Congrats to the TEAM!!! Awesome work *Thumbsup* Jay Garcia4 years ago just awesome!! Isaac Peraza4 years ago Congratulations to everyone who worked on this new version and all the members of ExtJS now Sencha for offering an excellent framework for web application development, thank you very much. Scott Martin4 years ago Woohoo!! ExtJS4 has been release, now I get to ... oh crap .. now I have to ... <vbg> Great work guys!! Tobias Uhlig4 years ago Congrats! I am really looking forward to check out the performance and new features Westy4 years ago Excellent guys, congratulations on the release! Charles Himmer4 years ago Oh man, this is exciting! The new data model system looks awesome! david4 years ago Excellent! PS: Which date did you blog this post? 
February 14, 2011 or February 16, 2011 Duael4 years ago Sounds really great ! Will Butler4 years ago Awesome - looking forward to playing with this! Jay Robinson4 years ago Excellent job, team. You guys are putting out some incredible work! Jamie Popkin4 years ago Fantastic! Can’t wait to use it. vishal kr singh4 years ago I am so lucky to start my career with extjs, love you guys, this is what you call awesomeness Stju4 years ago Excellent news! Congratulations Extjs Team! Meir4 years ago Grepping the extracted tarball yields nothing regarding RTL, any idea when we can test the implementation ? Thanks. Johannes Pfeiffer4 years ago Well done! I was waiting for this a long time. Can’t wait to give it a try. iwiznia4 years ago In the overview guide, it says: “Markup Changes: Ext now supports browser/version-specific markup for most components, which is a great advance from Ext 3.” This mean that it’s possible to generate different markups for different browsers, is this being used right now in the Ext components? It would be possible to simplify the markup a lot for newer browsers (for example for theming rounded corners and so). I think this would yield a performance boost… James Hughes4 years ago I know what I’ll be doing tonight Will the API docs be brought up to date for this release? I notice there are a few missing pages - Controller for example. Excellent work guys. Jamie Avins4 years ago @iwiznia - Indeed. Buttons are an excellent example of this. @James Hughes - Remember it’s a preview release, docs are still being updated. Kevin4 years ago Awesome release!!!!! I’ve downloaded but wasn’t able to find MVC examples and how unit test work…. sorry for being a noob. Until then I can play w/ the chart + data package. Unless it is already there and I just don’t know it. Donald Organ4 years ago What about the “theme” was that being shown at the Conference. Is there a way to enable that I liked that look much better than the ExtJS 3 theme. Thomas4 years ago Aweesome! 
But the Desk-Sandbox Example not working or it’s just my FF? Crysfel4 years ago This is just awesome!! I really like it!! now I’m reading the new code and look fantastic Jamie Avins4 years ago @Thomas - FF seems fine here. What version are you trying? @Donald Organ - We’ll have a blog post on the new Theming in the run up to release. DocMarvin24 years ago Great work !! Excellent Framework !! I’m using this framework since two years and i’m suprising of day in day Valentin4 years ago Nice. Downloading. Luke Fowell4 years ago Looks great! Can’t wait for the Beta to arrive icflorescu4 years ago Looking good! But I think there are some bugs in Ext.data package that still need to be ironed out… For instance, Model.load and Model.save aren’t working as they supposed to, according to the docs. icflorescu4 years ago @Jamie Avins, @Thomas: Desk-Sandbox example is running fine here on FF 3.6.13 / OS X, including the new charts. Brendan Carroll4 years ago Great work! Again another spectacular showing by the Ext team. Thomas4 years ago Ah now the sandbox example works perfect, the mobile bandwith with my laptop was maybe to low. Grgur4 years ago I think I’m wet down there Donald4 years ago @Jamie Avins Is that theme just not included in the preview release?? I tried changing the stylesheet on a couple of the examples to be ext4.css and then things didnt work simeon kessler4 years ago Mazel Tov, I can’t wait for the briss! Eduard4 years ago Hi guys, ext4.css works? chris scott4 years ago Looks interesting. Looking forward to more information on theming. Tommy Maintz4 years ago For those wondering what the ext4.css file does. It is basically a namespaced version of the css used in the sandbox example. It has nothing to do with the Ext 4 theme.. Eduard4 years ago Do Tommy, thanks for your answer, so do you know when we have the theme used in the Conference? Thank You MrSparks4 years ago Congrats to the EXTJS team! Seana4 years ago Nice work guys! 
the “start menu” in the sand box doesn’t appear to be rendering correctly in ff 3.6.13 or Chrome 9.0.597.98 Jamie Avins4 years ago @Seana - Thank you for the report, we will fix that in a forthcoming release. Conor4 years ago Awesome dudes! Well done! alexmace4 years ago Looks to me like the entire data package is broken. As others have noticed, nothing works like advertised in the documentation: Model.load and Model.save are both failing with errors, you can’t iterate over related items in models, no matter how you load your data (through model or store proxies)... I’m even afraid to think of what else is broken. Maybe you should make a list of the thing we CAN test in this developer preview? Plus, the new theme(s) everybody was so excited about is missing. Sencha, what happened there?... The developer preview is really UNUSABLE! What’s the purpose of releasing it? Looks like nothing more than a bone thrown to a community growing anxious. Now we know for sure there’s no hope in hell of having a working beta on February 28th, not to mention a full release. Well, I guess nobody really expected a full release in the end of the month, but an official statement about this would be nice, not to mention POLITE. After all, you’re trying to sell the damn thing, aren’t you? Plus, nobody from the development team is answering, which is not exactly a surprise, we’ve started to get used to it. Now, I’ve been to the conference and listened to the whole marketing hipe about the completely revamped architecture, completely new fabulous data package and the best documentation ever. What’s the use of having the best, most complete documentation ever if the product DOESN’T WORK AS DOCUMENTED? Maybe this whole “major release” was too bold of a goal for such a short(?) dev time? 
Sorry about flaring up like this, but my company bought a few licenses on the premise of a February-March release date, we’re waiting for it for a couple of new projects and now things are looking like we have to wait more. And the thing that pisses me off the most is that nobody cares to answer these questions!... And before coming up with the line “it’s ready when it’s ready” - which is fine for open-source completely-free projects like jquery, think again that you’re trying to sell the product. You’re trying to boost your sales based on the promise on Ext4. Which will be a great product, I have no doubt about it, and 2 and 3 were great products too, but you guys need to learn to COMMUNICATE with your community/users/customers. You’re selling a product and you shouldn’t behave like a bunch of smart and bonny college kids working in their garage… Eduard4 years ago Alexmace, thanks for your comments, We feel the same in our company, we also have Sencha licenses and have outstanding commitments with our clients. Eduard Wemerson4 years ago Just Perfect News! Thanks Nicolas BUI4 years ago I’m actually playing with the API ... You guys have done a really great jobs. I’m having a lot of fun experimenting it ! The documentation is really really great and it’s so much a pleasure to navigate in it (althought there are still some buggy things but not annoying) Nicolas BUI4 years ago Alexmace, You are a damn rude with the Ext team…. Before telling that “nothing is working or it’s broken”, you should try to calm down, breath slowly and try to remember your first steps to the framework. This is a “preview release” so it mean that is a “preview” .. a “sneak peak” and it give time to developer to adapt themselve to the mass of new things slowly. I did not expect to have a preview that quickly so I’m damn really happy. 
About where to start, If you have learned to use the previous release of ExtJS , well you should be accomodated and should know where to start : playing with examples, hacking the examples and reading the documentation to find out how it is overall architectured ... Well if you are a new ExtJS developer, I uderstand why you are lost. Another way to discover changes is to use some of your Ext 3.3.1 codes with the new one to find out what have changed. I found that the error/exception are much much more explicit so I already know what to do to make my old code work on the new one (use the developper console firebug etc.). The data package is broken ? I don’t think so .... just give a another look at the samples and test again ... lots of them make use of data so I don’t think it’s broken. So keep in mind that is a preview release for testing purpose only ! So be patient .... Michael Mullany4 years ago Alex/Eduard. We probably should have spent more time laying out the goals of the preview release in the blog post. If expectations were mis-set then we apologize. We have a large number of people asking us for early access to a working version of Ext JS 4, so they can start getting familiar with the new API and seeing how it works. Until right now, we haven’t been comfortable with saying “yes, you can rely on this API being pretty much final”. The reason we called this a “preview” not a “beta” was that we’re missing some key features (like tree) and the testing time for some of the new features means that there are lots of known bugs. We don’t expect you to put this into production. As far as the dev team being non-responsive, Tommy and Jamie are both active on the thread further up, and we’ll be looking at the bugs in code and documentation that are found as quickly as we can. We’ve said before that we’re delivering a little later than we liked because a number of the components took more time to stabilize than we expected. 
We do hope we can keep our delivery pretty close in time to what our original goal was. Thanks for being a part of the Ext JS community, we appreciate the feedback. We’re not perfect but I can guarantee you that we’re putting 100% effort into getting this out as soon as possible and as high quality as possible. Jamie Nicholson4 years ago Congratulations guys, I’m always impressed with the ambitious approach you’ve take with the new features. I’ll wait for one of the later release candidates before getting my teeth stuck in. The current set of plugins for ExtJS 3.3.2 are outstanding and will no doubt take a little while to bed in and extension developers to catch up to your pace. Are there any plans of setting up an extension team or development company to work along side or follow the releases as no doubt some of the plugins will have started to find their way into commercial applications? bobp4 years ago First off, thanks for releasing the preview, lots of good stuff to chew on. One thing we were hoping to see in the preview was more detailed info on the new MVC and application structuring guidelines. We reviewed the videos from the conference, which contained some good tidbits, but would be great to get some more meaty detail/examples, just so we can start planning ahead…. Eduard4 years ago Michael, thanks for your great comments, We are proud of your work, Simply we plan our goal based on the date published by sencha.Our company has developed a software which makes amazing use of sencha chart, This is the fundamental reason for our expectancy, our company are anxiously awaiting a stable version of extjs4, because we are preparing an update to our aplicacón, Regards ibnesayeed4 years ago And how can I give RTL a try? Which property to set/change and where? Nikolai Babinski4 years ago Great work guys! 
It will be great to have (in the future releases) a kind of control package which will be dealing with application bootstrapping, configuration and tuning, Just to have a global point for the whole application. Alex R4 years ago This is such an impressive preview release! We just looked at the dom splitting, how the heck did you guys do that? We tested a rather complex example (lots of form feilds, nested tabs and panels) and the application is a magnitude faster! The new markup is very impressive. And the charting has left us speachless. Job well done you guys. Kudos! taxidriver4 years ago Excellent job!!!! Congratulation It would be desirable if grid multi-grouping were included, too Thanks Plnodi4 years ago I have read in a past presentation ( Ed Spencer’s presentation) that ext js 4 will have a new webSql proxy, but i can’t find this in the documentation WHY????? Regards JMiller4 years ago Thanks for releasing this preview—it’s quite well done for this far along. I’d say you made 1 glaring mistake though: Way too many people on here are looking at the DEVELOPER preview who must not be developers—maybe 100em font would make it easier for them? Perhaps adding in RTFM and RTFC (last letter meaning “Code”)? Documentation wrong on the Ext.data.Model.load and that means it’s the end of the world as we know it? Or, maybe a developer might read a little further down than the descriptive summary for the example which has the event handler receiving 2 parameters, not 1 and ask, “Why?”—you know, where the function is actually documented, not just demonstrated? And, of course, programmers have always been known for the easiest-to-follow, most-comprehensive documentation! BTW: What were you thinking on your blog post that links to DataDemo—not everyone can find it and then copy-and-paste develop! Get it together Sencha People! Something’s not present? Really? 
Maybe you should have said in your original post something like, oh, say, “We’ve kept a couple [features] back to give them some extra polish, notably…” But, maybe I missed a change in the definition of “notably”—it means you’re giving an example of some things, right? My favorite was, “our goal based on the date published” I mean seriously Sencha People! Someone has a GOAL and you’re supposed to have a rock-solid DATE PUBLISHED! How dare you release stuff early so people can get familiar with it? I mean take Windows Vista, for example: Huge company, lots of profit, release dates set in stone, absolutely no bugs, and given to developers and OEM well in advance of the release! (OK, actual developers, stop rolling around on the floor—it’s dangerous to your electronics.) Just remember, this is computer science, not computer art—when you’re programming, you really ought to know exactly how long it’s going to take you—the HTML 5 specification (which you’ve had the nerve to go ahead and start supporting) released just 3 days ago that it wouldn’t be a formal recommendation for 3 more years (that’s a good example of knowing exactly how long something will take, right?) Well, I guess since some have gone ahead and built major aspects of their business around your target release date, that they’ll just have to go find another application-oriented, MVC-paradigm, feature-rich JavaScript framework that’s better. I can’t really think of one, but hey, you Sencha People aren’t perfect, so what’s someone to do? Build their own? Just couldn’t resist sharing a little comedic relief with you in a time where I’m sure you’re sleeping as little as I trying to get your bread and butter prepared. Again, well done and us developers are really looking forward to the value behind this well-engineered and implemented upgrade and are truly grateful for this preview release which allows us time to prepare for our upcoming (and passing) deadlines. 
Tommy Maintz4 years ago @JMiller Hope people understand the sarcastic undertone in your post I think part of the problem was that our whole dev team was sleeping right after we released because most of us hadn’t slept more then 15 hours in the last week. This caused us to be not very responsive the first hours after we released. I understand people’s concern with some functionality not being in a stable state yet, but like you said, thats why we call this a preview release. We are confident we will be able to get the framework in the best state it has ever been in and hope that till then all of you will become just as excited about the new features and API’s as we internally are by testing and using our preview and beta releases. JMiller4 years ago @Tommy—I didn’t think about people not catching the sarcasm. I’m a fan of the argument ad absurdum. Programmers all too often get treated as magicians: able to create on a whim. We are still humans. I’m with you on sleep loss—I forced myself to stop working and do something else to settle my mind: 22hrs today. I’m very excited about Ext JS 4—I’ve looked over a lot of the code in the last 22hrs and while there remains work (as the code comments state, too), it’s quite well done. Best of wishes to the whole team! Plnodi4 years ago I owe a great apologies Sencha Developers Team. I think you do a great job and i am waiting for new Ext js version. I think that my observation for the documentation about the Web Sql proxy is misunderstood. I am Sorry again Regards MarkusW4 years ago Thanks for providing this nice preview! Just as a first feedback concerning the API docs: Currently, on the API documenation’s index page, the tree on the left shows the same icon for all ExtJS 4 classes. This is a step backwards compared to 3.x and even 2.x where “visual” classes could be distinguished from “non-visual” classes instantly because they had a different icon (the green puzzle piece icon). 
PeterS4 years ago I considered using Ext JS for my new project, but keyboard navigation has serious flaws compared to other widget sets like Mozilla’s XUL, Java’s Swing and Adobe’s Flex. Only edit/combo boxes and buttons have correct focus feedbacks, but this is the default behaviour for HTML forms. Speaking of complex, non-HTML widgets like tabs, grids and trees, how can the user find out where the focus is at a given time? Standard navigation keys like TAB, SHIFT+TAB should always switch between widgets and sublayouts, not between items/rows inside a single widget. At this time, Ext JS framework looks very mouse-oriented and therefore cannot compete with mature desktop widget sets. Should I expect improvement in focus handling and keyboard navigation? Les4 years ago This looks fantastic… quick question: what’s the purpose of ‘uses’ in th code below? What’s the difference btwn ‘uses’ and ‘requires’? Ext.define(‘Ext.Component’, { /* Begin Definitions */ alias: [‘widget.component’, ‘widget.box’], extend: ‘Ext.AbstractComponent’, uses: [ ‘Ext.Layer’, ‘Ext.resizer.Resizer’, ‘Ext.util.ComponentDragger’, ‘Ext.fx.Anim’, ‘Ext.state.Manager’, ‘Ext.util.DelayedTask’ ], Jay Robinson Sencha Employee4 years ago Les, you might get a quicker, better, more thorough response by asking this question in the Ext JS 4 Forum: Michael Mullany Sencha Employee4 years ago There is no RTL support in the developer preview, and looking at the current schedule - there is a good chance that it won’t ship until 4.1, but we’re still hopeful. Tommy Maintz4 years ago @PeterS Although we haven’t put it into the preview release yet, we already have most of the work done to support full keyboard navigation for Ext 4 doing exactly the thing you are describing. Expect to see it land soon into one of the next beta releases. PeterS4 years ago @Tommy Thanks for your answer. A framework with full/uniform keyboard navigation sounds very promising. 
I will definitely follow the development of Ext JS 4 and check all new releases from now. iv_ekker4 years ago Great job! Thanks for the novelties! There is theme generator in ExtJS 4 , is there? Les4 years ago Have you considered selling Ext charting as a stand-alone product to compete with Highcharts? I might be a bit much to require people to buy an Ext license if they only want to use charting. Dowied4 years ago Tabs seem larger than normal..I guess more surface area for clicking/switching but the result is they look less sleek, IMHO. Hopefully the final theme will have then looking like they currently do. Lit parapluie4 years ago Thanks you for this new version. She is amazing ! Is there an upgrade like this for ext gwt in the next weeks ? AwesomeBob4 years ago Neptune will be included on Monday right? Edward Boggis-Rolfe4 years ago Hi looks great, I am starting on a new project and am interested in having a databound grid that supports automatic paging as you scroll. e.g what can be supported by . and. Can you do this? renaudham4 years ago It seems very great and i’m waiting for this. So why no new post or any news since the 16th release? Is there any issues to hide, what happened in Sencha? Could we have some feedbacks on the status? Hoping to see any news even if it was to announce delay or issues thanks Lucas w.4 years ago Hi everyone! renaudham, thank you for this comment, our team also is waiting for any news about the release. Please Sencha team let us something news about it, thank you, regards cojack4 years ago Hello, I found a bug, at when you open the select of states, and try by mouse navigate with scrollbar, the select close his self automaticly. OS: Kubuntu 10.10 Webbrowser: FF 3.6.13 Regards. Lewis J.4 years ago Hi guys!, Any estimate as to when will be Extjs4 released? Thank !!!!! Ed Spencer4 years ago For those who missed it, PR2 is now released on the forums - see lisa4 years ago Congrats!!!!!!! 
Mohammad4 years ago RTL is what my company is waiting for ... any confirmation on the release date ? Comments are Gravatar enabled. Your email address will not be shown.Commenting is not available in this channel entry.
http://www.sencha.com/blog/ext-js-4-developer-preview
CC-MAIN-2014-49
refinedweb
5,636
71.65
Let me preface this post by saying that it is about my (early) experiences practicing BDD in the context of a specific specification framework – MSpec. There are plenty of folks out there who are much more authoritative on this subject than I. And if you happen to be one of those people, I would love to get your feedback on what I’ve done here and how I could do it better. In developing a SSIS data source to talk to an ADO.NET Data Services endpoint, shifting my approach from a basic TDD style to specifications was not originally part of the plan. However, as I got deeper into development, I felt as though I actually wanted a bit more rigor in terms of semantic restrictions (e.g. arrange/act/assert) than I was getting out of the box from my unit test framework. MSpec had been on my radar for a little while (I was introduced to both this framework and BDD in general at ALT.NET), so I took a quick look – and after staring oddly for a moment at the =()=> syntax decided that it provided the kind of semantic consistency I was really looking for. Additionally, the fact that it could plug into the ReSharper unit test runner (with underscores removed from boxcar casing no less!) made it quick and very easy to get up and going on the journey writing specifications rather than tests. Going through the specification writing exercise on this project has me convinced at this point that as someone else once said, BDD is TDD done right. I certainly plan to keep pursuing specifications over traditional unit tests in future efforts. So where to start? I’m not going to spend time in this post talking about how to use MSpec – there’s already lots of good guidance out there. That said, because the syntax initially looks a bit funky, I will say 2 quick things that are helpful to keep in mind when you first look at specifications built on top of MSpec. - Everything in your specification (test) class is a field. 
And all of the work gets done via delegate fields which are of types with names like “Establish”, “Because”, and “It” - Regarding the =()=> syntax, you are probably already comfortable with lambda expressions, so you can ignore the => on the right – and you can recognize the assignment operator on the left. Therefore, that just leaves the empty parenthetical – and this just means that your delegate (lambda) is for a function that has no arguments. There, all better? I already had some unit tests written – though some of them were more of integration tests and revealed some pretty serious problems in my design – but we’ll deal more with those topics later. So my starting point was to try and re-write some of my existing tests as specifications. For example, take the following: [Test] public void Given_Null_URI_Throw_Exception() { try { new DataServicesQuery(_dataServicesQueryBuilder, null, "Suppliers"); } catch(ArgumentNullException ex) { Assert.AreEqual("serviceUri", ex.ParamName); } } Re-writing this as a specification resulted in the following: public class when_service_uri_is_null : with_dummy_resource_builder_and_reader { static NullReferenceException lastException; Because service_uri_is_null = () => { try { queryBuilder.ServiceURI = null; queryBuilder.ResourcePath = "Products"; queryBuilder.GetQuery(); } catch (NullReferenceException ex) { lastException = ex; } }; It should_have_specific_error_message = () => lastException.Message.ShouldEqual("Service URI must be provided"); It should_raise_an_exception = () => lastException.ShouldNotBeNull(); } A couple things to point out here. - First and foremost, there is a more rigid semantic that is enforced by both convention and even by the type system. The convention is that classes follow a “when X [with Y]” pattern (I found that this class name should map very tightly to the instructions contained in the Establish and/or Because expressions – if it does not, you may have a problem in your design). 
Also, assertion (“It” expressions) delegates should follow a naming convention whereby “should …” is followed by a description of the assertion. MSpec also uses the type system to enforce this convention in how the delegate types are named (e.g. – “Because”, “It”). - All of the NUnit attributes identifying test fixtures and tests are gone. MSpec is smart enough to figure out what is a test vs. what is a regular old class based on the MSpec delegates that make up the class. When looked at through the NUnit lens, the specification class remains the test fixture, and each “It” delegate instance becomes an NUnit test. - Notice the with_dummy_resource_builder_and_reader class. As with unit tests, inheritance can be a great way to provide shared context to multiple test fixtures. This base class is responsible for 2 things: 1) exposing a few data fields for its child classes, and 2) providing its own Establish delegate to set up those data fields. - For me, this format of specifications seemed to yield another benefit – without explicit effort. That benefit was more tests with each test having a much better definition around its responsibility. This particular type of test – one where you’re expecting an exception – was one where I wasn’t 100% confident in my approach. However, I didn’t find any baked in support from MSpec – and frankly, for the framework to provide a feature similar to NUnit’s ExpectedExceptionAttribute doesn’t really make a lot of sense to me for writing specifications anyway. You can see above the approach that I took – simply trap the exception and then run a couple tests over the exception to assert various characteristics of it. If there’s a better way to go about this, I would love to hear it. 
After I started getting the hang of writing specifications by rewriting my tests, I began venturing into new parts of my application – and this is where I really started realizing just how much ceremony exists in traditional unit testing frameworks compared to something like MSpec. Because mostly everything in MSpec is a field, there’s no need to provide any kind of method body, etc. after declaring the title of the specification. So, for example, when I wanted to write specifications around executing a query, I was able to quickly capture the specs as follows: public class when_query_is_executed_for_products_with_expanded_suppliers : with_query_shape_factory { It should_have_2_result_sets; It should_have_a_result_set_named_products; It should_have_8_columns_in_products_result_set; It should_have_77_rows_in_products_result_set; It should_have_a_result_set_named_suppliers; It should_have_13_columns_in_suppliers_result_set; It should_have_77_rows_in_suppliers_result_set; } Remember, “It” is simply a type and “should_have_2_result_sets” is simply a field declaration – no need to initialize it with anything just yet. When the MSpec runner is executing, it checks to see whether the field has been initialized – if it hasn’t, as in this case, it flags it as if you used NUnit’s IgnoreAttribute. Finally, I mentioned earlier in this post about using ReSharper to run MSpec specifications. While I would probably still be interested in MSpec if this integration didn’t exist, the fact that it was there removed any question or hesitation. We talk a lot about the readability of tests, and of tests being the spec – and this is, based on what I’ve experienced so far, where specification frameworks like MSpec really shine. Bringing that goodness directly into Visual Studio à la ReSharper made the feedback loop even tighter, and really made for a happy development process. If you’ve read this far and you have no clue what this project is that I’m talking about, see the overview here. 
If you want to download the code and look at it yourself, check it out at
http://codebetter.com/howarddierking/2009/11/13/astoria-ssis-adapters-and-bdd-on-bdd-and-mspec/
CC-MAIN-2021-43
refinedweb
1,218
50.46
XSIM¶ Description¶ XSIM provides a near cycle-accurate model of systems built from one or more xCORE devices. Using the simulator, you can output data to VCD files that can be displayed in standard trace viewers such as GTKWave, including a processor’s instruction trace and machine state. Loopbacks can also be configured to model the behavior of components connected to XMOS ports and links. To run your program on the simulator, enter the following command: xsim <binary> To launch the simulator from within the debugger, at the GDB prompt enter the command: connect -s You can then load your program onto the simulator in the same way as if using a development board. Options¶ Overall Options¶ --args <xe-file> <arg1> <arg2> ... <argn>¶ Provides an alternative way of supplying the XE file which also allows command line arguments to be passed to a program. --plugin <name> <args>¶ Loads a plugin DLL. The format of args is determined by the plugin; if args contains any spaces, it must be enclosed in quotes. --stats On exit, prints the following: A breakdown of the instruction counts for each logical core. The number of data and control tokens sent through the switches. Warning Options¶ --warn-resources Prints (on standard error) warning messages for the following: A timed input or output operation specifies a time in the past. The data in a buffered port’s transfer register is overwritten before it is input by the processor. --warn-stack Turns on warnings about possible stack corruption. xSIM prints a warning if one XC task attempts to read or write to another task’s workspace. This can happen if the stack space for a task is specified using either ~~#pragma stackfunction~~ or ~~#pragma stackcalls~~. Tracing Options¶ --trace -t Turns on instruction tracing for all tiles (see XSIM Trace output). --vcd-tracing <args>¶ Enables signal tracing. The trace data is output in the standard VCD file format. If <args>contains any spaces, it must be enclosed in quotes. 
Its format is: [global-options] <-tile name <trace-options>> The global options are: -pads Turns on pad tracing. -o <file> Places output in <file>. The trace options are specific to the tile associated with the XN core declaration name, for example tile[0]. The trace options are: -ports Turns on port tracing. -ports-detailed Turns on more detailed port tracing. -cycles Turns on clock cycle tracing. -clock-blocks Turns on clock block tracing. -cores Turns on logical core tracing. -instructions Turns on instruction tracing. To output traces from different nodes, tiles or logical cores to different files, this option can be specified multiple times. For example, the following command configures the simulator to trace the ports on tile[0] to the file trace.vcd. xsim a.xe --vcd-tracing "-o trace.vcd -start-disabled -tile tile[0] -ports" Tracing by the VCD plugin can be enabled and disabled using the _traceStart()and _traceStop()syscalls. The -start-disabledargument disables the vcd tracing from the start, allowing the user to enable/disable only those sections of code where tracing is desired. For example: #include <xs1.h> #include <syscall.h> port p1 = XS1_PORT_1A; int main() { p1 <: 1; p1 <: 0; _traceStart(); p1 <: 1; p1 <: 0; _traceStop(); p1 <: 1; p1 <: 0; return 0; } Loopback Plugin Options¶ The XMOS Loopback plugin configures any two ports on the target platform to be connected together. The format of the arguments to the plugin are: -pin <package> <pin>¶ Specifies the pin by its name on a package datasheet. The value of package must match the Idattribute of a ~~Package~~ node in the XN file used to compile the program. -port <name> <n> <offset>¶ Specifies n pins that correspond to a named port. The value of name must match the Nameattribute of a ~~Port~~ node in the XN file used to compile the program. Setting offset to a non-zero value specifies a subset of the available pins. 
-port <tile> <p> <n> <offset>¶ Specifies n pins that are connected to the port p on a tile. The value of tile must match the Referenceattribute of a ~~Tile~~ node in the XN file used to compile the program. p can be any of the port identifiers defined in <xs1.h>. Setting offset to a non-zero value specifies a subset of the available pins. The plugin options are specified in pairs, one for each end of the connection. For example, the following command configures the simulator to loopback the pin connected to port XS1_PORT_1A on tile[0] to the pin defined by the port UART_TX in the program. xsim uart.xe --plugin LoopbackPort.dll '-port tile[0] XS1_PORT_1A 1 0 -port UART_TX 1 0' xSCOPE Options¶ --xscope <args>¶ Enables xSCOPE. If <args> contains any spaces, it must be enclosed in quotes. One of the following 2 options is mandatory: -offline <filename> Runs with xSCOPE in offline mode, placing the xSCOPE output in the given file. -realtime <URL:port> Runs with xSCOPE in realtime mode, sending the xSCOPE output to the given URL:port. The following argument is optional: -limit <num records> Limits the xSCOPE output records to the given number. For example, the following will run xSIM with xSCOPE enabled in offline mode: xsim app.xe --xscope "-offline xscope.xmt" For example, the following will run xSIM with xSCOPE enabled in realtime mode: xsim app.xe --xscope "-realtime localhost:12345"
https://www.xmos.ai/documentation/XM-014363-PC-4/html/tools-guide/tools-ref/cmd-line-tools/xsim-manual/xsim-manual.html
CC-MAIN-2022-21
refinedweb
891
64.81
[ ] ASF GitHub Bot commented on CLOUDSTACK-8855: -------------------------------------------- rafaelweingartner commented on a change in pull request #2387: CLOUDSTACK-8855 Improve Error Message for Host Alert State and reconnect host API. URL: ########## File path: engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java ########## @@ -122,7 +122,6 @@ **/ public class AgentManagerImpl extends ManagerBase implements AgentManager, HandlerFactory, Configurable { protected static final Logger s_logger = Logger.getLogger(AgentManagerImpl.class); - protected static final Logger status_logger = Logger.getLogger(Status.class); Review comment: Yes, I did. It is a change in the logging class that is used on the left side of the log message in the log file. People normally lock down their log parse in the message per se, but for sure if they are considering the class as well, this can break things. I changed this for a simple reason. It is misleading. It will show as if the “Status” class is generating that message, which is not true. I see no reason for this type of code. I can revert this change, but my position is that it does not make sense. It is not used consistently. The same type of message is being logged also with the other logger. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: users@infra.apache.org > Improve Error Message for Host Alert State > ------------------------------------------ > > Key: CLOUDSTACK-8855 > URL: > Project: CloudStack > Issue Type: Bug > Security Level: Public(Anyone can view this level - this is the default.) > Affects Versions: 4.6.0 > Reporter: Bharat Kumar > Assignee: Bharat Kumar > Priority: Major > -- This message was sent by Atlassian JIRA (v7.6.3#76005)
http://mail-archives.apache.org/mod_mbox/cloudstack-issues/201803.mbox/%3CJIRA.12864196.1442309446000.16054.1521029160171@Atlassian.JIRA%3E
CC-MAIN-2019-43
refinedweb
292
58.18
The string is a popular data and variable type in Python. There are different operations for the string type, where removing the last character from the string is one of them. There are different ways to remove the last character from the string in Python. The last character from a string can be removed for different reasons, like removing unnecessary data, formatting a string in a specific way, etc. Remove Last Character From String with Positive Slicing The Python string type is a list of characters, and in order to remove the last character, we just need to remove the last item from the list. Converting a variable into the list and working item by item is called slicing. We will use list slicing to remove the last item. The square brackets are used to define list items and inside them, the index of the items can be specified. First, we get the length of the list that returns item counts. Then we remove the last character by setting the end index before the last character. name="pythontecT" name_len = len(name) new_name = name[:name_len-1] print(new_name) The output is like below where the last character “T” is removed. pythontec Remove Last Character From String with Negative Slicing As stated previously, slicing can be used to remove the last character from the string. We have used positive slicing in the previous part, and the alternative slicing way is negative slicing. Actually, negative slicing is easier to implement and understand. There is no need to calculate and use the string length. For negative slicing, we just provide the end index number as -1. The negative index numbers are used to start from the reverse. Simply, -1 means the index before the last character index. name="pythontecT" new_name = name[:-1] print(new_name) Remove Last Character From String with rstrip() Method Python provides the rstrip() method or function in order to strip characters from the right side of the string. 
The right side of the string is the end of the string, and this means the rstrip() method can be used to remove the last character from the provided string. The rstrip() method is provided by every string type. The rstrip() method requires the character we want to remove, where we should provide the last character of the string with the negative index -1. name="pythontecT" new_name = name.rstrip(name[-1]) print(new_name) Remove Last Character From String with regex Regular Expression or Regex is used to define and match character and text patterns. Regex provides a lot of useful specifiers, some of which are end of line, multiple characters, etc. These can be used to remove the last character from the string. In order to work with regex, the re module should be imported, and the sub() method is used to make a substitution with the provided regex pattern. import re def another_group(m): return m.group(1) name="pythontecT" new_name = re.sub("(.*)(.{1}$)",another_group,name) print(new_name)
https://pythontect.com/remove-last-character-from-string-in-python/
CC-MAIN-2022-21
refinedweb
489
63.59
When CONFIG_EXT3_FS_POSIX_ACL is not defined, ext3_init_acl() is an inline function in fs/ext3/acl.h which doesn't check if a file is a symlink before applying umask. I've always liked my acls to be available (so never noticed), but came across this recently when trying to explain why RedHat Enterprise 3's BOOT kernel creates symlinks 755 during kickstart. I'm *assuming* this is a bug (acl code treats symlinks specially): It doesn't affect functionality, but those 755 symlinks can be noisy in your security reporting :-) Can anyone tell me if there's a good reason why umask *should* be applied to symlink permissions? Otherwise I guess (for 2.6.9): --- fs/ext3/acl.h 2004-12-07 08:15:07.859199829 +0000 +++ fs/ext3/acl.h.khy 2004-12-07 08:05:11.631931063 +0000 @@ -5,6 +5,7 @@ */ #include <linux/xattr_acl.h> +#include <linux/stat.h> #define EXT3_ACL_VERSION 0x0001 #define EXT3_ACL_MAX_ENTRIES 32 @@ -79,7 +80,8 @@ static inline int ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) { - inode->i_mode &= ~current->fs->umask; + if (!S_ISLNK(inode->i_mode)) + inode->i_mode &= ~current->fs->umask; return 0; } #endif /* CONFIG_EXT3_FS_POSIX_ACL */
https://www.redhat.com/archives/ext3-users/2004-December/msg00002.html
CC-MAIN-2015-11
refinedweb
191
50.63
J Comparison date - JSP-Servlet Comparison date sir i need to display a new web page when system date is equal to given date.iit is like birthday.i am using tomact server.when birth date is reached,a new page should be displayed automatically saying happy Jsp Code to store date in database - JSP-Servlet Jsp Code to store date in database Hi, Can u give me jsp code to store current date in to database. Thanks Prakash JSP:How to get year of system date - Date Calendar JSP:How to get year of system date Please tell me how to get year of system date; I am doing import java.util.*; Date d = new Date(); d.getYear...:// Thanks Date validation in JSP Date validation in JSP Example for validating date in a specified format in a JSP page This example illustrates how to use date validation in a JSP page. Here in this code we are using for date generation - JSP-Servlet jsp code for date generation hai i am meyis i need a jsp program... thanks Hi friend, For more information on Date in JSP visit to : http Date Formatter in JSP Date Formatter in JSP This section illustrates you how to use date formatter. To display the date in different formats, we have used DateFormat class. This class provides date format - Date Calendar date format how to convert dd-mmm-yyyy date format to gregorian calendar format in JSP please tell me the code Hi friend, Code to convert date to calender. import java.util.*; import java.text.*; public How to print a webpage without url and date in jsp ? How to print a webpage without url and date in jsp ? How to print a webpage without url and date in jsp JSP :// EL parser...Can you explain jsp page life cycle what is el how does el search JSP the following links: servlet - Date Calendar servlet Dear friends How to automaticaly indicate(color) the particular date in jsp after the expires from the database. for example... will be indicate the jsp, based on the expire the date) what are the ways JSP the following link:... 
language , it is a simple language for accessing data, it makes it possible to easily access application data stored in JavaBeans components. The jsp expression Convert string to Date in JSP Convert string to Date in JSP  ... non- programmers. Whenever such a user pass a date in string format, he is unaware of the fact that the date he has passed has been parsed by the server struts2.2.1 date Format example struts2.2.1 date Format example. In this example, We will discuss about the different type of date format using struts2.2.1. Directory structure of example. 1- index.jsp <html> <head> <title>Date Date validation Date validation How to validate date in jsp? That is i have a textbox which will have the value from the date picker. How to validate the date.../jsp/emp-event.shtml Thanks javascript date picker - Date Calendar javascript date picker how to insert a date picker user control in a html/jsp using javascript??? please help, it's urgent. Hi Friend.../javascript-calendar.shtml Thanks JSP Simple Examples JSP Simple Examples Index 1. Creating... page. Html tags in jsp In this example.... Date in JSP To print a Date in JSP firstly Simple Date example Simple Date example In this section we have presented a simple Date example that shows how you can use different constructors of Date. We can construct the Date=" JSP :// <jsp:useBean id="user.../jsp/simple-jsp-example/UseBean.shtml...how can we use beans in jsp how can we use beans in jsp   Simple Examples ; Convert string to Date in JSP To parse a string to a date we...: out> For Simple Calculation and Output In this example we have used... In this example we are going to retrieve the value we have entered in the jsp Simple Date Format Example Following is a simple date format example made using Format class. We have uses a pattern of special characters to date and time format. In this example... 
is a simple Date Format Example: import java.text.*; import java.util.*; public doubt - Date Calendar from Database using JSP JSP Example Code for retrieving Data from Database<...;/html>For more JSP and Servlet Example codesInserting Data into Database using...;Retrieve data from Database using JSP JSP Example Code for retrieving Data from conept - Date Calendar example of using Calendar in JSP page by the JavaScript.... has a very good example of using Calendar in JSP page by the JavaScript. http... link. This link has a very good example of using Calendar in JSP page Want Automatic No with Date - Development process Want Automatic No with Date Hi, I want the jsp code. i want serial no with date and month.For example "240501" date is 24 and month is 05 and serial no 01 .Thanks Prakash jsp _Mandatory(document.forms[0].expirydate,"Expiry Date"); if(expirydate...; 1000) ? yy + 1900 : yy; //End Current Date Processing cont_curdate=year JSP - JSP-Servlet JSP Hi! I am doing a project in JSP i.e. Library managment. In that I have to display current date in a text field as date of issue. I have tried... like to display in a simple format i.e. 18-12-2008. In the same way in another Date Tag (Data Tag) Example Date Tag (Data Tag) Example In this section, we are going to describe the Date tag. The date tag allows to format a Date in a quick and easy way. User can specify a custom format (eg Simple date formatter example Simple date formatter example In this section of simple date formatter example we... class to convert the date into the specified format. There are various construct DATE DATE I have the following as my known parameter Effective Date... of calcultion starts My question is how to find the date of Thursday This cycle repeats based on every cut off day Here is an example that displays date time picker date time picker I have enetered a date time picker in my jsp file. there is text field before the date time picker. What should i do so... 
the date time picker gets opened Date operation - JDBC Date operation The same what i asked already.Still i didnt get the solution.I dont ve any problem while inserting data from jsp to database.My Doubt... the html text obj into sql Date obj? Hi friend, javascript cal code for selecting date in text field - JSP-Servlet javascript cal code for selecting date in text field HI I want javascript calendar code to select date in text field in jsp.pls send me? .../jsp/emp-event.shtml embed ganttChart on JSP page embed ganttChart on JSP page How I can embed ganttChart on JSP page ??? Im already created ganttChart by using this example: To create a simple... Date date( int day, int month, int year) { Calendar calendar Roseindia JSP Tutorial Introduction to JSP Scriptlets and JSP Expressions with examples Writing the Date JSP... JSP Cookies Example Disabling Session in JSP JSP PDF books Free JSP...Roseindia JSP tutorials provides you with a library of best JSP tutorials function - JSP-Servlet a simple example of JSP Functions Method in JSP See the given simple button Example to submit...:// Reading Request Information Struts 2 Date Validator Struts 2 Date Validator The Date validator in the Struts 2 Framework checks whether the supplied date lies... between the <message> </message> tag. The following example Hibernate JSP In this section, you will learn about Hibernate with JSP using simple pagination example Simple Date Format Exception Simple Date Format Exception Simple Date Format Exception inherits from a package name... and normalization of date. Simple Date-format provides you to work on choosing any JSP - JSP-Servlet JSP Can anybody help me in making a jsp page with these contents:- ONLINE QUIZ: Name: Date: SAP Code... friend, Read for more information. JSP - JSP-Interview Questions :// Thanks... are the comments in JSP(java server pages)and how many types and what are they.Thanks inadvance. 
Hi friend, JSP Syntax XML Syntax Simple clarification - JSP-Servlet Simple clarification Respected Sir/Madam, Thanks for your immediate response..Sorry for troubling you again and again. So,you told... way to do so with JSP's? Whether it is conceptually right or wrong? Please jsp sir i am trying to connect the jsp with oracle connectivity ,but i am facing some prblems please help me. 1)Import the packages...(); Example: <%@ page import="java.sql.*"%> <%@ page import oracle.sql. the Date JSP | Reading the Request Information | Retrieving the data... Example to Store and Show only 10 values | Disabling Session in JSP... the file Into Database with Current Date and Time In JSP | Popup Window using Ajax How to Create JSP Page on tomcat server. Creating a new JSP page is very simple and any text editor can be used. In this example I will show you how to create a simple JSP page... the current date and then print on the web browser. Steps to Create JSP page: Step 1 jsp - JSP-Servlet jsp hi friends i want the jsp code to get the data from the database for particular date by clicking on date it has to forward to the page like form... | | | date | date | YES | | NULL JSP - JSP-Servlet :// as<jsp:forward>... for processing.See examples and details here Ajax Examples ; Displaying Time: This example is simple one to understand Ajax with JSP. The objective of the example is to display the current date... refreshing the current page. This example also shows the key value entered-Interview Questions : Thanks...jsp what are the thinks in el (jsp) nesseay to take care wel JSP-EL - JSP-Servlet JSP-EL ThankYou Sir for Ur sending the EL example. Sir, I have... files: home.html: A simple JSP application Name...;Hi friend, Home.html A simple JSP application EL Uisng pagination in JSP - JSP-Interview Questions pagination in JSP Hi! Everybody.. I am new to JSP. I am doing project using JSP. Everything is OK. 
When i am displaying the data from ResultSet...("ofset" + ofset); System.out.println("MySQL Connect Example JSP Tutorials the Date JSP Till now you learned about the JSP syntax, now I will show you how to create a simple dynamic JSP page that prints the current... to the user. JSP Cookies Example This tutorial shows how to handle Java Date ; Simple Date example In this section we have presented a simple Date example that shows how you can use different constructors of Date... formatter example In this section of simple date formatter example we Display Current Date using JSP Custom(user define) Tag Display Current Date using JSP Custom(user define) Tag In this section, we will discuss about how to display current date & time using custom(user... page Deploy the application EXAMPLE : In this Example, Current date jsp jsp sir plz tell me how can I create a page which is similar to feedback form in which if one option is empty then other option is inaccessible. for example it consists of name address etc. if name field is not filled address Locale Specific Date validations through JavaScript/Ajax... in JSP file. For example You if your validation function is like... about using cookies in JSP at JSP Cookies Example tutorial page. Thanks...Locale Specific Date validations through JavaScript/Ajax... Hi, I Jsp Grid ; --> </code></pre> <p><title>jsp paging example...Jsp Grid <p><%@ page</p> <pre class="prettyprint"><title>My JSP JSP - JSP-Servlet JSP Hi! I am doing a project in JSP called "SMS schedule Reminder"... In this i have to store the user schedules and i have to send alert... an alert message (including message) to his mobile according to the date and time jsp .(for example if i am selecting country as india in first dropdown list then in second Ask Questions? If you are facing any programming issue, such as compilation errors or not able to find the code you are looking for. Ask your questions, our development team will try to give answers to your questions.
http://www.roseindia.net/tutorialhelp/comment/82476
CC-MAIN-2013-20
refinedweb
2,041
64.71
soyuz upload system pays attention to ftp sessions Bug Description -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 affects /products/soyuz done I am informed that soyuz, when handling incoming uploads, assumes that all of the components of an upload (the .changes and the files listed in it) will appear in a single FTP session. This is a violation of the implied semantics of FTP, and can cause practical problems. For example, if you use dupload but your upload fails (eg due to network problems) after successfully completing some files, then dupload will record success for those files but soyuz will delete then. If you then rerun dupload it will upload only the files which were not successfully transferred the first time, but the already-uploaded files will have been previously deleted by soyuz. As another example, you might reasonably upload the different parts of an upload from different systems to save bandwidth on small links. (Often the .orig.tar.gz is very large.) As a third example, you might be behind an application relay (web proxy) which starts a new FTP connection for each transfer. That's obviously not ideal and is slow and wasteful but it's not demonstrably wrong. The correct solution is for soyuz to keep files hanging around rather than giving each new upload connection a new blank directory. Clashes between files of different names can be resolved in favour of the most recent, if each target distribution or namespace has a separate upload directory. Races can be avoided by careful programming. Ian. -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.0.6 (GNU/Linux) Comment: Processed by Mailcrypt 3.5.6 <http:// iD8DBQFD43hN8jy DI+R1DR6Cp52eJf =CRKM -----END PGP SIGNATURE----- Celso Providelo writes ("[Bug 30415] Re: soyuz upload system pays attention to ftp sessions"): > Following you suggestion, I think, we need to support a single > control file inside the upload directory which controls its > processing, of course, it needs support from 'dput' side. 
It would > be something like: Why not process each .changes as soon as the files it lists have arrived ? A .changes is detailed enough to tell whether the upload is complete. Stale files can be expired, and it is safe to allow overwrites - or you could even do something fancy like keep both versions of the file and pick the one that matches the md5sum in the .changes. In fact, there is not really any reason to support directory listings on the incoming directories, nor downloads from them. Once we realise this, then it becomes obvious that there is no need for the _filename_ to be a unique key for finding an uploaded file. Non-.changes files which have been uploaded should be indexed by (filename, the .changes. .changes files can be indexed by the md5sum of the .changes, and if two .changes with the same name are uploaded then whichever one gets all of the files is processed - if both get all of their files then both are processed, possibly causing an error if that would violate in-archive uniqueness. If you do this then you don't need all the palaver with directories either, because the .changes will say where (which distro, which personal package archive, etc.) the files should be sent. Ian. Here is an example of a failure caused by this bug. I used dupload to upload this file and my network broke during the upload of the .orig.tar.gz. I restarted the upload and dupload quite correctly didn't reupload the already successfuly uploaded files. The result was the error message you will find attached. I disagree vehemently with describing this bug as a `wishlist item'. It is quite simply a bug that soyuz pays attention to which FTP session a file arrives in. * It is completely wrong as it violates the basic assumptions underlying the FTP protocol (which are, admittedly, not stated in the FTP RFC as the authors presumably didn't think anyone would be so daft). * There is no excuse for this behaviour. 
As you can see from the design sketch I have provided, there are reasonably sane and reliable ways of dealing with filename clashes, partial uploads, etc. * As you can see from the attached message, this behaviour causes actual malfunctions. So there is no excuse for classifying it as `wishlist' ! I will change the bug status shortly. Ian. Rejected: UploadError made it out to the main loop: File gs-gpl_ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 Format: 1.7 Date: Fri, 30 Jun 2006 18:41:54 +0100 Source: gs-gpl Binary: gs-gpl gs Architecture: source Version: 8.50-1.1ubuntu1 Distribution: edgy Urgency: high Maintainer: Masayuki Hatta (mhatta) <email address hidden> Changed-By: Ian Jackson <email address hidden> Description: gs - Transitional package gs-gpl - The GPL Ghostscript PostScript interpreter Closes: 347637 348834 354352 357326 Changes: gs-gpl (8.50-1.1ubuntu1) edgy; urgency=low . * Merge from debian unstable. * Remaining differences from Debian: - Enable IJS KRGB support. - PowerPC crash fix, messing with setjmp. - Fix for X11 crash with antialising. * Differences from Debian now dropped: - `printenv' in debian/rules. - Specification of CC=gcc in debian/rules. * See changelog entries below for full details. . gs-gpl (8.50-1.1) unstable; urgency=high . * Non-maintainer upload. * debian/ delayed to fix after the 8.50 release. Cures segfaults on ppc (again). Thanks to Roger Leigh for testing. Closes: #357326 . gs-gpl (8.50-1) unstable; urgency=low . * Works done at Codefest in Malaysia 2006. * New upstream release - closes: #347637, #348834 * Updated debian/watch - closes: #354352 * Bumped to Standards-Version: 3.6.2.2 (no physical changes). . gs-gpl (8.15-4.1) unstable; urgency=low . * Non-maintainer upload. * Use gcc-3.4 on s390. Files: d86d5fce7654ff 661cacc387fb90 ea907b11855332 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.1 (GNU/Linux) iD8DBQFEpWQv05Q fB0OzGAeGT08Rgb =cKRH -----END PGP SIGNATURE----- === If you don't... 
The main reason this has never been done is that it would involve rearchitecting of the upload service, and few if any other users have ever reported this to be an inconvenience.. Christian Reis writes ("[Bug 30415] Re: soyuz upload system pays attention to ftp sessions"): > The main reason this has never been done is that it would involve > rearchitecting of the upload service, and few if any other users have > ever reported this to be an inconvenience. In practice it's not usually too much of an inconvenience if you know it's happening. >. That sounds like a perfectly fine beginning approach. >. I propose the following approach: * Move files out of the incoming directories and rename them into a holding directory with a filename which includes their checksum. (Several hardlinks if several checksums need to be supported.) If two identically named files arrive with the same checksums, check that they are identical and if not call for help. (Put everything aside and block further processing of any file with this checksum.) * Periodically look for .changes files all of whose pieces have arrived, and then process them. When processed, delete the .changes but not the other files. * Expire files after some period of time (24h?) Ian. I agree with some of your points and we do have a plan for rearranging the directory structure on the server to allow 'lazy' uploads and also allow 'Personal Package Archives'uploads to be landed. Something like: {{{ <distro_ name>/ <person_ name>/distros/ <distro_ name>/ /distros/ /people/ }}} Each directory beyond those paths will be processed contextually. Following you suggestion, I think, we need to support a single control file inside the upload directory which controls its processing, of course, it needs support from 'dput' side. 
It would be something like: {{{ ubuntu/ <upload_directory_name>/<control_file> containing: at /distros/ process = False }}} I don't know how to define a reliable way to name the upload directory, since it will be required on the 'dput' side to continue the upload later. Also it needs a way to minimize exploits by having a lot of locked upload_directories, maybe removing directories older than some age (one day would be a lot, IMO). Anyway, the current prototype has identified the problem, let's discuss available mid-term solutions.
https://bugs.launchpad.net/txpkgupload/+bug/30415
CC-MAIN-2021-04
refinedweb
1,349
64.1
Some background on equality and hash code overrides in Java With just a couple of clicks, your favorite Java integrated development environment (IDE) can generate equals() and hashCode() overrides for your Java class. But do you understand what is generated, line by line, and why? I understood most of what NetBeans generates for equals(), but there was one detail in that had me puzzled. I was going to ask a question about it on one of those question forums. But, as I typed my question, I figured out the answer, and decided to write it up as an article here instead. I could also have asked the question on the NetBeans mailing list, but it’s also applicable to the other major Java IDEs. I think I should go over some very basic background information that would have been considered unnecessary on the question forum. But then the article turned out to be so long, I broke it up into three articles. In this first article, I’m just going to give background information. In the next article, I will look at generated hashCode() overrides, line by line. And in another article, I will look at generated equals() overrides, also line by line. Some very basic background information In Java, Object is the top-level superclass of all other classes, whether those classes come from the Java Development Kit (JDK), from you, or from a third party developer. Thus there is never any need to write “ extends Object” in a class declaration, but you can if you really want to (your IDE might issue a warning, though). The Object class includes equals() and hashCode(), and other functions that we’re not interested in at the moment. Therefore, all classes in Java are guaranteed to have equals() and hashCode(). With equals(), we can see if one object is equal to another. And hashCode() just gives a number that is hopefully unique, or at least unique enough to distinguish one object from other objects in the currently running program. 
In particular, hash codes are useful to hash table data structures like HashMap. Such data structures generally provide fast access to elements, thanks to a small cost in overhead. It is my understanding that hash table data structures use numbered buckets to store their elements. Then, to see if a particular element is contained, or to retrieve it, the program needs to look in only one bucket instead of potentially having to look in every bucket. But how does the hash table program know what bucket to put an element in? One way out of a few different possibilities would be to use the element’s hash code modulo the number of buckets. Suppose for example we want to add two objects to a hash table with 128 buckets. One element has hash code 148, the other 1212436. Divided by 128, both of those numbers leave a remainder of 20. We could write 148 ≡ 20 mod 128, and 1212436 ≡ 20 mod 128. Then both elements get put in bucket 20. Now, what bucket should an element with hash code −20 go into? I think it should go into bucket 108, not bucket 20. But such implementation details should be encapsulated so that we needn’t be too concerned by them. It is important to understand that in most cases the hash code is not a two-way function, meaning that even if you have a hash code and know the type of the object that generated the hash code, it’s usually not possible to reconstruct the original object from just the hash code. In Java, hashCode() is of type int, a 32-bit signed integer primitive. This means there are 2³² possible hash codes, but for some types there are more than 2³² possible objects. For such types, unique hash codes are impossible. We have to settle for hash codes that are unique enough for our actual use cases. A simple example: String. 
If we iterate Integer.toString(n) for n from Integer.MIN_VALUE to Integer.MAX_VALUE, we would have created 2³² distinct instances of String, none of which contain any letters or other non-digit characters apart from the dash as a minus sign. Not that we’d have any practical need to do that. The important thing is that the String hash code function should give us hash codes that are likely to be unique for the instances of String we need to store in a data structure. There are some types for which it is possible to guarantee hash code uniqueness, like for example the object wrapper Integer for the primitive int: the hash code is just the unboxed value. Because hash code uniqueness can’t be guaranteed for most types, it may be necessary to do an equality check upon finding an object with a matching hash code, to be certain the object has been identified correctly. The performance cost of an equality check on a single element should be negligible compared to the performance cost of equality checks on many or all elements of a given collection. To determine if one object is equal to another, it may be necessary to compare them field by field. Determining the hash code of an object might also involve doing something with each field an object has. However, the equals() and hashCode() in Object can’t really do anything with the properties that are specific to any of its subclasses. They should not be expected to, they were never meant to. Because the equals() in Object might not suit the needs of a subclass, you can override that equals() by defining your own equals() in your class. Your equals() override can use the pertinent fields of the subclass to determine the correct result. Much the same goes for hashCode(). 
Here’s a toy example of a hash code override for a class called SomeSubClass, with just two primitive numeric fields: @Override public int hashCode() { return this.fieldA * this.fieldB; } The override annotation is not strictly required, but the compiler will give a warning if you leave it out, and so should the IDE. Maybe the designers of Java wish they had made “override” a reserved word. In both Scala and Kotlin, “ override” is reserved by the language and must be used whenever applicable. At runtime, when the Java Virtual Machine (JVM) needs the hash code for an object of class SomeSubClass and that class has hashCode() overridden, that’s the hashCode() function the JVM will use. Let’s say instead SomeSubClass doesn’t override hashCode(), but it extends SomeClass, which does have a hashCode() override. Then the JVM will use hashCode() from SomeClass. But if SomeClass, a direct descendant of Object (explicitly or implicitly), doesn’t have hashCode() either, then the JVM will use the hashCode() function from Object. For a somewhat more realistic example, consider an Account class to represent bank accounts. This example Account class is extended by a CheckingAccount class and a SavingsAccount class. You probably want to know if two accounts are the same account in order to avoid pointless transactions, like transferring money from one account to the very same exact account. Maybe hashCode() doesn’t seem all that useful in the Account example, since presumably each account has a unique account number. But a hash code can be useful for storing an object in a data structure. So it might not be too much trouble to simply use the account number as a hash code. The expectations for hashCode() are closely related to the expectations for equals(). For one thing, objects said to be equal should have the same hash code. And objects of the same class that are said to be different should have different hash codes. 
If you see a need for equals() but not hashCode(), you may think it efficient to only override equals() and rely on a superclass hashCode(). But that would probably frustrate the expectations users of your class have for the equality and hash code correlation. A set of expectations is often termed a “contract.” That, as a matter of fact, is the terminology used in the Javadoc for Object. In order to uphold the contract for equals() and hashCode(), if you override one, you have to override the other one also. However, it may happen that an object of one class gets the same hash code as an object of a different class, even though they are said to be unequal. Most likely that won’t be a problem. Another expectation is consistency. If we get the hash code of an object at one point during execution, and then get the hash code of the object again later on during the same run of the program, but the object has not changed in any way, the hash code should be the same as before. Whether you want to override equals() and hashCode() in Account or in CheckingAccount or SavingsAccount is your call. Your decision will depend in great part on how you design these classes. Even if Account is an abstract class, it might make sense to override equals() and hashCode() in it rather than in CheckingAccount or SavingsAccount. This is a point I’ll come back to later. Having the IDE write the overrides for you If you’re working on the source for the class in a plaintext editor like Notepad or Vim rather than in an IDE, you’ll have to figure out what the steps are and what order to do them in to properly override hashCode() and equals(). But in an IDE, you can invoke a dialog box, select the fields you want to use for equals() and hashCode() and then let the IDE write the whole thing for you. 
The procedure is slightly different in the three major IDEs: - In Eclipse 2019–09 R, use the menu Source > Generate hashCode() and equals()… to invoke the dialog box (there might be other ways to get to the dialog box). Then, in the dialog box, choose the fields you want. Eclipse gives some options for source placement and also a couple of very consequential details I’ll cover in Part 2 and Part 3 of this article. - In NetBeans 11.1, use the menu Source > Insert Code… to bring up the Generate dialog box (there’s one or two other ways to get to this one). From the Generate dialog box, choose equals() and hashCode()… to bring up a dialog box in which to choose the fields to use. - In IntelliJ 2019.1.1, use the menu Code > Generate… to bring up the Generate dialog box (there’s one or two other ways to get to this one). From the Generate dialog box, choose equals() and hashCode() to bring up a wizard that asks a couple of things before asking you to choose the fields to use. Caveat: I don’t expect the above to change in any significant way, but I can’t guarantee they’ll stay exactly the same either. Even with the chosen fields named the same, these three IDEs will generate equals() and hashCode() differently. But most of the differences are likely to be cosmetic and inconsequential. Choosing the relevant fields In the bank accounts example, it might be enough to use just the account number field, since presumably there’s some mechanism to ensure each account gets a unique account number. If each customer can have more than one account, looking at the customer name field would not tell us much, in which case we should leave it out of equals() and hashCode(). It would also be a good idea to leave out the balance field, since that’s probably going to be changing all the time, and will occasionally coincide with the balance of other accounts. 
Once you have chosen the fields to base equals() and hashCode() on in the dialog box of an IDE wizard (and other options as applicable), the IDE writes the whole thing for you. It’s a good idea to review the generated functions. Most people, though, probably just accept what was generated without fully understanding each line, or even glancing it over. Under the pressure of a deadline, one doesn’t really have time to wonder why an IDE automatically generates equals() and hashCode() the way it does, and not some other way. But, as part of professional development as a programmer, one should take a moment to learn about what was generated and understand why it was generated the way it was. An example class: money amounts I could continue using the bank account example for this article, but there’s the problem that Java doesn’t yet have a standard way for representing money. There actually are competing proposals. Money involves numbers, specifically real, rational numbers, which offer quite natural examples of equality and inequality. Also, if the Account hash code is just the account number, it’s not such a good illustration of hash codes for classes needing two or more fields to come up with the hash codes. An Account object should probably have a transaction history object. Or maybe it should have two transaction history objects, one for pending transactions and the other for completed transactions. Then maybe a transaction history object would be a data structure that holds Transaction objects. I’m thinking Transaction would be an abstract class that essentially has two fields: transaction time and transaction amount. It would make sense for the hash code of a Transaction object to be based on the hash codes for the transaction time object and the transaction amount object. I’ll show this “basing” of hash codes later on. Maybe deposits (represented by the Deposit class that extends Transaction) would be transactions with positive amounts. 
And maybe withdrawals (represented by the Withdrawal class, which of course also extends Transaction) would be transactions with negative amounts. This is so that the account balance can be updated simply by using the plus() function in CurrencyAmount. Deposits increase the balance, withdrawals decrease the balance and comments have no effect. These requirements would be enforced by the constructors, but they require the money amount class to have some way of comparing money amounts as greater than or less than rather than just equal or not equal. The standard way would be to have the money amount class T implement the interface Comparable<T> and then the compareTo(T other) function. Maybe I’ll write a separate article about that. The money amount class will also need arithmetic functions, so that our program can add and subtract money amounts, and calculate percentages and permillages. That’s also outside the scope of this article. However, the money amount class should be immutable, so that a money calculation always results in a new money amount object, even if the amount is the same (e.g., add zero currency units, multiply amount by 1). In some banking systems there are also comment transactions, which don’t affect any account balances but provide information about transactions that do affect account balances. In our object-oriented model, comment transactions would probably be implemented as a Comment class that extends Transaction. An instance of Comment would then always have a transaction amount of zero currency units, so the constructor would only need a comment message (probably a String) and a comment time. Then perhaps Transaction hash code and equality to be based on the comment message object and the comment time object, and not the transaction amount, which is of course always 0.00. I’m thinking it might actually be a good idea to use money for this example. 
What we come up with here won’t be a serious contender for becoming the official Java money API, but it will be of great pedagogical value. Java does define java.util.Currency, which provides us with a standard way to distinguish the various currencies, e.g., United States dollars, Japanese yen, euros, Swiss francs, etc. So you’ll want to import that into our money class. We’ll call our money amount class CurrencyAmount and place it in a package called currency. It will import Currency. package currency;import java.util.Currency;public class CurrencyAmount { private final long units; private final short cents; private final Currency currencyID; public CurrencyAmount(long singles, short cents, Currency currency) { this.units = singles; this.cents = cents; this.currencyID = currency; }} You might also want to write a chained constructor that omits the cents parameters and fills it in as 0 for the primary constructor. Also, you might want to design the primary constructor so that it quietly changes cent overflows to whole units, e.g., 10 dollars and 250 cents gets quietly changed to 12 dollars and 50 cents. Have your IDE generate a JUnit 4 or JUnit 5 test class. I’ll do most of the testing with United States dollars and some other country’s currency. You can choose to use the same currencies or any other currencies. package currency;import java.util.Currency;import static org.junit.Assert.*;public class CurrencyAmountTest { private static final Currency DOLLARS = Currency.getInstance("USD"); // private static final Currency YEN? EUROS? FRANCS? ETC.} Also create a Transaction class and a couple of associated subclasses, like Deposit and Withdrawal, but place them in a package called transactions or bankaccounts.transactions. But don’t spend too much time on these, just create the necessary constructors and import the necessary classes (our CurrencyAmount class and a standard Java class for date and time). 
The Java date and time API has its share of shortcomings, but for our purposes here it will do just fine. And back in the currency package, create the runtime (not checked) exception CurrencyConversionNeededException, to throw in case someone tries to do something like deposit yen to an account drawn in francs. Exceptions don’t generally need custom equality nor hash codes. But, to properly use CurrencyConversionNeededException, we’re relying on Currency to have equals() properly overridden so our program can tell United States dollars apart from Canadian dollars, for example. Also, we’re going to need CurrencyAmount to override toString(). @Override public String toString() { return this.currencyID.getSymbol() + this.units + "." + this.cents; } This leaves a lot to be desired, but it’ll be good enough for what we need it to do. Transaction also needs a toString() override. @Override public String toString() { return "Transaction " + this.amount.toString() + " on " + this.dateTime.toString(); } Or maybe each of the concrete subclasses should individually override toString(), which might then perhaps render toString() in Transaction unnecessary. That’s not needed for this series of articles, though. Another example class: complex numbers Money is so mundane. I would rather use for an example something a little loftier, like complex numbers, which figure in astronomy. But more people know about money than know about complex numbers. It’s therefore necessary to explain things about complex numbers that go without saying for money amounts. Despite the name, complex numbers are, in some ways, quite simple. Complex numbers don’t have an obvious natural ordering, but two complex number variables can still be said to be equal or not. So therefore the ComplexNumber class should definitely not implement Comparable<ComplexNumber>, but it should definitely override equals(). And of course also hashCode(). 
The ComplexNumber class will use floating point rational numbers to represents a small subset of all possible complex numbers. Specifically, two 64-bit double primitives. Not that different from the Complex struct in C#. A complex number has a “real” part and an “imaginary” part. Two examples: - −1.2 + 0.8i is a complex number with a real part −1.2 and imaginary part 0.8i; and i is the imaginary unit, one of two numbers such that its square is −1 (the other is −i). - 1.2 − 0.8j is a complex number with real part 1.2 and imaginary part −0.8j, and j is just a different symbol for the imaginary unit which electrical engineers seem to prefer. By the way, neither of these numbers is in the Mandelbrot set. I’m not going to go too in depth on the math of complex numbers here, other than what pertains to equality testing. I refer you to a couple of Medium articles for more background on complex numbers: Brett Berry’s basic tutorial, and my own article on the subject (though in that one I’m more concerned with algebraic rather than numeric representations of complex numbers). Complex numbers have many applications in physics and engineering. Both Fortran and C# come with a complex number data type right out the box. But Java doesn’t. At least not the JDK, so maybe you have to write one yourself. The ComplexNumber class should probably be immutable, and it should have these two fields: private final double real; private final double imag; And also the appropriate constructor and getters. I will place ComplexNumber in a package called numerics, I suggest you do the same. Then the file path for the source file will be something along the lines of src/numerics/ComplexNumber.java. This will be significant later on. 
The expectation for equals() is that it will return true for two instances of ComplexNumber that represent the same number, e.g., after these lines, ComplexNumber someNumber = new ComplexNumber(1.0, 1.0); ComplexNumber sameNumber = new ComplexNumber(1.0, 1.0); boolean flag = someNumber.equals(sameNumber); flag should be true. But that’s not going to be the case at this point if all we have is a constructor and a couple of getters. In the next part I will look at generated hashCode() for CurrencyAmount and Transaction in the next part. And in the part after that, I will look at the generated equals() for ComplexNumber, but the one for CurrencyAmount will also figure in there.
https://alonso-delarte.medium.com/some-background-on-equality-and-hash-code-overrides-in-java-5b5d085a798a
CC-MAIN-2021-21
refinedweb
3,645
62.48
Unanswered: Dynamical Module Load in Big GWT & GXT Projects Dear All; I want to moduler structure in my newly developing project with gxt3. Sample : --Base Module ---Products ---Sale ---Accounting ... ... ... .. How can i load dynamical modules in my gwt project. What do you recommend about this situation. Best regards. - Join Date - Sep 2011 - Location - Superior, CO - 413 - Vote Rating - 21 - Answers - 20 Do you mean maven modules? I.e. application dependencies? Or do you mean split-points? I.e., the code is only downloaded and loaded when requested for the first time (after that it's stored locally depending on your browser and cache settings). Thanx for reply icfantv i dont mean maven I mean Modular Architecture. Everyone must install base module but customer install other modules. For Example. Customer X need Sale,CRM,MRP. Customer X install BaseModule, Sale Module, CRM Module, MRP Module. and customer x have not other modules codes. Customer Y need Sale,CRM,Project Management, Accounting Customer Y install BaseModule, Sale,CRM,Project Management, Accounting Modules. and customer y have not other modules codes. I want to make this architecture. Base_Module.gwt.xml --> in Base.war Sale.gwt.xml --> in Sale.jar Accounting.gwt.xml --> in Accounting.jar Customer press to install. Jar download in application lib folder and hook to basemodule. If its possible how can i this module architecture with GWT else what do you recommend for me? - Join Date - Sep 2011 - Location - Superior, CO - 413 - Vote Rating - 21 - Answers - 20 Ok. This is pretty advanced. We are doing this with our projects but it's not like you're thinking and I don't think it will work like you're thinking. Here's why: Think of a normal Java application, say Swing. This is done quite easily if you have your main application scan a, say "plugin" folder, at launch time so that any plugin JARs get loaded on the classpath. 
Downloaded JARs would be placed in this directory and then simply relaunching the app (or dynamically loading the JAR via a dynamic classpath would/should work). Unfortunately, GWT applications are NOT Java applications per se. Rather, there are two sides: the server-side which is Java only and that functions as one would expect a normal Java application to function. The second side is the client-side. Recall that there are two phases to the compilation process for GWT web applications. The first is the normal Java compile process. The second is the GWT compile process which builds Javascript versions of any relevant and required shared and client Java classes. I do not think it is possible to JAR up the GWT compiled code and expect to be able to use it because how would the main application even know about it? It's Javascript, not Java. What we do, however, and this is SIGNIFICANTLY more efficient, is have our child applications configured to build JAR files and have the parent application have dependencies on the children. The Java side works as you would normally expect but when the GWT compile phase kicks in, it GWT compiles ALL the relevant shared/client-side code from ALL the projects (parent and children) together. This means if you're using, say, a TextButton in multiple projects, the code is only included ONCE for ALL the projects to access. What you would need to do is build in security so that someone who doesn't have permission (or has purchased) access to the Sale module to not be able to access or use it. We do this via Spring Security and Roles. If the user does not have the appropriate role, we do not even render the component. NB: THERE IS NO CLIENT-SIDE SECURITY and frankly, there cannot be. The entire application is either downloaded or accessible via split points, so the onus would be on you to ensure server-side code cannot be executed were someone to hack the client-side code. 
Yes, the client-side code is obfuscated and compressed but security through obscurity is not security and the GWT-RPC URIs are not and cannot be obfuscated so someone with enough time and patience could find out what paths exist on your server. We mitigate this by using Spring Security on our servlet methods so that only requests from authenticated users with the right role can execute the method - a 403 Forbidden is returned for users who do not. The great thing about this is that this cannot be hacked (well, not easily) since it's all server-side code and the user's authentication is stored by Spring on the user's session in read-only form. So, essentially, what you want can be done. Just not in the manner you've indicated above. I will say that it took me a while to get all this working correctly because there's a lot of moving parts and you want stuff to be reusable. Additionally, there's some URI name-spacing issues you will need to resolve if you want your child applications to have their own, independent URI namespace so they do not clash with each other or the parent app. I had to write a custom generator for RemoteService implementations to do this and not use GWT's built in generator. Let me know if you have questions as you progress and I will try to help you as best I can. Again, there's a lot of work involved here but if you do it right, you'll have a VERY flexible platform on which to build a robust and pluggable web application.
https://www.sencha.com/forum/showthread.php?249077-Dynamical-Module-Load-in-Big-GWT-amp-GXT-Projects&p=911855
CC-MAIN-2016-18
refinedweb
937
62.88
Hide Forgot In the OpenShift Container Platform 4.x the kibana logging console might be manipulated or even completely damaged by any user who creates a kibana resource in a non-openshift-logging namespace. Due to that, the console link is recreated by the elasticsearch-operator based on the new CR. If the new kibana resource is then removed, the openshift-logging console link does not go back to the original one but is completely lost. This flaw could lead to an arbitrary URL redirection or to the complete loss of the openshift-logging console link. Fixes: Acknowledgments: Name: Aivaras Laimikis
https://bugzilla.redhat.com/show_bug.cgi?id=1902698
CC-MAIN-2022-27
refinedweb
134
55.64
Hashtable.put Method Visual Studio 2005 Adds an element at a specific key entry to a Hashtable object. Package: java.util Assembly: vjslib (in vjslib.dll) Parameters - key The key entry at which the element is added. - element The element to be added. In the following example, you create and initialize a Hashtable object and then you display the values stored in the object. // HT-put1.jsl // Hashtable.put example import java.util.*; public class MyClass { public static void main(String[] args) { // Create a new hashtable object: Hashtable ht = new Hashtable(); // Add some elements: System.out.println("The current value at \"4\" is " + ht.put("4", "Eve Kennedy")); // Add a key that is already present. System.out.println("The current value at \"4\" is " + ht.put("4", "Jack")); } } /* Output: The current value at “4” is null The current value at “4” is Eve Kennedy */ ReferenceHashtable Class ConceptsHashtable Members java.util Package Show:
http://msdn.microsoft.com/en-us/library/aa989603(v=VS.80).aspx
CC-MAIN-2014-35
refinedweb
152
60.21
Hierarchical Clustering Reading time: 15 minutes Machine learning is divided into three categories: supervised learning, unsupervised learning and reinforcement learning. In supervised learning, we want to find the best model that describes the relationship between two variables – input and output. It is called supervised because we give the learning algorithm correct examples and it will train itself on these data. Giving this data is like a guide for the learning process. For example, image classification problem, in which we want to classify some pictures into some labels. So we give the learning algorithm collections of pictures with correct labels and it will try to find the best model to classify pictures. But in unsupervised learning, we are concerning about one variable – input. So we will give the learning algorithm a collection of data without any clue about its structure and label. The goal of unsupervised learning is to explore the structure of the data and performs many tasks like compressing, labling and reducing dimenstions of the data. Clustering Clustering is the most common unsupervised learning method. It is the process of organizing data into clusters (groups) where the members of some cluster are more similar to each other than to those in other clusters. Clustering has many applications in many fields like data mining, data science, machine learning and data compression. In machine learning, it is usually used for preprocessing data. There are many clustering algorithms: k-means, DBSCAN, Mean-shift and hierarchy clustering. Hierarchical Clustering Hierarchical clustering is a method of clustering. In this method, we find a hierarchy of clusters which looks like the hierarchy of folders in your operating system. This hierarchy of clusters will resemble a tree structure and it is called dendrogram (see image below). 
source: Types of Hierarchical Clustering There are two main methods for performing hierarchical clustering: Agglomerative method: it is a bottom-up approach, in the beginning, we treat every data point as a single cluster. Then, we compute similarity between clusters and merge the two most similar clusters. We repeat the last step until we have a single cluster that contains all data points. Divisive method: it is a top-down approach, where we put all data points in a single cluster. Then, we divide this single cluster into two clusters and recursively do the same thing for the two clusters until there is one cluster for every data point. This method is less common than the agglomerative method. source: Similarity metrics The similarity metric is a function that takes two data points as an input and outputs a value that represents the similarity between them (i.e. the distance between them). The most famous metric is Euclidean distance which is the length of the shortest distance between two data points. Other distance metrics include Manhattan distance, Pearson correlation distance, Eisen cosine correlation distance and Spearman correlation distance. There is no specific right choice for metric, it depends on the problem that we want to solve and the structure of the data. source: Linkage methods There are many ways to determine the distance between two clusters (aka linkage methods): Single linkage: the distance between two clusters is defined as the minimum value of all pairwise distances between the elements of the first cluster and elements of the second cluster. Complete linkage: the distance between two clusters is defined as the maximum value of all pairwise distances between the elements of the first cluster and elements of the second cluster. Average linkage: the distance between two clusters is defined as the average distance between the elements of the first cluster and elements of the second cluster. 
Centroid linkage: the distance between two clusters is defined as the distance between the centroids of the two clusters. source: Example using python Prerequisites: - Python 3. - Scipy and sklearn Imports from sklearn.metrics import normalized_mutual_info_score from sklearn.datasets.samples_generator import make_blobs from scipy.cluster.hierarchy import dendrogram, linkage, fcluster import matplotlib.pyplot as plt import numpy as np Generating sample data make_blobs is used to generate sample data where: n_samples : the total number of points equally divided among clusters. centers : the number of centers to generate, or the fixed center locations. n_features : the number of features for each sample. random_state: determines random number generation for dataset creation. This function returns two outputs: X: the generated samples. y: The integer labels for cluster membership of each sample. Then we use plt.scatter to plot data points as shown in the figure below. X, y = make_blobs(n_samples=90, centers=4, n_features=3, random_state=4) plt.scatter(X[:, 0], X[:, 1]) plt.show() Clustering In this part, we are performing agglomerative hierarchical clustering using linkage function from scipy library: method: is the linkage method, 'single' means the linkage method will be single linkage method. metric: is our similarity metric, 'euclidean' means the metric will be euclidean distance. A (n-1)by 4 matrix Zis returned. At the -th iteration, clusters with indices Z[i, 0]and Z[i, 1]are combined to form cluster with index (n+i). A cluster with an index less than ncorresponds to one of the noriginal sand t. The algorithm begins with a forest of clusters that have yet to be used in the hierarchy being formed. When two clusters sand tfrom this forest are combined into a single cluster u, sand tare removed from the forest, and uis added to the forest. When only one cluster remains in the forest, the algorithm stops, and this cluster becomes the root. 
A distance matrix is maintained at each iteration. The d[i,j]entry corresponds to the distance between cluster iand jin the original forest. At each iteration, the algorithm must update the distance matrix to reflect the distance of the newly formed cluster u with the remaining clusters in the forest. for more details check the documentation of linkage function: Z = linkage(X, method="single", metric="euclidean") print(Z.shape) (89, 4) Plotting dendrogram The dedrogram function from scipy is used to plot dendrogram. - On the x axis we see the indexes of our samples. - On the y axis we see the distances of our metric ('Euclidean'). plt.figure(figsize=(25, 10)) plt.title("Hierarchical Clustering Dendrogram") plt.xlabel("Samples indexes") plt.ylabel("distance") dendrogram(Z, leaf_rotation=90., leaf_font_size=8. ) plt.show() Retriving clusters fcluster is used to retrive clusters with some level of distance. The number two determines the distance in which we want to cut the dendrogram. The number of crossed line is equal to number of clusters. cluster = fcluster(Z, 2, criterion="distance") cluster array([4, 4, 3, 4, 3, 4, 4, 3, 4, 3, 3, 3, 3, 2, 3, 4, 2, 4, 3, 1, 4, 2, 1, 1, 1, 2, 2, 2, 4, 4, 2, 4, 3, 2, 4, 2, 1, 1, 4, 2, 4, 2, 1, 2, 3, 4, 3, 2, 2, 3, 1, 3, 2, 4, 1, 1, 1, 2, 4, 1, 3, 3, 4, 2, 3, 3, 4, 2, 3, 1, 3, 1, 1, 1, 1, 2, 1, 1, 2, 3, 3, 1, 4, 1, 2, 4, 1, 2, 4, 1], dtype=int32 Plotting clusters Plotting the final result. Each color represents a different cluster (four clusters in total). plt.figure(figsize=(10, 8)) plt.scatter(X[:, 0], X[:, 1], c=cluster, cmap="Accent") plt.show() Evaluting our clusters Finally, we will use Normalized Mutual Information (NMI) score to evaluate our clusters. Mutual information is a symmetric measure for the degree of dependency between the clustering and the real classification. When NMI value is close to one, it indicates high similarity between clusters and actual labels. 
But if it were close to zero, it would indicate high dissimilarity between them. normalized_mutual_info_score(y, cluster) 1.0 The value is one because our sample data is trivial. Pros and cons The time complexity of most hierarchical clustering algorithms is cubic, i.e. O(n^3). So it will not be efficient for large datasets. But on small datasets, it performs very well. Also, it doesn't need the number of clusters to be specified, and we can cut the tree at a given height to partition the data into multiple groups.
https://iq.opengenus.org/hierarchical-clustering/
CC-MAIN-2019-47
refinedweb
1,363
55.64
Computer Science Archive: Questions from February 15, 2008 - kl6073 askedWhy is there no explicit authentication in the protocol in Figure 8.19? Is authentication needed? Wh... More »0 answers - Anonymous askedbuilding and using a... Show moreIn this question you are to write some procedures in MIPS assemblylanguage for building and using a quadtree data structure. Each node of the treeis implemented with six consecutive words of memory, as shown below: x coordinate,y coordinate, NW child ptr ,NE child ptr ,SE childptr, SW child ptr address a ,address a+4 ,address a+8, address a+12, address a+16,address a+20 Each of the four child pointers (NW child ptr, NE child ptr, SEchild ptr, and SW child ptr) gives the memory address of the first word of thecorresponding child node. A pointer (memory address) value of zero indicates that there is nosuch child node. The tree nodes should be linked such that for a node with coordinates(X,Y) all nodes in its NW subtree have x coordinate less than or equal to X and ycoordinate greater than Y, all nodes in its NE subtree have x coordinate greater than X and ycoordinate greater than or equal to Y, all nodes in its SE subtree have x coordinate greaterthan or equal to X and y coordinate less than Y, and all nodes in its SW subtree have xcoordinate less than X and y coordinate less than or equal to Y. Use “.space 480” to allocate memory sufficient for 20nodes (20 nodes x 6 words/node x 4 bytes/word = 480 bytes). Each successive group of 6 words will beused for a node. You will first initialize the contents of these words so to make afree list of nodes (i.e., a singly-linked list of nodes not currently being used in a tree).Then, whenever a new node needs to be added to your tree, you can obtain one from yourfree list. All of your procedures must use the “standard”conventions discussed in class for passing parameters and returning results. 
Hand in a single filecontaining your procedures for parts (a)-(e) as well as a suitable main programthat will allow the marker to easily test your code. You should include comments describingwhat your code does, how it works, what limitations (if any) it has, and what testingyou have carried out. Write a procedure init that takes as its parameters a memoryaddress (i.e., the starting address of your block of 480 bytes), and a count ofnumber of nodes (i.e., 20), and initializes memory starting at the provided address, so asto create a free list with the indicated number of nodes. Each word in each node shouldbe initialized to 0, except (for all nodes but the last node) the “SW childptr” word, which should contain the memory address of (the first word of) the next node inthe free list. Your procedure should return the memory address of the first node in thecreated free list. • Show less0 answers - Anonymous asked write a function... Show more 1. bool operator<(const Ring& r2) const; 2. int find_index(const char *str, char c); write a function• Show less 1. bool operator<(const Ring& r2) const; 2. int find_index(const char *str, char c); thanks 1 answer - Anonymous asked1) Calculate the Hamming code for the following, performing eachstep: a- 1101010 b- 1000111 2) Follo1 answer - Anonymous askedeach of... Show morex.øi5 QUIZ Amoeba is an ----------based system. It can be viewed as a collection of objects, each of which contains a set of operations that can be performed. · Structure · Object · Locked · Data Which of the following is one of the four functional components of a typical Amoeba? 
· Bluetooth · Airnet · infrared · A node to transfer data between private networks and the Internet One way of securing the objects in Amoeba is to protect them by --------------- · Using Encryption/decryption algorithms · Running antivirus · Simply keep them password protected · Keeping them as hidden files/folders Amoeba is organized as a collection of --------- each with some number of operations that processes can perform on it. · Objects · Data types · Hardware · Software The Amoeba ---------server, called the bullet server · File · Database · Proxy -----------can be dynamically allocated as needed by the system and the users in Amoeba. · CPUs · Disks · Memory· I/O devices To perform an operation on an object, a client performs a --------with the appropriate server, specifying the object, the operation to be performed and any parameters needed. · RPC · Encryption · Deletion · Updating The ------------- in amoeba is usad for fault tolerance and performance · Bullet Server · Directory Server · File System · Replication Server CPUs need to be of the same architecture in the Amoeba System Architecture. · True · False The -----------Server is designed to run on machines with large amounts of RAM and huge local disks and used for file storage · Bullet Server · Directory Server · File System · Replication Server When a server is prepared to accept requests from clients, it executes a ----------- primitive, which causes it to block. · get-request · put-reply · do-trans· none of the above When a request message arrives, the server is unblocked and the formal parameters of the call to get-request are filled in with the information from the incoming request. The server then performs the work and sends a reply using ---- -------. · get-request · put-reply · do-trans On the client side, to invoke a remote operation, a process uses ----------. This action causes the request message to be sent to the server. 
· get-request · put-reply · do-trans The size of the header in the request and reply messages is usually ------------ bytes · 1 · 32 · 35 · 48 Associated with each object in Amoeba is a ----------------, a kind of ticket or key that allows its holder to perform some (not necessarily all) operations on that object. · Capability · Lock · Buffer Note : Copy the PDF file into word document and submit your quiz (as .doc file) after highlighting the correct options.• Show less1 answer - Anonymous askedCity xyz, Street... Show moreWrite a program to read a ".txt" file having text givenbelow.//database.txt• Show less Mr. ABC City xyz, Street xyz, House No 123 0923443314181 cplusplus_programmer@yahoo.com1 answer - Anonymous askedWrite a modular program to keep records andperform statistical analysis for a class of students.... Show moreWrite a modular program to keep records andperform statistical analysis for a class of students. For eachstudent, we need a name of up to 26 characters, an ID for fourdigits, three midterms, and one final. The student data will bestored in an array of studentstructures. Provide for up to 50students. The input is read from a text file. Each line inthe file contains a student’s name (last name, first name), three midterm scores, and one final score inorder. If a midterm or final was not taken, the score is zero. The student’s nameand ID, the midterm scores, and the final score are all separatedfrom each other by at least one space. A “newline” endsthe data for one student. Therefore, the number of lines in thisfile is the same as the number of students. The output consists of a listing of the students sorted by name(ascending order) and is tobe stored in a file named“ece261lab3.prn”. Print each student on a separate linewith an appropriate caption for each column. The last column shouldlist the class average for each student assuming a 20% weight foreach midterm and a 40% weight for the final. 
After the last student, print the highest, lowest, and averagescore for each midterm and the final. In determining the lowest score, do not consider zeroscores. This is the prompt, it is an extension of this code. #include <stdio.h> #include <stdlib.h> #include <string.h> typedef struct{char name[26]; int midterm[3]; int final; } STUDENT; /* Prototype declarations */ void get_data(FILE *inpfile, STUDENT stu[], int *count_ptr); void print_data(STUDENT stu[], int count); void insertion_sort(STUDENT stu[], int count); int main(void) { FILE *inputfile = NULL; STUDENT student_array[50]; int *student_count_ptr = NULL; int student_count; /* Open the external file "ece161lab2.txt" for reading */ inputfile = fopen("ece161lab2.txt", "r"); if (inputfile == NULL) { printf("Error opening input file.\n"); return 1; } student_count_ptr = &student_count; get_data(inputfile, student_array, student_count_ptr); fclose(inputfile); printf("Unsorted student data:\n"); print_data(student_array, student_count); insertion_sort(student_array, student_count); printf("\nSorted student data:\n"); print_data(student_array, student_count); return 0; } /* =================================================================== */ void get_data(FILE *inpfile, STUDENT stu[], int *count_ptr) { char last_name[13], first_name[13]; int count = 0; while (fscanf(inpfile,"%s%s%d%d%d%d",last_name,first_name,&stu[count].midterm[0], &stu[count].midterm[1],&stu[count].midterm[2], &stu[count].final) == 6) { strcpy(stu[count].name,last_name); strcat(stu[count].name," "); strcat(stu[count].name,first_name); count = count + 1; } *count_ptr = count; } /* =================================================================== */ void print_data(STUDENT stu[], int count) { int i; printf(" NAME MID 1 MID 2 MID 3 FINAL\n"); for (i=0;i<count;i++) printf("%26s%5d%11d%8d%9d\n",stu[i].name,stu[i].midterm[0],stu[i].midterm[1], stu[i].midterm[2],stu[i].final); } /* =================================================================== 
*/ void insertion_sort(STUDENT stu[], int count) { /* This is a typical function that sorts the array of structures called stu into ascending order using the "Insertion Sort" algorithm. The number of students in the array is in count. */ int i, j; STUDENT index; for (i=1; i < count; i++) { index = stu[i]; j = i; while ((j > 0) && (strcmp(stu[j-1].name,index.name) > 0)) { stu[j] = stu[j-1]; j = j - 1; } stu[j] = index; } } /* =================================================================== */ • Show less2 answers - Anonymous askedand su... Show more Suppose that we aregiven a key k tosearch for in a hash table with positions 0, 1, ...,m - 1, and suppose that we havea hash function h mapping the key space into the set {0, 1,..., m- 1}. The search scheme isas follows. 1. Compute thevalue i← h(k),and set j← 0. 2. Probe inposition ifor the desired keyk. If you find it, or if this position isempty, terminate thesearch. 3. Setj ← (j +1) mod mand i ← (i + j)mod m,and return to step 2. Assume thatm is a power of 2. a. Show that this schemeis an instance of the general "quadratic probing" schemeby exhibiting theappropriate constants c1 andc2 for equation (11.5). b. Prove that thisalgorithm examines every table position in the worstcase.• Show less1 answer - Anonymous asked1 answer - Anonymous asked2 answers - Anonymous asked1 answer - Anonymous askedIn spite of the advantages for instantaneous wireless PHY/MACinformation, what... Show more (i) In spite of the advantages for instantaneous wireless PHY/MACinformation, what are the pitfalls that a wireless monitoringsystem needs to be aware of? [3] (ii) How can we avoid such pitfalls to improve the capabilities of thewireless monitoring technique?[3] (iii) How can we leverage the information that wireless monitoringprovides, for WLAN traffic characterization and network diagnosis?[4]• Show less1 answer - Anonymous askedAmoeba is an----------based system. It can be viewed as a collection ofobjects, each of which... 
Show more QUIZ 1 Amoeba is an----------based system. It can be viewed as a collection ofobjects, each of which contains a set of operations that can beperformed. · Structure · Object · Locked · Data 2 Which of thefollowing is one of the four functional components of atypical Amoeba? · Bluetooth · Airnet · infrared · A node to transfer data betweenprivate networks and the Internet 3 One way of securingthe objects in Amoeba is to protect them by--------------- · Using Encryption/decryptionalgorithms · Running antivirus · Simply keep them passwordprotected · Keeping them as hiddenfiles/folders 4 Amoeba is organizedas a collection of --------- each with some number of operations that processes can perform on it. · Objects · Data types · Hardware · Software 5 TheAmoeba ---------server, called the bullet server · File · Database · Proxy 6 -----------can be dynamically allocated as needed by the system andthe users in Amoeba. · CPUs · Disks · Memory · I/O devices• Show less2 answers - Anonymous asked2 answers - Archit askedtofind the LCM (lowest common multiple) and GCD (greatest co... Show moreWrite a simple C++ program, using loops tofind the LCM (lowest common multiple) and GCD (greatest commondivisor) of two numbers.Will rate LIFESAVER to thecorrect answer• Show less3 answers - Anonymous asked"While providingcommunication training to the employees, educators usually focusmore on written and ... More »2 answers - Anonymous asked1 answer - Anonymous askedHow we can runexisting projects in NET BEAN.KINdly told me.What is the method of running exising pro... Show moreHow we can runexisting projects in NET BEAN.KINdly told me.What is the method of running exising project in NET Bean. • Show less1 answer - Anonymous askedYouhave to submit abstract of 300-500 words o... 
Show more An Evolutionary View of the Object-OrientedParadigmYouhave to submit abstract of 300-500 words of this researchpaper• Show less1 answer - Anonymous askedYou are required to design a shopping cart whichwill contain following two parts... Show more ONLINE SHOPPINGCART You are required to design a shopping cart whichwill contain following two parts Store Front & Administrationarea Store Front contains product catalog to which user interacts. Users cansearch products from product catalog and can add to the basket forordering. Administration area is a module to create and manage product categories, and orderthe way in which they are displayed on the storefront. The administrator will be able to do theseoperations after authentication (using login andpassword). Note: You can use Servlets/JSP or both for thispurpose. The database for this system will be MicrosoftAccess.• Show less0 answers - Anonymous askedA... Show more LIBRARY MANAGEMENTSYSTEM You have to design a library management systemwhich will keep record of · All Books · Borrowers · Lending books · Returning of book Working: Its working will be in such a way that thelibrarian will check the availability of desired book by searchingit. If the book is available that will be issued to particularborrower (Student). The information of borrower and the returningdate of the book will be recorded. On returning of book theborrower’s status will be cleared and quantity of books willbe revised. Note: You can use Servlets/JSP or both for thispurpose. The database for this system will be MicrosoftAccess.• Show less0 answers - Anonymous asked1 answer - Anonymous asked1 answer - Anonymous askedWebsite develop... Show more FINAL DELIVERABLE (PROJECTIT430) Your final deliverable consists of two majorparts i. Productor service Administration - Website development (40 Marks) - Static web pages (Consult from Website Contents template) (15 Marks) - Dynamic web pages (25 Marks) (Consult from Website Contents template)ii. 
Order Management or ShoppingCart• Show less (Consult from Website Contentstemplate) iii. JobOpportunities or Apply online (Consult from Website Contents template) - Presentation PPT Slides (10Marks) - At least 10 slides are required - Slides should contain: i. Personal Introduction ii. Company profile iii. Product or Service informationiv. Importance of product or service in globalperspectives v. Marketing Strategy vi. SWOT analysis vii. Promotional Activities viii. Importance of CRM w.r.t your productix. Futuristic goals for enhancementx. summary4 answers - Anonymous asked"The role of Information Technology in Pakistan" Please comment on it your comments must be 200 to 3... More »0 answers - Anonymous asked2 answers - vikrant askedQ1. When virtual memory is implemented in a co... Show moreHello sir/madam,I need your help. Would you help me?Q1. When virtual memory is implemented in a computingsystem, it carries certain costs and certain benefits. Lists thosecosts and the benefits. It is possible for the costs to exceed thebenefits. Explain what measures you can take to ensure that thisimbalance does not occur.Thanks!!!• Show less0 answers - vikrant askedQ1. Consider a file system where a file can be... Show moreHello sr/madam!!!I need your help. Woud you help me?Q1. Consider a file system where a file can be deleted and itsdisk space reclaimed while links to that file still exist. Whatproblems may occur if a new file is created in the same storagearea or with the same absolute path name? How can these problems beavoided?Thanks!!!• Show less1 answer - Anonymous asked2 answers - Anonymous asked0 answers - Anonymous askedstages,columns for cycles) explained in class... 
Show moreUsing the two-dimensional table (rows for pipeline<BR>stages,columns for cycles) explained in class show<BR>how the aboveinstructions will flow in the pipeline<BR>once withforwarding and once without forwarding.<BR> • Show less0 answers - Anonymous askedAssuming that branches are predicted not-taken,<BR>how manycycles will each iterations of this loop<... Show moreAssuming that branches are predicted not-taken,<BR>how manycycles will each iterations of this loop<BR>take?<BR><BR>Top: DADDIU R4, R4, 1<BR>LW R5, 100(R3)<BR>SLTIU R6, R4, 100<BR>BNE R0, R6,TOP<BR>DSUB R6, R7, R8<BR>DDIV R2, R1, R2<BR> • Show less1 answer - Anonymous askedQuestion :... Show moreCalculate theHamming code for the following data.Performe each step.Question :• Show lessCalculate theHamming code for the following data.Performe each step.a). 1101010b). 10001110 answers - Anonymous askedQ the following bit streams containig checksum are recieved at thereceiver end. describe whether dat... Show moreQ the following bit streams containig checksum are recieved at thereceiver end. describe whether data is correct or there are some errors? justify your answer. a) 100001110100101100101101 b) 100001010001010001010101 • Show less1 answer - Anonymous askedDiscuss its v... Show moreQuestion :• Show lessWhat are theasynchronous protocols in data link layer?Discuss its various types.1 answer - Anonymous asked(0.862... Show moreIf possible could you please help me with thisprogram? Programming Assignment 1 JShello.htm (0.862 Kb) Create a JavaScript program that declares andprints an array that includes at least 20 of your favoritesongs. For each song, determine the best means for storingthe year it was published. Using JavaScript, printeach song's name along with the year it was published.The print logic must use aloop. (Youmay use for song information. The data really do not matter:The programming does.)0 answers - Anonymous askedProg... 
Show moreIf someone could please help me with this program forjavascript that would be much appreciated. Programming Assignment 2 The sum of the lengths of any two sides of a triangle mustbe greater than the length of the third side. For example, thenumbers 3, 4, and 5 can form a triangle because 3+4 > 5, 4+5> 3, and 5+3 > 4. In contrast, the numbers 1, 2, and 5 cannotform a triangle because 1+2 < 5. Thus, if you are given anythree integers, you can determine whether they could possibly forma triangle or not by applying this general principle. Write a JavaScript program that allows a user toinput three integers using text boxes ina form. (Hint:You need to use the built-in parseInt function toconvert the input strings to integers.) Test the threeintegers to determine if they can be formed into atriangle using the rule given above. Also test if the resultingtriangle would be a right triangle using the Pythagorean theorem,namely that the square of the hypotenuse (the longest side) equalsthe sum of squares of the other two sides. Display an alertbox to inform the user whether their integers can form atriangle or a right triangle (tell them which), or if the integerscannot form a triangle. Continue accepting sets of three integersand testing them until the user decides toquit.• Show less0 answers - Anonymous asked0 answers - Anonymous askedProgramming Assignment... Show moreDoes anyone know javascript if so could you please help mewith this program? Programming Assignment 4 Create an html document that includes a JavaScriptprogram that creates a new constructorfunction named Automobile in the documenthead. Include at least five properties in the object definition,such as make, model, color, engine, seats, and so forth. Thenassign the values of your car to each of the Automobile properties.Print the properties to the screen.3 answers - Anonymous askeda... 
Show moreusing C++ standard change this code in C++ alsoall these function written convert in Class Comp {} also if first word is enter store it in file and at same time ask if you have second word if yes then enter secont word store also in file alsocompare these two word if same store in an other file set flag yes. if any time you enter same word as alread intered and flagyes then show already exist. #include <stdio.h> #include <conio.h> #include <string.h> #include <stdlib.h> #include <malloc.h> #define LEFT 1 #define RIGHT 2 struct node { char word[20]; node *left,*right; }; node *maketree(char[]); node* treefromfile(); void filefromtree(node*); void addword(node*,char[]); void displayall(node*); node* bsearch(node*,char[]); void showmenu(); FILE *file_ptr; int main() { char word[20]; int mnuchoice; node *tree; tree=treefromfile(); if(tree==NULL) { printf("\nFile does not exist ..."); getch(); } while(1) { showmenu(); scanf("%d",&mnuchoice); switch(mnuchoice) { case 1: printf("\nEnter word : "); scanf("%s",word); if(tree==NULL) tree=maketree(word); else addword(tree,word); break; case 2: if(tree==NULL) printf("\n is empty..."); else displayall(tree); getch(); break; case 3: filefromtree(tree); exit(1); default: break; } } } void showmenu() { printf("\n\t\tADD CLEAR COURSES"); printf("\n[1]. Add a word."); printf("\n[2]. Display all."); printf("\n[3]. 
Save and Close."); } node* treefromfile() { node *ptree=NULL; char word[20],str[120],*i; int flags=0; file_ptr=fopen("C:\\dict.txt","r"); if(file_ptr==NULL) ptree=NULL; else { while(!feof(file_ptr)) { i=fgets(str,120,file_ptr); if(i==NULL) break; if(flags==0) { ptree=maketree(word); flags=1; } else addword(ptree,word); } fclose(file_ptr); } return ptree; } node* maketree(char w[]) { node *p; p=(node*)malloc(sizeof(node)); strcpy(p->word,w); p->left=NULL; p->right=NULL; return p; } void addword(node *tree,char word[]) { node *p,*q; p=q=tree; while(strcmp(word,p->word)!=0 && q!=NULL) { p=q; if(strcmp(word,p->word)<0) q=p->left; else q=p->right; } if(strcmp(word,p->word)==0) //compare two word printf("\nalready exists..."); else if(strcmp(word,p->word)<0) p->left=maketree(word); else p->right=maketree(word); } node* bsearch(node *tree,char word[]) { node *q; q=tree; while(q!=NULL) { //p=q; if(strcmp(word,q->word)<0) q=q->left; else if(strcmp(word,q->word)>0) q=q->right; if(strcmp(word,q->word)==0) break; } return q; } void filefromtree(node *tree) { void travandwrite(node*); file_ptr=fopen("C:\\dict.txt","w"); if(file_ptr==NULL) { printf("\nCannot open file for writing data..."); } else //if(tree==NULL) { if(tree!=NULL) { travandwrite(tree); } fclose(file_ptr); //Close the file anyway. } /*else { travandwrite(tree); fclose(file_ptr); }*/ } //char glbStr[120]; void travandwrite(node *tree) { if(tree!=NULL) { fprintf(file_ptr,"%s %s\n",tree->word); travandwrite(tree->left); travandwrite(tree->right); } } void displayall(node *tree) { if(tree!=NULL) { printf("%s\n",tree->word); displayall(tree->left); displayall(tree->right); } } • Show less1 answer - Anonymous asked" b... Show moreHi all, I need the exercise solutions for the textbook:"Introduction to the theory of computation" byMichael Sipser. Does anyone have it? If not, where can i getit? Thanks in advance • Show less0 answers - Anonymous askeds... Show more Q.2. 
Match the following to one of the five internetlayers. [10] 2 answers - Communication directly with user’s applicationprogram. - Error correction and retransmission. - Mechanical, electrical, and functionalinterface. - Responsibility for delivery between adjacentnodes. - Reassembly of data packet. - Anonymous asked2 answers - Anonymous asked2 answers - Anonymous asked2 answers - Anonymous asked3 answers - Anonymous asked2 answers - Anonymous asked3 answers - Anonymous asked4 answers - Anonymous asked1 answer
http://www.chegg.com/homework-help/questions-and-answers/computer-science-archive-2008-february-15
CC-MAIN-2014-35
refinedweb
3,814
56.76
Today’s Programming Praxis problem is about Ternary search tries, which are basically hashmaps of strings to values, but which can be faster in some cases. We have to implement the data structure and provide functions to find, insert, update and delete items and a function to convert the trie to a list. In order to match the provided solution, we will test our code by putting all the words in the bible, as well as their frequencies, into a ternary trie. Our target is 52 lines (the size of the provided solution). Let’s go. Our imports: import Data.Char import qualified Data.List.Key as K import Prelude hiding (lookup) The data structure has two small differences from the one in the provided solution: I use a Maybe instead of the bool+value approach, since it’s more idiomatic Haskell, and the split is a list instead of a single type. The reason for this is that it makes the ternary trie far more generic: the provided solution works only for strings, because in the insert function it uses the first ASCII character as a default. By making the split a list, I can use an empty list as the default, which means I can also define a trie with e.g. lists of Int as the key. This would not be possible with a single item, since there is no way to get a default value for an arbitrary type. Another thing you might notice are the strictness annotations on the three branches of a node. If you omit these (as I initially did) and try to cram the bible in a trie you’re going to go through stack space like a hot knife through butter, which means the program will be terribly slow. Note that in order to see the benefit you will have to compile the program: running it in ghci will still produce a stack overflow. data TernaryTrie k v = Empty | Node { val :: Maybe v, split :: [k], lb :: !(TernaryTrie k v), eb :: !(TernaryTrie k v), gb :: !(TernaryTrie k v) } For lookup we have two terminating cases (an empty node is reached or the key is found). 
The rest is just simple recursion., update and delete all follow the same pattern. To get rid of the boring ‘take the correct action for each branch’ code I factored that out into the modify function to better comply with the DRY principle. } And more recursion for the enlist method. enlist :: TernaryTrie k v -> [([k], v)] enlist = enlist' [] where enlist' _ Empty = [] enlist' k t = maybe [] (\v -> [(k, v)]) (val t) ++ enlist' k (lb t) ++ enlist' (k ++ split t) (eb t) ++ enlist' k (gb t) And we test it by finding the 25 most common words in the bible. main :: IO () main = print . take 25 . reverse . K.sort snd . enlist . foldl (\t k -> update k 1 succ t) Empty . map (map toLower . filter isAlpha) . words =<< readFile "bible.txt" And there we go. With 35 lines it’s not a huge reduction (although the provided solution grows by about 15 lines if you reformat it to the 65-ish character limit I use on this blog), but that was to be expected since it’s mostly basic recursion, which doesn’t easily lend itself to alternative solutions. Still, it’ll do. Tags: Haskell, kata, praxis, programming, search, ternary, trie, tries June 6, 2009 at 3:29 am | I take it that you’ve made ‘split’ a list of keys since you plan to eventually encode more than one element? June 6, 2009 at 7:41 am | No, the list is always supposed to have one element. Perhaps it would have been slightly better to make it a Maybe instead. June 7, 2009 at 6:35 am | have you tried taking a look at bytestring-trie? It’s restricted to using ByteStrings for keys rather than being generic, but it has a number of nice additional features: node fusion for shared prefixes, extremely generalized versions of the primary functions,… June 7, 2009 at 8:57 am | … and it’s a binary instead of a ternary trie :) I didn’t look at it because I always try to solve these exercises myself, but if I ever need to use one in practice, then yeah, I’ll use the existing library. 
June 9, 2009 at 8:27 pm | Well, the above version isn’t really all that ternary. It’s only “ternary” on account of not doing node fusion. If two keys are distinct then there exists some maximal equal prefix after which the suffix of one is initially greater/lesser than the suffix of the other. That binary split is the same for the above and the library. A real ternary trie would have instead that two distinct keys have some maximal equal prefix after which the suffix of one key is initially A/B/C than the suffix of the other. The trick is getting an element-wise function that partitions non-equal elements into three equal sized sets, rather than two. You can’t use equality as the third set, because equality is handled by the shared prefix and so is already accounted for. Trying to achieve a real ternary trie is desirable because many decision problems have e as the optimal branching factor. It’s easy to get any 2^n branching factor by just chomping off more elements at once. That’ll help to flatten the trie, but at the cost of additional width. Optimality is about balancing the cost of depth against the cost of width; node fusion reduces depth at no cost to width.
https://bonsaicode.wordpress.com/2009/06/05/programming-praxis-ternary-search-tries/
CC-MAIN-2015-22
refinedweb
940
68.5
sql ), ) ppic Wrote:basically, you don't need http if you use executebuiltin: Code: sql ), ) ruuk Wrote:Posted version 0.9.5 for update to the repository. Or get the zip here: branlr Wrote:Great script! 3 things:1. I noticed what I think is a bug... When I click on "Add show" with the mouse, I go to the add show dialog box. However, when I navigate to it with my remote, and hit enter, it brings up the last show I had highlighted. branlr Wrote:2. Earlier in the thread someone said, "This is a great start to your sickbeard plugin." I don't know if this was a joke, but... uh... PLEASE!? Quote. branlr Wrote:3. Is there any way you and ppic could combine your scripts? I honestly think your interface is WAY better, and the fact that you can view a list of past episodes, but the fact that his scans the library and now has data access for skinners gives it a lot of power. I don't understand the whole one-episode-at-a-time list view interface. I know you could probably just both develop them separately, but it seems like both have the functionality that the other lacks. Dobyken Wrote:Tried out ..9.4 last night and .9.5 this morning...Mythbuntu 10.04. The "Add All" doesn't appear to be writing to the data file for me. It scans the TV series and prompts for verification for matching but they don't actually get added. I took a look at the "data" file in Userdata/Addonondata and it only contains the original 6 shows I added manually the other day. Wiping the old data file and rerunning the scan left me with zero shows in the list. One related issue on the new file scan. One of my series wasn't found in your search. Can an "Use Existing" selection be added to the prompt or "Skip". ruuk Wrote:As for scanning the library, my script now has that function. I believe that ppic's main focus is the skin integration aspect (in fact when I released my script he said he was glad because he could focus on that) and that the interface is secondary. 
If the goal is to get his info to show up contextually with skin integration then his interface makes more sense. My main goal was to have a list of what's coming up next. I think that we're doing two different things and keeping them separate allows the user to only install the functionality they want. I'm not saying combining them is a bad idea, just that there are pros to not doing it as well. XmemphistoX Wrote:Awesome script. Is there anyway to get this incorporated into the TV Shows menu addons rather than Programs? ppic Wrote:basically, you don't need http if you use executehttpapi same way for you with a json request for k in xbmc.__dict__.keys(): print k import jsonrpc api = jsonrpc.jsonrpcAPI() #or api = jsonrpc.jsonrpcAPI(mode='http',url='',user='user',password='password') seasonsarray = api.VideoLibrary.GetSeasons(tvshowid="idhere") ruuk Wrote:What is probably happening is that some error is occurring before it saves. If you could do the scan and then find the error in .xbmc/temp/xbmc.log and post that here it would help. The error will have NOTICE tags in the log. That would also cause it to miss shows if the error is happening before the show. library name would help me figure out why. ruuk Wrote:Thanks for pointing that out. That is indeed a bug. I just fixed it and that will be in the next version. Yeah, when he said that I had to google sickbeard to find out what it was. Sounds interesting but I don't use torrents for tvshows because whenever I do I get emails like this: Dobyken Wrote:What i meant was one of my obscure shows (Helen of Troy Minseries) could not be matched in TVRage. A list of similar names came up but none of them were the correct show. The only option at that point is to close the list with by clicking X or hit escape which then makes the search continue like it should. I was just suggesting that there be an option in the list of TVRage suggested shows that allows you to select the name just as it's entered in XBMC.
http://forum.kodi.tv/showthread.php?tid=82174&pid=620221
CC-MAIN-2014-52
refinedweb
749
73.68
Azure Functions HTTP triggers and bindings This article explains how to work with HTTP triggers and output bindings in Azure Functions. An HTTP trigger can be customized to respond to webhooks.. Tip If you plan to use the HTTP or WebHook bindings, plan to avoid port exhaustion that can be caused by improper instantiation of HttpClient. For more information, see How to manage connections in Azure Functions. The code in this article defaults to the syntax which uses .NET Core, used in Functions version 2.x and higher. For information on the 1.x syntax, see the 1.x functions templates. Packages - Functions 1.x The HTTP bindings are provided in the Microsoft.Azure.WebJobs.Extensions.Http NuGet package, version 1.x. Source code for the package is in the azure-webjobs-sdk-extensions GitHub repository. Support for this binding is automatically provided in all development environments. You don't have to manually install the package or register the extension. Packages - Functions 2.x and higher The HTTP bindings are provided in the Microsoft.Azure.WebJobs.Extensions.Http NuGet package, version 3.x. Source code for the package is in the azure-webjobs-sdk-extensions GitHub repository. Support for this binding is automatically provided in all development environments. You don't have to manually install the package or register the extension. Trigger The HTTP trigger lets you invoke a function with an HTTP request. You can use an HTTP trigger to build serverless APIs and respond to webhooks. By default, an HTTP trigger returns HTTP 200 OK with an empty body in Functions 1.x, or HTTP 204 No Content with an empty body in Functions 2.x and higher. To modify the response, configure an HTTP output binding. Trigger - example")]"); } Trigger - attributes In C# class libraries and Java, the HttpTrigger attribute is available to configure the function. You can set the authorization level and allowable HTTP methods in attribute constructor parameters, webhook type, and a route template. 
For more information about these settings, see Trigger - configuration. This example demonstrates how to use the HttpTrigger attribute. [FunctionName("HttpTriggerCSharp")] public static Task<IActionResult> Run( [HttpTrigger(AuthorizationLevel.Anonymous)] HttpRequest req) { ... } For a complete example, see the trigger example. Trigger - configuration The following table explains the binding configuration properties that you set in the function.json file and the HttpTrigger attribute. Trigger - usage. As an example, the following function.json file defines a route property for an HTTP trigger: { "bindings": [ { "type": "httpTrigger", "name": "req", "direction": "in", "methods": [ "get" ], "route": "products/{category:alpha}/{id:int?}" }, { "type": "http", "name": "res", "direction": "out" } ] } Using this configuration, the function is now addressable with the following route instead of the original route. http://<APP_NAME>.azurewebsites.net/api/products/electronics/357 This allows the function code to support two parameters in the address, category and id. You can use any Web API Route Constraint with your parameters. The following C# function code makes use of both parameters. using System.Net; using Microsoft.AspNetCore.Mvc; using Microsoft.Extensions.Primitives; public static IActionResult Run(HttpRequest req, string category, int? id, ILogger log) { var message = String.Format($"Category: {category}, ID: {id}"); return (ActionResult)new OkObjectResult(message); } By default, all function routes are prefixed with api. You can also customize or remove the prefix using the http.routePrefix property in your host.json file. The following example removes the api route prefix by using an empty string for the prefix in the host.json file. { . The ClaimsPrincipal; } Authorization keys Functions lets you use keys to make it harder to access your HTTP function endpoints during development. A standard HTTP trigger may require such an API key be present in the request. 
Important While keys may help obfuscate your HTTP endpoints during development, they are not intended as a way to secure an HTTP trigger in production. To learn more, see Secure an HTTP endpoint in production. Note In the Functions 1.x runtime, webhook providers may use keys to authorize requests in a variety of ways, depending on what the provider supports. This is covered in Webhooks and keys. The Functions runtime in version 2.x and higher does not include built-in support for webhook providers. There are two types of keys: - Host keys: These keys are shared by all functions within the function app. When used as an API key, these allow access to any function within the function app. - Function keys: These keys apply only to the specific functions under which they are defined. When used as an API key, these only allow access to that function. Each key is named for reference, and there is a default key (named "default") at the function and host level. Function keys take precedence over host keys. When two keys are defined with the same name, the function key is always used. Each function app also has a special master key. This key is a host key named _master, which provides administrative access to the runtime APIs. This key cannot be revoked. When you set an authorization level of admin, requests must use the master key; any other key results in authorization failure. Caution Due to the elevated permissions in your function app granted by the master key, you should not share this key with third parties or distribute it in native client applications. Use caution when choosing the admin authorization level. Obtaining keys Keys are stored as part of your function app in Azure and are encrypted at rest. To view your keys, create new ones, or roll keys to new values, navigate to one of your HTTP-triggered functions in the Azure portal and select Manage. 
You may obtain function keys programmatically by using Key management APIs.: Turn on App Service Authentication / Authorization for your function app. The App Service platform lets you use Azure Active Directory (AAD) and several third-party identity providers to authenticate clients. You can use this to an Azure App Service Environment (ASE). ASE provides a dedicated hosting environment in which to run your functions. ASE lets you configure a single front-end gateway that you can use to authenticate all incoming requests. For more information, see Configuring a Web Application Firewall (WAF) for App Service Environment. When using one of these function app-level security methods, you should set the HTTP-triggered function authorization level to anonymous.. Trigger -. Output Use the HTTP output binding to respond to the HTTP request sender. This binding requires an HTTP trigger and allows you to customize the response associated with the trigger's request. If an HTTP output binding is not provided, an HTTP trigger returns HTTP 200 OK with an empty body in Functions 1.x, or HTTP 204 No Content with an empty body in Functions 2.x and higher. Output - configuration The following table explains the binding configuration properties that you set in the function.json file. For C# class libraries, there are no attribute properties that correspond to these function.json properties. Output - usage To send an HTTP response, use the language-standard response patterns. In C# or C# script, make the function return type IActionResult or Task<IActionResult>. In C#, a return value attribute isn't required. For example responses, see the trigger example. host.json settings This section describes the global configuration settings available for this binding in versions 2.x and higher. The example host.json file below contains only the version 2.x+ settings for this binding. 
For more information about global configuration settings in versions 2.x and beyond, see host.json reference for Azure Functions. Note For a reference of host.json in Functions 1.x, see host.json reference for Azure Functions 1.x. { "extensions": { "http": { "routePrefix": "api", "maxOutstandingRequests": 200, "maxConcurrentRequests": 100, "dynamicThrottlesEnabled": true, "hsts": { "isEnabled": true, "maxAge": "10" }, "customHeaders": { "X-Content-Type-Options": "nosniff" } } } } Next steps Learn more about Azure functions triggers and bindings Feedback
https://docs.microsoft.com/en-us/azure/azure-functions/functions-bindings-http-webhook?WT.mc_id=devto-dotnet-cephilli
CC-MAIN-2020-05
refinedweb
1,308
51.44
There are couple open source zip library available for .Net framework. One of the best I can recommend is SharpZipLibrary, it is pretty easy to use and reliable. In this tutorial, I will show you how easily you can zip a folder that have multiple sub folders and files. Firstly you will need to download the dll component this ISharpCode site. Once download is completed, you can add this dll reference into your project, either it can be MVC site, ASP.Net site, console or windows application. In your header code, please import the following library namespace. Note: the System.IO is part of .Net framework. using System.IO; using ICSharpCode.SharpZipLib.Zip; Next we need to create a function method that will zip a folder and perform recursive zipping inside of a folder public static void ZipFolder(string CurrentFolder, ZipOutputStream zStream) { string[] SubFolders = Directory.GetDirectories(CurrentFolder); //calls the method recursively for each subfolder foreach (string Folder in SubFolders) { ZipFolder(CurrentFolder, Folder, zStream); } string relativePath = CurrentFolder.Substring(CurrentFolder.Length) + "/"; //the path "/" is not added or a folder will be created //at the root of the file if (relativePath.Length > 1) { ZipEntry dirEntry; dirEntry = new ZipEntry(relativePath); dirEntry.DateTime = DateTime.Now; } //adds all the files in the folder to the zip foreach (string file in Directory.GetFiles(CurrentFolder)) { AddFileToZip(zStream, relativePath, file); } } The next function is to create a method that will add a file to a zip object. private static void AddFileToZip(ZipOutputStream zStream, string relativePath, string file) { byte[] buffer = new byte[4096]; //the relative path is added to the file in order to place the file within //this directory in the zip string fileRelativePath = (relativePath.Length > 1 ? 
relativePath : string.Empty) + Path.GetFileName(file); ZipEntry entry = new ZipEntry(fileRelativePath); entry.DateTime = DateTime.Now; zStream.PutNextEntry(entry); using (FileStream fs = File.OpenRead(file)) { int sourceBytes; do { sourceBytes = fs.Read(buffer, 0, buffer.Length); zStream.Write(buffer, 0, sourceBytes); } while (sourceBytes > 0); } } This is how you will use the function method to zip a folder. ZipOutputStream zip = new ZipOutputStream(File.Create("c://sample.zip")); zip.SetLevel(9); ZipFolder("c://sample", zip); zip.Finish(); zip.Close();
http://bytutorial.com/blogs/aspnet/how-to-use-open-source-sharpziplib-library-for-zipping-files-in-net
CC-MAIN-2017-39
refinedweb
353
50.12
advertise here This is the C# tutorial. (Switch to the VB tutorial) Download the code for this tutorial Download the tutorial in PDF format By Jesse Liberty Building Silverlight 2 applications just got a lot easier.. The project we're setting out to build is very similar to the Silverlight chat service built by ScottGu, and is shown in Figure 5-1 Figure 5-1. Completed Application (Click to view full-size image) The best way to get started is by opening Expression Blend. The first thing you'll see is the New Project window. Choose Silverlight 2 Application, set your language to Visual Basic and choose a location for your new project, which we'll name BlendForSilverlight, as shown in Figure 5-2 Figure 5-2. Creating the New Project (Click to view full-size image) Take a look in the upper right hand corner. The solution, project and other files created by Blend are identical to the files created by Visual Studio 2008. In fact; there is no exporting/importing between the two; you can open both development environments on the same files simultaneously and they will both work perfectly (see Figure 5-3) Figure 5-3. Blend Files The upper right hand corner not only contains the files, but also contains the properties window, the resources window and, below these, the data window. The central area has the "Art Board" (the design surface" and the Xaml (markup) which can be split as shown in Figure 5-4 Figure 5-4. Design Surface in Split Mode (Click to view full-size image) I've added highlighting in the upper right hand corner to point out the controls that allow you to switch among design, Xaml and split mode. The upper left hand corner contains the Interaction panel and the toolbar. Many of the tools in the tool bar have a small mark in the lower right hand corner indicating that they can be expanded to find related tools. For example, expanding the grid tool displays all the layout controls as shown in Figure 5-5 Figure 5-5. 
Expanding the Grid Tool The Chevron tool allows access to all the controls, including any user controls, custom controls or third party controls (be sure to click Show All) as shown in Figure 5-6 Figure 5-6. Chevron Expanded (Click to view full-size image) Note that from within the Asset Library exposed by expanding the Chevron you can also search for the control you want, and by clicking on the details radio button you can determine which library a control is in. Begin by switching to design view, and then hold the control key and use the mouse wheel to zoom out so that you can see the entire grid. If your mouse doesn't have a wheel, you can zoom by picking a value (or filling in an arbitrary value) at the bottom of the design window or by using the menu choices View®Zoom In and View®Zoom Out If your mouse doesn't have a wheel, you can zoom by picking a value (or filling in an arbitrary value) at the bottom of the design window or by using the menu choices View®Zoom In and View®Zoom Out You can move the surface by clicking on the hand and then using the mouse to click and drag or by using the scroll bars. Take a look at the upper right hand corner of your grid, as shown in Figure 5-7 Figure 5-7. Upper left corner of the grid Hovering over the upper left hand corner will tell you if you are in grid layout mode or Canvas layout mode. If you are in canvas layout, clicking will toggle you back to grid layout mode. You can create rows and columns just by sliding along the margins and clicking where you want the rows and margins placed, as shown in Figure 5-8 Figure 5-8. Creating a Row using The Mouse Immediately after I click, an open lock appears above and below my new row line. I'll add two lines, one for the top row and one for the bottom; thus implicitly creating a middle row, and thus three locks as shown in Figure 5-9. Figure 5-9. 
Creating the bottom row (Click to view full-size image) Note that immediately after the row is created, not only are the locks shown but the cursor changes shape to indicate that you can easily move, and thus resize the rows. The open locks indicate that the rows will resize as the browser (inside of which your Silverlight application will run) is resized. We want the middle row to do so, but we want the top and bottom row to remain of fixed size, so click on the top and bottom lock, and then click on split view to examine the effects on the XAML, as shown in Figure 5-10. Figure 5-10. Middle Row has Star Sizing Note that (on line 10) the middle row definition has its Height property set to * (star) indicating that it will take up the remaining room after the other rows are allocated. Let's create two columns, a smaller right column that has a fixed size and a larger floating size left column. Try this yourself without additional help. We'd like to give the entire application a light blue background, but to make it a bit more attractive, we'll use a linear gradient brush. To do this, please follow these numbered steps, reflected in Figure 5-11. Figure 5-11. Adding a Linear Gradient By the Numbers (Click to view full-size image) The next step is to add a button to the lower right hand corner. Click on a button on the toolbar, drag it to approximate size in the area and then click in the properties window. Next, open the Layout subpanel and set the Width and Height to Auto and set a small value (e.g., 4) for the Margin, all around. This will fill the area, but with a reasonable margin from the edges of the column and row. The value you put in a property box does not take effect until you leave that box (e.g., using tab) The value you put in a property box does not take effect until you leave that box (e.g., using tab) There are a few things to notice about the Layout Panel (Figure 5-12) Figure 5-12. 
Setting the Button Layout with the Layout Panel First, you can set the Width and/or Height to auto by pressing the button to the right of the field. Second, when you set a specific (non-default) value next to a property, a white dot appears (there are 5 white dots in Figure 5-12). Take a look at the Xaml once you have these values set, <Button HorizontalAlignment="Stretch" Margin="4,4,4,4" VerticalAlignment="Stretch" Grid. Blend has translated your choices into Xaml that will create the look you've chosen. We have, however, forgotten to change the "Content" property from Button to "Send". The easiest way to do so is to type Content into the property Search box, At "co" you will get back "Column", "Horizontal Content", "Data Content" and "Content" as shown in Figure 5-13 Figure 5-13. Search with "co" Refine your search just a bit further, and you reduce the complexity a bit, as shown in 5-14, Figure 5-14. Refined Search In any case, remove the word Button from the Content field and put in the word Search. Your text task is to set the font size to 18 and the font family to Verdana. Oh, and set it to Bold. (No cheating! Do this in design, not in Xaml!) (You'll find the image on the next page) Some (few) controls have a text property and most have a content property. Those with a content property often fill that property with text, but they are free to fill it with almost anything else, including other controls. Some (few) controls have a text property and most have a content property. Those with a content property often fill that property with text, but they are free to fill it with almost anything else, including other controls. Figure 5-15. Setting the Font Family and More Examining the finished application shown at the top of the tutorial reveals that the input area has a rounded rectangle as shown in Figure 5-16 Figure 5-16. 
Input Area is a Rounded Rectangle (Click to view full-size image) This is slightly tricky to achieve since the TextBox does not have a setting for rounding its corners. It turns out, however, that the Border control does. Our tricky solution, therefore, is to put a TextBox within a border, and letting the Border handle the look and feel and the TextBox mange the text handling! Step one is to fill the grid square with a border. Click on the Grid pallet and expand it to click on a border, and then drag out the border into the appropriate grid location (we'll size it properly with the properties window). Once in the grid, set its Width and Height to Auto, and its Margin to 4 all around, as shown in Figure 5-17 Figure 5-17. Sizing the Border in the Layout Panel Let's set the background brush for the border, by setting the values in the Brushes property window by hand to 255, 255, 255 with an Alpha (opacity) of 45%.I highly recommend doing these one by one. AS you set the red value the background turns Red. When you set the Green background to 255 as well, the background turns bright yellow. Add 255 for Blue and the background turns bright white. Finally, knocking the Alpha down to 45% gives us the pale shade we're looking for, as shown in Figure 5-18 Figure 5-18. Setting the Background Brush (Click to view full-size image) If you look carefully under the settings for RGBA you'll find the hex value #72FFFFFF which is the equivalent of setting the four values independently (with the alpha value first). Our Border control also needs its Border brush set (in 5-18 it is set to "No Brush"). The values we want (which it may well default to) are Alpha of 100% and full black (00,00,00). The Border brush won't appear unless we set its thickness (which defaults to zero); which we can do in the Appearance panel. Let's set it to 2. This is also where we get to set the rounded corners, which we'll set to 5 as shown in Figure 5-19. Figure 5-19. 
Setting the Border Thickness and Rounded Corners The border has the rounded corners, but as noted above, a border can't accept text input. For that we need a text box, but we don't want the text box to overwrite the background we just carefully placed in the border. Zap! All we need to do is make the text box reside within the border, and set it to transparent! To place the text box within the border, double click on the border in the Interaction panel, signaling to Blend that it is now the container for the next control. That is, the next control we add will not be a sibling, but a child of the border. The yellow rectangle will jump from Layout Root to the Border as feedback that it is now the active container. When you drag the text box in, it will be placed inside the text box, as shown in Figure 5-20, both from a design view and a Xaml view. Figure 5-20. Text Box Nested within Border (Click to view full-size image) There are a few new concepts all put together in this one construct, so a quick review is in order: We'll do the same thing on the top row, except that instead of using a TextBox we'll use a textblock, setting the text inside to a font family of Comic Sans MS, a font size of 48 and the text of your own name. To set the upper right hand corner, begin by double clicking on the LayoutRoot to make it the container, then click on an Image Control in the Chevron. Set the source property to point to any image file on your disk. The final part of our IM application is the middle: the conversation area, which will in fact be a list box. We'll place that in a border that will stretch across both columns. You can place that manually, just by stretching a border across both columns using the mouse. You'll notice as you go to refine the placement in the Layout panel that the ColumnSpan has been set to 2, and all you need do is fix the Margins to make the border fit snugly (e.g., with a margin of 4 all around.) 
You'll want to set the background to a solid color ( FF03588D works nicely) and once again we'll set the corners to a radius of 5 all around, as well as setting the border to black and a thickness of 2. Once again, double click on the new border, and drag a list box (from the chevron) inside the new border, and set it to fill the border. It is convenient to have dummy data to fill the list box, and the easiest way to do that is in Xaml. This is a good opportunity to switch to Visual Studio, so save all the files in Blend but leave blend open and switch over to the project in Visual Studio 2008. If your project was already open, you'll be notified that it has changed, say yes to accept the changes, and you should find that all the files are shown and your Page.xaml looks impressively similar to what you just saw in Blend. Figure 5-21. Examining the Project in Visual Studio (Click to view full-size image) You can now easily populate your list box with dummy list items, <ListBox Height="Auto" Width="Auto" x: <ListBoxItem Content="Jesse: Is this working?" /> <ListBoxItem Content="Scott: Of course." /> <ListBoxItem Content="Jesse: I'm following the directions." /> <ListBoxItem Content="Scott: Then you should be fine." /> <ListBoxItem Content="Jesse: Great, thanks." /> </ListBox> Save your file and switch back to Blend (no need to close Visual Studio 2008). When you do, Blend lets you know the files were modified, Figure 5-22. Blend letting you know the files were modified Click yes, and your dummy data should be visible. You can press F5 to run the program as an interim check. You'll find that the image doesn't appear unless you place a copy of the image file in the bin/debug directory as well. That's not a big problem, however, as we'll be binding to the source for the image file. To support the exchange of instant messages, we will build (or, to be more honest, stub out) a service that will identify the remote user's name, image and that will manage the messaging. 
Because we are more interested in Silverlight than in building the service (at least for the purposes of this tutorial) we'll simplify this by creating a ChatSession class that will stand in for all the networking, sockets and other plumbing, and which will maintain instead a collection of Message objects. As you can guess, we'll also create, therefore, a Message class to represent each message "transmitted" by the service. Because Visual Studio 2008 is better suited to programming than is Blend, we'll do this work in Visual Studio 2008 but we'll get there by right clicking on the solution in Blend and choosing Edit In Visual Studio, as shown in Figure 5-23. Figure 5-23. Moving to Visual Studio from Blend Once in Visual Studio 2008 right click on the project and choose Add Class. In the new items dialog select class and name your new class ChatMessage as shown in Figure 5-24 Figure 5-24. Adding the Chat Message Class (Click to view full-size image) The Chat Message class is pretty straight forward, consisting only of two properties, public class ChatMessage { private string privateUserName; public string UserName { get { return privateUserName; } set { privateUserName = value; } } private string privateText; public string Text { get { return privateText; } set { privateText = value; } } } The ChatSession is somewhat more complex, but we'll simplify greatly by stubbing out all the difficult parts. We begin by marking the class as implementing INotifyPropertyChanged, which as noted in the DataBinding Tutorial ensures that the UI fields will be updated as the data changes. 
public class ChatSession : INotifyPropertyChanged { This interface requires only that we provide a PropertyChangedEventHandler that implements the PropertyChanged event, public event PropertyChangedEventHandler PropertyChanged; The class has two straightforward public properties: one for the user's name, and one for the URL of the user's Avatar (image), private string privateRemoteUserName;public string RemoteUserName{ get { return privateRemoteUserName; } set { privateRemoteUserName = value; }}private string privateRemoteAvatarUrl;public string RemoteAvatarUrl{ get { return privateRemoteAvatarUrl; } internal set { privateRemoteAvatarUrl = value; }} Next we want to create a collection of the ChatMessages. What we want, however, is to raise an event each time the collection changes (e.g., a message is added) and we want the list box to respond to that event. We could create all of that, but the framework makes it easy. ListBox already knows to respond to PropertyChanged events, and there is a special collection type ObservableCollection(of) that fires exactly that event every time the collection is changed. 
This makes life a walk in the park, private ObservableCollection privateMessageHistory;public ObservableCollection MessageHistory{ get { return privateMessageHistory; } internal set { privateMessageHistory = value; }} Sending a message is now as simple as adding a new Chat message where the text is taken from the message box and the user name is taken from the current user's name (for now, hard coded to "Me") public void SendMessage(string message){ // Todo: Send to remote chat server over sockets MessageHistory.Add(new ChatMessage {Text = message, UserName = "Me"});} We leave it as an exercise for the reader to actually send the message across the network We leave it as an exercise for the reader to actually send the message across the network Before we can send messages, however, we must connect with the remote user, obtaining the remote user's name, avatar and initializing the observable collection of messages, public void ConnectWithRemoteUser(string remoteUserNameParam) { // Todo: 1) Wire-Up socket stack to receive notifications of received messages // 2) Wire-Up to a remote avatar service instead of hard-coding my picture RemoteUserName = remoteUserNameParam; RemoteAvatarUrl = "billg.jpg"; MessageHistory = new ObservableCollection(); // Notify any listeners of property changes if (PropertyChanged != null) PropertyChanged(this, new PropertyChangedEventArgs("RemoteUserName")); if (PropertyChanged != null) PropertyChanged(this, new PropertyChangedEventArgs("RemoteAvatarUrl")); if (PropertyChanged != null) PropertyChanged(this, new PropertyChangedEventArgs("MessageHistory"));} Finally, we'll add a method we can call that will simulate an exchange of IM's (but we'll only call this method when we're in design mode; not when the program is actually running) public void PopulateWithDummyData() { RemoteUserName = "BillG"; RemoteAvatarUrl = "billg.jpg"; MessageHistory = new ObservableCollection(); MessageHistory.Add(new ChatMessage {UserName = "BillG", Text = "How is 
your video going?"}); MessageHistory.Add(new ChatMessage {UserName = "BillG", Text = "Hmmm....you there?"}); MessageHistory.Add(new ChatMessage {UserName = "BillG", Text = "Hello???"}); MessageHistory.Add(new ChatMessage {UserName = "Jesse", Text = "Sorry Bill - working on a video..."}); MessageHistory.Add(new ChatMessage {UserName = "BillG", Text = "Oh - ok."});} To ensure that this dummy data is in place during design mode, we'll test in the constructor of ChatSession, public ChatSession() { if (HtmlPage.IsEnabled == false) { PopulateWithDummyData(); }} With these classes in place (you did build to make sure all is well, yes?) we're ready to data bind. Let's do that in Blend (!) Make sure you've saved all your files in Visual Studio 2008 but don't close it, just switch over to Blend. Say yes to the "something has changed when I wasn't looking" dialog (your wording may vary). Under the project panel you should see a Data panel, within which is a tab marked CLR Object. Click on that and Hey! Presto! You will see Blend for Silverlight that will open up to show a dialog that lets you add any CLR object as a data source. Notice that ChatMessage and ChatSession are listed. Click on Chat session and click OK and Chat SessionDS appears in your Data list, as shown in Figure 5-25. Figure 5-25. Adding Chat Session as a Data Source Now, this is really scary how easy this is; click on RemoteUserName, and drag it onto the name field at the top of the design and let go, as shown in Figure 5-26 Figure 5-26. Binding the RemoteUserName to the TextBlock by Drag and Drop When you let go, Blend recognizes that you are binding the data, and asks first which field you'd like to bind to (guessing you'd like to bind to Text) and then offers (if you expand the dialog) additional options, such as two-way binding (so that if the UI is updated, the changes will be written back to the data source), as shown in Figure 5-27 Figure 5-27. 
Creating the Data Binding When you click OK the UI is immediately updated with the bound data. Figure 5-28. Bound Data Unfortunately, your list box does not know how to display a Chat Message (why would it?). We can fix that, of course, by using a Data template, <ListBox.ItemTemplate> <DataTemplate> <StackPanel Orientation="Horizontal" Margin="5"> <TextBlock FontFamily="Comic Sans MS" FontSize="16" Foreground="red" Text="{Binding UserName}"/> <TextBlock Text=": "/> <TextBlock FontFamily="Comic Sans MS" FontSize="16" Text="{Binding Text}"/> </StackPanel> </DataTemplate></ListBox.ItemTemplate> This teaches the ListBox how to display each of the list items, specifying that each will be displayed with three TextBlocks, aligned horizontally by a stack panel, as shown in Figure 5-29 Figure 5-29. Data Template at work The ListBox and especially the Start Button don't quite look the way we'd like. In the previous version of this tutorial we fixed them with hand-written templates, but Blend now offers a far better solution, which we will introduce in the next tutorial, so we're going to leave this as it is for now (not quite done, but a good way towards a solid introduction to using Blend and Visual Studio together for programming an application). Better not to finish, than to show you what amounts to an obsolete and inferior way of solving the problem. In the interim, both Tim Heurer and I and many others will be blogging about the new approach to Templates, and we'll have the full tutorial out within a few weeks. Hi, how to download expression blend for free? like vs2008 express edition. Hi ! Thanks, this is a good article. Sorry:This contains an ugly, bad solution. There is only code, which write something from one control into another, what a wizards work ....... br--mabra not bad yaar but jst good one
http://silverlight.net/learn/tutorials/expressionblendfordevelopers_cs.aspx
crawl-002
refinedweb
3,893
64.95
I get this kind of events logged in the eventlog:Source: .Net RuntimeType: ErrorUnable to open shim database version registry key - v2.0.50727.00000.Is this 'normal' behaviour or is there a misconfiguration. I have this messages onWindows 2003 Server, on SqlServer2005 boxes as well on TFS app tier and Terminal Server running VS2005.Strange thing is: everythings seems to work OK... Rene I'm testing click once deployment, and when i install the msdn click once sample application on a w2003 server, using TS i'm getting this error, and the application does not start....Anybody has any idea Is it safe to uninstall the netframe on the servers that got problems until this issue is solved < xml:namespace prefix = o We have a VS2005 windows app deployed using a SQL Server 2005 backend on a single computer. This error occurs when the app attempts a connection to the DB in order to save a transaction and, in fact, causes a timeout. It appears to happen at random, twice in a 4 day span. The NT user has restricted rights and does not belong to the administrators group. This is certainly a major problem. -nava Hello Jesse, It appears this is more of a global issue than just Excel. I am encountering the error running BizTalk 2004 (on 1.1) when an Http Receive is executing. I suspect any process that wants to host the CLR that lives on the COM side of the fence (could it be any other way right now ) throws this error. It is a nuisance, but if I understand you correctly, it has no implications otherwise Hi Jesse Is this error logged in the KB or msdn , can you give me the link for this issue in microsoft site. I am also looking for more information on this issue. I have an application that has stopped working since installing the final version of VS2005 - It was working OK with the Beta versions, but now when I try to sign on on to my Web app , I get a Server Application Unavailable message. 
The event logs contains messages Unable to open shim database version registry key - v2.0.50727.00000. aspnet_wp.exe (PID: 4056) stopped unexpectedly. The application is an asp.net 2.0 web app calling a asp.net 2.0 web service - it does not use excel, word, biztalk, etc. The application worked for a while after installing VS2005 but has now stopped - I dont know why. This is an urgent issue for me as it is delaying my development work. Regards Aengus O'Sullivan Updated: If appears that my asp worker process problem was caused by an error during compilation - using the wrong version of an assembly. Problem solved : sorry for false alarm. However I would still like to fix the Unable to open shim database version registry key - v2.0.50727.00000 Thanks Can you run regmon (from sysinternals) and filter on errors only, to see which registry key is actually the culprit Tnx Rene I am just trying to find out more on this issue. It is showing up in our site's Event Viewer and I am investigating whether or not it is having affects on login time. Currently we are experiencing login times around a minute and a half on the slowest segment of our network and I notice this error being produced a few times during startup and login. Could this potentially be a cause We have current Office 2003 Updates and are running Visual Studio .NET 2003 in all sites. According to Jesse it is only an access permission issue on one ore more registry keys. If you run Registry Monitor (download from) (no install required, but run it as Administrator) you can find out which registry keys give a Access Denied. The involved registry keys require write permissions. I cannot imagine that this is causing network problems but it might be related with the start of an appdomain where the starting application is trying to lookup a non-existing server or a wrong route.
http://databaseforum.info/25/500041.aspx
CC-MAIN-2019-13
refinedweb
680
61.97
The QTextOption class provides a description of general rich text properties. More... #include <QTextOption> Note: All the functions in this class are reentrant. The QTextOption class provides a description of general rich text properties. QTextOption is used to encapsulate common rich text properties in a single object. It contains information about text alignment, layout direction, word wrapping, and other standard properties associated with text rendering and layout. See also QTextEdit, QTextDocument, and QTextCursor. The Flags type is a typedef for QFlags<Flag>. It stores an OR combination of Flag values. This enum holds the different types of tabulator This enum was introduced in Qt 4.4. This enum describes how text is wrapped in a document. Constructs a text option with default properties for text. Constructs a text option with the given alignment for text. Construct a copy of the other text option. Destroys the text option. Returns the text alignment defined by the option. See also setAlignment(). Returns the flags associated with the option. See also setFlags(). Sets the option's text alignment to the specified alignment. See also alignment(). Sets the flags associated with the option to the given flags. See also flags(). Sets the tab positions for the text layout to those specified by tabStops. See also tabArray(), setTabStop(), and setTabs(). Sets the default distance in device units between tab stops to the value specified by tabStop. See also tabStop(), setTabArray(), setTabs(), and tabs(). Set the Tab properties to tabStops. See also tabStop() and tabs(). Sets the direction of the text layout defined by the option to the given direction. See also textDirection(). If enable is true then the layout will use design metrics; otherwise it will use the metrics of the paint device (which is the default behavior). See also useDesignMetrics(). Sets the option's text wrap mode to the given mode. See also wrapMode(). 
Returns a list of tab positions defined for the text layout. See also setTabArray() and tabStop(). Returns the distance in device units between tab stops. Convenient function for the above method See also setTabStop(), tabArray(), setTabs(), and tabs(). Returns a list of tab positions defined for the text layout. This function was introduced in Qt 4.4. See also tabStop(), setTabs(), and setTabStop(). Returns the direction of the text layout defined by the option. See also setTextDirection(). Returns true if the layout uses design rather than device metrics; otherwise returns false. See also setUseDesignMetrics(). Returns the text wrap mode defined by the option. See also setWrapMode(). Returns true if the text option is the same as the other text option; otherwise returns false.
http://doc.trolltech.com/4.5-snapshot/qtextoption.html
crawl-003
refinedweb
432
62.44
A new developer joins a project, and asks in Campfire: “what is the purpose of the can_refer_other_users flag?” The flag in question is a boolean column on User. Rails automatically created a query method for this column, which is used like so: if can_refer_other_users? # allow user to refer end The new developer’s innocuous query reveals an important subtlety between code that describes its job, and code that makes its purpose clear. At first glance, a name like can_refer_other_users seems quite good. You’re reading it now without any context, and can probably guess exactly what it does. The problem is, there are no clues about why we wouldn’t let a user refer others. Since growth is good, it’s counter-intuitive that we’d block it. Looking in the database, most users have it set to true, with a handful of falses. No help there. It turns out that some users are disallowed from this process because they were caught referring their own duplicate accounts, earning illegitimate bonuses. Unfortunately, you’ll find this information nowhere in the code. It lives only as an oral myth passed between developers of this project, subject to our own faulty memories. I think this is a great example of the weakness of code that tells you what it does, but not why. Code that is clear about its job, but opaque about its purpose. What if can_refer_other_users weren’t a flag anymore, but a method that referred to a new flag? def can_refer_other_users? !caught_referring_own_accounts end Suddenly, it’s clear why this alternate code path exists. New developers have one less question to ask, and someone that removed the bonus for referring others would know this flag is now unnecessary. The implicit purpose behind the code is made explicit. And rather than burdening our memories with this fact, we use git. Good developers are careful to create code that clearly tells you what it does, but the why is far more important. It’s more important because all code already says what it does. 
Even if that flag had been named foo, you could examine the conditional and ascertain that it determined whether users could refer others. However, there’s simply no way you could find out why the flag was created without asking someone. That knowledge cannot be deduced from any amount of code study. It’s tough to write code that communicates what it does, but it turns out that bar isn’t quite high enough. Strive to write code that makes clear the meanings behind its machinations. Code that can survive the loss of those who originally wrote it; that doesn’t just redescribe its implicit function. Code that answers “why?”. Next Steps & Related Reading Detect emerging problems in your codebase with Ruby Science. We’ll deliver solutions for fixing them, and demonstrate techniques for building a Ruby on Rails application that will be fun to work on for years to come. Grab a free sample of Ruby Science today!
https://robots.thoughtbot.com/code-that-says-why-it-does
CC-MAIN-2017-13
refinedweb
500
72.97
21 November 2011 09:44 [Source: ICIS news] TOKYO (ICIS)--?xml:namespace> The country’s domestic shipments totalled 12,046 tonnes in October 2011, down by 9.3% from October 2010, while its exports fell by 3.3% to 382 tonnes, according to the Japan Polypropylene Film Industry Association. Among the domestic shipments, The country’s production of oriented PP film decreased by 1.6% to 18,906 tonnes in October from the same period a year before, according to the association. Its domestic shipments of oriented PP film fell by 11% to 17,673 tonnes in October year on year, while its exports of the product rose by 15% to 376 tonnes, the data showed.
http://www.icis.com/Articles/2011/11/21/9509949/japan-october-cast-polypropylene-film-production-falls-by.html
CC-MAIN-2014-42
refinedweb
116
64.71
It can be hard to write idiomatic web applications in Golang with many frameworks not following the rules. This article explains best practices for middlewares. The article hasn't been updated for a long time. If it's code, it probably don't work anymore. My opinions may have also changed, as well as my experience.. This is the second article in the five-part series "Build Your Own Web Framework in Go": We create a very simple app from scratch with the net/http package of the standard library: import ( "net/http" "fmt" ) func handler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Welcome!") } func main() { http.HandleFunc("/", handler) http.ListenAndServe(":8080", nil) } We have a function with 2 args: a response writer and a request. In addition to having a function, we can implement the http.Handler interface to any struct. type handler struct {} func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Welcome!") } func main() { http.Handle("/", handler) http.ListenAndServe(":8080", nil) } Any struct with the method ServeHTTP(http.ResponseWriter, *http.Request) will be implementing http.Handler and will be usable with the Go muxer ( http.Handle(pattern, handler) function). We now want to add a simple log of the time spent to process each request: func indexHandler(w http.ResponseWriter, r *http.Request) { t1 := time.Now() fmt.Fprintf(w, "Welcome!") t2 := time.Now() log.Printf("[%s] %q %v ", r.Method, r.URL.String(), t2.Sub(t1)) } func main() { http.HandleFunc("/", indexHandler) http.ListenAndServe(":8080", nil) } Easy enough. Here's what it will output: [GET] / 1.43ms [GET] /about 1.98ms Now we add a second handler because, let's face it, there are not many apps with only one route. 
func aboutHandler(w http.ResponseWriter, r *http.Request) { t1 := time.Now() fmt.Fprintf(w, "You are on the about page.") t2 := time.Now() log.Printf("[%s] %q %v ", r.Method, r.URL.String(), t2.Sub(t1)) } func indexHandler(w http.ResponseWriter, r *http.Request) { t1 := time.Now() fmt.Fprintf(w, "Welcome!") t2 := time.Now() log.Printf("[%s] %q %v ", r.Method, r.URL.String(), t2.Sub(t1)) } func main() { http.HandleFunc("/about", aboutHandler) http.HandleFunc("/", indexHandler) http.ListenAndServe(":8080", nil) } Code duplication detected! We could create a function with a closure. But if we have multiple functions like that, it will become as bad as callback spaghetti in Javascript. We don't want that. We want something like the middleware systems of Rack, Ring, Connect.js and other similar solutions. What we would like is to chain multiple handlers. We already have this kind of handlers in the standard library: http.StripPrefix(prefix, handler) and http.TimeoutHandler(handler, duration, message). They both take a handler as one of their arguments and they both return a handler. So we can write a handler and pass another handler to it. loggingHandler(recoverHandler(indexHandler)) So a middleware would be something like func (http.Handler) http.Handler This way we pass a handler and returns a handler. At the end we have one handler and can be called with http.Handle(pattern, handler) func main() { http.Handle("/", loggingHandler(recoverHandler(indexHandler))) http.ListenAndServe(":8080", nil) } But it can be cumbersome for multiple routes, repeating the stack over and over again. It would be easier to chain them in a more elegant way. Alice is a small package to chain handlers more elegantly. 
Furthermore, we can create a common list of handlers and reuse them for each route like this: func main() { commonHandlers := alice.New(loggingHandler, recoverHandler) http.Handle("/about", commonHandlers.ThenFunc(aboutHandler)) http.Handle("/", alice.New(commonHandlers, bodyParserHandler).ThenFunc(indexHandler)) http.ListenAndServe(":8080", nil) } Problem solved. We now have a middleware system that is idiomatic and use standard interfaces. Alice is 50 lines of code, so it is a very small dependency. But we still can't use handlers like http.StripPrefix(prefix, handler) because it is not func (http.Handler) http.Handler. Though, each time we need to use http.StripPrefix(prefix, handler) we can just make a new handler defined like func (http.Handler) http.Handler: func myStripPrefix(h http.Handler) http.Handler { return http.StripPrefix("/old", h) } Now that we found a way to remove code duplication in an elegant way, we can finish to write our application. First we extract our logging code into a handler: func loggingHandler(next http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { t1 := time.Now() next.ServeHTTP(w, r) t2 := time.Now() log.Printf("[%s] %q %v ", r.Method, r.URL.String(), t2.Sub(t1)) } return http.HandlerFunc(fn) } We run some code before executing the next handler(s) and after we have some more code. It is very similar to Negroni but we don't need a new and redefined http.Handler interface. We stay with standard interfaces, which means less things to learn and less dependencies for an equivalent feature set. 
Finally we use alice to chain loggingHandler with our other handlers: func aboutHandler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "You are on the about page.") } func indexHandler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Welcome!") } func main() { commonHandlers := alice.New(loggingHandler) http.Handle("/about", commonHandlers.ThenFunc(aboutHandler)) http.Handle("/", commonHandlers.ThenFunc(indexHandler)) http.ListenAndServe(":8080", nil) } Duplication removed! Another feature that is really necessary. When our code panic in production (make sure it should not but we can forget things sometimes) our application will shutdown. Even if we have a monitoring software to restart our application, we'll have an embarrassing downtime. So we must catch panics, log them and keep the application running. It's pretty easy with Go and our middleware system. We just have to make a deferred function that will recover the panic, respond with a HTTP 500 error and log the panic: func recoverHandler(next http.Handler) http.Handler { fn := func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err != nil { log.Printf("panic: %+v", err) http.Error(w, http.StatusText(500), 500) } }() next.ServeHTTP(w, r) } return http.HandlerFunc(fn) } Then we add it to our middleware stack: func main() { commonHandlers := alice.New(loggingHandler, recoverHandler) http.Handle("/about", commonHandlers.ThenFunc(aboutHandler)) http.Handle("/", commonHandlers.ThenFunc(indexHandler)) http.ListenAndServe(":8080", nil) } We just saw that func (http.Handler) http.Handler is a pretty simple way to define middlewares, yet it delivers everything we need. http.Handler is a standard interface and this chaining is already used in popular libraries like Gorilla and in the standard library itself. I think this is the most idiomatic way. 
The two middlewares we wrote, logging and panic recovery, are rewrote in each framework I've seen despite the fact they all are pretty much the same. But most frameworks have their own version of handlers and can't really work with other middlewares that their own. We will see in the next part that there might be a reason. When we need to share values between middlewares we might need to change some things. But it is not as big of a change so there's no reason to rewrite middlewares that have already been written.
https://www.nicolasmerouze.com/middlewares-golang-best-practices-examples/
CC-MAIN-2018-39
refinedweb
1,195
62.64
By popular demand, this is a sequel to the original post, which grew unwieldy with 300+ comments taking long time to load. Same purpose as the original post: "There is a sub on Reddit called Techsupport gore where people share pictures and stories of some horrific things they see done with technology. I've been looking at some old programs lately and see lots of stuff that would qualify as "ABAP gore". :) So I thought it might be therapeutic and educational to post some examples on SCN (examples to follow in the comments)." I got an email from the users asking to explain what triggers an error message they were getting. Thankfully, it wasn't one of those "& & & &" messages but, even so, it occurred in about 10 different programs. Below is just a part of what I found (relevant definitions provided for context, comments mine, some variable names changed to protect the innocent). As a side note, more or less the same code repeats in all the different programs, not a single time an effort was made to create at least an FM. DATA: valid_char(36) TYPE c " Not a constant VALUE 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'. DATA: some_variable TYPE c LENGTH 10. FIELD-SYMBOLS: <valchar>. len = strlen( some_variable ). ASSIGN some_variable+0(len) TO <valchar>. IF <valchar> CO valid_char. ELSE. " display vague error message ENDIF. ( Jelena Perfiljeva thanks for this new place !! ) There are a lot of points on these lines .... SY-ABCDE / ASSIGN not protected ... and REGEX that do the job in one line. Maybe, what could be nice, it is a Blog/Wiki where we could propose simplification of strange code found ? Great point, Frederic Girod ! As stated, the purpose of this discussion is not merely to poke fun at the code but also educational. After all, a lot can be learned from how NOT to do ABAP. I am happy to see that when something is zero then the "not zero" flag is set. The other day I also saw a variable called COSTED which was set to X when the order had not been costed. Hahaha! 
This reminds me of a rather cheesy standup comedy trick of saying something and then immediately exclaiming "Not!" :) "Order costed. Not!" Extra points for using 'X' too. :) Oh, I can explain: surely the code was written on Opposite day! (German: "Gegenteiltag"). Wow, it (the day, not the code!) even has an article on Wikipedia!? This one gave me a laugh "unless on is gung-ho" start-of-selection. " this report reports the truth ( raw persistence, unless one is gung-ho ... set parameter id '/UI2/PAGE_CACHE_OFF' field p_caoff. data lr_packages type ref to /ui2/if_fcc_const=>TP_T_PACKAGES. data lv_scope type string. data l_i_signature type abap_bool value 'X'. if ( g_signat = 'X' ). clear l_i_signature. endif. "this report reports the truth" - haha, I'll have to remember this one, maybe will use it in a title one day. :) I wonder how these lines of code could be written ? Is it a a result of several people modifying without understanding the first objective ... ? Perhaps. The comment itself seemed strange to me. The lines of code are strange, as this is the start of the program. But I didn't really dig much deeper to try to find out why they did this at the start of the program. But I'm not sure where g_signat was found data l_i_signature type abap_bool value'X'.if( g_signat ='X').clear l_i_signature.endif. Assuming g_signat is a global flag of somekind, what is the added value of a local flag pertaining to it? This is hurting my brain. The only way you would use redundancy in coding for such a simple variable is maybe for readability during debugging where the same value has a different functional meaning elsewhere. Before reading the code, the functional guy said "This code is special, but nothing is done without reason" read table t_dliv with key kunnr = t_paret-kunwe. clear w_index2. loop at T_MDVEX. w_index2 = w_index2 + 1. loop at t_dliv where kunnr = t_paret-kunwe. same code ... loop at t_dliv where kunnr = t_paret-kunwe. if t_dliv-zdatu GE T_MDVEX-DAT01. exit. 
else. clear t_dliv-zdatu. endif. endloop. Why CLEAR ?? > but nothing is done without reason he lied �� Class /IWBEP/CL_MGW_ABS_DATA . Yes one can comment too much. CONSTANTS: BEGIN OF gcs_association_multiplicity, one TYPE string VALUE '1', " Indicates that exactly one entity type instance exists at the association end zero_or_one TYPE string VALUE '0..1', " Indicates that zero or one entity type instances exist at the association end many TYPE string VALUE '*', " Indicates that zero, one, or more entity type instances exist at the association end. END OF gcs_association_multiplicity . FIELD-SYMBOLS: <ls_deep_component> TYPE any. *-get service namespace (basically service name) from application context object to fulfill API contract mo_context->get_parameter( EXPORTING iv_name = /iwbep/if_mgw_context=>gc_param_service_namespace IMPORTING ev_value = lv_ctx_service_namespace ). *-get child nodes of current expand node io_expand_node->get_children( IMPORTING et_children = lt_children ). *-check whether current node has more children IF lt_children IS NOT INITIAL. ...and on, and on, and on... We have some programs like that at work. :( This type of "code narration" style is driving me crazy. For cripes sake, let the code speak for itself! Seen plenty of it in customer code, but this is recent-ish SAP code - it's deep in the core of the Netweaver Gateway / OData service. And if someone reading this doesn't know what 0..1 multiplicity is then they need far more help than the comment... And they're using prefixes... ;-) Triple-barreled ones to boot! It is standard SAP code. I know for a fact that SAP mandates the use of prefixes in standard SAP code. I have physically sat in Walldorf HQ and watched them doing it. Yikes! Not an Abap Gore, but a funny Standard SAP Code in the ATP Check * these are the adventures of the new spaceship BERID; * she intrudes into MRP areas where no man has gone before; PERFORM DISPO_AREA_SET TABLES P_ATPCSX P_ATPMATX. 
We used to have a thread for the funny comments too! Feel free to revive the tradition. :) ************************************************************** *** There is no point in doing a RETURN here since we are done here *** anyway. Or... could have just put RETURN. Or nothing. Instead of a three line comment that adds no value whatsoever. Sometimes I feel there should be a section of Psychology dedicated to the program comments. Could be an interesting research field. Have a look at the buffering settings on standard table T001W. Can you spot what is wrong? Actually, I can't!? Seem's ok to me.... Care to explain? (Screenshot from a recent system - S/4HANA 1909 , in case that matters ) This is something that has caught us out a few times in my company. People get really confused as to whether MANDT counts in the number of key fields. Clearly it does. So in this case T001W is generically buffered on MANDT. As there are two key fields that is the only way you can possibly generically buffer it. This means the first time someone does a read on T001W in a client the entire table is loaded into the buffer. It is, in effect, fully buffered. Take TVKOT as another example. That is also generically buffered on MANDT. I was always told that for text tables you should generically buffer on SPRAS. In our Australian system we only have one country as my German colleagues have pointed out once or twice, so we only ever log on in English. Yet the standard German values in TVKOT get loaded in as as well. I put it to you this was never intended. The person who created the setting on TVKOT thought MANDT did not count and so set the number of fields to "1" trying to buffer on SPRAS. There are lots of things wrong with this, but the one that makes my head spin is that by passing in "E" to the LR_EXCLUDE range the last instruction is to delete everything that is NOT in the exclusion table. 
If typing out "CHAR1" for a single character data type is too much work, just use this handy type definition: class /IWBEP/CL_MGW_PUSH_ABS_DATA ... ... public section. types S type CHAR1 . Facepalm. :) Include LBTCHFXX contains this form. FORM gen_jobcount USING jobname jobcount rc. DATA: jcnt LIKE tbtco-jobcount. ** FIXME ** Diese sehr rudimentaere Methode soll durch Nummernkreisvergabe ** ersetzt werden GET TIME. jcnt = sy-uzeit. rc = 1. DO 99 TIMES. IF sy-index > 9. jcnt+6(2) = sy-index. ELSE. jcnt+6(1) = '0'. jcnt+7(1) = sy-index. ENDIF. SELECT SINGLE * FROM tbtco WHERE jobname = jobname AND jobcount = jcnt. IF sy-subrc > 0. " freie Nummer gefunden jobcount = jcnt. rc = 0. EXIT. ENDIF. ENDDO. ... ENDFORM. For those who don't read German, that comment means "This very rudimentary method is to be replaced by number range assignment" This comment was put in between 4th July 1996 and 15th July, 2013. We're still waiting somewhere between 24 and 7 years later! (My money is on the 24 years). I'll join your bet. I can confirm it's not later than November 2008. "There is nothing more permanent than a temporary fix" (c) I bet some code I wrote back in 2005 as a "temporary workaround" still runs in Production. [blushing] Forget the comment, I love the roundabout keep-stabbing-at-the-database-until-we-find-a-hole approach. Custom (Z...) table has a key field named EBELN (= same name as the key in standard PO tables EKKO/EKPO). However, the field is assigned a custom data type that is using CHAR10 domain. Even though the length is the same as standard EBELN data type, this one doesn't use ALPHA conversion routine. Now I have a fun task of ELI5 to the users how '100' is different from '0000000100'. I just decided to change the name of a local variable I came across. The previous name was LD_VALUE and for some reason I did not think that was descriptive enough. I do find the use of LD_ wrong. 
D is for Data, which is the most generic type in the ABAP universe — it includes tables and structures — yet LD_ is used to denote an elementary data type. Another reason to drop prefixes... Now I came across the conditional logic that followed the above comment today. "This condition will be removed" Do you know, when someone writes a comment like that, I honestly think they actually believe what they are saying. You should have waited for year 2012 at least... Well, when you have 9986 lines, things slip up eventually. :) There are five INCLUDES, each one with way more lines than that. For 20 years code has been added non-stop, with nothing ever removed when it was not relevant any more. I am putting any new logic in an "Island of Happiness" using TDD. I am giving a presentation on what I am doing at SAP Online Track on 30/31 May, plug, plug. "nothing ever removed when it was not relevant any more" - this is my pet peeve in legacy code. 20 years, and no one stops to run Extended Check (which has existed for a long time already) or to think how to improve even a little thing. And it just gets worse and worse. Just. In. Case. It. Is. Not. Clear. Some more from the Redundant Comment Department. Thanks for the tip! :) As a nod to this somehow continued discussion about Hungarian Notation, here is just a taste of what I found today in one program: t_prot - global table f_stat - global variable p_vbeln - USING parameter for a routine p_remove - CHANGING parameter for a routine Local variables: lv_menge, xdocnum, o_vbeln (no, not an object). Nice collection of random global variables (and yes, there are 300+ lines of them). Can't make this up. If I was looking for an example of bad naming choices and strange definitions, this would be it. Feel free to use this image for educational purposes, free of charge. OK, I found a user exit which was evaluating system field SYST-ONCOM. I had never even heard of that. Anyway, if the value was "T" something special happened. 
Weeks later I found (by accident) what was setting that field to "T". So someone had decided the best way to transfer data from one place to another was to pick a random field from the SYST structure which they did not know what it did, fill it with a random value, and the hopefully that system field will not have changed by the time the user exit evaluated it. A piece of code from the fifth of BIG4 Very clean boy �� He cleans up at every iteration The obsessive CLEARing is one of my pet peeves. I've seen too many times when a variable is cleared immediately after declaration. I stumbled across function module SLS_MISC_GET_MEDIAN, which contains a brilliant algorithm to calculate the median (granted, the function module is marked as obsolete). Behold: IF NOT p_num[] IS INITIAL. loop at p_num. lv_num_count = lv_num_count + 1. endloop. sort p_num by number. lv_num_mod = lv_num_count mod 2. IF lv_num_mod = 0. * --- even amount of numbers lv_index1 = lv_num_count / 2. lv_index2 = lv_num_count / 2 + 1. lv_num_count = 0. loop at p_num. lv_num_count = lv_num_count + 1. if lv_num_count = lv_index1 OR lv_num_count = lv_index2. lv_median = lv_median + p_num-number. endif. endloop. lv_median = lv_median / 2. ELSE. * --- uneven amount of numbers if lv_num_count = 1. loop at p_num. lv_median = p_num-number. endloop. else. lv_index1 = ( lv_num_count - 1 ) / 2 + 1. lv_num_count = 0. loop at p_num. lv_num_count = lv_num_count + 1. if lv_num_count = lv_index1. lv_median = p_num-number. endif. endloop. endif. ENDIF. ELSE. raise List_Of_Number_Is_Empty. ENDIF. median = lv_median. I guess this is an ancient code before READ TABLE was introduced.. �� Holy cr@p! Truly a gem :-) It was created in 2000, READ TABLE was around for a looooong time by then. INCLUDE MM06EFMP * Verfügbarkeit überprüfen: SUBRC ungleich 0 wenn alles ok ist!!! PERFORM availability_check. (Translation: SUBRC not equal 0 when everything is OK) Inverse logic ftw. Or born to fail? 
Perhaps the PROgrammer is an old C-language fox? Used subrc as a hybrid parameter and returns how many of whatever are available? Old habits... OK, someone please tell me what CHECK TBL_ZSD04-POSNR IN R_DELIVERIES means. As far as I can see it is a double negative, so if the value is IN the range then in fact it is NOT in the range and so the result is FALSE. What if the evaluation was a thousand lines away from the routines that filled the range? And if you call a database table ZSD04, how can anyone guess what that table is for except - it might be something to do with SD? And C_PICK_STATUS_1 - it's somewhat different to C_PICK_STATUS_A, but in both cases you have no idea that it means the delivery has not been picked yet. It's called R_DELIVERIES but actually contains sales order line items. Nice. :)
https://answers.sap.com/articles/12984186/share-your-abap-gore-part-2.html
CC-MAIN-2020-34
refinedweb
2,463
67.04
Hello, first time poster here. Be gentle with me. I'm having some trouble with a small program I'm writing for my Intro to Java course. Specifically the way my For loop is interacting with my array. This program when finished would: ask the user how many bowling scores they wish to enter, store the entries in the array using the first for loop, using the second loop repeat the entries back to the user and then report a total and average. I ran the debugger, which we haven't gone over yet and it seemed to think the problem existed on line 23. 23 being inside my first For loop. import javax.swing.JOptionPane; public class Main { public static void main(String[] args) { String sUserName, sInput; String sNumberOfScores = ""; int iNumberOfScores = 0; int iIndex = 0; double[] dScores; dScores = new double[ iNumberOfScores ]; sNumberOfScores = JOptionPane.showInputDialog("How many scores do you wish to enter?"); iNumberOfScores = Integer.parseInt( sNumberOfScores ); for(iIndex = 0; iIndex < iNumberOfScores; iIndex++) { sInput = JOptionPane.showInputDialog("Please enter " + "score # " + (iIndex + 1) + ";"); dScores[iIndex] = Double.parseDouble(sInput); } for(iIndex = 0; iIndex < iNumberOfScores; iIndex++) { System.out.println("The scores you entered were " + dScores[iIndex]); } double dAverage = (dScores[iNumberOfScores])/iNumberOfScores; double dTotal = (dScores[iNumberOfScores]); System.out.println("Your total pin count is " + dTotal); System.out.println("Your average score per game is " + dAverage); } } Running the program results in it asking me how many scores I want to enter, then asking for the first score. Upon entering the first score it always comes up with, Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: 0 at Main.main(Main.java:23) I believe I failed to set the size of my array but to me it looks good. 
I know my calculations for Average and Total are wrong, as are most likely other things outside of my For loops, but I can slam my head against my desk till those are fixed. I know it's simple; I just can't seem to find what this thing wants. Thanks in advance.
http://www.javaprogrammingforums.com/whats-wrong-my-code/3600-array-problem.html
CC-MAIN-2014-42
refinedweb
334
57.06
I got a Mac last week and am trying to learn the basics by coding a few projects I came up with. I’ve been creating a few apps I can place on the dock that automate simple functions that I use often. So far, all my programs have worked by creating a zsh script and getting Automator to ‘Run Shell Script’; however, this particular program isn’t fully working, although, if I run my program through Terminal, it works perfectly. I’m trying to create a program that can be executed after a YouTube URL has been copied into the clipboard. The program takes the URL and runs it through ‘youtube-dl’ and downloads the audio into the Downloads folder. I tried to learn how to do this from Z shell alone, but I was very overwhelmed with it and I decided to use Python, which I already know. Here’s what I’m doing: My Python3 code: import os url = os.popen("echo `pbpaste`").read() os.system("youtube-dl -f 'bestaudio(ext=m4a)' '"+url+"'") My zsh file: #!/bin/zsh cd /Users/admin/Downloads python3 /Users/admin/Documents/Scripts/Automation/ytGetAudio/ytGetAudio.py I then made it executable with “chmod +x /Users/admin/Documents/Scripts/Automation/ytGetAudio/ytGetAudio.zsh” When I run my program from the terminal, everything works great, and the file is downloaded into Downloads, but when I use Automator to “Run Shell Script” and get it to run the same zsh script, it doesn’t download anything. I tested it by creating a pop-up message box in the Python script; that worked, so the script is running, but something isn’t working because nothing is downloading when I run the ‘.app’ file. Can anyone please help me with this? 🦄
https://proxies-free.com/mac-why-does-my-zsh-program-run-fine-in-terminal-but-not-as-an-automator-app/
CC-MAIN-2020-40
refinedweb
295
59.53
Java Development Kit (JDK) 1.7 (officially named Java SE 7), which is freely available from Sun Microsystems (now part of Oracle), is needed for writing Java programs. JDK can be downloaded from the Java mother site @ (or). JDK or JRE? JRE (Java Runtime) is needed for running Java programs. JDK (Java Development Kit), which includes JRE plus the development tools (such as compiler and debugger), is need for writing as well as running Java programs.. - J2SE 1.2 (codename Playground) (December 8, 1998): Re-branded as "Java 2" and renamed JDK to J2SE (Java 2 Standard Edition). Also released J2EE (Java 2 Enterprise Edition) and J2ME (Java 2 Micro Edition). Included JFC (Java Foundation Classes - Swing, Accessibility API, Java 2D, Pluggable Look and Feel and Drag and Drop). Introduced Collection Framework and JIT compiler. - J2SE 1.3 (codename Kestrel) (May 8, 2000): Introduced Hotspot JVM. - J2SE 1.4 (codename Merlin) (February 6, 2002): Introduced assert, non-blocking IO (nio), logging API, image IO, Java webstart, regular expression support. - J2SE 5.0 (codename Tiger) (September 30, 2004): Officially called 5.0 instead of 1.5. Introduced generics, autoboxing/unboxing, annotation, enum, varargs, for-each loop, static import. - Java SE 6 (codename Mustang) (December 11, 2006): Renamed J2SE to Java SE (Java Standard Edition). - Java SE 7 (codename Dolphin) (July 28, 2011): First version after Oracle purchased Sun (called Oracle JDK). - Java SE 8: expected in summer 2013?! How To Install JDK on Windows Step 0(a): Un-Install Older Version(s) of JDK/JRE I recommend that you install the latest JDK, but it can be messy if you have multiple versions of JDK/JRE. If you have previously installed older version(s) of JDK/JRE, un-install ALL of them. Run "Control Panel" ⇒ Program and Features ⇒ Un-install programs begin with "Java", such as "Java SE Development Kit" and "Java SE Runtime". If you are not sure whether you have older version(s) of JDK, check! 
Step 0(b): Understand Windows' CMD Shell Programmers need to know how to use CMD shell to issue commands. If you are completely new to CMD, read "Programmer's Survival Guide for Windows". Step 1: Download JDK - Goto Java SE download site @. - Click the "Download" button under "JDK" of "Java SE 7". -.7.0_xx). You need to include the " <JAVA_HOME>\bin" directory in the PATH. To edit the PATH environment. Variable name : PATH Variable value : c:\Program Files\Java\jdk1.7.0_xx\bin;[exiting entries] (For Advanced Users Only) I suggested that you place the JDK bin directory in front of " c:\windows\system32" and " c:\windows". This is because some Windows systems may have an out-dated copy of JDK/JRE in these directories. Do a search for " java.exe", and you will be amazed by the findings. You could read "Java Applications and Environment Variable" for more discussions about "Start" button ⇒ Select "run..." ⇒ Enter " cmd"; or "Start" button ⇒ All Programs ⇒ Accessories ⇒ Command Prompt). - Set the Current Drive to the drive where you saved your source file " Hello.java". For example, suppose that your source file is saved in drive " d", enter " d:" as follow: prompt> d: D:\xxx> - ...... 08-May ...... - To run the program, invoke the Java Runtime " java": D:\myproject> java Hello Hello, world! Everything that can possibly go wrong will go wrong: Read "JDK Installation Common Errors". Step 7: (Optional) Download JDK API Documentation, Samples and Demos The JDK download does not include the documentation, which needs to be downloaded separately. In the past, I always insist that my students should have a local copy of JDK API Documentation. But, today, you can easily access the online copy by googling "JDK Samples and Demos" from the Java SE download site. Step 8: (For Advanced Users Only) JDK's Source Code Source code for JDK is provided and kept in " <JAVA_HOME>\src.zip". 
I strongly recommend that you to go through some of the source files such as " String.java", " Math.java", and " Integer.java", under " java\lang". Using TextPad for Java Programming (on Windows) TextPad (@) is an excellent programming text editor for writing toy Java programs. It can be configured to couple with the JDK, hence, bypassing the CMD shell. From the TextPad editor, you can invoke the JDK compiler/runtime directly via "Tools" menu ⇒ External Tools ⇒ "Compile Java" or "Run Java Application". Take note of the keyboard shortcuts - Ctrl+1 for compile and Ctrl+2 for run. If you cannot find these commands in the "Tools" menu, goto "Configure" ⇒ Preferences... ⇒ Tools ⇒ Add ⇒ JDK Commands. You can also configure the properties of "compile" and "run" there, such as prompting for command-line arguments. TextPad Tips and Configuration Using NotePad++ for Java Programming (on Windows) Notepad++ (@) is a free and open-source programming editor. You can use NotePad++ to input Java source code, and compile and run the Java program under CMD shell. NotePad++ Customization and Tips You can customize Notepad++ to compile and run Java programs with hot-keys. Read "NotePad++". How to Install JDK on Mac Step 0: Understand the "Terminal" Programmers need to know how to issue commands through the "Terminal". If you are new to Terminal, read "Programmer's Survival Guide for Mac and Ubuntu". Step 1: Check if JDK has been Pre-Installed In some Mac systems (earlier than Mac OS X 10.7 (Lion)), JDK has been pre-installed. To check if JDK has been installed, open a "Terminal" (Go ⇒ Utilities ⇒ Terminal) and issue these commands: $ javac -version - If a JDK version number is returned (e.g., JDK {1.x.x}), then JDK has already been installed. Proceed to "Step 3: Write a Hello-world Java program". - If message "command not found" appears, JDK is NOT installed. Proceed to the "Step 2: Install JDK". 
- If message "To open javac, you need a Java runtime" appears, select "Install" and follow the instructions to install JDK. Then, proceed to "Step 3: Write a Hello-world Java program". Step 2: Download and Install JDK - Goto. - Login with your AppleID. - Download "Java for Mac OS X {10.x}Update {u}Developer Package (DMG)". Choose {10.x}according to your Mac OS X version and the latest {u}. - Double-click to install the downloaded Disk Image (DMG) file. - Eject the DMG file. - To verify your installation, open a "Terminal" and issue these commands: // Check the version of "javac" (Java Compiler) and "java" (Java Runtime) $ javac -version javac {1.x.x_xx} $ java -version java version "{1.x.x_xx}" Java(TM) SE Runtime Environment (build {1.x.x_xx-xxx}) Java HotSpot(TM) Client VM (build 22.1-b02, mixed mode, sharing) // Find the location of "javac" (Java Compiler) and "java" (Java Runtime) $ which javac /usr/bin/javac $ which java /usr/bin/java Step 3: Write a Hello-World Java Program - Create a directory called " myproject" under your home directory (Finder ⇒ Go ⇒ Home; File ⇒ New Folder ⇒ " myproject"). In Mac under the Terminal, the home directory of the current login user is denoted as " ~". Hence, this new directory is represented as " ~/myproject". - Use a programming text editor (such as jEdit, gedit) to input the following source code and save as " Hello.java" under the directory " ~/myproject" created earlier. If you use "TextEdit" (NOT encouraged, as it is a plain text editor, NOT a programming text editor), you need to open a new file ⇒ choose "Format" ⇒ "Make Plain Text" ⇒ Enter the source code ⇒ Save as " Hello.java" (without the " .txt"). /* * My First Java program to say Hello */ public class Hello { // Save as "Hello.java" under "~/myproject" public static void main(String[] args) { System.out.println("Hello, world from Mac!"); } } Step 4: Compile and Run the Hello-World Java Program -". - Read "Debugging program in Eclipse". 
First Java Program with NetBeans - You need to first install NetBeans. Read "How to Install NetBeans". - You can then proceed to write your first Java program. Read "Writing your first Java program with NetBeans". - Read "Debugging program in NetBeans". (Advanced) External JAR Files and Native Libraries External Java packages (such as Servlet, MySQL Connector/J, JOGL, JUnit) are often distributed in JAR files (Java Archive - a single-file package of many Java classes), with possibly Native Libraries (" .lib" and " .dll" in Windows, or " .a" and " .so" in Linux/Mac). External JAR Files (".jar") If external JAR files are not properly included: - During the compilation, you will receive compilation error "cannot find symbol" on classes belonging to the external packages. - During execution, you will get a runtime error "Could not find or load main class xxx" or " NoClassDefFoundError". To include external JAR files, you can either: - Copy all the JAR files of the external packages to the Java's Extension Directories. - For Windows, the JDK classes) or JAR files (single-file archive of Java classes). If you set the CLASSPATH, you must also include the current directory (denoted as " ."). - For Windows, set the CLASSPATHin Control Panel ⇒ System ⇒ Advanced system settings ⇒ Advanced ⇒ Environment Variables ⇒ System Variables ⇒ New ⇒ In "Variable name", enter " CLASSPATH" ⇒ In "Variable value", enter " .;path1\xxx.jar;path2\yyy.jar", where the entries are separated by a semi-colon ( ;). - For Linux and Mac OS: Edit ~/.profileor ~/.bash_profile(or /etc/profilefor system-wide setting) to include the following line at the end of the file: export CLASSPATH=.:path1/xxx.jar:path2/yyy.jarThe entries are separated by colon ( :). 
- You can also set the CLASSPATHin the javac/ java's command-line via the option -cp <paths>(or -classpath <paths>), for example, // Compile Java source code > javac -cp .:path1/xxx.jar:path2/yyy.jar ClassName.java // Run Java class > java -cp .:path1/xxx.jar:path2/yyy.jar ClassName External Native Libraries (".lib", ".dll", ".a", ".so") Some external package may provide static or shared native libraries in the form of " .lib" (Windows' static library), " .dll" (Windows' dynamically link library), " .a" (Unix's static library), or " .so" (Unix's shared library). Native Libraries are to be kept in a directory accessible via JRE's Property " java.library.path", which normally but not necessarily includes all the directories in the PATH environment variable. Native libraries are not involved in the compilation. But if they are not properly included during runtime time, you will get a runtime error " java.lang.UnsatisfiedLinkError: no xxx in java.library.path". To include external native libraries: - Copy the native libraries into a system library directory, e.g., c:\windows\system32(Windows), /usr/libor /usr/local/lib(Linux or Mac OS). You can verify that the directory is included in Java's System Property " java.library.path", via System.out.println(System.getProperty("java.library.path")). - You can also set the native library path via the java's command-line option -Djava.library.path=xxx, for example, > java -Djava.library.path=xxx ClassName Eclipse/NetBeans Using an IDE can greatly simplifies inclusion of external packages. Read "Eclipse How-To" or "NetBeans How-To".Link to References & Resources
http://www3.ntu.edu.sg/home/ehchua/programming/howto/JDK_Howto.html
CC-MAIN-2014-10
refinedweb
1,805
58.79
Asked by: Syntactic sugar or short trick for decorator pattern? General discussion - The decorator pattern and the general principle of favouring composition over inheritance as well as adapters, and probably a few more patterns, involve wrapping an instance of a class that implements an interface inside another wrapper class implementing that same interface by simply calling the inner instance, with a few modifications. Edit: I realise that while adapters follow a similar pattern, they tend to have references to concrete classes that don't technically implement the interface, although they may have all the methods required. Whether this idea should generate 'auto-wrappers' for those is another question, right now I'd be happy with the less controversial case for decorators where the implementing inner instance is the same interface. Being as this is such a common task, especially when unit testing, it would be nice if there was a simple shorthand for this along the lines of: public class wrapper : IInterface { inner IInterface _implementor; } where the compiler automatically passes calls to any interface methods that aren't explicitly implemented to the 'inner' instance. This new 'inner' keyword may even elevate composition to the same level of thought as inheritance in the minds of those new to OOP. Although maybe a different word would be less confusing for those coming from Java. This would be especially useful when the interface has a large number of methods and you don't happen to have a tool like Resharper. Now, given that C# doesn't (yet?) have this keyword, is there some other trick with extension methods or perhaps delegates or lambda functions that can cut down on the need to write implementions for every interface method while still allowing you to make exceptions to the usual delegate to child path? 
Wednesday, August 26, 2009 7:22 AM - Edited by Christopher Woodward Thursday, August 27, 2009 4:41 AM All replies - There aren't any tricks to do this directly. However, this would be possible to implement via an AOP framework, such as PostSharp . A custom aspect could delegate all of the routines to a member via an attribute. Reed Copsey, Jr. -, August 26, 2009 5:37 PMModerator - I had a quick look at PostSharp and although it adds the additional idea of using attributes I can't see any way around needing to write an implemention for every interface method to please the compiler. Once you've gone to all that effort a policy injection approach won't save you much else or am I missing a trick? Your answer does strengthen the argument for a keyword though (maybe 'implementing' would be a better word). It would definitely get my vote for a new language feature. Even Resharper doesn't help if the interface definition changes and you have to go through all your decorating classes again. All that redundant pass-through code also obscures what is usually a simple operation to change a single behaviour. Edit: Behaviour changes made me think of one idea that might work. That is the use of a mock library as some of them have clever ways to inject a single behaviourial change. Not sure how but it's the nearest thing that came to mind. Serious overkill mind you. :)Thursday, August 27, 2009 2:45 AM - Taking this a step further let's make it generic. What I'd like to get is not just a set of decorators that I don't have to go through and modify if an interface changes but preferably a generic decorator that works across interfaces. Here's the idea: Let's say I want to modify the behavior of the ToString() method. 
For now I'll avoid the auto-wrapper idea and create an interface for it: public interface IToString { string ToString(); } So I create a decorator class that, say, adds a new line after every ToString(): public class NewLineDecorator : IToString { private IToString _object; public NewLineDecorator(IToString o) { _object = o; } public string ToString() { return o.ToString() + "\r\n"; } } Now say I've got a group of classes I want to apply this to that do something else too. Let's give them an interface: public interface IFoo : IToString { void Foo(); } Now, in particular context, I'd like to wrap all IFoo objects inside the NewLineDecorator but still reference them as IFoos. I'd also like to do the same for another interface too, IBar. Let's say I have some method that wraps them in a factory somewhere. Instead of having functionally identical, but textually very different, copies of the NewLineDecorator class for both Foo and Bar and any other descendent of IToString, what I'd like to do is: public IFoo Decorate(IFoo foo) { return new NewLineDecorator<IFoo>(foo); } So the generic NewLineDecorator class would look something like public class NewLineDecorator<T> : T where T : IToString // (and T is still an interface, unless we go for the whole auto-wrapper approach.* ) { private implementing T _object; public NewLineDecorator(T o) { _object = o; } public string ToString() { return o.ToString() + "\r\n"; } } I hope this shows how this idea is not just about saving a bit (or maybe a lot) of typing. It could be quite powerful for something that wouldn't be too hard to implement. Anyone got anything against the idea? *The need to constrain a generic type parameter to being an interface is another topic. 
Edit: Looking at that generic class again I can see it would get confusing if T weren't an interface and you start muddling inheritance and composition and ToString starts getting called on the constructor parameter where any other methods would be called on the decorator itself through inheritance. Not good. Maybe it needs a less confusing syntax.Friday, August 28, 2009 12:20 AM - My advice is to tackle this at the design level. Avoid having lots of methods in your interfaces, or change your callers so that they can take advantage of the fact that A has a IFoo, rather than A is a IFoo.Friday, August 28, 2009 8:57 PM - What the OP is proposing is very much like the mixin pattern, or how templates are used in C++. It is the path to the dark side, multiple inheritance. Mark the best replies as answers. "Fooling computers since 1971."Friday, August 28, 2009 10:36 PMModerator - I've come back to this after finding another post about the same thing. He just didn't spot that it was the decorator pattern he was talking about. While it may look like Mixins or multiple inheritance, having many of the advantages of such. It is nothing more than what I originally said which is purely syntactic sugar that avoids what must be one of the most common remaining patterns of boilerplate coding that devs still have to write. Wednesday, October 14, 2009 7:48 AM - The keyword, ' inner ', that you are suggesting is not a variation of the Decorator Pattern. It is the path to multiple inheritance of classes, not interfaces. Let's suppose that there was such a keyword as you propose. Let's take a look at your initial example. public class wrapper : IInterface { inner IInterface _implementor; } So where does the actual coding for the 'implementor' object come from? If IInterface were an class, then you are talking about basic inheritance. If IInterface were an interface, then implementation must come from somewhere. Where? 
You are also proposing the addition of class members through this other keyword, 'implementing'. What is there to stop the use of multiple statements with either keyword? Nothing, because it is multiple inheritance. Sorry, and I know you spin it differently, but that is almost exactly what the mixin pattern is in C++. Mark the best replies as answers. "Fooling computers since 1971."Wednesday, October 14, 2009 5:14 PMModerator - If you allow the IInterface to be a class, yes, that starts to get into the realms of multiple inheritance. That's why I mentioned that 'auto-wrapper' as being a debatable option. You're also right in that the implementation must come from somewhere. You need to set the inner _implementor to a concrete instance, otherwise it will simply throw a null reference exception. How you inject that will be however you'd inject any other instance member. You may even change it at runtime to suddenly switch behaviours. It's composition, not inheritance. I see what you mean about multiple inheritance if you allow more than one use of this. If if there is 2 interfaces that are implicitely implemented using this method and they both have the same method then which inner instance gets called? There is an ambiguity there. Is that what you are thinking when you see this as a mixin? That ambiguity can be solved by either enforcing a rule of only one implicit interface or by forcing the class to explicitely implement any ambiguous methods. That would be little work for the compiler. public class wrapper : IInterface1, IInterface2 { inner IInterface1 _i1; inner IInterface2 _i2; public object CommonMethod() { return i1.CommonMethod(); } } Anyway. I can see that there needs to be lines drawn around this idea. 
The reason I came back to this is that I was looking in Dynamic Proxy after it caused problems with debugging mocks (a known bug ) and it noticed that it does just what we are talking about here, that is an implicitly forwarding decorator, that allows you to override the default behaviour of a method. Only it is through interceptors which bypasses the need for an interface altogether. In future if I need to create a set of common decorators as in my second post I'll look into using Dynamic Proxy.Wednesday, October 14, 2009 11:41 PM - A rule of only one implicit interface already exists in C#. It is called simple inheritance. Mark the best replies as answers. "Fooling computers since 1971."Thursday, October 15, 2009 12:41 AMModerator
https://social.msdn.microsoft.com/Forums/en-US/5120d5ef-97ec-4de6-ab6c-9a7b5dfc50de/syntactic-sugar-or-short-trick-for-decorator-pattern?forum=csharplanguage
CC-MAIN-2022-40
refinedweb
1,659
61.16
I have a browsable API: restaurant_router = DefaultRouter() restaurant_router.register(r'rooms', RoomsViewSet) restaurant_router.register(r'printers', PrintersViewSet) restaurant_router.register(r'shifts', ShiftsViewSet) urlpatterns = patterns('', url(r'^$', api_root), url(r'^restaurant/$', RestaurantView.as_view(), name='api_restaurants_restaurant'), url(r'^restaurant/', include(restaurant_router.urls)), ) In the api_root I can link to the named route: @api_view(('GET',)) def api_root(request, format=None): return Response({ 'restaurant': reverse('api_restaurants_restaurant', request=request, format=format), }) Or I can use the browsable API generated by the DefaultRouter, as explained in the documentation: The DefaultRouter class we're using also automatically creates the API root view for us, so we can now delete the api_root method from our views module. What do I do if I want to mix ViewSets and normal Views, and show everything in the same API root? The DefaultRouter is only listing the ViewSets it controls. Doesn't look like there's a simple way to do that using the DefaultRouter, you'd have to build your own router. If it's any consolation the DefaultRouter's logic for generating the APIRoot view is fairly simple and you could probably easily roll your own, similar router based on the DefaultRouter class (e.g. Modify the ApiRoot class implementation to fetch additional URLs to include, you can do this any number of ways e.g. pass them into your Router's constructor): You can define your views as ViewSets with only one method. So you can register it in router and it will be in one space with ViewSets. From: A ViewSet class is simply a type of class-based View, that does not provide any method handlers such as .get() or .post(), and instead provides actions such as .list() and .create() Which means we can extend your ViewSets: def other_rooms_view(request): return Response(...) class RoomsViewSet(ViewSet): ... 
def list(self, request): return other_rooms_view(request) restaurant_router = DefaultRouter() restaurant_router.register(r'rooms', RoomsViewSet)
http://www.devsplanet.com/question/35276916
CC-MAIN-2017-04
refinedweb
313
55.24
Custom views with bold text and rounded corners - peiriannydd Two issues: - When I try to create a custom view with the pyui I am unable to created rounded corners, but if instead I create the custom view without the pyui I get rounded corners. - When I try to use <system-bold> font in a custom view the text is not bold. How do I solve this? Thanks for any help! Here is an example that illustrates both: import ui class customButton(ui.View): def __init__(self,frame=(0,0,300,100)): self.corner_radius = 8 self.frame = frame self.text = 'why does only one of these have rounded corners and neither have bold text?' def draw(self): path = ui.Path.rounded_rect(0,0, self.width,self.height,self.corner_radius) ui.set_color('green') path.fill() ui.draw_string(self.text,(0,0,self.width,self.height),('<system-bold>',20),'black',ui.ALIGN_CENTER) #the pyui file consists of a single custom view, with # x=20 # y=200 # width=300 # height=100 # custom view class = customButton v = ui.load_view() cbn = customButton() v.add_subview(cbn) v.present('fullscreen') @peiriannydd said When I try to create a custom view with the pyui I am unable to created rounded corners, but if instead I create the custom view without the pyui I get rounded corners. First, when you post a source code, please insert it between 2 lines of 3 back ticks. When you edit your post, the </>key does it for you, try it. That works @peiriannydd said When I try to use <system-bold> font in a custom view the text is not bold. You're right, I don't know why but try this font, it works ui.draw_string(self.text,(0,0,self.width,self.height),('Arial Rounded MT Bold',20),'black',ui.ALIGN_CENTER) Or, in place of draw, use a label, there bold works 😉 def __init__(self, frame=(20,200,300,100)): self.corner_radius = 8 self.frame = frame self.text = 'why does only one of these have rounded corners and neither have bold text?' 
l = ui.Label() l.frame = (0,0,self.width,self.height) l.background_color = 'green' l.text_color = 'black' l.number_of_lines = 0 l.text = self.text l.font = ('<System-bold>',20) self.add_subview(l) @peiriannydd Finally, you didn't say if this solved your two problems... - peiriannydd Yes, thank you, this solved my problems, although using a label since <system-bold> doesn’t work with ui.draw_string seems like a bug workaround. @peiriannydd Said seems like a bug workaround. Sure, but what else to do if the app is no more updated? @peiriannydd other workaround f = '<System>' ui.draw_string(self.text,(0,0,self.width,self.height), (f,20), 'black',ui.ALIGN_CENTER) ui.draw_string(self.text,(1,1,self.width-1,self.height-1), (f,20), 'black',ui.ALIGN_CENTER) So, a way that should work would be to use UIFont to find the system font name, and then create a bold version, then find the name of that... Just an idea, have not pursued it yet <UICTFont: 0x103990c70> font-family: ".SFUI-Semibold"; font-weight: bold; font-style: normal; font-size: 20.00pt Try ".SFCompactText-Bold" Or ".SFProText-Bold" Also-- the "Text" part of the name can be replaced with Rounded, or Display. Apparently Text is supposed to used under 19pt, Display for 20pt+. I'm not sure what Rounded is for -- looks a little more informal. I think apple tried to obfuscate system font names so people used their new API in iOS13, to prevent people hard coding fonts that might change later. Hence reason that .SFUI doesnt work I assume draw_string is using UIFont.fontWithName_size_ I think compact is for watchOS, Pro for iOS. Does UIFont have a .fontName() method? 
@JonB neither ".SFProText-Bold" nor ".SFProDisplay-Bold" do work I ask me if for SF standard system font, bold in name does work without a supplementary bold attribute @peiriannydd this works, in place of ui.draw_string UIFont = ObjCClass('UIFont').boldSystemFontOfSize_(20.0) attrtext = ObjCClass('NSMutableAttributedString').alloc().initWithString_(ns(self.text)) attrtext.addAttribute_value_range_(ns('NSFont'), UIFont, NSRange(0,len(self.text))) attrtext.drawInRect_(CGRect(CGPoint(0, 0), CGSize(self.width, self.height))) Nb: needs from objc_util import *
https://forum.omz-software.com/topic/7603/custom-views-with-bold-text-and-rounded-corners/10
CC-MAIN-2022-27
refinedweb
696
60.82
Ok, i know that i am doing something wrong and if possible would like to be pointed in the right direction. Im just learning java, my background is in c/c++. I need to be able to add custom levels to the logger and im defiantly doing it wrong. Couldnt find much on the internet about it. Any help would be greatly appreciated. Here is my code so far. import java.util.Scanner; import java.util.logging.FileHandler; import java.util.logging.Level; import java.util.logging.Logger; import java.io.*; public class LogRunner { public static void main(String[] args) { // This is what i could come up with so far but its wrong. // Create the new levels final Level LogRunner debug = new LogRunner(); final Level LogRunner error = new LogRunner(); try{ FileHandler hand = new FileHandler("application.log"); Logger log = Logger.getLogger("log_file"); log.addHandler(hand); log.debug("This is bad debug it! "); log.info("Here is the info "); log.warning("DANGER DANGER "); log.error("There seems to be an error "); System.out.println(log.getName()); } catch(IOException e){} } } Thanks in advance for any help seanman
http://www.javaprogrammingforums.com/java-se-apis/11123-custom-log-level-help.html
CC-MAIN-2015-48
refinedweb
184
54.49
#015 Template matching using OpenCV in Python Highlights: In this post, we’re going to talk about template matching which is probably the simplest form of object detection. Template matching is a technique in digital image processing that identifies the parts of an image that match a predefined template. It has various applications and is used in such fields as face and speech recognition, automation, and motion estimation. So, let’s begin with our post and see how template matching works in Python. Tutorial Overview: 1. How does template matching work? Let’s have a look at the following example. In this GIF animation, we can see a photo of Lionel Messi. This is our input image. We also have the template image which is a cropped part of the input image. Now, we are simply going to scan a larger image with this template by sliding it across all possible positions. Then, we will compare a template against overlapped image regions in the larger image, until we find a match. So, how are we going to make that comparison? Are we going to compare pixel by pixel or do we need to use another method? OpenCV has provided several different template matching methods. Here we can see the formulas that OpenCV has calculated for each available method. Note that \(I \) denotes an image, \(T \) template image, and \(R \) result. 
SqDiff – Squared difference $$ R(x, y)=\sum_{x^{\prime}, y^{\prime}}\left(T\left(x^{\prime}, y^{\prime}\right)-I\left(x+x^{\prime}, y+y^{\prime}\right)\right)^{2} $$ SqDiffNormed – Normalized squared difference $$Corr – Cross correlation $$ R(x, y)=\sum_{x^{\prime}, y^{\prime}}\left(T\left(x^{\prime}, y^{\prime}\right) \cdot I\left(x+x^{\prime}, y+y^{\prime}\right)\right) $$ CCorrNormed – Normalized cross correlation $$Coeff – Cosine coefficient $$ R(x, y)=\sum_{x^{\prime}, y^{\prime}}\left(T^{\prime}\left(x^{\prime}, y^{\prime}\right) \cdot I^{\prime}\left(x+x^{\prime}, y+y^{\prime}\right)\right) $$ Where: $$ T^{\prime}\left(x^{\prime}, y^{\prime}\right)=T\left(x^{\prime}, y^{\prime}\right)-1 /(w \cdot h) \cdot \sum_{x^{\prime \prime}, y^{\prime \prime}} T\left(x^{\prime \prime}, y^{\prime \prime}\right) $$ $$ I^{\prime}\left(x+x^{\prime}, y+y^{\prime}\right)=I\left(x+x^{\prime}, y+y^{\prime}\right)-1 /(w \cdot h) \cdot \sum_{x^{\prime \prime}, y^{\prime \prime}} I\left(x+x^{\prime \prime}, y+y^{\prime \prime}\right) $$ CCoeffNormed – Normalized cosine coefficient $$ R(x, y)=\frac{\sum_{x^{\prime}, y^{\prime}}\left(T^{\prime}\left(x^{\prime}, y^{\prime}\right) \cdot I^{\prime}\left(x+x^{\prime}, y+y^{\prime}\right)\right)}{\sqrt{\sum_{x^{\prime}, y^{\prime}} T^{\prime}\left(x^{\prime}, y^{\prime}\right)^{2} \cdot \sum_{x^{\prime} y^{\prime}} I^{\prime}\left(x+x^{\prime}, y+y^{\prime}\right)^{2}}} $$ When we slide the template image across the input image, a metric is calculated at every pixel location. This metric represents how similar the template is to that particular area of the input image. For each location of \(T \) in the image \(I \) we store the metric in the result matrix \(R \). Each location \((x,y) \) in \(R \) contains the match metric. Note that this process is very similar to the convolution where the output of our image will shrink. In the following image, we can see the map of the comparison results. 
The brightest locations indicate the highest matches. As you can see, the location marked by the yellow circle is probably the one with the highest value, so that location will be considered as the best match candidate for our template. This yellow circle will represent the top left corner of the rectangle, where we assume the optimal match candidate will be located. The width and height of the rectangle are equal to the template image. Now, let’s see how each of these methods works in Python. 2. Template matching in OpenCV with Python First, we are going to import the necessary libraries and load the input image and the template image. We will also correct the color order because we will plot these images with matplotlib. import cv2 import numpy as np from matplotlib import pyplot as plt from google.colab.patches import cv2_imshow template = cv2.imread("Picture9.jpg") template = cv2.cvtColor(template, cv2.COLOR_BGR2RGB) img = cv2.imread("Picture10.jpg") img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(img) plt.imshow(template) So, as you can see the input image is Leo Messi and the template image is just Messi’s face. Now, if we check out dimensions of a template image we can see that they are (269, 191, 3). template.shape (269, 191, 3) Note that if draw a rectangle of size (269, 191) around Messi’s face in the input image, that area will have the exact size and shape of a template image. Now, one more note before we proceed. In our code we’re going to use the list of strings which are the names of different template matching methods. methods =["cv2.TM_CCOEFF" , "cv2.TM_CCOEFF_NORMED", "cv2.TM_CCORR" , "cv2.TM_CCORR_NORMED" , "cv2.TM_SQDIFF" , "cv2.TM_SQDIFF_NORMED"] However, we need to evaluate each of these strings as if it is an OpenCV function. To do that we are going to use the function eval(). In that way we can directly transform a string that matches each of these built-in template matching functions. 
It will be more convenient for us to write our code in that way, instead of calling each function manually. Now, let’s move on. The next step is to create a for loop that goes through each of these methods. We will also create a copy of the input image. Then, using the eval() function, we will loop through all string methods, and transform that string we are going to use to the actual OpenCV function. Now we are ready to perform the template matching. For this, we will use the function cv2.matchTemplate() that consists of the following parameters. - Input image – An image where the search is running. Note that it must be an 8-bit integer or 32-bit floating-point. - Template – Searched template image. It must be smaller or equal to the input image and it must have the same data type. - Output – The resulting map of comparison results. It is a single-channel 32-bit floating-point. If the input image dimensions are \(x\times y \) pixels and template dimensions are \(m\times n \) pixels, then the result is \((x-m+1)\times (y-n+1) \). - Method – Parameter that specifies the comparison method - Mask – Mask of the searched template. It must have the same datatype and size as the template. It is not set by default. for m in methods: img_copy = img.copy() method = eval(m) res = cv2.matchTemplate(img_copy,template,method) Now, we need to find the maximum and minimum values of the resulting map, as well as the minimum and maximum value locations. For that, we will use the function cv2.minMaxLoc() and as a parameter, we will just pass our image that is the output of our template matching method. This function takes in the map, finds the minimum and the maximum locations, and returns them back as a tuple. Then, we can unpack that tuple, to find the minimum value, the maximum value, as well as the minimum and maximum locations. 
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) One important thing to note is that two methods that use squared differences, SqDiff and SqDiffNormed, are going to be slightly different because the location with the minimum value will be considered the match. On the other hand, for other methods, the match will be located where the function finds the maximum value. To fix this problem, we need to create a loop that will check if a given method considers location with the minimum value as a match, or location with the maximum value. So we will say that if we use a SqDiff or SqDiffNormed method the top left corner of the rectangle is equal to the minimum location. For all other methods will say that the top left corner of the rectangle is equal to the maximum location. if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]: top_left = min_loc else: top_left = max_loc print(top_left) So, now we know the location of the top left corner of our rectangle. Next, we need to find the bottom right corner of the rectangle. We already know that width and height of the rectangle are equal to the template image. Therefore, we just need to get the shape of the rectangle. Then, we are going to define the bottom right corner of the rectangle that is equal to the top left corner indexed at zero plus the width, and then the top left corner indexed at one plus the height. height, width, channels = template.shape bottom_right = (top_left[0]+width, top_left[1]+height) Now, we just need to draw our rectangle with the cv2.rectangle() function. cv2.rectangle(img_copy, top_left, bottom_right, (0,255,0),6) The final step is to plot all results that we obtained. Let’s see how they look. 
plt.subplot(121) plt.imshow(res) plt.title("TEMPLATE MATCHING MAP") plt.subplot(122) plt.imshow(img_copy) plt.title("TEMPLATE DETECTION") plt.suptitle(m) plt.show() print("\n") print("\n") (301, 28) (301, 28) (0, 224) (301, 28) (301, 28) (301, 28) So, as you can see, five of the methods did a pretty good job. Five methods find while one of them made a mistake. We can see that CCoeff, CCoeffNormed, SqDiff, SqDiffNormed and CCorrNormed methods matched the template correctly. Moreover, all five methods identified top left corner of the rectangle at the same location (301,28). On the other hand, we can see that the Corr method found a lot of different comparisons but unfortunately did not detect the correct template. So, even when we have an exact copy of the part of an input image, we still may not end up with correct results. That is why you’re gonna have to make sure you’re using the right method. Here you can download code from our GitHub repo. Summary In this post, we learn how to detect objects in images using a template matching method. We examined several different template matching methods and compared their results in Python. In the next post, we will learn how to extract feature points from images and match them together.
https://datahacker.rs/014-template-matching-using-opencv-in-python/
CC-MAIN-2021-39
refinedweb
1,748
64.3
#include <Wire.h>#include <DAC6573.h> DAC6573 myDAC6573(DAC6573_DEFAULT_ADDRESS);void setup() { Wire.begin(); //Start the I2C bus Serial.begin (9600);} void loop(){ byte channel=1; static word DAC; static int delta=10; Serial.print("DAC"); Serial.println(DAC); delay(100); myDAC6573.DAC6573_SetChannel(DAC,channel); DAC=DAC+delta; if (DAC > 1020) {delta = -10;} if (DAC < 20) {delta = 10;} } Where do you read the value of the DAC?Which statement does that? If you include header files in the library header file, it is not necessary to include them in the library source file, too. Sorry, stupid slip of the mind Lets try another "stupid "question How are the address lines of the chip connected? Are they GND? one question remains, could you test:If you GND both address lines and you use the I2C scanner of nick Gammon, does it find the device?similar for other address pin combinations (high/low)?- - search for scanner (~@ 80% of the page) loose connections, happens all around the world Please enter a valid email to subscribe We need to confirm your email address. To complete the subscription, please click the link in the Thank you for subscribing! Arduino via Egeo 16 Torino, 10131 Italy
http://forum.arduino.cc/index.php?topic=143588.msg1083581
CC-MAIN-2015-18
refinedweb
199
67.15
Feedback Getting Started Discussions Site operation discussions Recent Posts (new topic) Departments Courses Research Papers Design Docs Quotations Genealogical Diagrams Archives A recent correspondent asks jokingly "I suppose static typing allows for solving the halting problem as well?". This reminded me of an article where Mark-Jason Dominus (the Perl guy) describes how the type checker in ML found an infinite loop bug in a simple sort routine - - well, "found" in the sense of "gave enough information for it to be apparent to anyone paying attention that something was wrong".? Does anyone have other examples? Crashes are caused by paths of execution that anr't closed or terminated. For instance a certain sequence of inputs causes something "unintended and unexpected" to happen. In terms of machine codes the execution runs over the end of a block and into an adjacent unrelated block, at this point anything can happen. Another example is the bad pointer problem. "Spaghetti code" makes this type of error more likely. Type checking seem to reduce the probability of this but as far as I know doesn't preclude it. Some people will put in GOTO error codes at points where execution should never reach. I wonder if there is any formal way to check for this? It seems to me that there could be. Type checking seem to reduce the probability of this [crashes] but as far as I know doesn't preclude it. Of course it does! That's the whole point of soundness proofs for type systems. Granted, current mainstream languages rarely have a formally defined type system, let alone one that's sound. But the typed languages more popular in this forum, e.g. ML and Haskell, have (modulo some explicit unsafe operators intentionally put into some of them). Regarding gotos: of course, they don't exist in any decent high-level language. :-) But nevertheless they can be typed in a sound manner. Have a look at typed assembly language, for example. 
Human developers, particularly experienced ones, tend to write very stereotypical code. They also write code that is either correct or very close to correct, differing from correct code in only one or two particulars. Given this, static error detection in human-written code is vastly easier than the halting problem, for an enormous set of possible errors. Just code up detectors for a bunch of common bug patterns and your bug yield can be very high. For instance, you can't write an analysis program which will find all infinite loops, but for many languages you can write analysis programs to find loops where the exit condition is vacuous, unreachable, or missing and it turns out that that's most of the infinite loops that programmers actually write. Do a bit of research into the sorts of bugs developers actually make, and there are tons of easily detectable bug patterns you can statically analyze. For non-terse, well-behaved, statically-typed language, many static error analyses are so easy that it doesn't surprise me at all that a type checker could be coaxed into doing some of them. metal/xgcc Thanks for the pointer on this. I hadn't read these papers before, but seem to have been living them for the last few years. I love their idea of statistically inferring intended program invariants and then driving static analyses off of those inferences. Expect to see a commercial implementation of that for Java by year-end. It was spun into a commercial product (though for C/C++ I think (?)). But yes, it was impressive. One thing though is that I think the technique would be much less useful in languages LtU people usually prefer. But this is a completely (educated) guess; it would be interesting to see how it works out. Coverity is the company your thinking of. Their website (which seems to be having some trouble right now) mentions analysis for C,C++, and Java. At the last linuxworld, their CTO mentioned in passing an employee creating a stack usage analysis for php. 
The interesting thing about coverity vs e.g. Polyspace, is the decision to emphasize scale over soundness/completeness. Coverity can work with huge codebases (e.g. the Linux kernel or Mozilla) and catch many errors, but does not guarantee the no runtime errors occur. I recall seeing a similar report from, IIRC, Andrew Koenig. The trick is that an infinite loop, or bottom, can be given an arbitrary type. Consider the following example: fun loop () = loop () That function is inferred to have the type val loop : unit -> 'a The 'a as the return type (meaning: the result of the function can be given an arbitrary type) shows that the function never returns normally. I write SML code daily and my experience is that when I get the code to type check it works like I thought it should work. Mike Smith:? The misapprehension that I frequently find on the dynamic typing side is the belief that static typing can't treat things beyond just ensuring that an integer is used as an integer, a float as a float, etc. The phrase "just type violations" is a huge red flag indicating that the writer's understanding of type systems is in need of enhancement. vesa_karvonen's experience mirrors mine: O'Caml gives me an order of magnitude more of that "if it compiles, it works as it should" experience than, say, my Java day-job does (!), and my Java day-job gives me an order of magnitude more of it than, say, my C++ day-jobs and "recreational" (in the sense that masochism can be said to be "recreational") programming did. Granting all that does the O'Caml type system actually do anything to detect crash type errors, or is this due to enforcing a certain level of rigor. Is the problem solved or is it less likely? Hank Thediek: Granting all that does the O'Caml type system actually do anything to detect crash type errors, or is this due to enforcing a certain level of rigor. Is the problem solved or is it less likely? I'm very reluctant to say it's impossible to crash the O'Caml runtime. 
I can honestly say, however, that I never have. On the other hand, I haven't yet done anything with truly enormous datasets in O'Caml, so I likely haven't bumped into some of the runtime limits that are being discussed on the mailing list. Apparently some of the standard library routines aren't tail-recursive, for example, and so using them on large constructs runs the risk of stack overflow, and there are some challenges for the runtime in catching stack overflow correctly on some popular platforms. Most folks in the O'Caml community know that these functions aren't tail-recursive, and so tend to use the ones from ExtLib preferentially, but that of course begs the question as to why you should have to know to do this. With all that said, let's be clear that that reliability's not a function of being statically typed, but of being strongly typed: I've never crashed DrScheme, SBCL, Python, or Ruby, either. input_value can produce a value which crashes the runtime. input_value The analogous problem doesn't exist in dynamically typed languages. QrczakL input_value can produce a value which crashes the runtime. Sure it does, if you don't restrict yourself to only reading values that were serialized out from the language in question, and that's still assuming that those serialized values are tagged (which, in dynamic language, of course will be the case). So all you're really saying is that since you have tags at runtime, you can use them at runtime to mark stuff that isn't even in the runtime. That's a truism. And of course O'Caml marshaling isn't type-safe; marshaling/unmarshaling and using the Foreign-Function Interface are the well-known "interaction points with the outside world" that require some kind of external support (for example, Saffire for checking use of the FFI and HashCaml for type-safe marshaling). 
The fact that both the popular dynamically-typed languages and pretty much the entirety of the statically-typed languages other than C/C++ are memory-safe is indeed one of the several pleasant things that they have in common. What I appreciate about a good inferred statically-typed language is that it's dramatically safer even than that, where "safer" includes definitions like "behaves unexpectedly to the user less." Less than what? That's where the question lies, isn't it? Sorry, but that's pretty naive. Dynamic "typing" mainly helps with external consistency of read values, i.e. checks how they are used. If the language and its serialisation mechanism are rich enough - in particular, if values can include code - then you also will have to perform all kinds of non-trivial checks to verify internal consistency of such values, in order to ensure integrity of the runtime. Almost no existing language implementation does that. For instance, it is no problem to feed Oz a malformed pickle that crashes the VM. My understanding of type systems is in need of enhancement? That's why I'm here!:) Mike Smith: My understanding of type systems is in need of enhancement? That's why I'm here!:) Yeah, me too, but let me suggest that you hie yourself (if you haven't already) to TAPL and work through the exercises. I'm doing it, albeit in my Copious Free Time™ so it's taking longer than I'd like. But the only more eye-opening experience I've had is reading CTM. :-) Not being a computer science person this may be a bogus question but I am really curious. Suppose I have a "sound semantics" and I write a C interpreter for that language; is the result sound? I was just thinking about termination guarentees in the type system, and how far this should go. Epigram distinguishes between structural and general recursion, using different operators for each. 
This means you to retain decidable type checking for dependent types: by only allowing functions that are guaranteed to terminate in type-forming expressions. Other people have suggested non-termination should be put into monads along with other side effects. I'm not sure how far this should go - making termination a part of the function signature would probably damage modularity quite a lot. [in reply to Hank Thediek's above post, sorry for not following up properly] If you prove your implementation correct then yes. ;-) Of course, you can ask the same question one level deeper: is the C compiler correct? Is your CPU correct? This is related to the problem of trusted base in security. But the point is, when a program of yours crashes, but the language you wrote it in is known to be safe and sound, then you know it was not your fault, but that the implementation is buggy (or something at an even lower level). A sound language hence represents an important, impenetrable abstraction barrier, which has all kinds of obvious and less obvious advantages. Now that is exciting! Thanks. The type of sort is Ord a => [b] -> [a] with no annotations, which proves that sort cannot possibly do anything meaningful. I wonder if this is a case for having compilers to compute principal types without paying attention to annotations first, since an annotation completely hides this defect. Ord a => [b] -> [a] How Ord a => [b] -> [a] is the type of sort ... I've never seen behaviour like that from GHC. Can you clarify? It refers to this. It is a neat example. And I now understand Koray's point about annotations possibly masking such defects. None of the examples I've seen so far preclude implementation in a dynamic or psuedo-dynamic language. The problem isn't necessarily detection, as demonstrated by the use of the phrase "enough information for it to be apparent to anyone paying attention that something was wrong." 
It would be ridiculous to, say, expect the programmer to declare any loops that aren't purposefully infinite as such. This obviously points to the use of warnings, which suggests it isn't so much a static vs. dynamic thing. Let's stipulate that you can check anything that's computable at all at runtime. So that's not a very interesting observation. What's interesting is how far you can go in the other direction: how much you can guarantee before runtime. It turns out that even in today's functional languages, e.g. O'Caml and Haskell, the answer is "a lot farther than you think unless you've been doing it for years." When you consider how much farther still you can go with either recent additions (e.g. GADTs) or still-not-common ones (dependent types), it becomes very exciting indeed. Also, don't forget, there's a class of problems (concurrency, distribution, proof of adherence to a security policy) that it's simply incoherent to talk about "solving dynamically," as there's no meaningful sense in which "handling at runtime" can be made equivalent to "I've proven that this problem doesn't exist before I've run the code." I wasn't speaking about checking at run time, merely that compile time warnings are applicable to both static and dynamic languages. But, if you start checking types at compile time you essentially have static typing, right? Well, either that or soft typing. Personally I think that a soft type checker, done right, would be the way to go if programmers didn't start ignoring the warnings of the type system. Unfortunately it seems to be human nature to ignore warnings until it's too late. Sort of. Warning at compile time is most certainly in the realm of dynamic typing, but actual errors would be possible in a hybrid approach.. I consider phantom types a strong example of a benefit of static typing over dynamic typing. Phantom types involve adding completely "unnecessary" restrictions to functions in order to maintain a "semantic" invariant. 
There is no correspondence with dynamic typing as the information included has no connection to the data representation. Further, this is not just some theoretical construct, but is used out there in the wild. Dependent types and related schemes definitely lead to the catching of "interesting" errors.. Could you give an example of this? I suspect this isn't the case, but I'm not fully understanding what you mean. You can't return an STRef from the invocation of runST it was created in. The code won't compile. More interesting, you can't use a reference inside one invocation of ST which was created by any other, even a nested one. This means that running the same computation several times will give the same answer, executing an ST computation is a pure operation, even if the computation uses variables internally. ST would be much less interesting in an impure language. With respect to referential transparency, ST is like drilling a hole in a pot and then patching it - if you start with a sieve instead, why bother with the patch? A simple example of phantom types: ensuring that reads and writes are threaded over uses of a "slot" type. Here, 'a is the (real) type of the "slotted" values, and 'b is the phantom type: 'a 'b module type SLOT = sig type ('a, 'b) t val from : 'a -> ('a, [`Full]) t val get : ('a, [`Full]) t -> 'a * ('a, [`Empty]) t val put : 'a -> ('a, [`Empty]) t -> ('a, [`Full]) t end For reasons that will be clear below, you cannot create an empty slot, only a slot filled from an initial value (this is a limitation of the implementation I chose, not of the technique). Also, for phantom types to work, the phantom type parameter must remain unbound in the interface, by keeping the type abstract. The implementation is like this: from module Slot : SLOT = struct type ('a, 'b) t = 'a let from x = x let get x = x, x let put x _ = x end Really, that's it! 
The type checker ensures that all gets and puts are interleaved: get put # let x = Slot.from 1 ;; val x : (int, [ `Full ]) Slot.t = <abstr> # Slot.get x ;; - : int * (int, [ `Empty ]) Slot.t = (1, <abstr>) # Slot.put 2 x ;; This expression has type (int, [ `Full ]) Slot.t but is here used with type (int, [ `Empty ]) Slot.t These two variant types have no intersection In contrast, a traditional, mutable, object-oriented, explicitly checked implementation would be something like this: class ['a] slot = object val mutable slot : 'a option = None method empty = slot == None method put x = match slot with | None -> slot <- Some x | Some _ -> failwith "put" method get = match slot with | Some x -> slot <- None; x | None -> raise Not_found end Clearly, you have to do all the grunt work for yourself, in particular, you have to test explicitly in the client code whether the slot is empty or not. empty Ah, I see. Clever. What about: data Slot: val type Full = Slot(x => x.val!=None) type Empty = Slot(x => x.val==None) def put(x:Empty, y): x.val = y def get(x:Full): val = x.val x.val = None return val def create_slot(x): return Slot(x)::[put=put, get=get] Dependant types to the rescue ;-) What about something like: data VariableStore: ... def new_var(variable_store, name, value): ... def get_var(variable_store, name): ... def set_var(variable_store, name, value): ... def commit(variable_store): ... def retry(variable_store): ... # Excuse the weird syntax; this creates a "class" of sorts def create_variable_store(...): return VariableStore(...)::[ new_var = new_var, get_var = get_var, set_var = set_var, commit = commit, retry = retry ] # This basically strips off any previous member functions and # only puts the 'get_var' function back. def read_only(variable_store): return class_data(variable_store)::[ get_var = get_var ] which would be used like: vars = create_variable_store(...) use_variable_store(read_only(vars)) vars = create_variable_store(...) 
use_variable_store(read_only(vars)) Which can ensure that use_variable_store isn't able to write, but can't check that it won't try to write. If I write the above code, and pass a read_only(vars) to a function, I have no reason to believe that it won't try to call new_var, set_var, etc., and raise an error at runtime. I know, that was the intent. Of course, I'm assuming there are ways to forcedly cast the value to a different type when using phantom types so I decided to mimmick this behavior. If you'd like, you could use a proxy object to completely forbid access to the raw data structure and therefore modification: def read_only(variable_store): return None::[ read_var = (self, *args => variable_store.read_var(*args)) ] Or, if you don't care about keeping the same interface in case the function might eventually need write priveleges, you could just pass the read_var function around: vars = create_variable_store(...) use_variable_store(vars.read_var) While there's no value of phantom type, there's a value whose type is parameterised on one - I guess a term for that might be useful if there isn't one already? There is no value of a phantom type (hence the 'phantom'). If can have an action of type Int -> STM Full (), the Full part is nothing more than a type -- there is no value of type Full. I know. What I'm getting at is that it could be possible to, say, create a function that converts ReadOnly phantom types to Full, circumventing the type checks. Using a proxy object still won't give us any reason to believe the code correct without showing that the proxy is used appropriately, i.e., that the function has an appropriately restricted type. How is using phantom types different from this? It's not possible to circumvent the phantom typing if you hide the relevant constructors. 
Such a function is trivial and only circumvents the type checks in that it makes explicit the fact that Full subsumes ReadOnly: It also has the effect of making it impossible for the compiler to completely guarantee. I just reread your previous assertion in which you state it would only be checkable at run time. This is false; my approach allows for a compiler to check it at compile time, exactly like your example. Guarantee what? We WANT to be able to do such things. If we cannot do such things, then an action that we label Full cannot read or we require some mechanism to ascribe multiple types to the read-only actions, or we need to start building type-level collections. As it is, we just use the type system, which is quite capable of proving this for us; without the use of unnecessary functions performing the 'cast' for us. Sorry, I'm not particularly good at articulating my thoughts :) What I'm talking about is using a function to cast a ReadOnly STM into a Full STM, thereby circumventing the checks put in place by the type system. If that's not possible, then disregard the first solution as equivalent, as you'd need a proxy object to emulate that sort of behavior. ...because abstract types in ML are inviolate (well, OK—O'Caml has Obj.magic, but anyone who uses it without submitting to the requisite 40 lashes from Xavier Leroy is a fool). In any case, however, the point of phantom types, again, is that wrong code won't even compile, and you'll get a pretty clear error message indicating why it didn't compile. This is why, generally speaking, I prefer a good inferred statically-typed language these days. In any case, however, the point of phantom types, again, is that wrong code won't even compile, and you'll get a pretty clear error message indicating why it didn't compile. This is why, generally speaking, I prefer a good inferred statically-typed language these days. 
My approaches allow for compile time checking exactly as you described, as well :-) The punchline of Dominus' story is that the inferred type of his sort function was not what he was expecting: 'a list -> int list rather than int list -> int list. The inferred type is a static property; the whole point is that Dominus did not have to run the sort to determine it had a bug and would never terminate. In my experience, it is reasonably common for buggy code to show up as unexpected typings in OCaml. Now, a soft typing system could give exactly that same message as a warning, but Dominus goes on to give a short list of some Perl constructs that would fail to typecheck under an ML-ish type system: while (<>) { $total += $_; } ... @a = localtime(); $s = localtime(); ... These are pretty common Perl idioms, and would definitely qualify for a "stupid type checker whining about perfectly valid code" complaint. A type checker that would work for Perl clearly would have to be very different from that of ML, and has some open questions: Would it give excessive numbers of false warnings? (Ultimately, any might well be too many.) Would it give the same level of "after I fixed the typing errors, the program just works"? (Perl's base types are probably something on the order of scalars, arrays, and hashes rather than ints and floats; and silent conversions are very common.) The key difference I see is that the semantics of ML and its type system are closely linked---for example, adding a string to a number is completely alien to ML---something that is forbidden to soft type checkers. That's a great explanation as to why I'm extremely skeptical of "soft" or "pluggable" type systems for languages that weren't designed with static typing in mind: it just seems mind-bogglingly unlikely that you'll get the benefit from them that you do from an inferred statically-typed language in which the semantics and type system are closely interrelated. 
Curtis seems to be following a common line in the static vs. dynamic debate, saying that you could catch errors in dynamically typed code at "compile" time if you wrote some kind of static analysis. Of course! From this point of view, the interesting thing about (good) type systems is that somebody has already written the analysis, and that you can leverage that one analysis to prove properties you care about, with just a little work. But, once you start talking in these terms, then you are no longer talking about what errors can be caught by a static type system (the original question), but debating the merits of catching those errors statically by using the type system of a designed-to-be-typed language, or by writing an analysis tool for a dynamically typed language. Maybe somebody should make a topic for that discussion? What? I'm confused--I'm talking about type checking in the compiler, not in an external tool. Anything that can't be proven false at compile time by the compiler would be accepted, at which point the editor can step in and do warnings if it so wishes, but I'm not sure if that's what you're talking about. In type systems with parametric polymorphism, you can prove some term has a type forall a.T(a) if you can show that the term has type T(A) where A is some freshly invented type constant. Without things like unsafe casts or unrestricted reflection, you can't do anything with values of types like that except pass them around. forall a.T(a) T(A) A This means that just knowing the type of a polymorphic function gives you some information about what it does. For example, the identity function has type forall a . a -> a. The function is only given a single a, so all it can do is return that value, or fail to terminate. forall a . a -> a a An interesting application is the foldr/build rule for list fusion. These are functions, foldr/build foldr :: forall a b . 
(a -> b -> b) -> b -> [a] -> b foldr f x [] = x foldr f x (a:as) = f a (foldr f x as) build :: forall a . (forall l . (a -> l -> l) -> l -> l) -> [a] build k = k (:) [] foldr reduces a list to a single value, by replacing conses with its first argument, and nil with the second. build takes a "list"-building function parametric in the "cons" and "nil" is uses, and uses it to build an ordinary list. For example, foldr build foldr (+) 0 [1,2,3] = 1 + (2 + (3 + 0)) = 6 and if we define k cons nil = cons 1 (cons 2 (cons 3 nil)) then build k = k (:) [] = (:) 1 ((:) 2 ((:) 3 [])) = [1,2,3] The fusion rule says that when foldr is used to consume a list constructed by build we can skip making a list: foldr f x (build k) = k f x. Parametricity tells us that because k is polymorphic in l, the only way it can be producing its result is with the provided functions. Following the example above, we have foldr f x (build k) = k f x k l k (+) 0 = 1 + (2 + (3 + 0)) = 6 = foldr (+) 0 (build k) In the presence of casting (say, a dynamic langauge or a soft typing system) this all breaks down - we could instead have defined k cons nil = cons 1 (2 : (cons 3 nil)) which works fine as an argument to build, but gives nonsense instead of fusion: k (+) 0 = 1 + (2 : (3 + 0)) One of the examples in the Epigram paper The view from the left is an AST and type checker for a simply typed lambda calculus, where the typed AST can only represent well-typed programs, and the type of the checker is a proof that it is actually checking the term provided, and if it says it passed then you really did get a typing for your program. They don't mention any particular bugs this caught, but any time you see "this type guarantees the correcteness of that program" you could read "this type catches any bug that would possibly be made in writing that program". :)
http://lambda-the-ultimate.org/node/1527
CC-MAIN-2019-13
refinedweb
4,701
59.84
Symbol Not Found I'm Running Sage 7.2 on OS Sierra. All was running fine until I tried to run the line: VS = VectorSpace(GF(2), 5) I get the error: ImportError: dlopen(/Users/name/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/multiarray.so, 2): Symbol not found: _PyUnicodeUCS2_AsASCIIString Referenced from: /Users/name/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/multiarray.so Expected in: flat namespace in /Users/name/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/multiarray.so If anyone could shed some light on this I'd be very grateful. Could it be that Sagemath 7.2 has incompatibilities with OS Sierra? Many Thanks Patrick Exactly which version did you download? If you build it from scratch it should be fine, but the downloaded version for a prior MacOS might sometimes not be compatible. Actually, your error message suggests something else - it seems to be using the "wrong" Python. Do you have some customization in your path that would lead Sage to be looking in your Enthought distribution for stuff?
https://ask.sagemath.org/question/35523/symbol-not-found/
CC-MAIN-2018-09
refinedweb
184
51.55
Bugtraq mailing list archives - Linux? (I don't think so, If we have network and other I/O device such as keyboard, I thought that would be used, too. but I want confirmation from people in the know.). I would be most interested if that has changed since the last time I looked at it. OpenBSD, FreeBSD, NetBSD and the like, and of course Checking the /dev/random manpage on Darwin, it indicates that entropy is input from the system "Security Server", which uses "kernel jitter". Unfortunately, a quick check did not reveal exactly what the source of this kernel jitter is. Never-the-less, the manpage does indicate that this /dev/random design is from FreeBSD and likely shared with other BSDs. Windows family OSs. All I can observe here is that F-secure SSH still (at least the most recent version I've used) collects its own entropy when running on Win2K, which indicates to me that either they want to operate the same on all Windows versions (as memory serves, Win95/98 does not have a RNG), or that Win2k does not have a suitable RNG.! You would only want to use one or the other, since the fan rotation is a function of the CPU temperature measurement -- if you used both you would essentially be entering the same measurement into the RNG twice, which isn't very random. Some of these CPU-bound devices may have implications when we have a dual core CPU. Reading of such device by one thread may be highly predictable by another thread running on the CPU chip. Indeed -- certainly the recent advisory regarding information leakage through the cache between threads on multi-core CPUs (CVN: CAN-2005-0109) indicates that we're starting to find problems of this nature already. Cheers, Terry #include <stddisclaim.h> By Date By Thread
http://seclists.org/bugtraq/2005/Jul/26
CC-MAIN-2014-42
refinedweb
309
60.14
Opened 1 year ago Last modified 4 months ago As requested by Simon I propose an {% else %} tag for the {% for %} template tag which could be used like this:!' For a standalone version look at First implementation, including documentation The same be accomplished by this: {% if athlete_list %} {% for athlete in athlete_list %} ... {% endfor %} {% else %} Nothing in list {% endif %} Such behaviour can be confusing as it is not similar to python for object in objects: if object: print 'Found' break else: print 'Not found' pattern. The idea gets a +1 from me. I wonder how useful this new syntax is, as you can't use it for the following (pretty common) pattern: {% if athlete_list %} <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} </ul> {% else %} <p>Nothing in list</p> {% endif %} (Because there is no place where you can put the <ul> tags.) Plus it doesn't comply with the Python syntax (as Anonymous@20:51 says). Thanks Adrian! Matthias, I think it's still worthwhile and useful if you use: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% else %} <li class="emptylist">Nothing in list</li> {% endfor %} </ul> I don't see a reason why compliance with the Python syntax is required for a template tag. There's a good discussion about this on my blog, where Fredrik points out that this would be semantically different from how Python handles else: blocks attached to for statements. As such, the extra block would need a different name - {% default %} was suggested. Forgot the link: Second implementation, including documentation Ok, this sounds like a reasonable compromise. I changed it to having an optional {% default %} clause. The working example is: >>> from django.template import * >>> t = Template(""" {% for athlete in athlete_list %} {{ athlete }} {% default %} No athlete in list! 
{% endfor %} """) >>> c1 = Context( {'athlete_list': ['me', 'myself', 'I'] }) >>> t.render(c1) u'me myself I ' >>> c2 = Context({}) >>> t.render(c2) u'No athlete in list!' Still a reference to else in the patch (in a docstring), and needs tests. I'd suggest dropping the "As you can see, " in the docs and also to mention that the default node is displayed if the given array is empty or not found (emphasis for the bit that I'm suggesting adding). Fix those and it's good to go. Third implementation, including documentation and tests, fixing some issues, thanks SmileyChris?! Ouh, am I actually allowed to change the ticket properties? If you're confident that you've fixed the issue, sure. It doesn't change it's status so that is alright. Final thoughts: * Perhaps make nodelist_default optional in case someone is using ForNode for thier own devices? def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_default=None): (and then move the empty NodeList generation into their too) * You should have a test for the actual working case (i.e. non-empty list) * You need an empty line after "**New in Django development version**" Ok, sounds all reasonable, except one thing: Do you mean with "the empty NodeList? generation" the following? class ForNode(Node): ... def render(self, context): nodelist = NodeList() # <-- this line? ... Yea, that logic obviously needs to move if nodelist_default could be None in __init__ Fourth implementation, including documentation and tests. Do you think this is sufficient? Maby I still don't see your point :/ Is the last patch what you meant? 
Close but not quite, you need something like this: class ForNode(Node): def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_default=None): self.loopvars, self.sequence = loopvars, sequence self.is_reversed = is_reversed self.nodelist_loop = nodelist_loop if nodelist_default is None: self.nodelist_default = NodeList() else: self.nodelist_default = nodelist_default fifth implementation with optional nodelist_default, including documentation and tests. Updated patch to Django r9032 () (In [9530]) Fixed #6398: added an optional {% empty %} clause to the {% for %} template tag. The contents of this clause are rendered if the list iterated over turns out to be empty. Thanks, Jannis Leidel. Astute readers will notice that the patch originally called this default; after consideration I decided that empty is a very slightly better color for this particular bikeshed. As far as I can see, the conversion from 'default' to 'empty' in the patch is not complete. At least, the tests seem broken, as they use "{% default %}" and not "{% empty %}". This problem was fixed in [9532], which was committed a few hours after [9530]. Milestone post-1.0 deleted By Edgewall Software.
http://code.djangoproject.com/ticket/6398
crawl-002
refinedweb
724
65.32
{-# LANGUAGE CPP #-}
-- |
-- Module      : Foreign.C.UTF8
-- Copyright   : (c) 2004 John Meacham, Alistair Bayley
-- License     : BSD-style
-- Maintainer  : alistair@abayley.org
-- Stability   : experimental
-- Portability : portable
--
-- Marshalling between Haskell Strings and UTF8-encoded, NUL-terminated
-- C strings.
--
-- This module has been reasonably optimised. You can check GHC's
-- simplifier output (for unboxing, mainly) with this:
--   ghc -O2 -c UTF8.hs -ddump-simpl > simpl.txt

module Foreign.C.UTF8
  ( peekUTF8String, peekUTF8StringLen
  , newUTF8String, withUTF8String, withUTF8StringLen
  , toUTF8String, fromUTF8String
  , lengthUTF8, fromUTF8, toUTF8
  ) where

-- NOTE(review): 'when' appears to be unused in this module.
import Control.Monad (when, liftM)
import Data.Bits
import Data.Char
import Data.Word (Word8)
import Foreign.C.Types
import Foreign.C.String
import Foreign.Ptr
import Foreign.Marshal.Array
import Foreign.Storable

#ifdef __GLASGOW_HASKELL__
import GHC.Base (unsafeChr)
#else
-- Portable fallback for non-GHC compilers: plain (range-checked) 'chr'.
unsafeChr :: Int -> Char
unsafeChr i = chr i
#endif

-- NOTE(review): nullCChar is never referenced in this module;
-- nullByte below is the terminator actually used.
nullCChar :: CChar
nullCChar = 0

-- | The NUL byte used to terminate marshalled C strings.
nullByte :: Word8
nullByte = 0

-- | Analogous to peekCString. Converts UTF8 CString to String.
peekUTF8String :: CString -> IO String
peekUTF8String cs = fromUTF8Ptr0 (castPtr cs)
--peekUTF8String cs = peekArray0 nullByte (castPtr cs) >>= return . fromUTF8

-- | Analogous to peekCStringLen. Converts UTF8 CString to String.
-- The resulting String will end either when @len@ bytes
-- have been converted, or when a NULL is found.
peekUTF8StringLen :: CStringLen -> IO String
peekUTF8StringLen (cs, len) = fromUTF8Ptr (len-1) (castPtr cs) ""
--peekUTF8StringLen (cs, len) = peekArray len (castPtr cs) >>= return . fromUTF8

-- | Analogous to newCString. Creates UTF8 encoded CString.
-- The caller is responsible for freeing the returned buffer.
newUTF8String :: String -> IO CString
newUTF8String hs = do
  p <- newArray0 nullByte (toUTF8 hs)
  return (castPtr p)

-- | Analogous to newCStringLen.
-- The length returned is in bytes (encoding units), not chars.
-- NOTE(review): this function is not in the export list above.
newUTF8StringLen :: String -> IO CStringLen
newUTF8StringLen hs = do
  let utf8 = toUTF8 hs
  p <- newArray0 nullByte utf8
  return (castPtr p, length utf8)

-- | Analogous to withCString. Creates UTF8 encoded CString.
withUTF8String :: String -> (CString -> IO a) -> IO a
withUTF8String s action = withUTF8StringLen s (\(cstr, _) -> action cstr)

-- | Analogous to withCStringLen.
-- The length returned is in bytes (encoding units), not chars.
withUTF8StringLen :: String -> (CStringLen -> IO a) -> IO a
withUTF8StringLen s action = do
  let utf8 = toUTF8 s
  withArray0 nullByte utf8 (\arr -> action (castPtr arr, length utf8) )

-- | Decode a String of UTF8-encoded bytes into a Haskell Unicode
-- String. Each input Char is first truncated to its low 8 bits by
-- 'charToWord8'.
fromUTF8String :: String -> String
fromUTF8String = fromUTF8 . map charToWord8

-- Truncates code points > 255 to their low 8 bits (fromIntegral to Word8).
charToWord8 :: Char -> Word8
charToWord8 = fromIntegral . fromEnum

-- | Encode a Haskell String as UTF8, returning the encoded bytes as a
-- String whose Chars are all in the range 0-255.
toUTF8String :: String -> String
toUTF8String = map word8ToChar . toUTF8

word8ToChar :: Word8 -> Char
word8ToChar = unsafeChr . fromIntegral

-- | Length, in bytes (encoding units), of the UTF8 encoding of the String.
lengthUTF8 :: String -> Int
lengthUTF8 s = length (toUTF8 s)

{- The codepoint-to-UTF8 rules:
   0x00 - 0x7f: 7 bits: as is
   0x80 - 0x07ff: 11 bits
     byte 1: 0xC0 OR ((x << 6) AND 0x1F)   i.e. 0xC0 + bits 7-11 (bits 12-up are 0)
     byte 2: 0x80 OR (x AND 0x3F)          i.e. 0x80 + bits 1-6
   0x0800 - 0xFFFF: 16 bits
     byte 1: 0xE0 OR ((x << 12) AND 0x0F)  i.e. 0xE0 + bits 13-16
     byte 2: 0x80 OR ((x << 6) AND 0x3F)   i.e. 0x80 + bits 7-12
     byte 3: 0x80 OR (x AND 0x3F)          i.e. 0x80 + bits 1-6
   0x00010000 - 0x001FFFFF: 21 bits
     byte 1: 0xF0 OR ((x << 18) AND 0x07)  i.e. 0xF0 + bits 19-21
     byte 2: 0x80 OR ((x << 12) AND 0x3F)  i.e. 0x80 + bits 13-18
     byte 3: 0x80 OR ((x << 6) AND 0x3F)   i.e. 0x80 + bits 7-12
     byte 4: 0x80 OR (x AND 0x3F)          i.e. 0x80 + bits 1-6
   0x00200000 - 0x03FFFFFF: 26 bits (5 bytes: 0xF8 lead, then four 0x80 bytes)
   0x04000000 - 0x7FFFFFFF: 31 bits (6 bytes: 0xFC lead, then five 0x80 bytes)
   (The 5- and 6-byte forms follow the same pattern; see the commented-out
   guards in toUTF8 below.)
   NOTE(review): "<<" in this table presumably denotes a *right* shift --
   the code uses shiftR -- so the original notation is misleading.
-}

-- | Convert Unicode characters to UTF-8.
-- Raises an error for codepoints above 0x10FFFF (the Unicode maximum).
toUTF8 :: String -> [Word8]
toUTF8 [] = []
toUTF8 (x:xs) = toUTF8' (ord x)
  where
    toUTF8' x
      | x <= 0x0000007F = fromIntegral x : more
      | x <= 0x000007FF = w8 0xC0 6 : w8 0x80 0 : more
      | x <= 0x0000FFFF = w8 0xE0 12 : w8 0x80 6 : w8 0x80 0 : more
      -- If we want to encode chars > 1114111 then this test should be
      --   x <= 0x001FFFFF
      -- because that's the upper limit of the 4-byte encoding
      -- (and the 5- and 6-byte cases below might also be enabled).
      | x <= 0x0010FFFF = w8 0xF0 18 : w8 0x80 12 : w8 0x80 6 : w8 0x80 0 : more
      | otherwise = error ("toUTF8: codepoint " ++ show x ++ " is greater than the largest allowed (decimal 1114111, hex 0x10FFFF).")
{-
      -- Potentially useful code, if Haskell ever supports codepoints > 0x0010FFFF.
      -- There are no tests for this, because we can't create Strings containing
      -- chars > 0x0010FFFF.
      | x <= 0x03FFFFFF = w8 0xF8 24 : w8 0x80 18 : w8 0x80 12 : w8 0x80 6 : w8 0x80 0 : more
      | x <= 0x7FFFFFFF = w8 0xFC 30 : w8 0x80 24 : w8 0x80 18 : w8 0x80 12 : w8 0x80 6 : w8 0x80 0 : more
      | otherwise = error ("toUTF8: codepoint " ++ show x ++ " is greater " ++ "then the largest that can be represented by UTF8 encoding" ++ "(decimal 2147483647, hex 0x7FFFFFFF).")
-}
      where
        -- Encode the rest of the input.
        more = toUTF8 xs
        -- One encoded byte: 'base' marker bits OR'd with (x >> rshift)
        -- masked down to the payload width implied by the marker.
        w8 :: Word8 -> Int -> Word8
        w8 base rshift = base .|. (fromIntegral (shiftR x rshift) .&. mask)
          where
            mask | base == 0x80 = 0x3F
                 | base == 0xC0 = 0x1F
                 | base == 0xE0 = 0x0F
                 | base == 0xF0 = 0x07
                 | base == 0xF8 = 0x03
                 | base == 0xFC = 0x01

{- And the rules for UTF8-to-codepoint - examine first byte:
   0x00-0x7F: 1 byte: as-is
   0x80-0xBF: error (surrogate)
   0xC0-0xDF: 2 bytes: b1 AND 0x1F + remaining
   0xE0-0xEF: 3 bytes: b1 AND 0x0F + remaining
   0xF0-0xF7: 4 bytes: b1 AND 0x07 + remaining
   0xF8-0xFB: 5 bytes: b1 AND 0x03 + remaining
   0xFC-0xFD: 6 bytes: b1 AND 0x01 + remaining
   0xFE-0xFF: error (byte-order-mark indicators: UTF8 - EFBBBF, UTF16 - FEFF or FFFE)
   remaining = lower 6 bits of each byte, concatenated
   NOTE(review): bytes 0x80-0xBF are UTF8 *continuation* bytes; the
   original comment's term "surrogate" is presumably a misnomer.
-}

-- | Convert UTF-8 to Unicode.
-- Raises an error on illegal lead bytes, embedded NULs, and truncated
-- multi-byte sequences. Continuation bytes are NOT range-checked here.
fromUTF8 :: [Word8] -> String
fromUTF8 [] = ""
fromUTF8 (x:xs)
  | x <= 0x7F = remaining 0 (fromIntegral x) xs
  | x <= 0xBF = err x
  | x <= 0xDF = remaining 1 (bAND x 0x1F) xs
  | x <= 0xEF = remaining 2 (bAND x 0x0F) xs
  | x <= 0xF7 = remaining 3 (bAND x 0x07) xs
  | otherwise = err x
{-
  -- Again, only works for chars > 0x0010FFFF, which we can't test.
  | x <= 0xFB = remaining 4 (bAND x 0x03) xs
  | x <= 0xFD = remaining 5 (bAND x 0x01) xs
  | otherwise = err x
-}
  where
    err x = error ("fromUTF8: illegal UTF-8 character " ++ show x)
    -- Mask a byte and widen it to Int for shifting/accumulation.
    bAND :: Word8 -> Word8 -> Int
    bAND x m = fromIntegral (x .&. m)
    -- Accumulate n more continuation bytes into codepoint x,
    -- then emit the Char and continue with the rest of the input.
    remaining :: Int -> Int -> [Word8] -> String
    remaining 0 x xs = chr x : fromUTF8 xs
    remaining n x [] = error "fromUTF8: incomplete UTF8 sequence"
    remaining n x (b:xs)
      | b == 0 = err x
      | otherwise = remaining (n-1) ((shiftL x 6) .|. (bAND b 0x3F)) xs

{- This version of fromUTF8Ptr starts at the end of the array and works
   backwards. This will allow us to create the result String with constant
   space usage. Contrast this with creating the String by processing the
   array from start to end: in this case we would probably use an
   accumulating parameter, and reverse the list when we reach the end of
   the array. This isn't so bad, if we expect reverse to work in constant
   space (more or less).
-}

-- | Convert UTF-8 to Unicode, from a null-terminated C array of bytes.
-- This function is useful, in addition to 'fromUTF8' above,
-- because it doesn't create an intermediate @[Word8]@ list.
fromUTF8Ptr0 :: Ptr Word8 -> IO String
fromUTF8Ptr0 p = do
  len <- lengthArray0 nullByte p
  fromUTF8Ptr (len-1) p ""

-- | The bytes parameter should be len-1 --
-- i.e. if the CString has length 2, then you should pass bytes=1.
-- That's because we add bytes to the Ptr p to get the offset
-- for each byte; byte 1 is at p+0, byte 2 is at p+1, etc.
-- Decodes right-to-left, consing onto acc, so acc is built in order;
-- a leading BOM (U+FEFF) is stripped at the end.
-- NOTE(review): "as position" in the zero-byte message below is
-- presumably a typo for "at position"; left unchanged here.
fromUTF8Ptr :: Int -> Ptr Word8 -> String -> IO String
fromUTF8Ptr bytes p acc
  -- Strictness hack: force the arguments; this guard is always False.
  | bytes `seq` p `seq` acc `seq` False = undefined
  | bytes < 0 = do
      if null acc
        then return acc
        else
          -- BOM = chr 65279 ( EF BB BF )
          if head acc /= chr 65279
            then return acc
            else return (tail acc)
  | otherwise = do
      x <- liftM fromIntegral (peekElemOff p bytes)
      case () of
        _ | x == 0 -> error ("fromUTF8Ptr: zero byte found in string as position " ++ show bytes)
          | x <= 0x7F -> fromUTF8Ptr (bytes-1) p (unsafeChr x:acc)
          | x <= 0xBF && bytes == 0 -> error "fromUTF8Ptr: surrogate at start of string"
          -- Continuation byte: skip it; the whole sequence is decoded
          -- by readUTF8Char when its lead byte is reached.
          | x <= 0xBF -> fromUTF8Ptr (bytes-1) p acc
          | otherwise -> do
              c <- readUTF8Char x bytes p
              fromUTF8Ptr (bytes-1) p (c:acc)

-- | Decode one multi-byte sequence whose lead byte x sits at 'offset';
-- continuation bytes are read forward from offset+1.
-- Continuation bytes are not range-checked (assumed 0x80-0xBF).
readUTF8Char :: Int -> Int -> Ptr Word8 -> IO Char
readUTF8Char x offset p
  -- Strictness hack, as in fromUTF8Ptr.
  | x `seq` offset `seq` p `seq` False = undefined
  | otherwise = case () of
      _ | x == 0 -> err x
        | x <= 0x7F -> return (unsafeChr x)
        | x <= 0xBF -> err x
        | x <= 0xDF -> do
            x1 <- liftM fromIntegral (peekElemOff p (offset + 1))
            return (unsafeChr ( ((x - 0xC0) * 64) + (x1 - 0x80) ))
        | x <= 0xEF -> do
            x1 <- liftM fromIntegral (peekElemOff p (offset + 1))
            x2 <- liftM fromIntegral (peekElemOff p (offset + 2))
            return (unsafeChr ( ((x - 0xE0) * 4096) + ((x1 - 0x80) * 64) + (x2 - 0x80) ))
        | x <= 0xF7 -> do
            x1 <- liftM fromIntegral (peekElemOff p (offset + 1))
            x2 <- liftM fromIntegral (peekElemOff p (offset + 2))
            x3 <- liftM fromIntegral (peekElemOff p (offset + 3))
            return (unsafeChr ( ((x - 0xF0) * 262144) + ((x1 - 0x80) * 4096) + ((x2 - 0x80) * 64) + (x3 - 0x80) ))
        | otherwise -> err x
  where err x = error ("readUTF8Char: illegal UTF-8 character " ++ show x)
http://hackage.haskell.org/package/Takusen-0.8.7/docs/src/Foreign-C-UTF8.html
CC-MAIN-2015-32
refinedweb
1,531
70.13
Hi again, Probably caused by this problem softdevice always starts suspended but I can't change to playing. When I try it always kept the suspended state. Thanks. Leo Márquez escribió: > Hi: > >> On Donnerstag 02 November 2006 23:38, Stefan Lucke wrote: >> >> >>> Quoting Leo Márquez: >>> >>> >>> >>>> Hi again, >>>> >>>> Stefan Lucke escribió: >>>> >>>> >>>> >>>>>> / But after started it without problems now I get: >>>>>> >>>>> >>>>> />/ >>>>> />/ [softdevice] A/V devices initialized, now initializing MPEG2 >>>>> Decoder >>>>> />/ X Error of failed request: BadValue (integer parameter out of >>>>> range for >>>>> />/ operation) >>>>> />/ Major opcode of failed request: 145 (MIT-SHM) >>>>> />/ Minor opcode of failed request: 3 (X_ShmPutImage) >>>>> />/ Value in failed request: 0x3ff >>>>> / >>>>> Which are the startup options now ? >>>>> 0x3ff = 1023 = 1024 - 11 >>>>> >>>>> >>>>> >>>> >>>> My startup options are only "-vo xv:full" >>>> >>>> >>>> >>>>> Did you changed your X resolution via "ALT"+"CTRL"+"Keypad +" ? >>>>> >>>>> >>>>> >>>> >>>> No but I press the 'f' key to toggle between fullscreen and >>>> windowed vdr >>>> mode. >>>> The second time I run vdr with -vo xv:full don't work >>>> >>> >>> Ah, I should have mentioned that you _must_ not toggle full screen -> >>> windowed mode, as windowed mode works with some default values >>> which even did not work previously (thats why I ask for full screen >>> mode). >>> >>> I'll send you a patch tomorrow that will restrict these values. >>> >>> >> >> >> Ok, here comes the patch. I tested it with 640x480 resolution and now >> I do not >> get these failed request any more. In contrast to your setup, mine >> worked fine >> when setting "softdevice.OSDalphablend = 1" in vdr's setup.conf even >> without the patch. 
>> >> >> >> ------------------------------------------------------------------------ >> >> Index: video-xv.c >> =================================================================== >> RCS file: /cvsroot/softdevice/softdevice/video-xv.c,v >> retrieving revision 1.63 >> diff -U3 -r1.63 video-xv.c >> --- video-xv.c 29 Sep 2006 19:17:18 -0000 1.63 >> +++ video-xv.c 3 Nov 2006 18:29:54 -0000 >> @@ -862,6 +862,23 @@ >> } >> } >> #endif >> + >> + /* >> ------------------------------------------------------------------------- >> >> + * limit widht and height to screen dimensions >> + */ >> + { >> + int screen_width, screen_height; >> + >> + screen_width = DisplayWidth(dpy, scn_id); >> + screen_height = DisplayHeight(dpy, scn_id); >> + + if (height > screen_height) >> + old_dheight = lheight = dheight = height = screen_height; >> + >> + if (width > screen_width) >> + old_dwidth = lwidth = dwidth = width = screen_width; >> + } >> + >> /* >> ------------------------------------------------------------------------- >> >> * default settings which allow arbitraray resizing of the window >> */ >> >> >> ------------------------------------------------------------------------ >> >> _______________________________________________ >> vdr mailing list >> vdr at linuxtv.org >> >> >> >> __________ Información de NOD32, revisión 1.1832 (20061025) __________ >> >> Este mensaje ha sido analizado con NOD32 antivirus system >> >> >> >> > > > _______________________________________________ > vdr mailing list > vdr at linuxtv.org > > > __________ Información de NOD32, revisión 1.1852 (20061103) __________ > > Este mensaje ha sido analizado con NOD32 antivirus system > > > >
http://www.linuxtv.org/pipermail/vdr/2006-November/011162.html
CC-MAIN-2015-48
refinedweb
395
62.88
We have looked at fonts in a previous C# tutorial, in Auto Ellipsing in C#. But whereas in that tutorial a string gets cut off if there is not enough room, in this tutorial we are going to take a look at how to scale the font such that the text will fit in whatever you need it in. As a side note, look for a revisit on ellipsing in C# at some point in the future - as one of our readers sent us some interesting info about the performance characteristics of the API calls we used in that tutorial. The code we will show here today can probably be used with any drawing API with minimal tweaks, because, surprisingly, we don't need to do anything crazy. Essentially, we are going to build a function today that takes in a string and a Size, and returns a font size that will make that string take up that amount of space. Below, you can see some screen shots of the example app at different sizes: ![ Font Scaling Screenshot]() So how do we do this? Well, here is the method signature: public static Font AppropriateFont(Graphics g, float minFontSize, float maxFontSize, Size layoutSize, string s, Font f, out SizeF extent) Some of these arguments should be pretty self explanatory. For example, minFontSize and maxFontSize are the minimum and maximum font sizes, respectively, that the function will be allowed to return. The string s is the string that we will be fitting, and the layoutSize is the size that the function will fit the string to. The graphics object, g, is needed because one of the methods we use in the function is attached to the graphics object. The font f that is passed in is needed so we know what font family and style the string will be drawn with (the size set in this font object will actually be completely ignored). Finally, the out param extent is the final size the string ends up being using the font returned by the function. Onto the body of the method:; } Not much code at all, really. 
First, we check to see if the min and max font size are equal, and if so, we create a new font with that size (because we will return it without doing any work in a moment). Then we measure the string with the current font (because out params always need to be set before a function can return). Next, if the max font size is less than or equal to the min font size, we return the current font. This means that if you passed in a bad min and max, you are going to get back the font you passed in, and if you passed in min and max as the same number, you will get a font back out whose size is that number. After those checks, we get to the real work. In order to do the calculation in a single pass, we calculate a ratio of the available size over the size of the string. A ratio above 1 means there is space left and we can make the font bigger, and a ratio below 1 means that the font is too big for the space and it need to be made smaller. And guess what? All we need to do is multiply the current font size by that ratio, and we get the correct font size for the space. Cool, eh? Of course, that font could be above max or below the minimum value provided, so we throw in some checks to constrain the value. Finally, we create the new font, measure the string with that font (so the extent out param has the right value), and return the new font. And that is all there is to it! Of course, it wouldn't be very nice of me if I didn't give you the code showing how to use this function. 
So here is the code from the silly example app: using System; using System.Collections.Generic; using System.ComponentModel; using System.Data; using System.Drawing; using System.Text; using System.Windows.Forms; namespace FontScaling { public partial class Form1 : Form { string _text = "Resize Me!!"; public Form1() { InitializeComponent(); this.SetStyle(ControlStyles.ResizeRedraw, true); } protected override void OnPaint(PaintEventArgs e) { using (Font f = new Font("Tahoma", 15)) { SizeF size; using (Font f2 = AppropriateFont(e.Graphics, 5, 50, ClientRectangle.Size, _text, f, out size)) { PointF p = new PointF( (ClientRectangle.Width - size.Width) / 2, (ClientRectangle.Height - size.Height) / 2); e.Graphics.DrawString(_text, f2, Brushes.Black, p); } } base.OnPaint(e); }; } } } Pretty simple - I find the appropriate font for the string in the OnPaint method, using the current size of the form as the area for the string. I get back the font and draw the string with it, using the size out param (and the size of the form) to center the string on the form. One neat little tidbit to note here is the ControlStyle I set in the form constructor. By setting the ResizeRedraw style to true, the form will automatically repaint every time it is resized. And there you go - quick font scaling in C#! I even packaged my example project up for you to play with - you can download it below. Source Files: Hi, I sub-classed a label control with this code. It works great except that it draws the text twice: once with what appears to be the default size, and again with the new size. I tried a lot of stupid things just to try to understand the behavior, but to no avail. Does anybody have any idea why this is happening? Great function - very clean, easy to use. Thank you! Hi, it is good example to re size text on the form. but I have to do same process on the control Like I have Button1 , textBoxt1 and other controls that should be same re sized as your text. 
saket.kumar@almondz.com saketkumar.dwivedi26@gmail.com please mail to me void textBlock3_SizeChanged(object sender, SizeChangedEventArgs e) { Size s = e.NewSize; textBlock3.Width = s.Width; textBlock3.Height = s.Height; } Did anybody actually run this code? I get a recursive stack overflow from the main app: remove the base.OnPaint(e); Problem solved. OK, in my haste to post the WPF solution, I think I made my previous two postings less than readable. Perhaps the third time will be the charm. {:-) The original posting for resizing text within a WPF window can be found at In short, to obtain the same effect in WPF, just embed a TextBlock inside a Viewbox. When the TextBlock is resized inside the WPF Window, the contents of the TextBlock will automatically resize the Text property, provided that the TextBlock Content Control doesn't have a MaxWidth nor MaxHeight constraining element to a fixed minimum or maximum size. As a result, the following XAML code does the same as the WinForms example presented in this tutorial: Application.xaml: MainWindow.xaml: Again, there is no change required to either the Application nor MainWindow C# code. I hope this was of help... Thanks! that put an end to my struggle, still new at WPF so spent a couple hours trying to make a textblock fit to a grid cell or canvas. Just a note. A ViewBox works good in Xaml; but if you are drawing text in a custom control for example this method still works good. You don't have the graphics object but you can use a FormattedText object to get the size of a given font. Greetings: After a bit of reseach, I came across a following posting at that describes how to get automatic font scaling within WPF. In short, to obtain the same effect in WPF, just embed a inside a . When the is resized inside the WPF Window, the contents of the will automatically be resized, provided that the Content Control doesn't have a MaxWidth nor MaxHeight constraining element. 
As a result, the completed XAML Code, with no custom C# code does the same as the WinForms example presented in this tutorial. Application.xaml: MainWindow.xaml: Resize Me!! Again, there is no change required to either the Application nor MainWindow C# code. I hope this was of help.... My apologies for the oversight on my part. The XAML is as follows: Application.xaml Hi, We have an application where we want to chnage the font size as in Office Excel 2007 SLider bar at the right bottom corner. This is not exactly chnaging the font, instead it is resizing the entire image of the window. Any idea on how to implemet in c# Hi .... This is an excellent app. But instead of on the form how do I apply to a label. What I have is an application where you can type in text which will be displayed on a label control. We allready know the font, size, type they are using. We also know the maximum height and length of the label control. What I need to do is reduce the font size if the typed font does not fit onto that label. Which this small code would do it, but how do i apply it for a label if you could please help I would appriciate it very much. Thank you hi.....! It was really nice to be here As it gives great platform of idea sharing & problem solving facility...Thnx...!! My problem is that i want to develop web application which does the Custom t-shirt designing & in that i want the functionality of choosing the ClipArts,ColorPalletes,TextSize,Rotation,Zoom in-out All this functionality should be done on the t-shirt itself...So,Can Anyone help me in finding out the source code for the same...Most important is that i am using Visual Studio 2005(.net) & C# as language....Thanks for reading me.....!! i want to do similar thing in wpf window. It's different that normal windows form. Is there any solution for it Hi, Rakesh: Did you ever find an equivilent function for scaling the font size for a WPF based Content Control class you would be able to share? 
Thank you in advance for your time and help... Thank you! FYI, here is a simplified version I'm using: Thanks, very useful! Great function! Just what I needed. However, I think you could make it just a tad simpler still without the 'extent' out parameter. You can also drop the p PointF in the client code used for positioning. With the StringFormat object, you don't need to have the size returned to the client as .Net can do the vertical and horizontal text alignment for you! You just need your function to return the 'AppropriateFont'! Jeff Hello there... it's been some time since I've been struggling with text resizing in C#, and your article certainly pointed me to a good direction. But I'm still having trouble with one approach. If I reduce the display string just to one letter (let's suppose: "A") and adjust the maximum font size to any high value (such as 1500), the text measurements will be getting lots of spaces. I haven't found any way in which I can overcome those spaces. What if I want to scale letters to the pixel? Do you know how to do this? Thanks in advance. Alpha, the spaces are the result of what is called kerning - I am not sure how much fidelity there is in the newer .net frameworks to adjust this - hopefully a nudge in the right direction?
http://tech.pro/tutorial/691/csharp-tutorial-font-scaling
CC-MAIN-2014-35
refinedweb
1,941
72.16
A first hand look from the .NET engineering teams At the Build 2012 event, the Windows Azure team announced that you can use the .NET Framework 4.5 to build and deploy apps for Windows Azure. You can use many of the features that we made available in the .NET Framework 4.5, including C# 5 and Visual Basic 11 features that will help increase your productivity and make your apps faster and able to scale in the cloud. Please download the new update to the Windows Azure SDK. The new SDK is available for both Visual Studio 2010 and 2012. For .NET Framework 4.5 development, you'll need to use Visual Studio 2012 or Visual Studio 2012 Express for Web. We have enabled the .NET Framework 4.5 in the following Windows Azure services: By default, Cloud Services apps are now deployed on Windows Server 2012, which includes the .NET Framework 4.5. Windows Azure Web Sites are deployed on Windows Server 2008 R2 machines, which also include the .NET Framework 4.5. They will soon be deployed to Windows Server 2012 machines, like Cloud Services apps. In addition, you can opt to use Windows Server 2012 images when you deploy Virtual Machines. I hope that you enjoy using the .NET Framework 4.5 with these Windows Azure services. You can see the new Windows Azure admin portal below for Windows Azure Web Sites. The Configure tab enables you to select the .NET Framework version to use for your site. The .NET Framework 4.5 is selected by default, as opposed to the .NET Framework 3.5. Both .NET Framework 4 and 4.5 sites run on the .NET Framework 4.5. There are many reasons to start using the .NET Framework 4.5 in Windows Azure. We’ve talked about some of them, including the new background server GC, on this blog. There are also many new features within ASP.NET and Windows Communication Foundation (WCF), such as HTML5 WebSockets. The must-have feature in the .NET Framework 4.5 is async, which lends itself perfectly to achieving scale in the cloud. Let’s take a look. 
When we started working on .NET Framework 4.5, we wanted to enable .NET Framework apps to scale much better, particularly in a cloud environment like Windows Azure. The .NET Framework already has several features that enable parallelism through threads. Those features—for instance, the .NET Framework thread pool, the BackgroundWorker class, and others—are great for reducing the latency of requests to your website or service. On the Windows platform, threads alone are not enough to enable maximum scaling. To get the most out of your app, you need to also make use of asynchronous I/O built into the operating system. For years, this has been very hard to do, but now with async in the .NET Framework, it has become very simple. We've been talking about the new task-based asynchronous programming model this year on the .NET Framework blog. Please take a look at our earlier posts to better understand the basics of async in the .NET Framework. Now that Windows Azure supports the .NET Framework 4.5, it's time to take a look at how async can scale your apps, and how it makes cloud programming with the .NET Framework better. For the cloud, the primary goal is to serve up website or service requests as quickly as possible. Another very important goal is to limit the number of machines or VMs that you need to make available to keep up with demand. Asynchrony is very effective at helping you achieve this second goal of scaling and make better use of your hardware. I’m sure that you’ve written ASP.NET or WCF code that parallelizes I/O calls to the file system, network, or database by spinning off multiple threads and performing each I/O call on a separate thread. In those cases, many of your threads spend time sitting idle, waiting for some other system (like the file system) to return some data. If your server gets many requests at the same time, then the number of threads (many of which are potentially waiting idle at any given point) starts to grow. 
Large numbers of idle threads can result in a significant tax on memory. .NET Framework async approaches the challenge of scaling in a very different way. It returns a thread to the thread pool when it is waiting on an operation to complete that doesn’t require the CPU, such as file I/O or networking. This makes that thread available for other operations. As a result, you can expect that async will use fewer threads at a time, requiring less memory, which enables your site to scale. Async programming with the .NET Framework is not new. You’ve been able to program with the IAsyncResult interface, for example, for years now. The big advance in the .NET Framework 4.5 is an async model that is much easier to use because it has great support in Visual Studio 2012, the managed languages, the .NET Framework class libraries, and the CLR. You can now take advantage of this model in the code that you deploy to Windows Azure, to scale your sites and services. Async is a great general model for the cloud, and is a major win for scaling in ASP.NET in particular. ASP.NET requests threads from the CLR thread pool to serve incoming requests. When demand is high and ASP.NET requests a large number of threads from the thread pool, the latency of requests and memory usage may start to climb. For example, if your Web Form event or your MVC action method is blocked on I/O, ASP.NET may need an increasing number of threads to service requests. The more threads you use, the more memory you will use, which can eventually lead to disk-backed memory paging, which can significantly affect response time. Instead, you can use async to use the threads available to your web server much more efficiently. Making an event in a Web Form or an action method in MVC asynchronous enables ASP.NET to use threads much more efficiently. The async model enables you to serve more requests on the same hardware. 
Your job as a web programmer is to identify the scenarios where your code would benefit from asynchrony, by awaiting async calls and enabling ASP.NET to scale. As a general rule of thumb, if the .NET platform has gone to the effort of providing an async API for a given scenario, that is probably a scenario that will benefit from asynchrony. Async support has been added to ASP.NET 4.5, enabling you to write task-based async code within ASP.NET pages, http handlers, and http modules. This support includes both ASP.NET Web Forms and MVC. All APIs that use the Task-based Async Programming model (TAP) need to adhere to certain requirements. For instance, all async methods should return Task or Task<T> objects. Let’s take a look at HttpTaskAsyncHandler. This is the new base type for HTTP handlers that are built with the new async model. You will need to use this type if you want to call async APIs within HTTP handlers. The following example demonstrates a very minimal async HTTP handler implementation. Since the ProcessRequestAsync method is marked as async and returns a Task, you are free to await async calls in your HTTP handler implementation, as you see below. public class MyAsyncHandler : HttpTaskAsyncHandler{ // ASP.NET automatically takes care of integrating the Task-based override // with the ASP.NET pipeline. public override async Task ProcessRequestAsync(HttpContext context) { HttpClient client = new HttpClient(); String result = await client.GetStringAsync(""); // Do something with the result. 
}} Before – synchronous (blocking) version: public ActionResult Index(){ WebClient client = new WebClient(); var pageString = client.DownloadString(""); ViewBag.PageString = pageString; return View();} public async Task<ActionResult> IndexAsync(){ var client = new WebClient(); String pageString = await client.DownloadStringTaskAsync(""); ViewBag.PageString = pageString; return View();} ASP.NET Web Forms has also been updated to enable you to write async code, as you will see in the next section. Let’s take a look at a concrete case of asynchronous code written as an ASP.NET Web Forms project. This app uses the .NET Framework networking APIs to call into REST APIs exposed by a music service. You’ll notice that the type and method names are music-oriented, which matches the service. Notice that I use the async and await keywords in the body of the code, but then I compose tasks together at a higher level within the app by using the Task.WhenAll method. The advantage of this approach is that you can request that an aggregated set of operations be run at once, providing your site with the benefits of parallel behavior with the lower cost of an asynchronous implementation. This approach is particularly useful if the operations spend a disproportionate amount of time waiting for the network file system to return data. Let’s take a look at the code. First, I’ll make sure that my project targets the .NET Framework 4.5; otherwise, I won’t get access to all the new features. Next, I have to enable asynchronous processing within my ASP.NET Web Forms app by using the Async page directive: <%@ Page Async="true" Similarly, I have to enable async code within the Page_Load event, by adding the async keyword before the return type: protected async void Page_Load(object sender, EventArgs e) Now that both of those are in place, I can write some async code. My example is a minimal implementation of a site that calls into a music service to get music track information. 
The GetTracksAsync method gets a list of REST APIs to call, each of which represents a separate artist, and then calls into the GetArtistTracksAsync method to get the tracks available for each artist. GetArtistTracks also contains JSON deserialization code. You can safely ignore that part of the example. protected async void Page_Load(object sender, EventArgs e){ List<Track> trackList= await GetTracksAsync(); // Now do stuff with all of the music tracks.}async Task<List<Track>> GetTracksAsync(){ List<string> urls = GetRestURLs(); var hype = new Hype(); List<Track>[] results = await Task.WhenAll( from url in urls select hype.GetArtistTracksAsync(url)); return results.SelectMany(r => r).ToList();}//Hype.GetArtistTracksAsyncpublic async Task<List<Track>> GetArtistTracksAsync(String uri){ WebClient client = new WebClient(); // This line is the important one. // Asynchronous call over the network. String json = await client.DownloadStringTaskAsync(uri); // JSON deserialization -- also async. Dictionary<String, JToken> jTokens = await JsonConvert.DeserializeObjectAsync<Dictionary<String, JToken>>(json); List<Track> tracks = new List<Track>(); foreach (String key in jTokens.Keys) { if (key == "version") continue; Track track = jTokens[key].ToObject<Track>(); tracks.Add(track); } return tracks;} I like this approach, since it enables a potentially parallel behavior efficiently. This is what Task.WhenAll gives you. What would happen if I’d simply used await everywhere, without Task.WhenAll? The await keyword behaves sequentially, since it needs to satisfy the procedural nature of your code. It is sequential, but it is not synchronous. Every time your code awaits an async call, you’ve provided an opportunity for the async system to return your thread to its caller, while you await that call. Note that the caller could be the CLR thread pool. And the code that follows an await will be executed only after the awaited entity has entirely completed. 
Some of your code may be able to take advantage of Task.WhenAll to enable parallelism; however, you should make extensive use of awaiting async APIs to enable scaling through asynchrony. The following code is a commented non-linq expansion of GetTracksAsync, if you are not familiar with linq syntax. It provides an easier to read version of what the code above is doing. async Task<List<Track>> GetTracksAsync() { List<String> urls = GetRestURLs(); // 'tasks' is the list of tasks that the code collects in the for loop, // which can then be awaited as a group with Task.WhenAll. List<Task<List<Track>>> tasks = new List<Task<List<Track>>>(); List<Track> trackList = new List<Track>(); // Encapsulates Hype Machine service –. Hype hype = new Hype(); // Collect Tasks for all work to be done. // Notice the lack of the await keyword here. // We want Tasks! foreach (String url in urls) { Task<List<Track>> task = hype.GetArtistTracksAsync(url); tasks.Add(task); } // This is the important line. // All work is collected as Tasks and then aggregated into a single task to // await with Task.WhenAll. // Task.WhenAll lets you to await all the Tasks in one statement // allowing then to potentially run in parallel. await Task.WhenAll(tasks); // Aggregate results into a single List<Track> to return to caller. foreach (Task<List<Track>> task in tasks) { // We can await a task as often as we like. // We know that all tasks have already completed. // Awaiting an already completed task is a very lightweight operation that completes immediately. // Using the await here (rather than, for instance, task.Result) allows potential errors to propagate correctly. trackList.AddRange(await task); } return trackList; } I did an unscientific performance analysis of three versions of the code sample above. First, I have the example above, which uses Task.WhenAll. Next, I created another version that always awaits async calls, with no use of Task.WhenAll. 
Last, I created a fully synchronous version that calls only synchronous APIs – not APIs that return Task. To make the analysis meaningful, I provided several artists to query for, resulting in a significant number of REST payloads to request and process. I conducted the experiment within an ASP.NET Web Forms app running under CTRL+F5 -- retail build. Let’s take a look at the performance results. The numbers really tell the story. The low number for the asynchronous – Task.WhenAll approach demonstrates the value of the parallel approach. My app returns quickly and is likely to scale well, because it is utilizing an asynchronous implementation from the top to the bottom of the call stack. The plain asynchronous – await model is also good, because it enables similar throughput as a synchronous model, but again, it can be scaled more efficiently with an asynchronous implementation. Last, the synchronous model example is the right option when an async option doesn’t exist. As you see, you now have several approaches to meet your site's throughput and scaling needs. Compute-bound operations are a good example of the synchronous model. They can benefit from parallelism (by using threads); however, because compute-bound operations do not sit idle, unnecessarily occupying a thread, there is no thread that the async infrastructure can return to the thread pool for use by other operations. Therefore the .NET Framework does not offer async versions of compute-bound operations directly. If you want to make sure that a compute-bound operation is executed on the thread pool and off your current thread, you can pass it to the Task.Run(..) method. This method returns a Task representing the completion of your operation, so you can await and compose its results in the same manner as other asynchronous operations. It is useful to contrast the two different asynchronous implementations. 
I found that there are minimal code differences between the two asynchronous approaches, at least in the way I used them in my sample. It comes down to how async calls are handled within the for loop in the example. Let’s take a look at the two cases. Here's the Task.WhenAll example again: async Task<List<Track>> GetTracksAsync() { List<string> urls = GetRestURLs(); var hype = new Hype(); List<Track>[] results = await Task.WhenAll( from url in urls select hype.GetArtistTracksAsync(url)); return results.SelectMany(r => r).ToList(); } async Task<List<Track>> GetTracksAsync(){ List<String> urls = GetRestURLs(); List<Track> trackList = new List<Track>(); Hype hype = new Hype(); foreach (String url in urls) { List<Track> tracks = await hype.GetArtistTracksAsync(url); trackList.AddRange(tracks); } return trackList;} The two examples are very similar. The difference is that the second one awaits hype.GetArtistTracksAsync after each call. This means that although the system can use some of the resources occupied by my program for other things while the program is waiting for the data to arrive over the network, the program flow can only continue after each set of tracks has been retrieved. Conversely, in the first version I request all required sets of tracks to be downloaded and then await them all together. This allows the system to share the resources between the download operations and perform them concurrently if there are enough system resources available. As a result, the downloads are likely to complete sooner. An interesting observation is that the two versions of GetTaskAsync are able to satisfy the same method signature, specifically the return type. The way that one implementation aggregates tasks is purely an implementation detail. Another observation is that both methods are able to call and rely on the same async method, Hype.GetArtistTracksAsync. This method did not need any knowledge of the different approaches to awaiting the resulting tasks. 
Therein lies a big part of the value of the task-based model. As you start to use async within your sites and services, please do spend some time looking at how these different models affect your performance, both in terms of throughput and memory. Async support has also been added to WCF. The async benefits and model in WCF are very similar to ASP.NET, and they enable you to scale your WCF services much better on the server. We have also enabled automatic generation of async proxy methods for your service operation, which will enable client apps to easily call service operations asynchronously. WCF services expose operation contracts for clients to implement and call. In WCF 4.5, the use of the asynchronous programming model on either the client or the service is a local implementation detail that is not reflected in the operation contract. The wire format of a message that a client sends to a service (or vice versa) remains the same, enabling the client and service to independently choose either synchronous or asynchronous implementations. What has changed is the local behavior. If the client calls a service operation asynchronously, the client will not block any threads while making the service call, enabling the client UI to remain responsive. On the service side, if the operation is implemented asynchronously, the async system can return the thread back to the CLR thread pool to enable better scaling of the service. As in ASP.NET, it is pretty easy to take advantage of async APIs within a WCF service. It is as simple as adding an async method to your service, which will enable you to await async APIs. As you can see below, I added a single async method, highlighted in yellow, within the code provided with the default WCF Service Application template in Visual Studio 2012. I am able to await WebClient.DownloadStringTaskAsync, to take advantage of the scaling benefits that come from an asynchronous approach. 
To enable your service to scale, you should choose an async implementation for I/O-intensive processing in your services. public class Service1 : IService1 { public string GetData(int value) { return string.Format("You entered: {0}", value); } public async Task<String> GetSomeDataAsync(int value) { WebClient client = new WebClient(); return await client.DownloadStringTaskAsync(""); } } You’ll notice – and this should be no surprise – that the IService1 service contract exactly matches the implementation in Service1. [ServiceContract]public interface IService1{ [OperationContract] string GetData(int value); [OperationContract] Task<string> GetSomeDataAsync(int value);} It is very beneficial to write async code on the client, although the key benefit on the client is not in scaling, but in the ability to easily write responsive and fluid user interfaces. This is achieved by enabling the client application’s message pump to process messages while awaiting work. As a result, it is important for client apps to call WCF services asynchronously. WCF 4.5 enables that with the new Task-based async support. Let’s take a look at what the service consumption experience looks like in Visual Studio 2012, for the service above. When you launch your WCF service, you’ll see the WCF Test Client. For the service above, a “wire shape” is exported to clients, as shown in the screenshot below. You’ll quickly notice that there is no mention of “Async” in the IService1 contract. Let’s take a look at the client proxy that's displayed after I add a reference to this service in a client app. Once again, you see that WCF delivers on the promise of enabling different implementation approaches within the client and service. In the screenshot below, you see that WCF generates synchronous and asynchronous methods for each of the methods in IService1, as shown in the WCF Test Client. As a client developer, I get to choose whether to call GetData or GetDataAsync, GetSomeData or GetSomeDataAsync. 
That approach enables great flexibility for client developers. If you select F12 to go to references on IService1 or any of its members, you'll see the client proxy code that was generated by WCF and will notice that the code exactly matches the shape that you see in the object browser. Last, I want to write some code that calls into my new WCF service. You can see a simple button click event handler within a WPF app below. The code creates an instance of my service, and then proceeds to call the two methods exposed on the service. To make the point on flexibility, I opted to call one method synchronously and the other method asynchronously. If this were a real app, I’d likely make both client calls asynchronous; however, you get to make that choice yourself. async void Button_Click_1(object sender, RoutedEventArgs e){ Service1Client client = new Service1Client(); // synchronous call on Service1 String result = await client.GetDataAsync(1); // asynchronous call on Service1 String result2 = client.GetSomeData(42);} The .NET Framework 4.5 is a very big step forward for cloud workloads. .NET Framework async, in particular, is very well suited to the cloud, and provides you with a new programming model for scaling your sites and services. Async can scale your site by making more efficient use of machine resources, while still enabling you to parallelize network and I/O operations. The new model is also a win from a coding perspective, since you have the full support of Visual Studio 2012 and the managed language and class libraries within the .NET Framework 4.5. Async helps you make better use of the machines or virtual machines that you are already using, without much effort on your part The big news is that the .NET Framework 4.5 is now supported in Windows Azure. I know that a lot of you have been waiting for that! Simply deploy your apps on the Windows Azure services that we covered above: Cloud Services, Windows Azure Web Sites, and Virtual Machines. 
After you’ve deployed your app, you should consider taking advantage of the new async APIs in the .NET Framework 4.5, and the new Task-based asynchronous programming model. Async has an addictive nature to it. Once you start using it and realizing the benefits, you'll want to use it everywhere. Go right ahead! I am pretty much clear about the asynchronous programming model on the Windows Azure platform after reading this blog. This is greatly appreciated. Very good article and awesome improvements!
http://blogs.msdn.com/b/dotnet/archive/2012/11/13/the-net-framework-4-5-is-optimized-for-the-cloud.aspx
CC-MAIN-2015-06
refinedweb
3,927
66.33
FormBuilder 2 is a Craft CMS plugin that lets you create & manage forms for your front-end. Entries get stored to the database so you can easily view your submissions or export them. Take a look at sampleForm.twig for the required code. You will need to copy everything in that file and paste it where you want to display your form. There are 2 options for spam protection, the Time Submissions and Honeypot methods. Time submission will prevent spam bots from submitting the form too quickly. You need to enter a time (seconds) it should take a real person to submit a form. Typically 3 seconds is good enough. With honeypot there is a hidden field that should be left blank. When spam bots run through the form they tend to fill out all the fields. If the honeypot field gets filled, the submission will fail. Real people will not be able to fill out this field so the form will submit successfully. Create an Asset field and add it to your form. You need to select the Has File Uploads box when creating your form to make sure files get submitted. If you have Email Notifications turned on, file uploads will be attached to the email. Templates have been added. Now you can visually set up email notification templates for admins or the submitter. Currently with limited customization options, but with feedback I'm sure we will add more. Check out this video for a quick overview. Hit Create New Form to create a new form. Form Name- Give your form a name Handle- Will be autogenerated, you can change it to a custom one Use Custom Redirect- Select this option if you want a custom redirect page (thank-you page). The Redirect URL is required and is relative to your domain. Has File Uploads- Select this if your form has file uploads Use AJAX- Select this if you want your form to submit using javascript without a page refresh. Note: You can't use Has File Uploads and Ajax together yet (working on it). Timed Submissions- Select this and set time in seconds.
Timed submissions will prevent quick spam robots from submitting too quickly. Honeypot- Select this option to try and catch spam robots using a honeypot method. The hidden field in the form needs to be left empty; if filled, the form will fail (most robots will fill this field). Success Message- Enter the form success message. If your form has a custom redirect this is useless. Error Message- Enter a generic error message; if the submission has errors this will be displayed along with the field-specific error message (Ex: Your Name cannot be empty). Notify Admin of Submission- Select this if you want to send an email notification of the submission. Notification Email- Enter the email where the notification should be sent to. You can enter multiple emails separated by a comma. Include Submission Data- You can tick this option if you would like the email to contain the submission content. Email Template- You can pick if you want the email to be Text Only or an HTML Template. Text Only- With this option you can enter optional Body Copy and Footer Copy. HTML Template- With this option you can upload a custom logo, give the email template a custom background color, give a container width as well as optional Body Copy and Footer Copy. Add Fieldset Tab- Click this to add a fieldset. Then drag and drop fields into those fieldsets. You can also drag predefined fieldsets with fields from the Unused Fields section. If you don't see any fields you will need to head over to admin/settings/fields to create some fields. Here's a list of currently supported fields. Unlike FormBuilder there are no more custom fieldtypes, you only need to use Craft's predefined fields. If you want to have custom markup for your rendered fields follow these steps to achieve it. Go to the Fields tab and select the Custom Template option. Templates are relative to craft/templates. For example, for forms/text, place text.html or text.twig into craft/templates/forms/. You can pass submission data to a custom redirect page; here is a snippet of code for getting started.
{% set submissionId = craft.request.getCookie('formBuilder2SubmissionId') %} {% set submission = null %} {% if submissionId %} {% set submission = craft.formBuilder2.getFormById(submissionId.value) %} {% endif %} {% if submission %} {{ submission.form |inspect }} {{ submission.data |inspect }} {% endif %} Use submission.form to get form information and submission.data to get submission information. submission.form.id- Form ID submission.form.title- Form Name submission.data- Holds submission data...so if your form had a field with handle name yourEmail you can call submission.data.yourEmail to get your string. You can use a custom formbuilder2:submit event to check for ajax submissions: document.addEventListener("formbuilder:submit", formBuilderSubmission, false); function formBuilderSubmission(e) { var details = e.details; var response = e.detail.response; var success = e.detail.response.success; } For example, if you want to track successful submissions you can: document.addEventListener("formbuilder:submit", successfulSubmission, false); function successfulSubmission(e) { var success = e.detail.response.success; if (success) { // add your tracking code here } } You can now add "Terms & Conditions" to your forms. In your form settings, there is a new tab called Extra. You will need to update your front-end code to use this functionality. Add the following to your code right above the notifications div. You can also check out the sampleForm.twig file for examples. {% if form.extra['termsAndConditions'] is defined and form.extra['termsAndConditions'] %} {{ craft.formBuilder2.getTermsInputs(form) |raw }} {% endif %} Refer to releases.json for updates. If you have found a bug or would like to request a feature please use Github's Issues to report and track issues. Copyright (c) 2014 Roundhouse.
https://awesomeopensource.com/project/roundhouse/FormBuilder-2-Craft-CMS
CC-MAIN-2022-27
refinedweb
919
60.11
Study On The Investment Appraisal Accounting Essay Published: Last Edited: This essay has been submitted by a student. This is not an example of the work written by our professional essay writers. Investment appraisal is the evaluation of the attractiveness of an investment proposal, using methods such as average rate of return, internal rate of return (IRR), net present value (NPV), or payback period. Investment appraisal is an integral part of capital budgeting, and is applicable even to areas where the returns may not be easily quantified. Investment appraisal therefore assumes that the investment will yield future income streams. Investment appraisal is all about assessing those income streams against the cost of the investment. To make a more informed decision, more sophisticated techniques need to be used, taking into account the importance of the time value of money. The choice of technique depends on the degree of simplicity required, the degree of accuracy required, the extent to which future cash flows can be measured accurately, and the necessity of factoring in the effects of inflation. As project B has a shorter payback period (less than 3 years) than project A (more than 3 years), project B should be accepted. What are the criticisms of the payback period? Payback refers to the number of years expected to be taken to recover the cost of the initial investment. Following are the criticisms of the payback period: it lacks objectivity and ignores the time value of money, it ignores the time profile of the net cash inflows and any time pattern in the net investment outlays, it takes no account of the total profitability over the whole life of the investment, and cash received after payback is completed is totally ignored.
So, it focuses on breaking even rather than on profitability. It lacks objectivity and ignores the time value of money, and no effort is made to relate the total cash earned on the investment to the amount invested. Net Present Value The net present value method determines whether a project's rate of return is equal to, higher than, or lower than the desired rate of return. All cash flows from a project are discounted to their present value using the company's desired rate of return. Subtracting the total present value of all cash outflows of an investment project from the total present value of all cash inflows gives the net present value (NPV). Net present value allows you to value a company's assets at their correct current value, usually at year end when the accounts are prepared. The calculation of net present value takes into account the original cost less all accumulated depreciation allowed against that asset in previous tax computations. Using this concept of the time value of money, you can determine the net present value (NPV) for a particular investment as the sum of the annual cash flows discounted for any delay in receiving them, minus the investment outlay. The Net Present Value is the present value of the net cash inflows less the project's initial investment outlay. The main NPV decision rules - - Projects with a positive NPV should be accepted. - Projects with a negative NPV should be rejected. In addition, project B will probably be chosen in preference to project A as it has a higher NPV. Describe the logic behind the NPV approach: The NPV method recognizes that cash received today is preferable to cash receivable sometime in the future. There is more risk in having to wait for future cash receipts, and while a smaller sum may be obtained now, at least it is available for other purposes (it could be reinvested for future years and compound into a higher value).
This method is calculated based on the widespread acceptance of Discounted Cash Flows (DCF) method. DCF method recognizes that the value of money is subject to a time preference, that is, that £1 today is preferred to £1 in the future unless the delay in receiving £1 in the future is compensated by an interest factor, expressed as a discount rate. In simple terms, the DCF method attempts to evaluate an investment proposal by comparing the net cash flows accruing over the life of the investment at their present value with the value of funds about to be invested. Thus by comparing like with like it is possible to calculate the rate of return on the investment in a realistic manner.(ref:)INTERNAL RATE OF RETURN (IRR) The internal rate of return (IRR) is a rate of return used in capital budgeting to measure and compare the profitability of investment. It also called the discounted cash flow rate of return or simply the rate of return. IRR is discounted rate of return derived based on the condition that net present value for an investment is 0. IRR is then compared to the company's discounted rate of return. If IRR is higher than. Determine the IRR for each project. Should they be accepted? The interest rate which, when used as the discount rate for a series of cash flows, gives a net present value of zero. In other word, IRR is a cost of capital at which NPV = 0. For the calculation of IRR the first step is to select two discount factors, and then calculate the net present value of the project using both factors. One of the factors should produce a positive NPV, and the other a negative NPV. Suppose, two discount factors are 20% & 22%. In both cases IRR are greater than cost of capital. So both projects should be accepted as NPV here is positive. How does a change in the cost of capital affect the project's IRR? Without modification, IRR does not account for changing the cost of capital. NPV =0 = -110000 + 40000/(1+ IRR) +40000 /(1 + IRR)2....... 
NPV =0 when IRR = 23.8% Solving for NPV using MIRR, we will replace the IRR with our MIRR = cost of capital of 12%: NPV = -110000 + 40000/(1+ .12) + 40000/(1 + .12)2......... NPV =34200whenMIRR = 12% So any change (modified or replaced with MIRR) in the cost of capital, where NPV=0, would lead to a change in the NPV, but not the IRR. So any change in the cost of capital does not influence the IRR. Why is the NPV method often regarded to be superior to the IRR method? arriving at a particular decision and making a comment regarding the superiority of NPV or IRR, Let's proceed on with a comparison between these two renowned methods of capital investment Before appraisals - In case of calculating the IRR, the main difficulty arises for selecting two discount rates with a range as narrow as possible will give a positive and negative NPV. Using a trial-and-error method unless a computer may be a time consuming matter. - The major limitation of IRR is it's one single discount rate whereas each cash flow of NPV can be discounted with multiple discount rates without any problem. - NPV method emphasises on cash flows rather than on profitability because cash flows making the positive NPV results in the maximization of the shareholders' wealth. - The advantage of the NPV method is the simplicity with which the results are stated. As it is shown in calculation above, with the NPV method, the expected results are expressed in terms of pounds which directly reflect the increased wealth position. The internal rate of return, on the other hand, produces a result which is shown as a percentage, and this result has to be compared with a minimum required rate of return before a decision may be made. 
( M W E Glautier & B Underdown, 2007) - Where a project is financed by raising a loan, the IRR method envisages that the cash surpluses will be reinvested at the IRR discounting rate, whereas the NPV method envisages that they will be reinvested at the minimum acceptable rate of return used in that method. Thus, the advantage of the NPV method is that it makes more realistic assumptions about reinvestment opportunities. ( M W E Glautier & B Underdown, 2007) - If there are irregularities in cash flows over subsequent years due to market conditions, multiple rates of return used to make the project break even produce multiple IRRs. In the case of mutually exclusive projects, IRR can give some misleading results as well. In both cases NPV is free from such problems.
https://www.ukessays.com/essays/accounting/study-on-the-investment-appraisal-accounting-essay.php
CC-MAIN-2017-13
refinedweb
1,382
57.91
propeller2.h for C compilers in Propeller 2 Now that there are several C compilers working for P2, it seems like we should try to get consensus on some things. It'd be nice if code could be easily ported between compilers. It'll be very confusing and frustrating for users if there isn't at least a baseline commonality between C compilers on the platform (naturally each compiler will have its own particular features and advantages, but lots of code *should* be portable between them!).: extern void _drvh(unsigned p); // set pin p as output and drive it high extern unsigned _testp(unsigned p); // test pin p for input // and so onNot every intrinsic will map to just one instruction, e.g. _coginit will have to do a "setq" for the parameter to pass to the COG. We'll probably also want accessor functions for things like the builtin COG registers, counter, and so forth. I've been bumping into this issue with Spin2. I have yet to add the mode-specific smart pin intrinsics, but I'm thinking it will be necessary to have the new silicon in hand to make sure I've named things well and covered the functions properly. Having these in every language would go a long way to standardize things. I'll make a list of what I've got, so far, and post it in this thread. I totally agree when you say that there should be consensus. Regarding your point 1, I think the macro should be named "-D__propeller2__". It aligns perfectly with the name of the library, which is "propeller2.h". On another note, functions that address pins should use the smallest variables possibles. For example, "_Bool getpin(unsigned char pin)" instead of "int getpin(int pin)". Or, another example, "void togpin(unsigned char pin)" instead of "void getpin(int pin)". I'm not assuming that getpin() and togpin() exist or will exist, but these are just examples of functions that don't need 32 bit integers. If I recall correctly, there are equivalent functions inside the existing "propeller.h" that use "int". 
Better yet, the new lib could use int8_t, int16_t or int32_t (and also int64_t, if possible), when dealing with pins or memory addresses. It is more correct and very portable. It also shows your intention either as a developer or as a programmer. Although intx_t types correspond to unsigned long, int, short and char primitives, their correspondence to primitives always depends on the host and target system (tall order here, but feasible). In this case, being the P2 is always the target of choice, you can define the intx_t types accordingly, so that they have their fixed size as described (you only have to take into account if the host system is 32-bit or 64-bit, as "long int" size will vary, either 32-bit or 64-bit, IIRC). The types "size_t" and "ssize_t" could be implemented as well. Kind regards, Samuel Lourenço I'm going to have to disagree with you here. A minor point is that "_Bool" is a C99 invention, and if I recall correctly Catalina only supports C89; this could be worked around. The more serious concern is that "int" and "unsigned int" are by definition the natural and most efficient sizes for the platform. Casting them to/from "char" could require code to be inserted by the compiler. That is: needs to be compiled like: where the cast to (unsigned char) masks out the upper bits, and the cast back to (int) does zero extension. That's adding unnecessary instructions to the operation. "unsigned char" is already wider than we need, so it doesn't really add any error checking. I think we should allow the compiler the freedom to use the most efficient integer size. For current P2 compilers that's 32 bits, but one could imagine a compiler designed to save memory in which the most efficient size might be different. "int" and "unsigned int" are required by the standard to be at least 16 bits, but otherwise they're supposed to be whatever the compiler deems "best". Regards, Eric On first glance it mostly looks ok. 
I am ok with predefining __propeller2__ on the P2 and with using an underscore to lead function names (this is pretty standard for C). I am not so sure about using structures. Catalina is fine with this, but it might impede users coming from Spin. I think we should probably just stick with "unsigned int" for compatibility reasons. Sophisticated C users can use unions (which can be defined in the header files) to decode the values, but the functions themselves should return the same values as the Spin equivalents. If you want to drive people away from using C on Propeller, by all means use the ridiculous C99 types like "_Bool" and "uint32_t" The structures would only be for functions which return multiple values. Spin2 allows a function to return more than one result; for example to rotate x,y by an angle you could write something like the following in Spin2: which would set both x and y to the 2 values returned by "rotxy". It looks like a few of the functions Chip has proposed are like this, and it makes sense in the context (where the CORDIC does return two 32 bit results). To do this in C I think we have a few choices: (a) use a struct for the returned result: This is the most natural way in C to represent a multi-valued return, I think, and produces good code in GCC. fastspin's struct handling in C is still pretty shaky, but if we go this way it'll give me more incentive to get that working properly (b) pass a pointer for one of the results: This is also fairly natural C, and works well if _rotxy is a function, but it does make for memory traffic so is potentially less efficient (the "struct" version can keep everything in registers, but this one has to force y into HUB memory). (c) return a 64 bit result in "unsigned long long", with the first result in the low 32 bits and second in the high 32 bits: This should also be quite efficient, but it only handles the case of two returned values. 
This may be all we need, but in principle Spin2 can allow for more than that (I'm not sure what the limit is in Chip's compiler, but fastspin allows up to eight). I'm also not sure if we should force all C compilers for P2 to implement "unsigned long long" as 64 bits. Another possibility is that we could hide the actual method of multiple assignment "under the hood" with a macro, something like: I'm a little uncomfortable with this because it looks like a function call but doesn't act like one (the first arguments are modified even though they don't have & in front of them). If we were working in C++ we could use references so it wouldn't be an issue, but I want the header file to work in plain C as well. My own preference is for option (a), but I'm not married to any of them. If the other compiler writers all agree on some other standard then I'll go along with the consensus. Yes, in general we want an abstract API, so this discussion is a subset of that one. It seems like a tractable subset. i.e. one that we can perhaps come to agreement on fairly quickly, I hope. I actually have a use case right now for propeller2.h. I have a VGA driver that I'd like to have working in all of the C compilers; I've got it working in fastspin, RiscV, and p2gcc, but I had to hack up a header file for p2gcc, and then realized that I had no idea how to port it to Catalina as well. Having a single header file that would cover all 4 compilers would make this much nicer. Sure. Use whatever names you like. Aargh! I didn't know about that. That is really ugly. Spin needs proper types, not this kind of typographical workaround for their absence. So yes, I guess we can at least ameliorate this madness in C by using functions that return structures. There is a good reason why this facility is rarely used in C, but in this case I guess it is a better solution than any of the alternatives That makes sense. 
I'll be using the functions that the library has to offer, instead of changing the registers myself, then. I've made the wrong assumption that my code would be more efficient. Actually, I've resorted to recreate functions to change pins just because the ones in the library used "int" types, in the line of that wrong assumption. Why is it ridiculous. Ridiculous it to have a standard that vaguely describes the size of ints. And then you get things like longs having 32-bit or 64-bit, depending on the machine. I'm aware that uintx_t types are derived from these primitives, but they are a step in the right direction. As an example, check the code attached. Would you implement this using primitives in which the size is not guaranteed to be known? Kind regards, Samuel Lourenço Good example. The type "unsigned char" must exist according to the C standard. The type uint8_t need not. In the case of your code, you would of course simply redefine uint8_t to be unsigned char, and probably get away with it. But why should you need to do so? Beauty is in the eye of the beholder. How would proper types resolve the issue of needing to return multiple values when rotating cartesian coordinates? About the primitive sizes, the only thing that the C specification guarantees that a char is the smallest type (can be signed or unsigned, signedness is ambiguous with chars without the "signed" or "unsigned" modifiers). A short can larger or equal in size to a char. An int can be larger or equal in size to a short. A long can be larger or equal in size to an int. A long long can be larger or equal in size to a long. This is pretty ambiguous, and a nightmare to deal with. Thus, long has the same size of an int (32-bit) on a 32-bit machine, and has the same size of a long long (64-bit) on a 64-bit machine. If I recall correctly, int can have the same size of a short (16-bit) on a 16-bit machine. Caveats, caveats, caveats! 
Having said that, I understand why many people avoid uintx_t types, or _t types in general, like the plague: essentially because they are derived types. But by a different reason and in the same measure, ambiguous types like int, short or long, should be avoided, especially at low level, hardware related stuff. Each type is useful on its own context. Kind regards, Samuel Lourenço Anyway, most users are not dumb, and those that are don't have the capacity to learn anything, no matter how much "simplified" is the language or use. Plus, inclusivity and over-simplification promotes lack of versatility and stifles creativity. Many people use their Arduinos to blink a LED, in a 555 timer fashion, and not much more than that. I see far more potential on the P2. Kind regards, Samuel Lourenço Let's not get into language wars. I know that C can do everything that Spin2 can do (and vice versa!), because I've written compilers for P2 for both of them. Some things are easier to express in C, some are easier to express in Spin2. Not every person likes every language, and not every language is the "best" tool for every task. Anyway, it's all moot -- this discussion is specifically about making the various C compilers for P2 work well together, so the C language is a given here. If you don't like C, please avoid this thread Speaking for myself, I wouldn't be willing to develop for P2 without C, for two reasons: - C is like a second language for me, and a good platform is provided for Propeller; - Spin requires me to learn it, and if I'm not mistaken, it is interpreted using bytecodes (not my cup of tea). In a nutshell, C is a mandatory requisite for me. Without it, I wouldn't consider P2 as an option. In fact, non-proprietary C support was one of the main reasons I choose P1. kind regards, Samuel Lourenço I think you are on the right track. I cannot comment about C since I've never really done much C code. 
Chip is going to implement the multiple return values in spin2 because its required. (x,y) = some_call(a,b) Python also has a similar set of calls from little I know yet - still learning as I go with my work job. So to me, it would make sense to support this type of call. And yes please, use the same propeller.h header for all the languages. Fastspin is fast becoming the compiler of choice to compile spin, C, micropython, basic, and spin2 shortly. This amazing effort is bringing all these languages together, and can output pasm or pasm2, and micropython, and hopefully spin2 bytecode in the future. Let's get behind Eric who is doing amazing things here Sure you can have C functions that modify or operate on more than one variable at the same time (by passing these variables by address). The call would be different. It would be something in the line of "some_call (a, b, &x, &y)", being some_call a void function, and "x", "y" the output variables that are being passed by address. Alternatively, you can have "x = some_call_x (a, b); y = some_call_y (a, b)" if you don't mind having separate functions for x and y. Regarding your last point, mind that "propeller.h" is C/C++ specific. You can write similar headers/include files for other languages though, but they will be specific for the language they target. You can't therefore have the same header/include file universal to every language. You can have, and should have, the same header file fit for different C/C++ compilers, if that's what you mean. Kind regards, Samuel Lourenço For now, I'd like to be able to write some C code for the P2 that all the compilers (Catalina, fastspin, p2gcc, RiscV gcc) can use. If we can make the standard C functions for P2 as similar as possible to what Chip's doing for Spin2, so much the better -- it will lessen the learning curve for those going between those languages. 
The struct style "IntVec2 foo(int x,int y)" is perhaps the most nice-looking and helps reduce the amount of variables that need declaring ("IntVec2 pos1,pos2;" instead of "int pos1x,pos1y,pos2x,pos2y;"). OTOH, in C, you can't overload a version that takes the struct as its parameter, so you'd have to do some minor jank like this: "pos2 = foo(pos1.x,pos1.y)" The "pass two pointers" style "void foo(int x, int y, int *xr, int *yr)" is also good, since it is very flexible (you can write the results into separate variables, a struct, an interleaved array, two separate arrays, etc, all without too much jank). Though it requires more work on the compiler side to optimize out the hub access where possible. The "pass one pointer, return the other" style "int foo(int x, int y, int *yr)" is annoyingly asymmetric (for working with vectors where the elements are roughly equivalent), but otherwise the same as "pass two pointers". The "return a 64 bit value" style "long long foo(int x,int y)" is just terrible and offers no advantage over the struct-return (IIRC GCC implements oversized types similarly to structs, unless the ABI says otherwise) You have answered your own question. The function accepts - and returns - a "cartesian coordinate", not arbitrary values. In most languages, you would define a "cartesian coordinate" type. It is not "required", it is a syntactic fix for a language deficiency. But it breaks part of the implicit contract that a computer language - any language - has with the programmer. In this case, the implicit contract defined by having "functions" at all. To see why, consider a function call like: x = f1(a, f2(b,c), f3(d,e), f4(f)). Does f1 take 4 parameters? You can no longer tell at a glance, because you don't know how many values the other functions return. You can't even tell which parameter f3 supplies. It could be the third, the fourth, the third and fourth, or the fourth and the fifth.
You need to look up the definition of every preceding function in the call to figure this out. And if you ever change the number of parameters a function returns, you will need to go searching for every use of that function, to see what you may break. The problem gets worse if your language allows variadic functions, because the compiler will not be able to help you. Also, are you really gaining anything? How do you use just one of the results of a function that returns two values (a common requirement - think of q,r=div(a,b) where in most cases you only want one of q or r). So do you have 3 separate functions? - i.e. one that returns just q, one that returns just r, and one that returns both? Or do you add a selector function that selects which result you want? e.g. q = select(div(a,b), 1), r = select(div(a,b),2) Note that I am assuming that a function that returns two values can be used in place of two parameters. If it cannot, then that breaks the implicit function contract in a different way, which is that a function result can be used anywhere where the type of value it returns can appear. Essentially, we have introduced two non-interchangeable types of functions. Either way, it may look pretty at first glance, but in fact it gets ugly fast Then you can call it essentially by doing so, for example: Note that rotate() stores the results in x_r, y_r. Their addresses are passed to the function, that then changes their values. In the function definition, the function takes two int vars x and y, which are the initial cartesian coordinates that have as values 90 and 60 respectively. It also accepts two int pointers *xr and *yr that are to be supplied with addresses of the variables x_r, y_r, where you store the results. If you don't need to preserve the original coordinates after rotation, you can simplify the declaration: And the example call as well: The values are also fed via the same variables in which the results are stored.
However, this is over-simplified, and a bit far from ideal, IMHO. kind regards, Samuel Lourenço which would then need to be packed and unpacked by both calling and called functions. I think "most languages" is a difficult claim to make or prove, and it probably isn't worth the effort. "x" and "y" are neither more nor less ugly than "p.x" and "p.y", but whether they make more or less sense depends very much on your previous programming experience. Defining a struct for a point with elements x & y, and making the function accept and return these structs as parameters is essentially the same thing as defining a "Cartesian coordinate" type, yet it isn't even what was being proposed. Why pass "arbitrary values" x & y into the function and return a struct and have to unpack it to "arbitrary values"? when it should really be more like: Consistency in implementation is more beautiful to me than any particular implementation. I did really like the idea of multiple return parameters, but never thought of the implication of using the return values as parameter. Being more on the spin side as C I just assumed longs and that they then replace n parameter. But parsing that might be a pain, not just for the compiler but for the guy reading the code. I have that problem sometimes, 'what the hell was this guy thinking' when reading code, sadly even with code I wrote by myself. (a,b):=qrot(c,d) is easy to read and understand. x = f1(a, f2(b,c), f3(d,e), f4(f)). as in his example not so much. Maybe the solution is to allow structs and types in spin? Implicit long if not declared to be compatible with old Spin, else done like types in fastspin basic? Floating point support in Spin would make sense, having cordic and such, fastspin has the support already for Basic and C, it would just need to define a syntax for spin and get chip to use that too. just. What do I know how much work that is, I just assume that @ersmith and @RossH can do magic. 
I personally would prefer a RETURN A,B compared to defining return parameters as output in the function description. Mike After all, if f2() returned a struct containing multiple values, or a pointer to a memory block containing multiple values, you still can't really tell how many parameters f1() actually takes, without examining the code of f1(). I don't think it's a valid assumption. In your example there is some sense to using only quotient or remainder, or both; but for the rotation there are far fewer examples where that makes sense. If f2() above actually took "b" as input, and "c" was a pointer to a secondary output, how can a casual glance inform the programmer what f1() will do with the result stored in c? C is not immune to getting ugly fast either.... Edit: clause in italics added.
http://forums.parallax.com/discussion/comment/1472417/
CC-MAIN-2021-10
refinedweb
3,710
69.01
Easy Driver So, you have some new kit for your Arduino – maybe some sensors or ICs – but programming them is clumsy and painful. Don't despair: You can make your life easier by writing your own drivers! Lead Image © studiostocks, 123RF.com So, you have some new kit for your Arduino – maybe some sensors or ICs – but programming them is clumsy and painful. Don't despair: You can make your life easier by writing your own drivers! Writing drivers (or libraries) for Arduino is not hard – quite the contrary. And, it can make your sketches clearer and reduce the line count considerably. Consider the driver for the MC14051B [1] in this article, for example. I talked about this little critter in a previous article [2] (which you may want to read before tackling this one) and how it's easy for Arduino sketches to get out of hand because of the number of lines you need for even the simplest of set ups. As mentioned previously, the MC14051B (Figure 1) gives you eight analog ports that you can connect to from one analog pin on your Arduino (Figure 2). You will also have to use four digital pins: three to tell the MC14051B which of its eight ports you're going to use (using binary), and one for the inhibitor pin that you use to reset the chip. Yet, eight analog ports in exchange for one analog and four digital pins is still a bargain. The tradeoff was that sketches that used the multiplexer were long and clunky. Look at Listing 1, for example. This sketch reads in values from one sensor connected to the MC14051B's X3 port – I repeat, one (1) sensor – and it is already 37 lines long! Add new sensors or output devices, and the number of lines balloons. 
Listing 1 MC14051B Sketch Without Library 01 #include "Wire.h" 02 03 int controlPin=7; 04 int A=8; 05 int B=9; 06 int C=10; 07 int X=A0; 08 09 void setup () 10 { 11 12 pinMode(controlPin, OUTPUT); 13 pinMode(A, OUTPUT); 14 pinMode(B, OUTPUT); 15 pinMode(C, OUTPUT); 16 pinMode(X, INPUT); 17 18 Serial.begin(9600); 19 } 20 21 void loop () 22 { 23 digitalWrite(A, 1); 24 digitalWrite(B, 1); 25 digitalWrite(C, 0); 26 27 Serial.println(analogRead(X)); 28 29 reset(); 30 } 31 32 void reset() 33 { 34 digitalWrite(controlPin, HIGH); 35 delay(10); 36 digitalWrite(controlPin, LOW); 37 } A much cleaner and neater version using a library is what you can see in Listing 2. It is only 16 lines long, and you can understand better what's going on. It also makes writing scripts more intuitive, because it is easier to remember that result=expander.mcAnalogRead(3); reads from the MC14051B's X3 port than having to write digitalWrite(8, 1); digitalWrite(9, 1); digitalWrite(10, 0); result=analogRead(A0); Right? However, the MC14051B driver doesn't exist yet. That shouldn't be a problem. I'll explain how to write it. Listing 2 MC14051B Sketch with Library 01 #include <MC14051B.h> 02 #include "Wire.h" 03 04 int ABCpins[] = {8, 9, 10}; 05 MC14051B expander(7, ABCpins, A0); 06 07 void setup () 08 { 09 Serial.begin(9600); 10 } 11 12 void loop () 13 { 14 Serial.println(expander.mcAnalogRead(3)); 15 expander.reset(); 16 } For an Arduino driver you're always going to need at least two files: a header file (with a .h extension), which contains the definition of the class that implements the driver, and the code file (with a .cpp extension), which contains the code proper. First, create a directory with the name of your library. For this example, you can use MC14051B. Next, cd into the directory you just made (or visit it with your file browser) and create two plain-text files inside, one called MC14051B.h and the other MC14051B.cpp. 
Open MC14051B.h with your favorite text editor and type in what you see in Listing 3. Listing 3 A Library Header Skeleton 01 #ifndef MC14051B_h 02 #define MC14051B_h 03 04 #include "Arduino.h" 05 06 class MC14051B 07 { 08 public: 09 MC14051B(); 10 ~MC14051B(); 11 12 private: 13 }; 14 15 #endif Arduino libraries are written in C++, and this is the most basic skeleton of a library header file. It defines the class that will give you the features you need. Here, the #ifndef … #endif combo checks to see whether MC14051B_h is defined; if not, it defines it by declaring the MC14051B class. Next, you must always include the Arduino.h library. This will drag in all the Arduino programming goodies that will make your life much easier. As for the class itself, so far, you have the class constructor ( MC14051B()) and the class destructor ( ~MC14051B()). The class constructor would usually contain the code that initializes whatever you want the class to do. To initialize the MC14051B, your class will need to know into which pin on your Arduino the MC14051B's inhibitor port is plugged and the pins on your Arduino that are connected to the MC14051B's A, B, C, and X ports. To do this, change the declaration of the constructor to: MC14051B(int controlPin, int ABCPin[3], int X); The destructor will just need to deactivate the MC14051B. Because this is done through the chip's inhibitor port, and you already passed that onto the class via the constructor, it doesn't need any parameters. Of course, you will also want a function to read from the chip and another to write to the chip. Add the following lines below the declaration of the destructor: int mcAnalogRead(char pin); void mcAnalogWrite(char pin, int value); The mcAnalogRead() and mcAnalogWrite() functions need to know the port number the MC14051B has to read from. The mcAnalogWrite() function also needs to know what you want to write to the port. 
I'll explain how to convert the integer pin parameter into a set of 0s and 1s for the A, B, and C ports later. The last public method you need is a reset(). This resets the MC14051B by stopping and flushing data from the chip and then switching the flow back on again. Add the line void reset(); to the class's public section. You also need to declare the variables you will use within the class. You need an array to store the Arduino pin numbers connected to the MC14051B's A, B, and C ports and integers for the X and control pins (i.e., the pin connected to the inhibitor). Many programmers signal that a variable is internal to a class by starting it with an underscore ( _), so I'll do that for clarity's sake. Add the following lines to the private section in the class declaration: int _ABC[3]; int _X; int _control; Finally, you need a module that will figure out which of the A, B, and C ports to set to HIGH and which to LOW given the value of the pin parameter passed to the read and write modules you saw above. Add the following to the end of the private section: void setABCPin(char pin); In the end, your final header file should look similar to Listing 4. Listing 4 MC14051B.h 01 #ifndef MC14051B_h 02 #define MC14051B_h 03 04 #include "Arduino.h" 05 06 class MC14051B 07 { 08 public: 09 MC14051B(int controlPin, int ABCPin[3], int X); 10 ~MC14051B(); 11 int mcAnalogRead(char pin); 12 void mcAnalogWrite(char pin, int value); 13 void reset(); 14 15 private: 16 int _ABC[3]; 17 int _X; 18 int _control; 19 void setABCPin(char pin); 20 }; 21 22 #endif Implementing the code for the class itself in MC14051B.cpp is pretty straightforward. Take a look at Listing 5. 
Listing 5 MC14051B.cpp 01 #include "Arduino.h" 02 #include "MC14051B.h" 03 04 MC14051B::MC14051B(int controlPin, int ABCPin[3], int X) 05 { 06 int i; 07 08 pinMode(controlPin, OUTPUT); 09 10 for (i=0; i<3 ; i++) 11 { 12 pinMode(ABCPin[i], OUTPUT); 13 _ABC[i]=ABCPin[i]; 14 } 15 _control=controlPin; 16 _X=X; 17 } 18 19 int MC14051B::mcAnalogRead(char pin) 20 { 21 pinMode(_X, INPUT); 22 23 setABCPin(pin); 24 return(analogRead(_X)); 25 } 26 27 void MC14051B::mcAnalogWrite(char pin, int value) 28 { 29 pinMode(_X, OUTPUT); 30 31 setABCPin(pin); 32 analogWrite(_X, value); 33 } 34 35 void MC14051B::setABCPin(char pin) 36 { 37 int i; 38 39 for (i=0; i<3; i++) 40 { 41 digitalWrite(_ABC[i], bitRead(pin, i)); 42 } 43 } 44 45 void MC14051B::reset() 46 { 47 digitalWrite(_control, HIGH); 48 delay(10); 49 digitalWrite(_control, LOW); 50 } 51 52 MC14051B::~MC14051B() 53 { 54 digitalWrite(_control, HIGH); 55 } The MC14051B() constructor (lines 4--17) reads the parameters and assigns them to internal class variables and does a bit of housework, such as setting some pins to OUTPUT. The mcAnalogRead() module (lines 19--25) does what it says on the package: it first sets the _X pin to INPUT so you can read in from it. Then, it sends off the pin parameter to setABCPin() to set the A, B, and C pins to HIGH and LOW. Once that is done, it reads in and returns the value available on the MC14051B's X port. The mcAnalogWrite() module (lines 27--33) is equally simple. It sets _X to OUTPUT and pushes the contents of value through X to pin on the MC14051B. The setABCPin() module (lines 35--43) picks out the 0s and 1s from the pin parameter using Arduino's bitRead() function. These are then pushed to the A, B, and C pins to set which port on the MC14051B you're going to read from or write to. Check out the different combinations in Table 1. Table 1 A, B, and C Combinations For a detailed explanation on how this works, again, check out my previous article on the MC14051B [2].
The reset() function (lines 45--50) sets the inhibitor port on the MC14051B to HIGH, which stops the flow of data through the chip. It waits a few milliseconds, then sets the inhibitor to LOW again, which opens up the chip to more data. The ~MC14051B() destructor simply closes the chip down by poking a HIGH to the inhibitor port. To install your library, copy the whole directory containing MC14051B.h and MC14051B.cpp to your Arduino/libraries/ directory. Then, the next time you run the Arduino IDE, you will find your library under Sketch > Include library, at the bottom of the list, under Contributed libraries. To check everything is okay and your library compiles, you can create a bare-bones sketch like the one shown in Listing 6 in your Arduino IDE (Figure 3). Listing 6 Test Library 01 #include <MC14051B.h> 02 void setup (){} 03 void loop (){} Hit the Verify button (the one with the tick in the toolbar), and your library will be compiled. Any errors in your code will show up in the bottom half of the IDE in red. Of course, uploading this sketch to an actual Arduino is pointless, because it doesn't do anything. Also, this sketch will not reveal any problems with the logic in your code; it only shows syntactical mistakes. The proof of the logic pudding is in the eating – that is, running the code – so here goes! Pages: 6 Price $15.99 (incl. VAT)
https://www.raspberry-pi-geek.com/Archive/2016/15/Write-your-own-drivers-for-Arduino
CC-MAIN-2021-21
refinedweb
1,909
69.72
To display millisecond after second what is the format? how do we link the date we entered to the object java.util.Date()????? Post your Comment Date Formatter in JSP Date Formatter in JSP This section illustrates you how to use date formatter. To display the date in different formats, we have used DateFormat class. This class provides Simple date formatter example Simple date formatter example In this section of simple date formatter example we... class to convert the date into the specified format. There are various constructors Creating a Custom Formatter for a Logger Handler in Java , formatter provides the facility to write date into file in 'heading 1' format... Creating a Custom Formatter for a Logger Handler in Java... a custom formatter for a logger handler. Java provides two types formatter Pattern,Matcher,Formatter and Scanner classes Pattern,Matcher,Formatter and Scanner classes This section Describes : 1... using the Formatter and Scanner classes and the PrintWriter.format/printf.... Character classes [abc] a, b, or c (simple class) [^abc Formatter args[]){ double amount = 459.6329; DecimalFormat formatter = new Simple Date Format Example Following is a simple date format example made using Format class. We have uses a pattern of special characters to date and time format. In this example... is a simple Date Format Example: import java.text.*; import java.util.*; public Setting the Formatter of a Logger Handler in Java is XMLFormatter. Simple Formatter is used to write a file with simple text while... Setting the Formatter of a Logger Handler in Java  ... the formatter of a logger handler. Java provides a logger handler that uses a formatter Date Format Example Date Format Example This example shows how to format Date using Format class. 
In this program we use a pattern of special characters to date and time format Parsing Date Example Parsing Date Example This example shows how to parsing date using Format class...; DateFormat formatter; Date date;   java - Date Calendar str_date="04-04-2008"; DateFormat formatter ; Date date ; formatter = new SimpleDateFormat("dd-MM-yyyy"); date = (Date...Java convert string to date I need an example in Java to convert NSDateformatter format Date that represent other data types. For example to show the date in string format we'll... NSDateFormatter in string format. Thanks. For current Date in NSDate... format in iPhone/iPad objective c programming. NSDateFormatter *formatter Parsing Date And Time Example Parsing Date And Time Example This example shows how to parsing date and time using Format... for formatting and parsing dates. It allows for formatting date into text , parsing DATE DATE I have the following as my known parameter Effective Date... of calcultion starts My question is how to find the date of Thursday This cycle repeats based on every cut off day Here is an example that displays Date and Time Format Example Date and Time Format Example This Example shows you date and time format... format according to the locale. Methods used in this example are described below Java Date formatter example In this section of simple date formatter example we...; Simple Date example In this section we have presented a simple Date example that shows how you can use different constructors of Date Objective c date: NSString Date example, we are going to show you how to print the current date using NSDate...* str = [formatter stringFromDate:date];  ...; NSString* str = [formatter stringFromDate:date Convert String to Date Convert String to Date In this example we are going to convert String into date... 
to create a date-time formatter with either getTimeInstance, getDateInstance Add Years To Date { DateFormat formatter = new SimpleDateFormat("dd-MM-yyyy"); Date d=(Date... there. The date as string is then parsed through this formatter and stored...Add Years To Date This tutorial explains you how to add years to date. Java HQL Date Between Example HQL Date Between Example In this tutorial I will show you how you can use... dates statement you can find the entities based on date field. In this example... Invoice Date: 2010-10-30 14:33:15.0 Download HQL Example Source Code Convert Date To Calendar ; In this example we are converting date... date/time formatter that uses the SHORT style for both date and time...; In this example we are going to get date instance from system Date Next and Previous - Java Interview Questions Date Next and Previous In the example given here, DATE is displayed in DD/MM/YYYY format. Can anybody send me the code for getting DATE in MM/DD... formatter ; Date date ; formatter = new SimpleDateFormat("dd/MM Iphone Show Current Date & Time it was: [formatter setDateFormat:@"yyyy-MM-dd"]; in date example... printed current date and time separately in iphone simulator. But in this example we.... The code is almost same...as in the previous current date example. The only Dateformat Example : Dateformat Dateformat example Dateformat tutorial Date Formatter...Dateformat Example The Java Dateformat class is used to format the date/time... the report to format the date/time in required format. To format a date Get Time And Date and parsing of date string. Understand with Example In this Tutorial we want... a string variable, formatter and instantiate a date class. 1)Date ( )  ... Get Time And Date   to display millisecondBalakrishnan March 27, 2012 at 12:49 PM To display millisecond after second what is the format? todaysDate not used!!!!!oumnya October 29, 2012 at 3:27 PM how do we link the date we entered to the object java.util.Date()????? 
Post your Comment
http://www.roseindia.net/discussion/22194-Simple-date-formatter-example.html
CC-MAIN-2014-10
refinedweb
917
57.57
import "bazil.org/fuse/fs" GenerateDynamicInode returns a dynamic inode. The parent inode and current entry name are used as the criteria for choosing a pseudorandom inode. This makes it likely the same entry will get the same inode on multiple runs. Serve serves a FUSE connection with the default settings. See Server.Serve. type Config struct { // Function to send debug log messages to. If nil, use fuse.Debug. // Note that changing this or fuse.Debug may not affect existing // calls to Serve. // // See fuse.Debug for the rules that log functions must follow. Debug func(msg interface{}) // Function to put things into context for processing the request. // The returned context must have ctx as its parent. // // Note that changing this may not affect existing calls to Serve. // // Must not retain req. WithContext func(ctx context.Context, req fuse.Request) context.Context } type FS interface { // Root is called to obtain the Node for the file system root. Root() (Node, error) } An FS is the interface required of a file system. Other FUSE requests can be handled by implementing methods from the FS* interfaces, for example FSStatfser. type FSDestroyer interface { // Destroy is called when the file system is shutting down. // // Linux only sends this request for block device backed (fuseblk) // filesystems, to allow them to flush writes to disk before the // unmount completes. Destroy() } type FSInodeGenerator interface { // GenerateInode is called to pick a dynamic inode number when it // would otherwise be 0. // // Not all filesystems bother tracking inodes, but FUSE requires // the inode to be set, and fewer duplicates in general makes UNIX // tools work better. // // Operations where the nodes may return 0 inodes include Getattr, // Setattr and ReadDir. // // If FS does not implement FSInodeGenerator, GenerateDynamicInode // is used. // // Implementing this is useful to e.g. constrain the range of // inode values used for dynamic inodes. 
GenerateInode(parentInode uint64, name string) uint64 } type FSStatfser interface { // Statfs is called to obtain file system metadata. // It should write that data to resp. Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error } A Handle is the interface required of an opened file or directory. See the documentation for type FS for general information pertaining to all methods. Other FUSE requests can be handled by implementing methods from the Handle* interfaces. The most common to implement are HandleReader, HandleReadDirer, and HandleWriter. TODO implement methods: Getlk, Setlk, Setlkw DataHandle returns a read-only Handle that satisfies reads using the given data. type HandleFlusher interface { // Flush is called each time the file or directory is closed. // Because there can be multiple file descriptors referring to a // single opened file, Flush can be called multiple times. Flush(ctx context.Context, req *fuse.FlushRequest) error } type HandleReader interface { // Read requests to read data from the handle. // // There is a page cache in the kernel that normally submits only // page-aligned reads spanning one or more pages. However, you // should not rely on this. To see individual requests as // submitted by the file system clients, set OpenDirectIO. // // Note that reads beyond the size of the file as reported by Attr // are not even attempted (except in OpenDirectIO mode). Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error } type HandleWriter interface { // Write requests to write data into the handle at the given offset. // Store the amount of data written in resp.Size. // // There is a writeback page cache in the kernel that normally submits // only page-aligned writes spanning one or more pages. However, // you should not rely on this. To see individual requests as // submitted by the file system clients, set OpenDirectIO. 
// // Writes that grow the file are expected to update the file size // (as seen through Attr). Note that file size changes are // communicated also through Setattr. Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error } type Node interface { // Attr fills attr with the standard metadata for the node. // // Fields with reasonable defaults are prepopulated. For example, // all times are set to a fixed moment when the program started. // // If Inode is left as 0, a dynamic inode number is chosen. // // The result may be cached for the duration set in Valid. Attr(ctx context.Context, attr *fuse.Attr) error } A Node is the interface required of a file or directory. See the documentation for type FS for general information pertaining to all methods. A Node must be usable as a map key, that is, it cannot be a function, map or slice. Other FUSE requests can be handled by implementing methods from the Node* interfaces, for example NodeOpener. Methods returning Node should take care to return the same Node when the result is logically the same instance. Without this, each Node will get a new NodeID, causing spurious cache invalidations, extra lookups and aliasing anomalies. This may not matter for a simple, read-only filesystem. type NodeAccesser interface { // Access checks whether the calling context has permission for // the given operations on the receiver. If so, Access should // return nil. If not, Access should return EPERM. // // Note that this call affects the result of the access(2) system // call but not the open(2) system call. If Access is not // implemented, the Node behaves as if it always returns nil // (permission granted), relying on checks in Open instead. Access(ctx context.Context, req *fuse.AccessRequest) error } type NodeCreater interface { // Create creates a new directory entry in the receiver, which // must be a directory. 
Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (Node, Handle, error) } type NodeForgetter interface { // Forget about this node. This node will not receive further // method calls. // // Forget is not necessarily seen on unmount, as all nodes are // implicitly forgotten as part of the unmount. Forget() } TODO this should be on Handle not Node type NodeGetattrer interface { // Getattr obtains the standard metadata for the receiver. // It should store that metadata in resp. // // If this method is not implemented, the attributes will be // generated based on Attr(), with zero values filled in. Getattr(ctx context.Context, req *fuse.GetattrRequest, resp *fuse.GetattrResponse) error } type NodeGetxattrer interface { // Getxattr gets an extended attribute by the given name from the // node. // // If there is no xattr by that name, returns fuse.ErrNoXattr. Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error } type NodeLinker interface { // Link creates a new directory entry in the receiver based on an // existing Node. Receiver must be a directory. Link(ctx context.Context, req *fuse.LinkRequest, old Node) (Node, error) } type NodeListxattrer interface { // Listxattr lists the extended attributes recorded for the node. Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error } type NodeOpener interface { // Open opens the receiver. After a successful open, a client // process has a file descriptor referring to this Handle. // // Open can also be called on non-files. For example, // directories are Opened for ReadDir or fchdir(2). // // If this method is not implemented, the open will always // succeed, and the Node itself will be used as the Handle. // // XXX note about access. XXX OpenFlags. Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (Handle, error) } type NodeReadlinker interface { // Readlink reads a symbolic link. 
Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) } This optional request will be called only for symbolic link nodes. NodeRef is deprecated. It remains here to decrease code churn on FUSE library users. You may remove it from your program now; returning the same Node values are now recognized automatically, without needing NodeRef. type NodeRemover interface { // Remove removes the entry with the given name from // the receiver, which must be a directory. The entry to be removed // may correspond to a file (unlink) or to a directory (rmdir). Remove(ctx context.Context, req *fuse.RemoveRequest) error } type NodeRemovexattrer interface { // Removexattr removes an extended attribute for the name. // // If there is no xattr by that name, returns fuse.ErrNoXattr. Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error } type NodeRenamer interface { Rename(ctx context.Context, req *fuse.RenameRequest, newDir Node) error } type NodeRequestLookuper interface { // Lookup looks up a specific entry in the receiver. // See NodeStringLookuper for more. Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (Node, error) } type NodeSetattrer interface { // Setattr sets the standard metadata for the receiver. // // Note, this is also used to communicate changes in the size of // the file, outside of Writes. // // req.Valid is a bitmask of what fields are actually being set. // For example, the method should not change the mode of the file // unless req.Valid.Mode() is true. Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error } type NodeSetxattrer interface { // Setxattr sets an extended attribute with the given name and // value for the node. Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error } type NodeStringLookuper interface { // Lookup looks up a specific entry in the receiver, // which must be a directory. Lookup should return a Node // corresponding to the entry. 
If the name does not exist in // the directory, Lookup should return ENOENT. // // Lookup need not handle the names "." and "..". Lookup(ctx context.Context, name string) (Node, error) } type NodeSymlinker interface { // Symlink creates a new symbolic link in the receiver, which must be a directory. // // TODO is the above true about directories? Symlink(ctx context.Context, req *fuse.SymlinkRequest) (Node, error) } New returns a new FUSE server ready to serve this kernel FUSE connection. Config may be nil. InvalidateEntry invalidates the kernel cache of the directory entry identified by parent node and entry name. InvalidateNodeAttr invalidates the kernel cache of the attributes of node. Returns fuse.ErrNotCached if the kernel is not currently caching the node. InvalidateNodeData invalidates the kernel cache of the attributes and data of node. Returns fuse.ErrNotCached if the kernel is not currently caching the node. InvalidateNodeDataRange invalidates the kernel cache of the attributes and a range of the data of node. Returns fuse.ErrNotCached if the kernel is not currently caching the node. Serve serves the FUSE connection by making calls to the methods of fs and the Nodes and Handles it makes available. It returns only when the connection has been closed or an unexpected error occurs. A Tree implements a basic read-only directory tree for FUSE. The Nodes contained in it may still be writable. Add adds the path to the tree, resolving to the given node. If path or a prefix of path has already been added to the tree, Add panics. Add is only safe to call before starting to serve requests. Package fs imports 16 packages (graph) and is imported by 517 packages. Updated 2018-04-22. Refresh now. Tools for package owners.
https://godoc.org/bazil.org/fuse/fs
CC-MAIN-2019-51
refinedweb
1,753
52.05
Enhance your monitoring with stateful scripts Posted by Michael Medin at 2012-11-26. Remember the language: What is Lua? Lua scripts are pretty straight forward to write, the biggest issue is that Lua is a bit of an arcane language (if you ask me) with a very basic set of features which means you end up writing a lot of the things you need by hand (in contrast to some other scripting languages such as Python or Perl which have massive libraries with pretty much everything you need already written). The big benefit to using Lua is that it is easily embeddable and requires very little in the form of infrastructure hence it is fully embedded inside NSClient++ and you need nothing else. Thus I try to keep things simple and use python if I need to do something advanced. As a note I would like to add that there are a lot of libraries and such for Lua but due to linkage it is not easy to get many of the native ones to work “out of the box”. Remember the beginning: Writing a simple Lua script Let's get our hands dirty and begin by looking at a simple Lua script and dissect it bit by bit: nscp.print('Hello world from Lua...') function my_command(command, args) nscp.print('Yayyy we got executed: ' .. command) return 'ok', 'Everything is fine', '' end reg = nscp.Registry() reg:simple_query('lua_test', my_command, 'This is a sample Lua script') Since I am not about to teach you Lua scripting (there are plenty of tutorials for that already) I will only quickly walk you through the script. The first line uses the dot notation to access a static function inside the nscp object or namespace. This is similar to System.out.println() in java in many ways. The string you pass to the function is sent to the NSCP log. Then we have a block function … end which creates a function inside our script. The function again uses the print function to tell us it is being executed and then returns a result (think nagios) saying the status is ok with a friendly message. 
Lastly we have some object orientation. We create an instance of the Registry object calling it reg and then use the colon operator (notice colon!!!!) to access the (non static) member function simple_query which registers a query (which is a fancy name for function available to call).: - reg.simple_function(reg,‘lua_’test’, …) - reg:simple_function(‘lua_’test’, …) This is in a way similar to how python handles self but in lua there are different syntaxes. So lets fire this baby up and see how to use this from NSClient++. # rm nsclient.ini (or del nsclient.ini) nscp settings --activate-module LUAScript nscp settings --path /settings/lua/scripts --key foo --set demo_001.lua When I demo something or write tutorials I tend to always start by removing the configuration to make sure there is no residual old configuration somewhere causing havoc. Then the first step is to load the LUAScript module as well as add our script. Next we start NSClient++ using the --log option to skip debug logging. If you have problems you might want to remove this option to see any debug messages which might help pinpoint the problem. nscp test --log info l pts/lua/demo_001.lua:1 Hello world from Lua... l ce\\simple_client.hpp:32 Enter command to inject or exit to terminate... As we can see here we get the message in the script printed as the entire script is executed when NSClient++ is started. Next we use the command “commands” to list all commands in our case the lua_test command is present as that is the one exposed from our script. commands l ce\\simple_client.hpp:54 Commands: l ce\\simple_client.hpp:57 \| lua_test: Tis is a sample Lua script Finally we run the command and then exit. Our command will first print the message and then return an OK message with associated status text. 
lua_test l pts/lua/demo_001.lua:4 Yayyy we got executed: lua_test l ce\\simple_client.hpp:80 OK:Everything is fine exit So there we have it a few simple lines of Lua and we are already on our way to create our state full disc check script. Next up we shall learn about wrapping NSClient++ commands and using state. Remember the past: The power of state: - local core = Core() - code,msg,perf = core:simple_query('lua_test', {'a', 'b'}) In our case we want to check the C drive so we run the following command (due to a rather annoying “feature” in NSClient++ we wont get performance data unless we have a warning or critical criteria): - checkDrive drive=c maxWarn=90% - code,msg,perf = core:simple_query('checkDrive', {'c:', 'maxWarn=90%'}). The script in its entirety looks like this: gperf = '' function drive_statefull(command, args) local core = nscp.Core() code,msg,perf = core:simple_query('checkDriveSize', {'c'}) gperf = gperf .. perf return code, msg, gperf end reg = nscp.Registry() reg:simple_query('check_sdrive', drive_statefull, 'Check if the drive is full (statefull nonsense version)') Now don’t forget to load the CheckDisk module as well using the following command: nscp settings --activate-module CheckDisk A session with nscp test using the script will look like this (again starting it with nscp test --log info):. Remember the future: Making simple predictions. The other thing we need to do is force CheckDriveSize to return the full size value to do this we need to have at least one bounds check so we will add MaxWarn=80%. Thus our checkDriveSize command will look like this: checkDriveSize c 'perf-unit=K MaxWarn=80%. But lets return to our script and do a quick walk-through of what we want the script to do. - Store the date and time as well as the value of the last check. 
- Extract the current value and maximum size from the performance data - Calculate the rate of change (in bytes per second) - Calculate the expected value 7 days from now - If the expected value 7 days from now is above the maximum change the status to critical and set the message That is pretty much it but of course the script will be a bit more code as we need to accomplish all this. The entire script looks like this: last_value = 0 last_time = 0 function split_perfdata(str) for v,w,c,mi,ma in string.gmatch(str, "(%d-)K;(%d-);(%d-);(%d-);(%d\*)") do return v,w,c,mi,ma end return nil end function make_perfdata(v,w,c,mi,ma) return v..'K;'..w..';'..c..';'..mi..';'..ma end function drive_statefull(command, args) -- Reset all variables to default values value = 0 delta = 0 change = 0 extra_perf = '' time = os.time() -- Execute drive check command local core = nscp.Core() code,msg,perf = core:simple_query('checkDriveSize', {'c', 'perf-unit=K', 'MaxWarn=80%'}) -- Extract all (for simplicity we support only one) for k,d in string.gmatch(perf, "'?(.-)'?=([%d;K%%]\*) ?") do if not string.find(k, '%%') then v,w,c,mi,ma = split_perfdata(d) value = v\*1024 max = ma\*1024 end end -- If we have values: Check values and caluclate rates if value ~= 0 and last_value ~= 0 then change = (value - last_value) duration = time-last_time nscp.print('Change: '..change .. ', Duration: '..duration) if change ~= 0 and duration ~= 0 then delta = math.floor(change / duration) -- caluclate predicated value one week from now and create perf data predicted_value = value + (delta\*7\*24\*60\*60) extra_perf = " 'c: +7d'=" .. make_perfdata(math.floor(predicted_value/1024),0,0,0,math.floor(max/1024)) if predicted_value > max then code = 'crit' msg = 'We expect to be full in a week' end end end -- If we have a new value update "last value" if value > 0 then last_value = value last_time = time end return code, msg, perf .. 
extra_perf end reg = nscp.Registry() reg:simple_query('check_sdrive', drive_statefull, 'Check if the drive is full') So there we have it amazing magic to warn if the disk will become full in the next seven days. Now this is a very crude script and not very useful in the real world but more about that in the next section. Remember perfection: My script sucks! Another problem is that we have hard-coded this to work only with the C drive. And worse, we can't even handle more than one drive currently. Neither do we accept any arguments so the boundaries are fixed currently at 80% as well as 100%. Finally this script could easily be used for checking other metrics as well by making the command configurable. But hopefully I have whetted your appetite a bit and hopefully you have enough information here to start creating some stateful scripts of your own!
https://www.medin.name/blog/2012/11/26/enhance-your-monitoring-with-stateful-scripts/
CC-MAIN-2020-05
refinedweb
1,446
61.67